import os
import sys
import glob
import json
import scipy.signal as signal
import numpy.ma as ma
import numpy as np
import matplotlib
import matplotlib.pylab as plt
import matplotlib.dates as mdates
import datetime
import statsmodels.api as sm
lowess = sm.nonparametric.lowess
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
r"""Smooth (and optionally differentiate) data with a Savitzky-Golay filter.
The Savitzky-Golay filter removes high frequency noise from data.
It has the advantage of preserving the original shape and
features of the signal better than other types of filtering
approaches, such as moving-average techniques.
From http://scipy-cookbook.readthedocs.io/items/SavitzkyGolay.html
Parameters
----------
y : array_like, shape (N,)
the values of the time history of the signal.
window_size : int
the length of the window. Must be an odd integer number.
order : int
the order of the polynomial used in the filtering.
Must be less than `window_size` - 1.
deriv: int
the order of the derivative to compute (default = 0 means only smoothing)
Returns
-------
ys : ndarray, shape (N)
the smoothed signal (or its n-th derivative).
Notes
-----
The Savitzky-Golay filter is a type of low-pass filter, particularly
suited for smoothing noisy data. The main idea behind this
approach is to fit, for each point, a least-squares polynomial of the
given order over an odd-sized window centered at
the point.
Examples
--------
t = np.linspace(-4, 4, 500)
y = np.exp( -t**2 ) + np.random.normal(0, 0.05, t.shape)
ysg = savitzky_golay(y, window_size=31, order=4)
import matplotlib.pyplot as plt
plt.plot(t, y, label='Noisy signal')
plt.plot(t, np.exp(-t**2), 'k', lw=1.5, label='Original signal')
plt.plot(t, ysg, 'r', label='Filtered signal')
plt.legend()
plt.show()
References
----------
.. [1] A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of
Data by Simplified Least Squares Procedures. Analytical
Chemistry, 1964, 36 (8), pp 1627-1639.
.. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing
W.H. Press, S.A. Teukolsky, W.T. Vetterling, B.P. Flannery
Cambridge University Press ISBN-13: 9780521880688
"""
import numpy as np
from math import factorial
try:
window_size = np.abs(int(window_size))
order = np.abs(int(order))
except ValueError:
raise ValueError("window_size and order have to be of type int")
if window_size % 2 != 1 or window_size < 1:
raise TypeError("window_size size must be a positive odd number")
if window_size < order + 2:
raise TypeError("window_size is too small for the polynomials order")
order_range = range(order+1)
half_window = (window_size -1) // 2
# precompute coefficients
b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window+1)])
m = np.linalg.pinv(b).A[deriv] * rate**deriv * factorial(deriv)
# pad the signal at the extremes with
# values taken from the signal itself
firstvals = y[0] - np.abs( y[1:half_window+1][::-1] - y[0] )
lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1])
y = np.concatenate((firstvals, y, lastvals))
return np.convolve( m[::-1], y, mode='valid')
matplotlib.rcParams['font.size'] = 8
def process(f, i):
path = 'time_series_images/' + os.path.basename(f) + '.png'
if os.path.exists(path):
print('Exists, skipping ...')
return
j = json.loads(open(f).read())
p = j['features'][0]['properties']
# fr = p['water_area_filled_fraction']
t = p['water_area_time']
v1 = p['water_area_value']
v2 = p['water_area_filled']
t_jrc = p['water_area_time_jrc']
v_jrc = p['water_area_value_jrc']
filled_fr = list(zip(v1, v2))
filled_fr = [(o[1]-o[0])/o[1] for o in filled_fr]
mask = ma.masked_greater_equal(filled_fr, 0.5)
# t = list(ma.masked_array(t, mask).compressed())
# v1 = list(ma.masked_array(v1, mask).compressed())
# v2 = list(ma.masked_array(v2, mask).compressed())
if not len(t):
print('Empty, skipping ...')
return
years = mdates.YearLocator() # every year
v2_filtered = savitzky_golay(np.array(v2), window_size=15, order=4)
# v2_filtered = signal.medfilt(v2, 7)
# v2_filtered = lowess(v2, t)
# v2_filtered = lowess(v2, t, frac=1./50)
t = [datetime.datetime.fromtimestamp(tt / 1000) for tt in t]
t_jrc = [datetime.datetime.fromtimestamp(tt_jrc / 1000) for tt_jrc in t_jrc]
s_scale = 'Scale: {:.2f}'.format(p['scale']) + '$m$'
s_area = 'Area: {:.2f}'.format(p['area']/(1000*1000)) + '$km^2$, ' + '{:.2f}'.format(100 * p['area']/(1000*1000)) + '$ha$'
title = s_scale + ', ' + s_area
fig = plt.figure(figsize=(11, 4))
ax = fig.add_subplot(111)
ax.xaxis.set_major_locator(years)
# fig.autofmt_xdate()
ax.set_xlim([datetime.date(1985, 1, 1), datetime.date(2019, 1, 1)])
ax.grid(color='k', linestyle='-', linewidth=1, alpha=0.2)
plt.title(title)
plt.xticks(rotation=90)
ax.plot(t_jrc, v_jrc, marker='.', c='r', markersize=2, linewidth=0, alpha=0.05)
ax.plot(t, v1, marker='.', c='b', markersize=2, linewidth=0, alpha=0.05)
ax.plot(t, v2, marker='.', c='k', markersize=3, linewidth=0, alpha=0.8)
# for SG
if len(t) != len(v2_filtered):
print('Bad, shapes are not equal, skipping line plotting ...')
else:
ax.plot(t, v2_filtered, marker='.', c='k', markersize=0, linewidth=2, alpha=0.1)
# for LOWESS
# v2_filtered_t = [datetime.datetime.fromtimestamp(t / 1000) for t in v2_filtered[:, 0]]
# ax.plot(v2_filtered_t, v2_filtered[:, 1], marker='.', c='k', markersize=0, linewidth=2, alpha=0.1)
path = 'time_series_images/' + os.path.basename(f) + '.png'
print(str(i) + ' ' + path)
plt.tight_layout()
plt.savefig(path, dpi=150)
plt.close()
# ========================== JRC
# fig = plt.figure(figsize=(11, 4))
# ax = fig.add_subplot(111)
# ax.xaxis.set_major_locator(years)
# ax.set_xlim([datetime.date(1985, 1, 1), datetime.date(2019, 1, 1)])
# ax.grid(color='k', linestyle='-', linewidth=1, alpha=0.2)
# plt.title(title)
# plt.xticks(rotation=90)
# ax.plot(t_jrc, v_jrc, marker='.', c='r', markersize=2, linewidth=0, alpha=0.8)
# ax.plot(t, v1, marker='.', c='b', markersize=2, linewidth=0, alpha=0.05)
# ax.plot(t, v2, marker='.', c='k', markersize=3, linewidth=0, alpha=0.05)
# for SG
# if len(t) != len(v2_filtered):
# print('Bad, shapes are not equal, skipping line plotting ...')
# else:
# ax.plot(t, v2_filtered, marker='.', c='k', markersize=0, linewidth=2, alpha=0.1)
# path = 'time_series_images/' + os.path.basename(f) + '-jrc.png'
# print(str(i) + ' ' + path)
# plt.tight_layout()
# plt.savefig(path, dpi=150)
# plt.close()
offset = 0
for (i, f) in enumerate(glob.glob('time_series/*.geojson')[offset:]):
print('Processing ' + str(i) + ' ...')
process(f, i + offset)
|
python
|
#!/usr/bin/env python
#
# Copyright 2019 YugaByte, Inc. and Contributors
#
# Licensed under the Polyform Free Trial License 1.0.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# https://github.com/YugaByte/yugabyte-db/blob/master/licenses/POLYFORM-FREE-TRIAL-LICENSE-1.0.0.txt
from ybops.cloud.common.base import AbstractPerCloudCommand
from ybops.cloud.common.command import InstanceCommand
from ybops.cloud.common.method import ConfigureInstancesMethod, ListInstancesMethod, \
InitYSQLMethod, CronCheckMethod
from ybops.cloud.onprem.method import OnPremCreateInstancesMethod, OnPremDestroyInstancesMethod, \
OnPremProvisionInstancesMethod, OnPremValidateMethod, \
OnPremFillInstanceProvisionTemplateMethod, OnPremListInstancesMethod
class OnPremInstanceCommand(InstanceCommand):
"""Subclass for on premise specific instance command baseclass. Supplies overrides for method
hooks.
"""
def __init__(self):
super(OnPremInstanceCommand, self).__init__()
def add_methods(self):
self.add_method(OnPremProvisionInstancesMethod(self))
self.add_method(OnPremCreateInstancesMethod(self))
self.add_method(ConfigureInstancesMethod(self))
self.add_method(OnPremDestroyInstancesMethod(self))
self.add_method(OnPremListInstancesMethod(self))
self.add_method(OnPremValidateMethod(self))
self.add_method(OnPremFillInstanceProvisionTemplateMethod(self))
self.add_method(InitYSQLMethod(self))
self.add_method(CronCheckMethod(self))
|
python
|
from __future__ import print_function
import json
import logging
import sys
import os
this_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append("{0}/../lib".format(this_dir))
sys.path.append("{0}/../src".format(this_dir))
from jsonschema import validate
from generator.generator import convert_to_imacro
log = logging.getLogger()
log.setLevel(logging.DEBUG)
def handler(event, context):
# input_json = json.dumps(event)
with open(os.path.join(this_dir, '../resources/schema.json'), 'r') as myfile:
schema = json.loads(myfile.read())
try:
validate(event, schema)
except Exception as e:
return "The input failed validation\n{0}".format(repr(e))
try:
output = convert_to_imacro(event)
except Exception as e:
return "An internal error occured during response generation\n{0}".format(repr(e))
return output
|
python
|
import argparse
import traceback
import warnings
import torch
import wandb
from gym_carla.envs.carla_env import CarlaEnv
from gym_carla.envs.carla_pid_env import CarlaPidEnv
from termcolor import colored
from torch.utils.data import DataLoader
from bc.train_bc import get_collate_fn
from models.carlaAffordancesDataset import HLCAffordanceDataset, AffordancesDataset
from sac.replay_buffer import OnlineReplayBuffer
from sac.sac_agent import SACAgent
from sac.trainer import SACTrainer
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="SAC Trainer",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# carla parameters
carla_config = parser.add_argument_group('CARLA config')
carla_config.add_argument('--host', default='172.18.0.1', type=str, help='IP address of CARLA host.')
carla_config.add_argument('--port', default=2008, type=int, help='Port number of CARLA host.')
carla_config.add_argument('--vehicles', default=100, type=int, help='Number of vehicles in the simulation.')
carla_config.add_argument('--walkers', default=50, type=int, help='Number of walkers in the simulation.')
# SAC parameters
rl_group = parser.add_argument_group('RL Config')
rl_group.add_argument('--num-seed', default=2000, type=int, help='Number of seed steps before starting to train.')
rl_group.add_argument('--control-frequency', default=4, type=int, help='Number of times that a control signal '
'is going to be repeated to the environment.')
rl_group.add_argument('--act-mode', default="pid", type=str, help="Action space.")
rl_group.add_argument('--max-episode-steps', default=200, type=int, help='Maximum number of steps per episode.')
rl_group.add_argument('--num-eval-episodes', default=3, type=int, help='Number of evaluation episodes.')
rl_group.add_argument('--num-train-steps', default=1e6, type=int, help='Number of training steps.')
rl_group.add_argument('--eval-frequency', default=10, type=int, help='number of episodes between evaluations.')
rl_group.add_argument('--learn-temperature', action='store_true', help='Whether to learn the alpha value or not.')
rl_group.add_argument('--reward-scale', default=1, type=float, help='Reward scale factor (positive)')
rl_group.add_argument('--speed-reward-weight', default=1, type=float, help='Speed reward weight.')
rl_group.add_argument('--collision-reward-weight', default=1, type=float, help='Collision reward weight')
rl_group.add_argument('--lane-distance-reward-weight', default=1, type=float, help='Lane distance reward weight')
models_parameters = parser.add_argument_group('Actor-Critic config')
models_parameters.add_argument('--actor-hidden-dim', type=int, default=128, help='Size of hidden layer in the '
'actor model.')
models_parameters.add_argument('--critic-hidden-dim', type=int, default=128, help='Size of hidden layer in the '
'critic model.')
models_parameters.add_argument('--actor-weights', type=str, default=None, help='Path to actor weights')
models_parameters.add_argument('--critic-weights', type=str, default=None, help='Path to critic weights')
loss_parameters = parser.add_argument_group('Loss parameters')
loss_parameters.add_argument('--actor-l2', type=float, default=4e-2,
help='L2 regularization for the actor model.')
loss_parameters.add_argument('--critic-l2', type=float, default=4e-2,
help='L2 regularization for the critic model.')
buffer_group = parser.add_argument_group('Buffer config')
buffer_group.add_argument('--batch-size', default=1024, type=int, help='Batch size.')
buffer_group.add_argument('--online-memory-size', default=8192, type=int, help='Number of steps to be stored in the '
'online buffer.')
# in case of using behavioral cloning
bc_group = parser.add_argument_group('Behavioral cloning config')
bc_group.add_argument('--bc', default=None, type=str, help='path to dataset (without extensions)')
bc_group.add_argument('--wandb', action='store_true', help='Whether or not to use wandb')
args = parser.parse_args()
warnings.filterwarnings("ignore")
device = 'cuda' if torch.cuda.is_available() else 'cpu'
control_action_dim = 2 if args.act_mode == "pid" else 3
action_range = (-1, 1) if args.act_mode == "raw" else (-1, 5)
offline_dataset_path = args.bc
if args.wandb:
wandb.init(project='tsad', entity='autonomous-driving')
carla_env = None
if args.eval_frequency > 0:
print(colored("[*] Initializing environment", "white"))
desired_speed = 6
env_params = {
# carla connection parameters
'host': args.host,
'port': args.port, # connection port
'town': 'Town01', # which town to simulate
'traffic_manager_port': 8000,
# simulation parameters
'verbose': False,
'vehicles': args.vehicles, # number of vehicles in the simulation
'walkers': args.walkers, # number of walkers in the simulation
'obs_size': 224, # sensor width and height
'max_past_step': 1, # the number of past steps to draw
'dt': 1 / 30, # time interval between two frames
'reward_weights': [1, 1, 1], # reward weights [speed, collision, lane distance]
'continuous_steer_range': [-1, 1],
'ego_vehicle_filter': 'vehicle.lincoln*', # filter for defining ego vehicle
'max_time_episode': args.max_episode_steps, # maximum timesteps per episode
'max_waypt': 12, # maximum number of waypoints
'd_behind': 12, # distance behind the ego vehicle (meter)
'out_lane_thres': 2.0, # threshold for out of lane
'desired_speed': desired_speed, # desired speed (m/s)
'speed_reduction_at_intersection': 0.75,
'max_ego_spawn_times': 200, # maximum times to spawn ego vehicle
}
if args.act_mode == "pid":
env_params.update({
'continuous_speed_range': [0, desired_speed]
})
carla_env = CarlaPidEnv(env_params)
else:
env_params.update({
'continuous_throttle_range': [0, 1],
'continuous_brake_range': [0, 1]
})
carla_env = CarlaEnv(env_params)
carla_env.reset()
print(colored(f"[+] Environment ready "
f"(max_steps={args.max_episode_steps},"
f"action_frequency={args.control_frequency})!", "green"))
print(colored(f"[*] Initializing data structures", "white"))
online_replay_buffer = OnlineReplayBuffer(args.online_memory_size)
bc_loaders = None
if offline_dataset_path:
print(colored("RL + BC mode"))
dataset = AffordancesDataset(args.bc)
custom_collate_fn = get_collate_fn(args.act_mode)
bc_loaders = {hlc: DataLoader(HLCAffordanceDataset(dataset, hlc=hlc),
batch_size=args.batch_size,
collate_fn=custom_collate_fn,
shuffle=True) for hlc in [0, 1, 2, 3]}
else:
print(colored("Full DRL mode"))
print(colored("[*] Data structures are ready!", "green"))
agent = SACAgent(observation_dim=15,
action_range=action_range,
device=device,
action_dim=control_action_dim,
batch_size=args.batch_size,
actor_weight_decay=args.actor_l2,
critic_weight_decay=args.critic_l2,
learnable_temperature=args.learn_temperature)
agent.train(True)
print(colored("Training", "white"))
trainer = SACTrainer(env=carla_env,
agent=agent,
buffer=online_replay_buffer,
dataloaders=bc_loaders,
device=device,
eval_frequency=args.eval_frequency,
num_seed_steps=args.num_seed,
num_train_steps=args.num_train_steps,
num_eval_episodes=args.num_eval_episodes,
seed=42)
try:
trainer.run()
except Exception as e:
print(colored("\nEarly stopping due to exception", "red"))
traceback.print_exc()
print(e)
finally:
print(colored("\nTraning finished!", "green"))
trainer.end()
|
python
|
from odio_urdf import *
def assign_inertia(im):
return Inertia(ixx=im[0], ixy=im[1], ixz=im[2], iyy=im[3], iyz=im[4], izz=im[5])
my_robot = Robot("walker_a")
contact = Contact(Lateral_Friction("100"))
s = 1
inertia_matrix_body = [0.6363636364, 4.908571429, 4.51012987, 4.51012987, 0.6363636364, 4.908571429]
inertia_arm_0 = [3.11E-04, 0.003766478343, 0.007532956685, 0.007532956685, 2.25E-03, 0.003766478343]
inertia_arm_1 = [0.4103896104, 0.003291744093, 0.04189009822, 0.04189009822, 0.04194319087, 0.003291744093]
mass_body = str(40 * s * s * s)
mass_arm_0 = str(2 * s * s * s)
mass_arm_1 = str(2 * s * s * s)
joint_X_0_loc = [str( 0.35*s) +", " + str(-0.3 *s) + ", 0",
str( 0.35*s) +", " + str( 0.3 *s) + ", 0",
str(-0.35*s) +", " + str( 0.3 *s) + ", 0",
str(-0.35*s) +", " + str(-0.3 *s) + ", 0"]
joint_X_0_rot = ["0, 0, 0", "0, 0, 0", "0, 0, 3.14159", "0, 0, 3.14159"]
leg_X_0_inertial = str(0.05*s) + ", 0, 0"
joint_X_1_loc = str(0.15*s) + ", 0, 0"
leg_X_1_inertial = "0, 0, " + str(-0.21*s)
joint_X_2_loc = "0, 0, " + str(-0.50*s)
leg_X_2_inertial = "0, 0, " + str(-0.21*s)
inertia_matrix_body = [x * s * s for x in inertia_matrix_body]
inertia_arm_0 = [x * s * s for x in inertia_arm_0]
inertia_arm_1 = [x * s * s for x in inertia_arm_1]
inertia_body = assign_inertia(inertia_matrix_body)
inertia_arm_0 = assign_inertia(inertia_arm_0)
inertia_arm_1 = assign_inertia(inertia_arm_1)
base_link = Link("base_link", contact,
Inertial(inertia_body, Mass(mass_body)),
Visual(Geometry(Mesh(filename="body.obj", scale=f"{s} {s} {s}"))),
Collision(Geometry(Mesh(filename="body.obj", scale=f"{s} {s} {s}")))
)
link_X_0 = []
for i in range(4):
link_X_0.append(Link("link_" + str(i) + "_0", contact,
Inertial(inertia_arm_0, Mass(mass_arm_0), Origin(leg_X_0_inertial)),
Visual(Geometry(Mesh(filename="leg_X_0.obj", scale=f"{s} {s} {s}"))),
Collision(Geometry(Mesh(filename="leg_X_0.obj", scale=f"{s} {s} {s}")))
))
link_X_1 = []
for i in range(4):
link_X_1.append(Link(f"link_{i}_1", contact,
Inertial(inertia_arm_0, Mass(mass_arm_0), Origin(leg_X_1_inertial)),
Visual(Geometry(Mesh(filename="leg_X_1.obj", scale=f"{s} {s} {s}"))),
Collision(Geometry(Mesh(filename="leg_X_1.obj", scale=f"{s} {s} {s}")))
))
link_X_2 = []
for i in range(4):
link_X_2.append(Link(f"link_{i}_2", contact,
Inertial(inertia_arm_0, Mass(mass_arm_0), Origin(leg_X_1_inertial)),
Visual(Geometry(Mesh(filename="leg_X_1.obj", scale=f"{s} {s} {s}"))),
Collision(Geometry(Mesh(filename="leg_X_1.obj", scale=f"{s} {s} {s}")))
))
#Add first elements to robot
my_robot(base_link,
link_X_0[0], link_X_1[0], link_X_2[0],
link_X_0[1], link_X_1[1], link_X_2[1],
link_X_0[2], link_X_1[2], link_X_2[2],
link_X_0[3], link_X_1[3], link_X_2[3])
joint_X_0 = []
for i in range(4):
joint_X_0.append(Joint(Parent("base_link"), Child("link_" + str(i) + "_0"),
Origin(xyz=joint_X_0_loc[i], rpy=joint_X_0_rot[i]),
Axis("1, 0, 0"), type="continuous", name=f"joint_{i}_0"))
joint_X_1 = []
for i in range(4):
joint_X_1.append(Joint(Parent("link_{}_0".format(i)), Child(f"link_{i}_1"), Origin(xyz=joint_X_1_loc),
Axis("0, 1, 0"), type="continuous", name=f"joint_{i}_1"))
joint_X_2 = []
for i in range(4):
joint_X_2.append(Joint(Parent("link_{}_1".format(i)), Child(f"link_{i}_2"), Origin(xyz=joint_X_2_loc),
Axis("0, 1, 0"), type="continuous", name=f"joint_{i}_2"))
my_robot(joint_X_0[0], joint_X_1[0], joint_X_2[0],
joint_X_0[1], joint_X_1[1], joint_X_2[1],
joint_X_0[2], joint_X_1[2], joint_X_2[2],
joint_X_0[3], joint_X_1[3], joint_X_2[3],)
f = open("walker_a/urdf/walker_a_0_5.urdf", "w")
f.write(str(my_robot))
f.close()
|
python
|
import argparse
import constants
from data_support.tfrecord_wrapper import TFRecordWriter
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("data_dir", type=str, default='../resources/tf_data',
help="Directory where tfrecord files are stored")
parser.add_argument("--model",
default=f"bert-{constants.SIZE_BASE}-{constants.LANGUAGE_MULTILINGUAL}-{constants.CASING_CASED}",
help="Transformer model name (see: https://huggingface.co/transformers/pretrained_models.html)")
args = parser.parse_args()
models = [args.model]
data_spec = [
# ('train', 'en', 'dep_distance,dep_depth,lex_distance,lex_depth,pos_distance,pos_depth,rnd_distance,rnd_depth',
# "/net/data/universal-dependencies-2.6/UD_English-EWT/en_ewt-ud-train.conllu"),
# ('dev', 'en', 'dep_distance,dep_depth,lex_distance,lex_depth,pos_distance,pos_depth,rnd_distance,rnd_depth',
# "/net/data/universal-dependencies-2.6/UD_English-EWT/en_ewt-ud-dev.conllu"),
# ('test', 'en', 'dep_distance,dep_depth,lex_distance,lex_depth,pos_distance,pos_depth,rnd_distance,rnd_depth',
# "/net/data/universal-dependencies-2.6/UD_English-EWT/en_ewt-ud-test.conllu"),
# ('train', 'es','dep_distance,dep_depth,lex_distance,lex_depth,rnd_distance,rnd_depth,pos_distance,pos_depth',
# "/net/data/universal-dependencies-2.6/UD_Spanish-AnCora/es_ancora-ud-train.conllu"),
# ('dev', 'es', 'dep_distance,dep_depth,lex_distance,lex_depth,rnd_distance,rnd_depth,pos_distance,pos_depth',
# "/net/data/universal-dependencies-2.6/UD_Spanish-AnCora/es_ancora-ud-dev.conllu"),
# ('test', 'es', 'dep_distance,dep_depth,lex_distance,lex_depth,rnd_distance,rnd_depth,pos_distance,pos_depth',
# "/net/data/universal-dependencies-2.6/UD_Spanish-AnCora/es_ancora-ud-test.conllu"),
# ('train', 'sl','dep_distance,dep_depth,lex_distance,lex_depth,rnd_distance,rnd_depth,pos_distance,pos_depth',
# "/net/data/universal-dependencies-2.6/UD_Slovenian-SSJ/sl_ssj-ud-train.conllu"),
# ('dev', 'sl', 'dep_distance,dep_depth,lex_distance,lex_depth,rnd_distance,rnd_depth,pos_distance,pos_depth',
# "/net/data/universal-dependencies-2.6/UD_Slovenian-SSJ/sl_ssj-ud-dev.conllu"),
# ('test', 'sl', 'dep_distance,dep_depth,lex_distance,lex_depth,rnd_distance,rnd_depth,pos_distance,pos_depth',
# "/net/data/universal-dependencies-2.6/UD_Slovenian-SSJ/sl_ssj-ud-test.conllu"),
# ('train', 'zh','dep_distance,dep_depth,lex_distance,lex_depth,rnd_distance,rnd_depth,pos_distance,pos_depth',
# "/net/data/universal-dependencies-2.6/UD_Chinese-GSD/zh_gsd-ud-train.conllu"),
# ('dev', 'zh', 'dep_distance,dep_depth,lex_distance,lex_depth,rnd_distance,rnd_depth,pos_distance,pos_depth',
# "/net/data/universal-dependencies-2.6/UD_Chinese-GSD/zh_gsd-ud-dev.conllu"),
# ('test', 'zh', 'dep_distance,dep_depth,lex_distance,lex_depth,rnd_distance,rnd_depth,pos_distance,pos_depth',
# "/net/data/universal-dependencies-2.6/UD_Chinese-GSD/zh_gsd-ud-test.conllu"),
# ('train', 'id', 'dep_distance,dep_depth,lex_distance,lex_depth,rnd_distance,rnd_depth,pos_distance,pos_depth',
# "/net/data/universal-dependencies-2.6/UD_Indonesian-GSD/id_gsd-ud-train.conllu"),
# ('dev', 'id', 'dep_distance,dep_depth,lex_distance,lex_depth,rnd_distance,rnd_depth,pos_distance,pos_depth',
# "/net/data/universal-dependencies-2.6/UD_Indonesian-GSD/id_gsd-ud-dev.conllu"),
# ('test', 'id', 'dep_distance,dep_depth,lex_distance,lex_depth,rnd_distance,rnd_depth,pos_distance,pos_depth',
# "/net/data/universal-dependencies-2.6/UD_Indonesian-GSD/id_gsd-ud-test.conllu")
('train', 'fi', 'dep_distance,dep_depth,lex_distance,lex_depth,rnd_distance,rnd_depth,pos_distance,pos_depth',
"/net/data/universal-dependencies-2.6/UD_Finnish-TDT/fi_tdt-ud-train.conllu"),
('dev', 'fi', 'dep_distance,dep_depth,lex_distance,lex_depth,rnd_distance,rnd_depth,pos_distance,pos_depth',
"/net/data/universal-dependencies-2.6/UD_Finnish-TDT/fi_tdt-ud-dev.conllu"),
('test', 'fi', 'dep_distance,dep_depth,lex_distance,lex_depth,rnd_distance,rnd_depth,pos_distance,pos_depth',
"/net/data/universal-dependencies-2.6/UD_Finnish-TDT/fi_tdt-ud-test.conllu"),
('train', 'ar', 'dep_distance,dep_depth,lex_distance,lex_depth,rnd_distance,rnd_depth,pos_distance,pos_depth',
"/net/data/universal-dependencies-2.6/UD_Arabic-PADT/ar_padt-ud-train.conllu"),
('dev', 'ar', 'dep_distance,dep_depth,lex_distance,lex_depth,rnd_distance,rnd_depth,pos_distance,pos_depth',
"/net/data/universal-dependencies-2.6/UD_Arabic-PADT/ar_padt-ud-dev.conllu"),
('test', 'ar', 'dep_distance,dep_depth,lex_distance,lex_depth,rnd_distance,rnd_depth,pos_distance,pos_depth',
"/net/data/universal-dependencies-2.6/UD_Arabic-PADT/ar_padt-ud-test.conllu"),
('train', 'fr', 'dep_distance,dep_depth,lex_distance,lex_depth,rnd_distance,rnd_depth,pos_distance,pos_depth',
"/net/data/universal-dependencies-2.6/UD_French-GSD/fr_gsd-ud-train.conllu"),
('dev', 'fr', 'dep_distance,dep_depth,lex_distance,lex_depth,rnd_distance,rnd_depth,pos_distance,pos_depth',
"/net/data/universal-dependencies-2.6/UD_French-GSD/fr_gsd-ud-dev.conllu"),
('test', 'fr', 'dep_distance,dep_depth,lex_distance,lex_depth,rnd_distance,rnd_depth,pos_distance,pos_depth',
"/net/data/universal-dependencies-2.6/UD_French-GSD/fr_gsd-ud-test.conllu"),
('train', 'eu', 'dep_distance,dep_depth,lex_distance,lex_depth,rnd_distance,rnd_depth,pos_distance,pos_depth',
"/net/data/universal-dependencies-2.6/UD_Basque-BDT/eu_bdt-ud-train.conllu"),
('dev', 'eu', 'dep_distance,dep_depth,lex_distance,lex_depth,rnd_distance,rnd_depth,pos_distance,pos_depth',
"/net/data/universal-dependencies-2.6/UD_Basque-BDT/eu_bdt-ud-dev.conllu"),
('test', 'eu', 'dep_distance,dep_depth,lex_distance,lex_depth,rnd_distance,rnd_depth,pos_distance,pos_depth',
"/net/data/universal-dependencies-2.6/UD_Basque-BDT/eu_bdt-ud-test.conllu")
]
tf_writer = TFRecordWriter(models, data_spec, args.data_dir)
tf_writer.compute_and_save(args.data_dir)
|
python
|
import copy
import json
import time
from io import open
from .exceptions import (
WebpackBundleLookupError,
WebpackError,
WebpackLoaderBadStatsError,
WebpackLoaderTimeoutError,
)
class WebpackLoader(object):
_assets = {}
def __init__(self, name, config):
self.name = name
self.config = config
def load_assets(self):
# TODO
# poll when debugging and block request until bundle is compiled
# or the build times out
try:
with open(self.config["MANIFEST_FILE"], encoding="utf-8") as f:
return json.load(f)
except IOError:
raise IOError(
"Error reading {0}. Are you sure webpack has generated "
"the file and the path is correct?".format(self.config["MANIFEST_FILE"])
)
def get_assets(self):
if self.config["CACHE"]:
if self.name not in self._assets:
self._assets[self.name] = self.load_assets()
return self._assets[self.name]
return self.load_assets()
def filter_chunks(self, chunks):
for chunk in chunks:
ignore = any(regex.match(chunk["url"]) for regex in self.config["ignores"])
if not ignore:
chunk["url"] = self.get_chunk_url(chunk)
yield chunk
def get_chunk_url(self, chunk):
url = chunk["url"]
if self.config.get("web_framework", None) == "django":
from django.contrib.staticfiles.storage import staticfiles_storage
from django.conf import settings
if url.startswith("http"):
# webpack dev server
return url
else:
prefix = settings.STATIC_URL
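# strip the STATIC_URL prefix when present: `url.startswith(prefix) and len(prefix)`
# evaluates to len(prefix) if the prefix matches, otherwise to False (i.e. 0),
# so the slice below leaves the url unchanged when there is no prefix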
url_without_static_prefix = url[
url.startswith(prefix) and len(prefix) :
]
return staticfiles_storage.url(url_without_static_prefix)
else:
return url
def get_bundle(self, bundle_name):
assets = copy.copy(self.get_assets())
try:
# keep the order
js = assets["entrypoints"][bundle_name]["assets"].get("js", [])
css = assets["entrypoints"][bundle_name]["assets"].get("css", [])
js_css = js + css
assets.pop("entrypoints")
# so url is the key
reversed_assets = {value: key for (key, value) in assets.items()}
chunks = [{"name": reversed_assets[url], "url": url,} for url in js_css]
except Exception:
raise WebpackBundleLookupError(
"Cannot resolve bundle {0}.".format(bundle_name)
)
return self.filter_chunks(chunks)
|
python
|
import os
import shutil
from ptest.assertion import assert_true
from ptest.decorator import TestClass, BeforeMethod, Test, AfterMethod
from watchdog.events import FileCreatedEvent
from shirp.event import EventConf
from shirp.handler import HDFSHandler
HDFS_GROUP = "grp-hdfs"
@TestClass(run_mode="singleline")
class HDFSHandlerTest:
def __init__(self, hdfs_put_handler=None, hdfs_get_handler=None, put_event_conf=None, get_event_conf=None):
"""
:param hdfs_put_handler:
:type hdfs_put_handler: HDFSHandler
:param hdfs_get_handler:
:type hdfs_get_handler: HDFSHandler
:param put_event_conf:
:type put_event_conf: EventConf
:param get_event_conf:
:type get_event_conf: EventConf
"""
self.hdfs_put_handler = hdfs_put_handler
self.hdfs_get_handler = hdfs_get_handler
self.put_event_conf = put_event_conf
self.get_event_conf = get_event_conf
self.current_dir = os.path.dirname(os.path.realpath(__file__))
self.result = False
@BeforeMethod(group=HDFS_GROUP)
def before_hdfs_test(self):
self.put_event_conf = EventConf(True, "test move", "hdfs", HDFSHandler.TYPE_PUT,
"D:\\Users\\Cedric\\PycharmProjects\\event-manager\\rep_test\\in",
["test_????.txt"], "/user/hduser",
{"hdfsUrl": "http://192.168.1.24:50070", "hdfsUser": "hduser"})
self.get_event_conf = EventConf(True, "test move", "hdfs", HDFSHandler.TYPE_GET, "/user/hduser",
["test_????.txt"],
"D:\\Users\\Cedric\\PycharmProjects\\event-manager\\rep_test\\out",
{"hdfsUrl": "http://192.168.1.24:50070", "hdfsUser": "hduser"})
HDFSHandler.FILE_LOG = self.current_dir + os.path.sep + "events.log"
self.hdfs_put_handler = HDFSHandler(self.put_event_conf, self.put_event_conf.subtype)
self.hdfs_get_handler = HDFSHandler(self.get_event_conf, self.get_event_conf.subtype)
@Test(group=HDFS_GROUP)
def move_test(self):
shutil.copy("D:\\Users\\Cedric\\PycharmProjects\\event-manager\\rep_test\\test_2208.txt",
self.put_event_conf.directory)
event = FileCreatedEvent(self.put_event_conf.directory + os.path.sep + "test_2208.txt")
assert_true(self.hdfs_put_handler.on_created(event))
assert_true(self.hdfs_get_handler.process("/user/hduser/test_2208.txt"))
assert_true(os.path.exists(self.get_event_conf.destination + os.path.sep + "test_2208.txt"))
@AfterMethod(group=HDFS_GROUP)
def after_hdfs_test(self):
os.remove(self.get_event_conf.destination + os.path.sep + "test_2208.txt")
|
python
|
"""Project-level configuration and state."""
import os.path
class Project(object):
"""A Project tracks the overall build configuration, filesystem paths,
registered plugins/keys, etc. and provides services that relate to that."""
def __init__(self, root, build_dir):
"""Creates a Project.
root: path to root of project structure.
build_dir: path to build directory.
"""
self.root = root
self.build_dir = build_dir
self.named_envs = {}
self.packages = {}
self.ninja_rules = {
'cobble_symlink_product': {
'command': 'ln -sf $target $out',
'description': 'SYMLINK $out',
},
}
# TODO: rename something like static_path?
def inpath(self, *parts):
"""Creates a path to an input resource within the project tree by
separating the given path components by the path separator
character."""
return os.path.join(self.root, *parts)
def outpath(self, env, *parts):
"""Creates a path to an output resource within the build directory.
Output resources are distinguished by their environments; the same
product may be built several times, in different environments, and
stored in separate places. Thus, 'outpath' requires the environment to
be provided.
"""
return os.path.join(self.build_dir, 'env', env.digest, *parts)
def linkpath(self, *parts):
"""Creates a path into the 'latest' symlinks in the build directory."""
return os.path.join(self.build_dir, 'latest', *parts)
def add_package(self, package):
"""Registers 'package' with the project."""
assert package.relpath not in self.packages, \
"duplicate package at %s" % package.relpath
assert package.project is self, "package project misconfigured"
self.packages[package.relpath] = package
def find_target(self, ident):
"""Finds the 'Target' named by an 'ident'.
'find_target' at the 'Project' level requires absolute identifiers,
e.g. '//foo/bar:target'.
"""
assert ident.startswith('//'), "bad identifier: %r" % ident
parts = ident[2:].split(':')
if len(parts) == 1:
# Target name not specified
pkg_path = parts[0]
target_name = os.path.basename(pkg_path)
elif len(parts) == 2:
# Explicit target name
pkg_path = parts[0]
target_name = parts[1]
else:
raise Exception('Too many colons in identifier: %r' % ident)
assert pkg_path in self.packages, \
"Reference to unknown package: %r" % ident
assert target_name in self.packages[pkg_path].targets, \
"Target %s not found in package %s" % (target_name, pkg_path)
return self.packages[pkg_path].targets[target_name]
def define_environment(self, name, env):
"""Defines a named environment in the project.
Named environments are defined in BUILD.conf, and provide the basis for
all other environments.
"""
assert name not in self.named_envs, \
"more than one environment named %s" % name
self.named_envs[name] = env
def add_ninja_rules(self, rules):
"""Extends the set of Ninja rules used in the project.
Ninja rules are represented as dicts with keys matching the attributes
of Ninja's rule syntax.
"""
for k, v in rules.items():
if k in self.ninja_rules:
assert v == self.ninja_rules[k], \
"ninja rule %s defined incompatibly in multiple places" % k
else:
self.ninja_rules[k] = v
def files(self):
"""Returns an iterator over the build files and BUILD.conf."""
yield self.inpath('BUILD.conf')
for p in self.packages.values():
yield p.inpath('BUILD')
def targets(self):
"""Returns an iterator over all Targets in the project."""
for p in self.packages.values():
for t in p.targets.values():
yield t
def concrete_targets(self):
"""Returns an iterator over the concrete Targets in the project."""
return filter(lambda t: t.concrete, self.targets())
class Package(object):
def __init__(self, project, relpath):
"""Creates a Package and registers it with 'project'."""
self.project = project
self.relpath = os.path.normpath(relpath)
self.targets = {}
project.add_package(self)
def add_target(self, target):
"""Adds a 'Target' to the package."""
assert target.name not in self.targets, \
"duplicate target %s in package %s" % (target.name, self.relpath)
self.targets[target.name] = target
def outpath(self, env, *parts):
"""Creates a path to an output resource within this package."""
return self.project.outpath(env, self.relpath, *parts)
def inpath(self, *parts):
"""Creates a path to an input resource within this package."""
return self.project.inpath(self.relpath, *parts)
def linkpath(self, *parts):
"""Creates a path into the 'latest' symlinks for this package."""
return self.project.linkpath(self.relpath, *parts)
def make_absolute(self, ident):
"""Makes an ident, which may be relative to this package, absolute."""
if ident.startswith('//'):
return ident
if ident.startswith(':'):
return '//' + self.relpath + ident
raise Exception('Unexpected ident: %r' % ident)
def find_target(self, ident):
"""Finds a target relative to this package. This enables local
references using the ':foo' syntax."""
if ident.startswith(':'):
return self.project.find_target('//' + self.relpath + ident)
return self.project.find_target(ident)
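# A minimal usage sketch (illustrative only; the root path, package path and target
# name below are hypothetical, not part of this module):
#
#   project = Project(root='/src/myproj', build_dir='/src/myproj/build')
#   package = Package(project, 'lib/foo')       # registers itself with the project
#   package.inpath('BUILD')                     # -> /src/myproj/lib/foo/BUILD
#   package.make_absolute(':foo')               # -> '//lib/foo:foo'
#   project.find_target('//lib/foo:foo')        # requires the target to be registered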
|
python
|
# -*- coding: utf-8 -*-
import json
from bs4 import BeautifulSoup
from django.contrib.auth import get_user_model
from django.test import TestCase
class BaseTestCase(TestCase):
fixtures = [
'users.json',
]
USER_PWD = 'password'
# Superuser - admin/adminpassword
# User - neo/password
@staticmethod
def get_soup(response):
return BeautifulSoup(response.content, 'html.parser')
@staticmethod
def get_json(response):
return json.loads(response.content.decode('utf-8'))
def setUp(self):
User = get_user_model()
self.user = User.objects.get(username='neo')
def login(self, username='neo'):
self.client.logout()
self.client.login(
username=username,
password=self.USER_PWD
)
class BaseTestLoginCase(BaseTestCase):
def setUp(self):
super(BaseTestLoginCase, self).setUp()
self.login()
|
python
|
from django.contrib import sitemaps
from django.urls import reverse
from booru.models import Post
class PostSitemap(sitemaps.Sitemap):
priority = 0.8
changefreq = 'daily'
def items(self):
return Post.objects.approved()
def location(self, item):
return item.get_absolute_url()
def lastmod(self, item):
return item.update_timestamp
class TagsSitemap(sitemaps.Sitemap):
priority = 0.5
def items(self):
return Post.tags.most_common()[:25]
def location(self, item):
return item.get_search_url()
def lastmod(self, item):
return item.update_timestamp
class PostListSitemap(sitemaps.Sitemap):
priority = 0.6
changefreq = 'daily'
def items(self):
return ['posts']
def location(self, item):
return reverse('booru:posts')
|
python
|
from .utils import check_token
from .models import Entry
from .checks_models import EntryCheck
open_entry_checks = EntryCheck()
open_entry = Entry()
|
python
|
import requests
import pytest
from helpers import (create_user,
get_random_email,
login_user,
refresh_token, get_user)
from requests import HTTPError
HOST = 'localhost:5000'
def test_register():
email = get_random_email()
new_user = create_user(email, 'pass')
assert new_user['email'] == email
def test_register_user_twice():
email = get_random_email()
create_user(email, 'pass')
with pytest.raises(requests.HTTPError):
create_user(email, 'pass')
def test_login():
email = get_random_email()
create_user(email, 'pass')
token = login_user(email, 'pass')
token = refresh_token(token)
def test_token_guard():
email = get_random_email()
create_user(email, 'pass')
token = login_user(email, 'pass')
with pytest.raises(HTTPError):
token = refresh_token(token + '1')
def test_login_with_bad_password():
email = get_random_email()
create_user(email, 'pass')
with pytest.raises(HTTPError):
login_user(email, 'wrong_pass')
def test_get_current():
email = get_random_email()
create_user(email, 'pass')
token = login_user(email, 'pass')
user = get_user(token=token)
assert user['email'] == email
assert user['balance'] == 2.5
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. currentmodule:: test_Point
.. moduleauthor:: Pat Daburu <[email protected]>
This is a unit test module.
"""
import unittest
from djio.geometry import GeometryException
class TestGeometryExceptionSuite(unittest.TestCase):
def test_initWithoutInner_verify(self):
ge = GeometryException(message='Test Message')
self.assertEqual('Test Message', ge.message)
self.assertIsNone(ge.inner)
def test_initWithInner_verify(self):
inner = Exception()
ge = GeometryException(message='Test Message',
inner=inner)
self.assertEqual('Test Message', ge.message)
self.assertTrue(ge.inner == inner)
|
python
|
#!/usr/bin/env python
import os
import uuid
import time
import zlib
import random
import numpy as np
from string import ascii_lowercase
list_chars = list(c.encode('utf8') for c in ascii_lowercase)
# Number of objects
#num_files_list = [1]
num_files_list = [1, 100, 1000, 10000, 100000, 1000000]
# Hash functions
#compression_levels = [0, 1, 3, 5, 7, 9]
compression_levels = [1]
# Total target size
total_size_target = 100000000
for num_files in num_files_list:
size = total_size_target // num_files
data = {}
start = time.time()
for _ in range(num_files):
filename = str(uuid.uuid4().hex)
## Method 1
content = os.urandom(size)
## Method 2
#content = b"".join(np.random.choice(list_chars, size))
## Method 3
#with open('test.dat', 'rb') as fhandle:
# content = fhandle.read(size)
#content = (content + content)[:size]
#assert len(content) == size
data[filename] = content
tot_time = time.time() - start
total_size = sum(len(content) for content in data.values())
print('{} objects generated in {} s. Total size: {} bytes (~{:.3f} MB).'.format(num_files, tot_time, total_size, (total_size / 1024) / 1024))
for compression_level in compression_levels:
print('TESTING FOR ZLIB COMPRESSION WITH LEVEL {}'.format(compression_level))
v = {}
start = time.time()
for key, val in data.items():
v[key] = zlib.compress(val, compression_level)
tot_time = time.time() - start
tot_compressed_size = sum(len(compressed_string) for compressed_string in v.values())
print('Total time to compress {} objects: {} s (final size: {} MB, speed: {} MB/s)'.format(num_files, tot_time, tot_compressed_size / 1024 / 1024, total_size/1024/1024/tot_time))
# Decompress
start = time.time()
for compressed_string in v.values():
zlib.decompress(compressed_string)
tot_time = time.time() - start
print('Total time to decompress back: {} s (speed: {} MB/s)'.format(tot_time, total_size/1024/1024/tot_time))
print('-'*72)
print('='*72)
|
python
|
_base_ = ['./rotated_retinanet_obb_r50_fpn_1x_dota_le90.py']
fp16 = dict(loss_scale='dynamic')
|
python
|
from tksheet import Sheet
import tkinter as tk
class Sheet_Listbox(Sheet):
def __init__(self,
parent,
values = []):
Sheet.__init__(self,
parent = parent,
show_horizontal_grid = False,
show_vertical_grid = False,
show_header = False,
show_row_index = False,
show_top_left = False,
empty_horizontal = 0,
empty_vertical = 0)
if values:
self.values(values)
def values(self, values = []):
self.set_sheet_data([[v] for v in values],
reset_col_positions = False,
reset_row_positions = False,
redraw = False,
verify = False)
self.set_all_cell_sizes_to_text()
class demo(tk.Tk):
def __init__(self):
tk.Tk.__init__(self)
self.grid_columnconfigure(0,
weight = 1)
self.grid_rowconfigure(0,
weight = 1)
self.listbox = Sheet_Listbox(self,
values = [f"_________ Item {i} _________" for i in range(2000)])
self.listbox.grid(row = 0,
column = 0,
sticky = "nswe")
#self.listbox.values([f"new values {i}" for i in range(50)]) set values
app = demo()
app.mainloop()
|
python
|
# 1.
# C = float(input('输入摄氏温度'))
# F = (9/5)*C + 32
# print('%.2F 华氏度' %F)
# 2.
# import math
# r = float(input('输入圆柱半径:'))
# l = float(input('输入圆柱高:'))
# area = r*r*math.pi
# volume = area*l
# print('面积:%.2f' %area)
# print('体积:%.2f' %volume)
# 3.
# feet = float(input('请输入英尺数:'))
# meters = feet * 0.305
# print('%.1ffeet is %.4fmeters'%(feet,meters))
# 4.
# M = float(input('输入按千克计算的水量:'))
# initialTemperature = float(input('输入水的初始温度:'))
# finalTemperature = float(input('输入水的最终温度:'))
# Q = M * (finalTemperature-initialTemperature)*4184
# print('所需能量:%.1f%Q',Q)
# 5.
# balance = float(input('输入差额:'))
# interest_rate = float(input('输入年利率:'))
# interest = balance*(interest_rate/1200)
# print('下月需付利息:%.5f' %interest)
# 6.
# v0 = float(input('输入初始速度:'))
# v1 = float(input('输入末速度:'))
# t = float(input('输入速度变化所占用的时间:'))
# a =(v1-v0)/t
# print('平均加速度为:%.4f' %a)
# 7.
# num = float(input('输入每月存款数:'))
# rate =0.05/12
# interest = 1+rate
# count=[0]
# for i in range(6):
# month = (100+count[i]*interest)
# count.append(month)
# print('六个月后的账户总额:%.2f' %count[6])
# 8.
# num = int(input("请输入1-1000的一个整数:"))
# bai = int(num%10)
# shi = int(num/10%10)
# ge = int(num/100)
# sum = ge + shi + bai
# print('各位数字之和:' ,sum)
# 9.
# import math
# r = float(input('输入顶点到中心的距离:'))
# s = 2*r*math.sin(math.pi/5)
# area = 5*s*s/(4*math.tan(math.pi/5))
# print('五边形的面积%.2f' %area)
# 10.
# import math
# print ('输入第一个坐标:')
# x1 = float(input('>'))
# y1 = float(input('>'))
# print ('输入第二个坐标:')
# x2 = float(input('>'))
# y2 = float(input('>'))
# radius = 6371.01
# math.radians = float(input('输入地球表面的经度:'))
# math.arccoss = float(input('输入地球表面的纬度:'))
# d = math.radians * math.arccos(math.sin(math.radians(x1)) * math.sin(math.radians(x2)) + math.cos(math.radians(x1)) * math.cos(math.radians(x2)) * math.cos(math.radians(y1-y2))
# print ('%d' %d)
# 10.
# import math
# x1,y1 = eval(input('Please input point1(latitude and longitude) in degrees:'))
# x2,y2 = eval(input('Please input point2(latitude and longitude) in degrees:'))
# radius = 6371.01
# x11 = math.radians(x1)  # math.radians() converts degrees to radians
# y11 = math.radians(y1)
# x22 = math.radians(x2)
# y22 = math.radians(y2)
# d = radius * math.acos(math.sin(x11) * math.sin(x22) + math.cos(x11) * math.cos(x22) * math.cos(y11-y22))
# print('The distance between the two points is %5.2f km'%d)
# 11.
# import math
# s = float(input('输入五角星的边长:'))
# area = (5*s*s)/(4*math.tan(math.pi/5))
# print('五角星的面积为:%.2f',area)
# 12.
# import math
# n = int(input('输入边数:'))
# s = float(input('输入正多边形的边长:'))
# area = (n * s * s) / (4 * math.tan (math.pi / n))
# print('%.2f',area)
# 13.
# ASCII = int(input('输入整数=>'))
# print(chr(ASCII))
# 14.
# name = (input('姓名:'))
# workhour = int(input('一周工作时间:'))
# many = float(input('每小时的报酬:'))
# lianbang = float(input('联邦预扣税率:'))
# zhou = float(input('州预扣税率:'))
# rate1 = workhour * many
# print(rate1)
# print('Deduction:')
# faderal = rate1 * lianbang
# print(faderal)
# state = rate1 * zhou
# print(state)
# zongmany = rate1 -(faderal + state)
# print(zongmany)
# 15.
# num = input('输入一个四位整数数字:')
# for i in range(len(num)):
# print(num[-i + len(num)-1],end='')
# 16.
# import hashlib
# a = input('请输入一行文本:')
# m = hashlib.md5()
# b = a.encode(encoding='utf-8')
# m.update(b)
# a_md5 = m.hexdigest
# print('md5加密前为:'+a)
# print('md5加密前为:'+a_md5)
|
python
|
# You are provided with a code that raises many exceptions. Fix it, so it works correctly.
# numbers_list = input().split(", ")
# result = 0
#
# for i in range(numbers_list):
# number = numbers_list[i + 1]
# if number < 5:
# result *= number
# elif number > 5 and number > 10:
# result /= number
#
# print(result)
numbers_list = map(int, input().split(", "))
result = 1
for number in numbers_list:
if number <= 5:
result *= number
elif number <= 10:
result /= number
print(int(result))
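# Worked example (assumed input): for "2, 5, 10, 1" the result is
# 1 * 2 * 5 = 10, then 10 / 10 = 1.0, then 1.0 * 1 = 1.0, so the script prints 1.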
|
python
|
#! /usr/bin/env python3
"""This is a prototype work manager which reads work requests from a file and
submits them as messages to a RabbitMQ queue.
This is development only. For a real system, you would get work from a
database or other entity.
"""
import os
import sys
import json
import logging
from argparse import ArgumentParser
from time import sleep
import proton
from proton import Message
from proton.utils import BlockingConnection
from proton.handlers import IncomingMessageHandler
logger = None
SYSTEM = 'PROTO'
COMPONENT = 'work-manager'
MSG_SERVICE_STRING = None
MSG_WORK_QUEUE = None
MSG_STATUS_QUEUE = None
class LoggingFilter(logging.Filter):
"""Standard logging filter for using Mesos
"""
def __init__(self, system='', component=''):
super(LoggingFilter, self).__init__()
self.system = system
self.component = component
def filter(self, record):
record.system = self.system
record.component = self.component
return True
class ExceptionFormatter(logging.Formatter):
"""Standard logging formatter with special execption formatting
"""
def __init__(self, fmt=None, datefmt=None):
std_fmt = ('%(asctime)s.%(msecs)03d'
' %(levelname)-8s'
' %(system)s'
' %(component)s'
' %(message)s')
std_datefmt = '%Y-%m-%dT%H:%M:%S'
if fmt is not None:
std_fmt = fmt
if datefmt is not None:
std_datefmt = datefmt
super(ExceptionFormatter, self).__init__(fmt=std_fmt,
datefmt=std_datefmt)
def formatException(self, exc_info):
result = super(ExceptionFormatter, self).formatException(exc_info)
return repr(result)
def format(self, record):
s = super(ExceptionFormatter, self).format(record)
if record.exc_text:
s = s.replace('\n', ' ')
s = s.replace('\\n', ' ')
return s
def setup_logging(args):
"""Configure the message logging components
"""
global logger
# Setup the logging level
logging_level = logging.INFO
if args.debug:
logging_level = args.debug
handler = logging.StreamHandler(sys.stdout)
msg_formatter = ExceptionFormatter()
msg_filter = LoggingFilter(SYSTEM, COMPONENT)
handler.setFormatter(msg_formatter)
handler.addFilter(msg_filter)
logger = logging.getLogger()
logger.setLevel(logging_level)
logger.addHandler(handler)
def retrieve_command_line():
"""Read and return the command line arguments
"""
description = 'Prototype Work Manager'
parser = ArgumentParser(description=description)
parser.add_argument('--job-filename',
action='store',
dest='job_filename',
required=False,
metavar='TEXT',
help='JSON job file to use')
parser.add_argument('--dev-mode',
action='store_true',
dest='dev_mode',
required=False,
default=False,
help='Run in developer mode')
parser.add_argument('--debug',
action='store',
dest='debug',
required=False,
type=int,
default=0,
metavar='DEBUG_LEVEL',
help='Log debug messages')
return parser.parse_args()
def get_env_var(variable, default):
"""Read variable from the environment and provide a default value
"""
result = os.environ.get(variable, default)
if not result:
raise RuntimeError('You must specify {} in the environment'
.format(variable))
return result
def get_jobs(job_filename):
"""Reads jobs from a known job file location
"""
jobs = list()
if job_filename and os.path.isfile(job_filename):
with open(job_filename, 'r') as input_fd:
data = input_fd.read()
job_dict = json.loads(data)
del data
for job in job_dict['jobs']:
jobs.append(job)
os.unlink(job_filename)
return jobs
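# Hedged example of the job file layout get_jobs() expects; only the top-level
# "jobs" list is required by this code, the fields inside each job are illustrative:
#
#   {
#       "jobs": [
#           {"id": "job-0001", "input": "scene-0001"},
#           {"id": "job-0002", "input": "scene-0002"}
#       ]
#   }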
def main():
"""Main processing for the application
"""
global MSG_SERVICE_STRING
global MSG_WORK_QUEUE
global MSG_STATUS_QUEUE
# Example connection string: amqp://<username>:<password>@<host>:<port>
MSG_SERVICE_STRING = get_env_var('PROTO_MSG_SERVICE_CONNECTION_STRING', None)
MSG_WORK_QUEUE = get_env_var('PROTO_MSG_WORK_QUEUE', None)
MSG_STATUS_QUEUE = get_env_var('PROTO_MSG_STATUS_QUEUE', None)
args = retrieve_command_line()
# Configure logging
setup_logging(args)
logger.info('Begin Processing')
try:
while True:
try:
# Create the connection
connection = BlockingConnection(MSG_SERVICE_STRING)
# Create a sender
sender = connection.create_sender(MSG_WORK_QUEUE)
jobs = get_jobs(args.job_filename)
for job in jobs:
message_json = json.dumps(job, ensure_ascii=False)
try:
sender.send(Message(body=message_json))
# TODO - This prototype doesn't care, but we
# TODO - should probably update the status at
# TODO - the work source.
print('Queued Message = {}'.format(message_json))
except proton.ConnectionException:
# TODO - This prototype doesn't care, but does
# TODO - something need to be done if this
# TODO - happens?
print('Returned Message = {}'.format(message_json))
finally:
connection.close()
sleep(60)
except KeyboardInterrupt:
pass
#except pika.exceptions.ConnectionClosed:
# pass
logger.info('Terminated Processing')
if __name__ == '__main__':
main()
|
python
|
# coding: utf-8
import numpy as np
from numpy import matrix as mat
import cv2
import os
import math
def undistort(img, # image data
fx, fy, cx, cy, # camera intrinsics
k1, k2, # radial distortion parameters
p1=None, p2=None, # tangential distortion parameters
radial_ud_only=True):
"""
undistort image using distort model
test gray-scale image only
"""
if img is None:
print('[Err]: empty image.')
return
is_bgr = len(img.shape) == 3
if is_bgr:
H, W, C = img.shape
elif len(img.shape) == 2:
H, W = img.shape
else:
print('[Err]: image format wrong!')
return
img_undistort = np.zeros_like(img, dtype=np.uint8)
# fill in each pixel in un-distorted image
for v in range(H):
for u in range(W): # u,v are pixel coordinates
# convert to camera coordinates by camera intrinsic parameters
x1 = (u - cx) / fx
y1 = (v - cy) / fy
r_square = (x1 * x1) + (y1 * y1)
r_quadric = r_square * r_square
if radial_ud_only: # do radial undistortion only
x2 = x1 * (1.0 + k1 * r_square + k2 * r_quadric)
y2 = y1 * (1.0 + k1 * r_square + k2 * r_quadric)
else: # do radial undistortion and tangential undistortion
x2 = x1 * (1.0 + k1 * r_square + k2 * r_quadric) + \
2.0 * p1 * x1 * y1 + p2 * (r_square + 2.0 * x1 * x1)
y2 = y1 * (1.0 + k1 * r_square + k2 * r_quadric) + \
p1 * (r_square + 2.0 * y1 * y1) + 2.0 * p2 * x1 * y1
# convert back to pixel coordinates
# using nearest neighbor interpolation
u_corrected = int(fx * x2 + cx + 0.5)
v_corrected = int(fy * y2 + cy + 0.5)
# @Todo: using bilinear interpolation...
# processing pixel outside the image area
if u_corrected < 0 or u_corrected >= W \
or v_corrected < 0 or v_corrected >= H:
if is_bgr:
img_undistort[v, u, :] = 0
else:
img_undistort[v, u] = 0
else:
if is_bgr:
img_undistort[v, u, :] = img[v_corrected,
u_corrected, :] # y, x
else:
img_undistort[v, u] = img[v_corrected, u_corrected] # y, x
return img_undistort.astype('uint8')
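# Note on the distortion model used above (standard radial/"Brown" form, stated here
# for reference): in normalized camera coordinates, with r^2 = x^2 + y^2,
#   x_d = x * (1 + k1*r^2 + k2*r^4) [+ tangential terms with p1, p2]
#   y_d = y * (1 + k1*r^2 + k2*r^4) [+ tangential terms with p1, p2]
# For every pixel of the undistorted output image, undistort() maps it through this
# model into the distorted input image and samples it with nearest-neighbour lookup.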
def test_undistort_img():
img_path = './distorted.png'
fx = 458.654
fy = 457.296
cx = 367.215
cy = 248.375
camera_intrinsics = [fx, fy, cx, cy]
k1 = -0.28340811
k2 = 0.07395907
p1 = 0.00019359
p2 = 1.76187114e-05
# Init parameters to be optimized
params = np.array([[-0.1],
[0.1]]) # k1k2
# ---------- Run LM optimization
params = LM_Optimize(params)  # LM_Optimize returns the optimized parameters
k1 = params[0][0]
k2 = params[1][0]
# ----------
undistort_img(img_path, camera_intrinsics, k1, k2, p1, p2)
def undistort_img(img_path,
camera_intrinsics,
k1, k2, p1=None, p2=None,
is_color=True):
"""
undistort of image
given camera matrix and distortion coefficients
"""
# LM_Optimize()
fx = camera_intrinsics[0]
fy = camera_intrinsics[1]
cx = camera_intrinsics[2]
cy = camera_intrinsics[3]
if not os.path.isfile(img_path):
print('[Err]: invalid image path.')
return
img_orig = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
if is_color:
img = cv2.imread(img_path, cv2.IMREAD_COLOR)
else:
img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
if img is None:
print('[Err]: empty image.')
return
# ---------- Do undistortion
img_undistort = undistort(img,
fx, fy, cx, cy,
k1, k2, p1, p2)
# ----------
cv2.imshow('origin', img_orig)
cv2.imshow('undistort', img_undistort)
cv2.waitKey()
def show_points_of_curve():
"""
visualize points on the curve
"""
pts_on_curve = [
[546, 20], [545, 40], [543, 83],
[536, 159], [535, 170], [534, 180],
[531, 200], [530, 211], [529, 218],
[526, 236], [524, 253], [521, 269],
[519, 281], [517, 293], [515, 302],
[514, 310], [512, 320], [510, 329],
[508, 341], [506, 353], [505, 357]
]
print('Total {:d} points on the curve.'.format(len(pts_on_curve)))
img_path = './distorted.png'
if not os.path.isfile(img_path):
print('[Err]: invalid image path.')
return
img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
if img is None:
print('[Err]: empty image.')
return
# Draw points and centroid
centroid_x, centroid_y = 0.0, 0.0
for pt in pts_on_curve:
centroid_x += pt[0]
centroid_y += pt[1]
cv2.circle(img, tuple(pt), 5, (0, 255, 0), -1)
centroid_x /= float(len(pts_on_curve))
centroid_y /= float(len(pts_on_curve))
centroid_x = int(centroid_x + 0.5)
centroid_y = int(centroid_y + 0.5)
cv2.circle(img, (centroid_x, centroid_y), 7, (0, 0, 255), -1)
# Draw line of endpoints
cv2.line(img, tuple(pts_on_curve[0]), tuple(
pts_on_curve[-1]), (255, 0, 0), 2)
cv2.imshow('Curve', img)
cv2.waitKey()
def line_equation(first_x, first_y, second_x, second_y):
# Ax+By+C=0
A = second_y - first_y
B = first_x - second_x
C = second_x*first_y - first_x*second_y
# k = -1.0 * A / B
# b = -1.0 * C / B
return A, B, C
def dist_of_pt_to_line(pt, A, B, C):
"""
2D space point to line distance
"""
# tmp = abs(A*pt[0] + B*pt[1] + C) / math.sqrt(A*A + B*B)
tmp = -(A*pt[0] + B*pt[1] + C) / math.sqrt(A*A + B*B)
return tmp
# return math.sqrt(tmp * tmp)
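# Worked example (assumed values): line_equation(0, 0, 4, 0) gives A=0, B=-4, C=0,
# and dist_of_pt_to_line((2, 3), 0, -4, 0) = -(-12)/4 = 3.0, the signed distance
# of the point from the x-axis.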
def undistort_point(u, v,
fx, fy, cx, cy,
k1, k2, p1=None, p2=None,
radial_ud_only=True):
"""
"""
# convert to camera coordinates by camera intrinsic parameters
x1 = (u - cx) / fx
y1 = (v - cy) / fy
# compute r^2 and r^4
r_square = (x1 * x1) + (y1 * y1)
r_quadric = r_square * r_square
if radial_ud_only: # do radial undistortion only
x2 = x1 * (1.0 + k1 * r_square + k2 * r_quadric)
y2 = y1 * (1.0 + k1 * r_square + k2 * r_quadric)
else: # do radial undistortion and tangential undistortion
x2 = x1 * (1.0 + k1 * r_square + k2 * r_quadric) + \
2.0 * p1 * x1 * y1 + p2 * (r_square + 2.0 * x1 * x1)
y2 = y1 * (1.0 + k1 * r_square + k2 * r_quadric) + \
p1 * (r_square + 2.0 * y1 * y1) + 2.0 * p2 * x1 * y1
# convert back to pixel coordinates
# using nearest neighbor interpolation
u_corrected = fx * x2 + cx
v_corrected = fy * y2 + cy
return [u_corrected, v_corrected]
# the function
def test_undistort_pts_on_curve():
"""
"""
fx = 458.654
fy = 457.296
cx = 367.215
cy = 248.375
k1 = -0.28340811
k2 = 0.07395907
k1k2 = np.array([[k1],
[k2]])
pts_orig = [
[546, 20], [545, 40], [543, 83],
[536, 159], [535, 170], [534, 180],
[531, 200], [530, 211], [529, 218],
[526, 236], [524, 253], [521, 269],
[519, 281], [517, 293], [515, 302],
[514, 310], [512, 320], [510, 329],
[508, 341], [506, 353], [505, 357]
]
pts_orig = np.array(pts_orig)  # convert to an array so column slicing works
pts_corrected = undistort_point(
pts_orig[:, 0], pts_orig[:, 1],
fx, fy, cx, cy,
k1k2[0][0], k1k2[1][0]
)
img_path = './distorted.png'
img_orig = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
def Func(fx, fy, cx, cy, k1k2, input_list):
ret = np.zeros(len(input_list))
for i, input_i in enumerate(input_list):
# using numpy array for SIMD
pts_orig = np.array(input_i) #
# applying undistortion of points
pts_corrected = undistort_point(
pts_orig[:, 0], pts_orig[:, 1],
fx, fy, cx, cy,
k1k2[0][0], k1k2[1][0]
)
# compute centroid of undistorted points
centroid = np.sum(pts_corrected, axis=1) # get sum by column
centroid /= float(pts_orig.shape[0])
# build line of undistorted endpoints
A, B, C = line_equation(pts_corrected[0][0], pts_corrected[0][1],
pts_corrected[-1][0], pts_corrected[-1][1])
# build loss function and return
dist = dist_of_pt_to_line(centroid, A, B, C)
ret[i] = dist
ret = np.array(ret)
ret = np.reshape(ret, (-1, 1))
return ret
def Deriv(fx, fy, cx, cy,
k1k2,
input_list,
i):
"""
"""
k1k2_delta_1 = k1k2.copy()
k1k2_delta_2 = k1k2.copy()
k1k2_delta_1[i, 0] -= 0.000001
k1k2_delta_2[i, 0] += 0.000001
p1 = Func(fx, fy, cx, cy, k1k2_delta_1, input_list)
p2 = Func(fx, fy, cx, cy, k1k2_delta_2, input_list)
d = (p2 - p1) * 1.0 / (0.000002)
return d
def test_func():
pts_orig = [
[546, 20], [545, 40], [543, 83],
[536, 159], [535, 170], [534, 180],
[531, 200], [530, 211], [529, 218],
[526, 236], [524, 253], [521, 269],
[519, 281], [517, 293], [515, 302],
[514, 310], [512, 320], [510, 329],
[508, 341], [506, 353], [505, 357]
]
input_list = []
input_list.append(pts_orig)
fx = 458.654
fy = 457.296
cx = 367.215
cy = 248.375
# k1k2 = np.array([[0.1],
# [0.1]])
k1 = -0.28340811
k2 = 0.07395907
k1k2 = np.array([[k1],
[k2]])
dists = Func(fx, fy, cx, cy, k1k2, input_list) # N×1
print('Dist: {:.3f}'.format(dists[0][0]))
def LM_Optimize(params, max_iter=100):
"""
"""
# Known parameters(camera intrinsics)
fx = 458.654
fy = 457.296
cx = 367.215
cy = 248.375
# Input
pts_orig = [
[546, 20], [545, 40], [543, 83],
[536, 159], [535, 170], [534, 180],
[531, 200], [530, 211], [529, 218],
[526, 236], [524, 253], [521, 269],
[519, 281], [517, 293], [515, 302],
[514, 310], [512, 320], [510, 329],
[508, 341], [506, 353], [505, 357]
]
input_list = []
input_list.append(pts_orig)
    N = len(input_list)  # number of data samples
    print('Total {:d} data samples.'.format(N))
u, v = 1, 2
step = 0
last_mse = 0.0
while max_iter:
step += 1
mse, mse_tmp = 0.0, 0.0
# loss
loss = Func(fx, fy, cx, cy, params, input_list)
mse += sum(loss**2)
mse /= N # normalize
        # build the Jacobian matrix
        J = mat(np.zeros((N, 2)))  # Jacobian matrix
        for i in range(2):
            J[:, i] = Deriv(fx, fy, cx, cy, params, input_list, i)
        print('Jacobian matrix:\n', J)
        H = J.T*J + u*np.eye(2)  # 2×2
        params_delta = -H.I * J.T*loss  # damped Gauss-Newton step
# update parameters
params_tmp = params.copy()
params_tmp += params_delta
# current loss
loss_tmp = Func(fx, fy, cx, cy, params_tmp, input_list)
mse_tmp = sum(loss_tmp[:, 0]**2)
mse_tmp /= N
# adaptive adjustment
q = float((mse - mse_tmp) /
((0.5*params_delta.T*(u*params_delta - J.T*loss))[0, 0]))
if q > 0:
s = 1.0 / 3.0
v = 2
mse = mse_tmp
params = params_tmp
temp = 1 - pow(2.0*q-1, 3)
if s > temp:
u = u*s
else:
u = u*temp
else:
u = u*v
v = 2*v
params = params_tmp
print("step = %d, abs(mse-lase_mse) = %.8f" %
(step, abs(mse-last_mse)))
if abs(mse - last_mse) < 0.000001:
break
        last_mse = mse  # record the previous mse
max_iter -= 1
print('\nFinal optimized parameters:\n', params)
if __name__ == '__main__':
test_undistort_img()
# show_points_of_curve()
# test_func()
print('=> Test done.')
|
python
|
from http import HTTPStatus
import json
from src.common.encoder import PynamoDbEncoder
class HTTPResponse(object):
@classmethod
def to_json_response(cls, http_status, message=None):
"""
Access-Control-Allow-Origin is needed for CORS to work
Access-Control-Allow-Credentials is needed for cookies
"""
_message = http_status.description
if message:
_message = message
return {
"statusCode": http_status.value,
"headers": {
"Access-Control-Allow-Origin": "*",
"Access-Control-Allow-Credentials": True
},
"body": json.dumps({"message": _message})}
@classmethod
def to_ok_json(cls, body, encoder=PynamoDbEncoder):
return {
"statusCode": HTTPStatus.OK.value,
"headers": {
"Access-Control-Allow-Origin": "*",
"Access-Control-Allow-Credentials": True
},
"body": json.dumps(body, cls=encoder)
}
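# Minimal usage sketch (an assumption, not part of the original module): a
# Lambda-style handler returning the API Gateway proxy responses built by
# HTTPResponse. The handler name and event payload below are hypothetical.
def example_handler(event, context):
    if not event.get("body"):
        return HTTPResponse.to_json_response(HTTPStatus.BAD_REQUEST,
                                             message="missing body")
    return HTTPResponse.to_ok_json({"echo": json.loads(event["body"])})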
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-11-26 09:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('libretto', '0044_auto_20190917_1200'),
]
operations = [
migrations.AlterField(
model_name='source',
name='folio',
field=models.CharField(blank=True, help_text='Sans «\xa0f.\xa0». Exemple\xa0: «\xa03\xa0».', max_length=15, verbose_name='folio'),
),
migrations.AlterField(
model_name='source',
name='page',
field=models.CharField(blank=True, db_index=True, help_text='Sans «\xa0p.\xa0». Exemple\u202f: «\xa03\xa0»', max_length=15, verbose_name='page'),
),
]
|
python
|
from setuptools import setup, find_packages
classifiers = ['Development Status :: 4 - Beta',
'Operating System :: POSIX :: Linux',
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Software Development',
'Topic :: System :: Hardware']
setup(name='ST7735',
version='0.0.2',
      description='Library to control an ST7735 160x80 TFT LCD display.',
long_description=open('README.rst').read() + '\n' + open('CHANGELOG.txt').read(),
license='MIT',
author='Philip Howard',
author_email='[email protected]',
classifiers=classifiers,
url='https://github.com/pimoroni/st7735-160x80-python/',
packages=find_packages())
|
python
|
"""============================================================================
The input is a file containing lines of the following form:
equation_name arg1 ...
For example:
energy 5.4 3.7 99
something 7 280.01
energy 88.94 73 21.2
whizbang 83.34 14.34 356.43 139593.7801
something .001 25
You must pass the name of the input file on the command-line. Do not hard-code
the input file name in the source code.
You must validate the name of the physics equation and the number of arguments.
If the name of the equation is invalid, write an error message and skip to the
next line. If the equation name is valid, but has the wrong number of
arguments, write an error message and skip to the next line.
If the equation name and number of arguments is correct, call the equation with
the arguments and print the answer like this:
physics_equation_name(arg1, arg2 ...) = answer
============================================================================"""
from physequations import grav_potential_energy, kin_energy, work_energy
from pprint import pprint
# print('<--checking equations hardcoded with rounding-->')
# print(grav_potential_energy(2, 6.4))
# print(round(grav_potential_energy(2, 6.4), 2))
# print(kin_energy(2, 5))
# print(work_energy(2, 5, 30))
# print(round(work_energy(2, 5, 30), 2))
# print()
def isint(s):
    """Checks whether the input can be parsed as an integer"""
    try:
        int(s)
    except (ValueError, TypeError):
        return False
    return True
# string = 'this is a string'
# print(string.split())
# print()
"""logic: find if the index element is not an int then start a new line"""
f = open('resources/equations_input.txt', 'r')
flines = f.readlines()
# pprint(flines)
equations = []
for line in flines:
spline = line.split()
# print(spline)
equations.append(spline)
# print('===> equations')
# pprint(equations)
for equation in equations:
eqname = equation[0]
# print(eqname)
if eqname != 'grav_potential_energy' and eqname != 'kin_energy' and \
eqname !='work_energy':
print(f'{eqname} is not valid')
# print(equation)
numargs = len(equation) - 1
if eqname == 'grav_potential_energy':
if numargs != 2:
print(f'Wrong number of arguments: {equation}')
else:
mass = float(equation[1])
height = float(equation[2])
ans = grav_potential_energy(float(equation[1]), float(equation[2]))
# {mass, height} creates a tuple
# ({mass}, {height}) is another way to format it
print(f'{eqname}{mass, height} = {ans}')
if eqname == 'kin_energy':
if numargs != 2:
print(f'Wrong number of arguments: {equation}')
else:
mass = float(equation[1])
velocity = float(equation[2])
ans = kin_energy(float(equation[1]), float(equation[2]))
# {mass, velocity} creates a tuple
# ({mass}, {velocity}) is another way to format it
print(f'{eqname}{mass, velocity} = {ans}')
if eqname == 'work_energy':
if numargs != 3:
print(f'Wrong number of arguments: {equation}')
else:
force = float(equation[1])
displacement = float(equation[2])
angle = float(equation[3])
ans = work_energy(float(equation[1]), float(equation[2]), float(equation[3]))
# {force, displacement, angle} creates a tuple
# ({force}, {displacement}, {angle}) is another way to format it
print(f'{eqname}{force, displacement, angle} = {ans}')
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from sympy import init_printing,Integral,latex,pretty,pprint,sqrt,symbols,srepr
init_printing(use_unicode=True)
x,y,z = symbols('x y z')
print(Integral(sqrt(1/x),x))
print(srepr(Integral(sqrt(1/x), x)))
pprint(Integral(sqrt(1/x), x), use_unicode=False)
print(pretty(Integral(sqrt(1/x), x), use_unicode=False))
print(latex(Integral(sqrt(1/x), x)))
from sympy.printing.mathml import print_mathml
print_mathml(Integral(sqrt(1/x), x))
from sympy.printing.dot import dotprint
from sympy.abc import x
print(dotprint(x+2))
|
python
|
from django.contrib import admin
from django.contrib.admin import register
from bbbs.main.models import Main
from bbbs.users.utils import AdminOnlyPermissionsMixin
from .forms import MainAdminForm
@register(Main)
class MainAdmin(AdminOnlyPermissionsMixin, admin.ModelAdmin):
empty_value_display = "-пусто-"
filter_horizontal = ("questions", "articles", "movies")
form = MainAdminForm
def has_add_permission(self, request):
if Main.objects.first():
return False
return True
def has_delete_permission(self, request, obj=None):
return False
|
python
|
""" uTorrent migration to qBittorrent module """
from tkinter import Tk, StringVar, N, W, E, S, filedialog, messagebox, HORIZONTAL
from tkinter.ttk import Frame, Entry, Button, Label, Progressbar
from shutil import copy
from os import path
from hashlib import sha1
from time import time
from re import compile as re_compile
from tpp.bencodepy import encode as bencode
from tpp.bencodepy import decode as bdecode
from tpp.bencodepy import DecodingError
FIELD_MAP = {"active_time" : 0,
"added_time" : 0,
"allocation" : "full",
"announce_to_dht" : 1,
"announce_to_lsd" : 1,
"announce_to_trackers" : 1,
"auto_managed" : 1,
"banned_peers" : "",
"banned_peers6" : "",
"blocks per piece" : 0,
"completed_time" : 0,
"download_rate_limit" : 0,
"file sizes" : [[0, 0], [0, 0], [0, 0]],
"file-format" : "libtorrent resume file",
"file-version" : 1,
"file_priority" : [2, 0, 1],
"finished_time" : 0,
"info-hash" : "",
"last_download" : 0,
"last_scrape" : 0,
"last_seen_complete" : 0,
"last_upload" : 0,
"libtorrent-version" : "0.16.19.0",
"mapped_files" : ["relative\\path\\to\\file1.ext", "r\\p\\t\\file2.ext", "file3.ext"],
"max_connections" : 100,
"max_uploads" : 16777215,
"num_downloaders" : 16777215,
"num_incomplete" : 0,
"num_seeds" : 0,
"paused" : 0,
"peers" : "",
"peers6" : "",
"piece_priority" : "",
"pieces" : "",
"seed_mode" : 0,
"seeding_time" : 0,
"sequential_download" : 0,
"super_seeding" : 0,
"total_downloaded" : 0,
"total_uploaded" : 0,
"upload_rate_limit" : 0,
"trackers" : [["https://tracker"]]}
def mkfr(res, tor):
""" Creates libtorrent fast resume file.
@res uTorrent data.
@tor Torrent File.
"""
    qbt_torrent = dict(FIELD_MAP)  # copy the template so successive calls do not share state
time_now = int(time())
    pieces_num = len(tor['info']['pieces']) // 20  # SHA1 hash is 20 bytes
qbt_torrent['added_time'] = int(res['added_on'])
qbt_torrent['completed_time'] = int(res['completed_on'])
qbt_torrent['active_time'] = int(res['runtime'])
qbt_torrent['seeding_time'] = qbt_torrent['active_time']
qbt_torrent['blocks per piece'] = int(int(tor['info']['piece length']) / int(res['blocksize']))
qbt_torrent['info-hash'] = sha1(bencode(tor['info'])).digest()
qbt_torrent['paused'] = 1 if res['started'] == 0 else 0
qbt_torrent['auto_managed'] = 0
qbt_torrent['total_downloaded'] = int(res['downloaded'])
qbt_torrent['total_uploaded'] = int(res['uploaded'])
qbt_torrent['upload_rate_limit'] = int(res['upspeed'])
qbt_torrent['trackers'] = [[tracker] for tracker in res['trackers']]
#wat?
qbt_torrent['piece_priority'] = "".join(bin(hexik)[2:]*pieces_num for hexik in res["have"])
#wat?
qbt_torrent['pieces'] = qbt_torrent['piece_priority']
qbt_torrent['finished_time'] = time_now - qbt_torrent['completed_time']
qbt_torrent['last_seen_complete'] = int(time_now) if qbt_torrent["finished_time"] else 0
qbt_torrent['last_download'] = qbt_torrent['finished_time']
qbt_torrent['last_scrape'] = qbt_torrent['finished_time']
qbt_torrent['last_upload'] = qbt_torrent['finished_time']
qbt_torrent['mapped_files'] = []
qbt_torrent['file sizes'] = []
# Per file fields:
##########
# mapped_files
# file_priority
# file sizes
#wat?
get_hex = re_compile("[0-9A-Fa-f][0-9A-Fa-f]")
qbt_torrent["file_priority"] = [(1 if int(hex_number, 16) in range(1, 9) else
(2 if int(hex_number, 16) in range(9, 16) else
(0))) for hex_number in get_hex.split(res["prio"])]
fmt = 0
if "files" in tor['info']:
for file_index in range(len(tor['info']['files'])):
tor_file = tor['info']['files'][file_index]
            # each entry in 'files' holds its path components in the 'path' key
            qbt_torrent['mapped_files'].append(path.normpath(path.join(*tor_file['path'])))
if not "modtimes" in res:
fmt = int(res['modtimes'][file_index])
else:
fmt = 0
res_file = path.join(res['path'], qbt_torrent['mapped_files'][-1])
if path.isfile(res_file) and not fmt:
fmt = int(path.getmtime(res_file))
if qbt_torrent['file_priority'][file_index]:
qbt_torrent['file sizes'].append([int(tor_file['length']), fmt])
else:
qbt_torrent['file sizes'].append([0, 0])
qbt_torrent['qBt-savePath'] = res['path']
else:
qbt_torrent['qBt-savePath'] = path.dirname(res['path'])
if "modtimes" in res:
            fmt = int(res['modtimes'][0])  # file mtime to avoid re-checking; not present in uT 2.2
else:
fmt = 0
res_file = res['path']
if path.isfile(res_file) and not fmt:
fmt = int(path.getmtime(res_file))
if qbt_torrent['file_priority'][0]:
qbt_torrent['file sizes'].append([int(tor['info']['length']), fmt])
else:
qbt_torrent['file sizes'].append([0, 0])
##########
# qBittorrent 3.1+ Fields
##########
if "label" in res:
qbt_torrent['qBt-label'] = res['label']
qbt_torrent['qBt-queuePosition'] = -1 # -1 for completed
qbt_torrent['qBt-seedDate'] = qbt_torrent['completed_time']
qbt_torrent['qBt-ratioLimit'] = "-2" # -2 = Use Global, -1 = No limit, other number = actual ratio?
return qbt_torrent
def punchup(res, tor, dotracker=True, doname=False):
torrent = tor
if dotracker:
utrax = res['trackers']
if len(utrax) > 1:
if "announce-list" in torrent:
if not set(torrent['announce-list']) == set(utrax):
torrent['announce-list'] = [[element] for element in set(utrax)]
elif "announce" in torrent:
if not torrent['announce'] == utrax[0]:
torrent['announce'] = utrax[0]
if doname:
res_path = res['path']
if not "files" in torrent:
torrent['info']['name'] = path.basename(res_path)
return torrent
def convertor(ut_data: str, qbt_dir: str):
""" Converts from uTorrent resume.dat to qBt
@ut_data Path to uT resum.data
@qbt_dir Path to store results
"""
message = messagebox
"""
backup_data = ".".join((ut_data, "old"))
try:
copy(ut_data, backup_data)
except IOError:
if message.askyesno("Backup error", "Cannot back-up UT data\nIs it ok?"):
backup_data = ""
else:
return
"""
with open(ut_data, 'rb') as ut_fd:
data = ut_fd.read()
try:
torrents = bdecode(data)
except DecodingError as error:
message.showerror("Decoding error", "".join(("Cannot decode uTorrent data\n",
"Error: ", str(error))))
return
ut_folder = path.dirname(ut_data)
print(torrents)
for key, value in torrents.items():
torrent_file = path.join(ut_folder, key)
with open(torrent_file, 'rb') as ut_fd:
try:
bdecoded_data = bdecode(ut_fd.read())
            except DecodingError:
continue
tor_file = punchup(value, bdecoded_data)
file_hash = sha1(bencode(tor_file["info"])).hexdigest().lower()
#paths
path_torrent_file = path.join(qbt_dir, ".".join((file_hash, "torrent")))
path_fast_resume = path.join(qbt_dir, ".".join((file_hash, "fastresume")))
if path.exists(path_torrent_file) or path.exists(path_fast_resume):
continue
fast_resume_file = mkfr(value, tor_file)
        with open(path_torrent_file, "wb") as out_fd:
            out_fd.write(bencode(tor_file))
        with open(path_fast_resume, "wb") as out_fd:
            out_fd.write(bencode(fast_resume_file))
class qbtConvertor(Tk):
""" GUI Application for migration from uTorrent to qBittorrent """
def __init__(self):
Tk.__init__(self)
self.title("uT to qBt convertor")
#main frame
self.main_frame = Frame(self, padding="3 3 12 12")
self.main_frame.grid(column=0, row=0, sticky=(N, W, E, S))
self.main_frame.columnconfigure(0, weight=1)
self.main_frame.rowconfigure(0, weight=1)
#uT part
self.ut_data = StringVar()
self.ut_label = Label(self.main_frame, text="uT data")
self.ut_label.grid(column=0, row=1, sticky=(W))
self.ut_entry = Entry(self.main_frame, width=100, textvariable=self.ut_data)
self.ut_entry.grid(column=1, row=1, sticky=(W))
self.ut_button = Button(self.main_frame, text="Browse", command=self.load_file)
self.ut_button.grid(column=2, row=1)
#qBt part
self.qbt_folder = StringVar()
self.qbt_label = Label(self.main_frame, text="qBt folder")
self.qbt_label.grid(column=0, row=4, sticky=(W))
self.qbt_entry = Entry(self.main_frame, width=100, textvariable=self.qbt_folder)
self.qbt_entry.grid(column=1, row=4, sticky=(W))
self.qbt_button = Button(self.main_frame, text="Browse", command=self.open_dir)
self.qbt_button.grid(column=2, row=4, sticky=(W, E))
#convertor
self.convertor_button = Button(self.main_frame, text="Convert", command=self.convert,
width=50)
self.convertor_button.grid(column=1, columnspan=2, row=5)
self.progress_bar = Progressbar(self.main_frame, orient=HORIZONTAL, length=300, mode="indeterminate")
self.progress_bar.grid(column=1, columnspan=3, row=6)
#set padding for each element
for child in self.main_frame.winfo_children():
child.grid_configure(padx=5, pady=5)
def convert(self):
message = messagebox
if not self.qbt_folder.get() or not self.ut_data.get():
message.showerror("ERROR", "Specify paths!")
return
self.progress_bar.start()
convertor(self.ut_data.get(), self.qbt_folder.get())
self.progress_bar.stop()
def load_file(self):
file_name = filedialog.askopenfilename(filetypes=(("UT resume file", "*.dat"),
("All", "*")))
if file_name:
self.ut_data.set(file_name)
def open_dir(self):
dir_name = filedialog.askdirectory()
if dir_name:
self.qbt_folder.set(dir_name)
if __name__ == "__main__":
app = qbtConvertor()
app.geometry("800x160")
app.mainloop()
|
python
|
#!/usr/bin/env python3
import argparse
import logging
import logging.config
import os
import sys
import time
import yaml
from cluster_manager import setup_exporter_thread, \
manager_iteration_histogram, \
register_stack_trace_dump, \
update_file_modification_time
sys.path.append(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "../utils"))
from DataHandler import DataHandler
CLUSTER_STATUS_EXPIRY = 1
JOBS_EXPIRY = 180
logger = logging.getLogger(__name__)
def create_log(logdir='/var/log/dlworkspace'):
if not os.path.exists(logdir):
os.system("mkdir -p " + logdir)
with open('logging.yaml') as f:
logging_config = yaml.full_load(f)
log_filename = os.path.join(logdir, "db_manager.log")
logging_config["handlers"]["file"]["filename"] = log_filename
logging.config.dictConfig(logging_config)
def delete_old_cluster_status(days_ago):
table = "clusterstatus"
with DataHandler() as data_handler:
num_rows = data_handler.count_rows(table)
if num_rows <= 10: # Retain 10 rows for safety
return
logger.info("Deleting rows from table %s older than %s day(s)", table,
days_ago)
ret = data_handler.delete_rows_from_table_older_than_days(
table, days_ago)
ret_status = "succeeded" if ret is True else "failed"
logger.info("Deleting rows from table %s older than %s day(s) %s",
table, days_ago, ret_status)
def delete_old_inactive_jobs(days_ago):
table = "jobs"
with DataHandler() as data_handler:
logger.info(
"Deleting inactive job records from table %s older than %s "
"day(s)", table, days_ago)
cond = {"jobStatus": ("IN", ["finished", "failed", "killed", "error"])}
ret = data_handler.delete_rows_from_table_older_than_days(
table, days_ago, col="lastUpdated", cond=cond)
ret_status = "succeeded" if ret is True else "failed"
logger.info(
"Deleting inactive job records from table %s older than %s "
"day(s) %s", table, days_ago, ret_status)
def sleep_with_update(time_to_sleep, fn):
for _ in range(int(time_to_sleep / 100)):
fn()
time.sleep(100)
def run():
register_stack_trace_dump()
create_log()
update = lambda: update_file_modification_time("db_manager")
while True:
update()
with manager_iteration_histogram.labels("db_manager").time():
try:
delete_old_cluster_status(CLUSTER_STATUS_EXPIRY)
# query below is too time consuming since lastUpdated in job table is not indexed
# delete_old_inactive_jobs(JOBS_EXPIRY)
except:
logger.exception("Deleting old cluster status failed")
sleep_with_update(86400, update)
if __name__ == '__main__':
# TODO: This can be made as a separate service to GC DB and orphaned pods
parser = argparse.ArgumentParser()
parser.add_argument("--port",
"-p",
help="port of exporter",
type=int,
default=9209)
args = parser.parse_args()
setup_exporter_thread(args.port)
run()
|
python
|
import itertools
from numbers import Number
from graphgallery.utils.type_check import is_iterable
def repeat(src, length):
if src is None:
return [None for _ in range(length)]
if src == [] or src == ():
return []
if isinstance(src, (Number, str)):
return list(itertools.repeat(src, length))
if (len(src) > length):
return src[:length]
if (len(src) < length):
return list(src) + list(itertools.repeat(src[-1], length - len(src)))
return src
def get_length(obj):
if is_iterable(obj):
length = len(obj)
else:
length = 1
return length
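# Usage sketch (illustrative, not part of the original module): `repeat`
# broadcasts a scalar, truncates an over-long sequence, and pads a short one
# by repeating its last element; `get_length` treats non-iterables as length 1.
if __name__ == "__main__":
    assert repeat(16, 3) == [16, 16, 16]
    assert repeat([1, 2, 3, 4], 2) == [1, 2]
    assert repeat([1, 2], 4) == [1, 2, 2, 2]
    assert get_length([1, 2, 3]) == 3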
|
python
|
from __future__ import absolute_import
from __future__ import unicode_literals
import inspect
import logging
LOG = logging.getLogger(__name__)
def is_generator(func):
"""Return True if `func` is a generator function."""
return inspect.isgeneratorfunction(func)
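def _example_is_generator():
    """Illustrative sketch (not part of the original module): a generator
    function is detected, a plain function is not."""
    def gen():
        yield 1
    def plain():
        return 1
    assert is_generator(gen) is True
    assert is_generator(plain) is False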
|
python
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# Copyright (C) 2012 by Xose Pérez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
__author__ = "Xose Pérez"
__contact__ = "[email protected]"
__copyright__ = "Copyright (C) 2012-2013 Xose Pérez"
__license__ = 'GPL v3'
import yaml
class Config(object):
"""
Simple YAML configuration parser
"""
config = None
def __init__(self, filename):
"""
Constructor, parses and stores the configuration
"""
        with open(filename, 'r') as handler:
            self.config = yaml.safe_load(handler)
def get(self, section, key=None, default=None):
"""
        Retrieves a given section/key combination;
        if it does not exist, returns a default value
"""
try:
if key is None:
return self.config[section]
else:
return self.config[section][key]
except:
return default
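# Usage sketch (illustrative, not part of the original module). Assuming a
# hypothetical config.yaml containing:
#
#     radio:
#       port: /dev/ttyUSB0
#
# the parser could be used like this:
#
#     config = Config('config.yaml')
#     port = config.get('radio', 'port', default='/dev/ttyUSB0')
#     radio = config.get('radio')               # whole section as a dict
#     baud = config.get('radio', 'baud', 9600)  # falls back to the default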
|
python
|
def countPattern(genome, pattern):
"""
Find the number of specific pattern in a genome sequence
"""
count = 0
for index in range(0, len(genome)-len(pattern)+1):
if genome[index:index+len(pattern)] == pattern:
count += 1
return count
def findPattern(genome, pattern):
"""
find the indexes of the pattern in a given genome sequence
"""
indexes = []
for index in range(0, len(genome) - len(pattern) + 1):
if genome[index:index + len(pattern)] == pattern:
indexes.append(index)
return indexes
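# Worked example (illustrative, not part of the original module): occurrences
# are counted with overlap, so "ATAT" matches "GATATATGCATATACTT" at indexes
# 1, 3 and 9.
if __name__ == "__main__":
    genome = "GATATATGCATATACTT"
    assert countPattern(genome, "ATAT") == 3
    assert findPattern(genome, "ATAT") == [1, 3, 9]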
|
python
|
import logging
import asyncio
from ..Errors import *
from ..utils import Url
logger = logging.getLogger(__name__)
class Commander:
"""
Manages looping through the group wall and checking for commands or messages
Attributes
-----------
prefix: :class:`str`
The command prefix
"""
async def start_listening(self, client, commands, listening_to):
self.__commands = commands
self.__client = client
self.__listening_to = listening_to
self.__already_seen = []
self.__is_first = True
self.prefix = client.prefix
self.__access = Url("groups", "/v1/groups/%group_id%/wall/posts?limit=10&sortOrder=Desc", group_id=self.__listening_to.id)
await self.start_loop()
async def start_loop(self):
await self.__client._emit("start_listening", (self.__listening_to,))
while True:
await self.__client._emit("check_messages", (self.__listening_to,))
await self.check_messages()
await asyncio.sleep(5)
async def check_messages(self):
hook = await self.__access.get()
for msg in hook.json['data']:
if self.__is_first:
self.__already_seen.append(msg["id"])
if await self.check_entity(msg):
await self.process_new_message(msg)
if self.__is_first:
self.__is_first = False
async def check_entity(self, msg):
if not msg["id"] in self.__already_seen:
self.__already_seen.append(msg["id"])
return True
return False
async def process_new_message(self, msg):
text = msg["body"]
flags = str.split(text, " ")
ctx = await self.generate_context(msg)
await self.__client._emit("message", ctx)
if flags[0].startswith(self.prefix):
flags[0] = flags[0].replace(self.prefix, "")
await self.process_command(flags, ctx)
async def process_command(self, flags, ctx):
function_name = flags.pop(0)
args = tuple(flags)
try:
await self.__client.push_command(function_name, ctx, args)
except TypeError as e:
if await self.__client._emit("error", (ctx, e)):
return
raise BadArguments(
function_name
)
async def generate_context(self, msg):
try:
member = await self.__listening_to.get_member(msg["poster"]["username"])
except:
member = await self.__client.get_user(msg["poster"]["username"])
return Context(member, msg["body"])
class Context:
"""
Context object for message on group wall
.. note::
        This object checks whether its `__user_or_member` has a group to determine whether it is a member or a plain user
Attributes
-----------
user: :class:`.BloxUser`
The user that sent this message, may be :class:`None`
member: :class:`.BloxMember`
The member that sent this message, may be :class:`None`
content: :class:`str`
The content of the message sent
"""
def __init__(self, user, ctt):
self.__user_or_member = user
self.content = ctt
@property
def member(self):
if self.__user_or_member.group:
return self.__user_or_member
return None
@property
def user(self):
if not self.__user_or_member.group:
return self.__user_or_member
return None
|
python
|
"""Class implementation for the scale_x_from_center interface.
"""
from typing import Dict
from apysc._animation.animation_scale_x_from_center_interface import \
AnimationScaleXFromCenterInterface
from apysc._type.attr_linking_interface import AttrLinkingInterface
from apysc._type.number import Number
from apysc._type.revert_interface import RevertInterface
class ScaleXFromCenterInterface(
AnimationScaleXFromCenterInterface, RevertInterface,
AttrLinkingInterface):
_scale_x_from_center: Number
def _initialize_scale_x_from_center_if_not_initialized(self) -> None:
"""
Initialize the `_scale_x_from_center` attribute if it hasn't been
initialized yet.
"""
import apysc as ap
with ap.DebugInfo(
callable_=self.
_initialize_scale_x_from_center_if_not_initialized,
locals_=locals(),
module_name=__name__, class_=ScaleXFromCenterInterface):
if hasattr(self, '_scale_x_from_center'):
return
self._scale_x_from_center = ap.Number(1.0)
self._append_scale_x_from_center_attr_linking_setting()
def _append_scale_x_from_center_attr_linking_setting(self) -> None:
"""
Append a scale-x attribute linking setting.
"""
import apysc as ap
with ap.DebugInfo(
callable_=self.
_append_scale_x_from_center_attr_linking_setting,
locals_=locals(),
module_name=__name__, class_=ScaleXFromCenterInterface):
self._append_applying_new_attr_val_exp(
new_attr=self._scale_x_from_center,
attr_name='scale_x_from_center')
self._append_attr_to_linking_stack(
attr=self._scale_x_from_center,
attr_name='scale_x_from_center')
@property
def scale_x_from_center(self) -> Number:
"""
Get a scale-x value from the center of this instance.
Returns
-------
scale_x_from_center : ap.Number
Scale-x value from the center of this instance.
References
----------
- GraphicsBase scale_x_from_center and scale_y_from_center interfaces
- https://bit.ly/3ityoCX
Examples
--------
>>> import apysc as ap
>>> stage: ap.Stage = ap.Stage()
>>> sprite: ap.Sprite = ap.Sprite()
>>> sprite.graphics.begin_fill(color='#0af')
>>> rectangle: ap.Rectangle = sprite.graphics.draw_rect(
... x=50, y=50, width=50, height=50)
>>> rectangle.scale_x_from_center = ap.Number(1.5)
>>> rectangle.scale_x_from_center
Number(1.5)
"""
import apysc as ap
with ap.DebugInfo(
callable_='scale_x_from_center', locals_=locals(),
module_name=__name__, class_=ScaleXFromCenterInterface):
from apysc._type import value_util
self._initialize_scale_x_from_center_if_not_initialized()
return value_util.get_copy(value=self._scale_x_from_center)
@scale_x_from_center.setter
def scale_x_from_center(self, value: Number) -> None:
"""
Update a scale-x value from the center of this instance.
Parameters
----------
value : ap.Number
Scale-x value from the center of this instance.
References
----------
- GraphicsBase scale_x_from_center and scale_y_from_center interfaces
- https://bit.ly/3ityoCX
"""
import apysc as ap
with ap.DebugInfo(
callable_='scale_x_from_center', locals_=locals(),
module_name=__name__, class_=ScaleXFromCenterInterface):
from apysc._validation import number_validation
self._initialize_scale_x_from_center_if_not_initialized()
number_validation.validate_num(num=value)
if not isinstance(value, ap.Number):
value = ap.Number(value)
before_value: ap.Number = self._scale_x_from_center
self._scale_x_from_center = value
self._append_scale_x_from_center_update_expression(
before_value=before_value)
self._append_scale_x_from_center_attr_linking_setting()
def _append_scale_x_from_center_update_expression(
self, *, before_value: Number) -> None:
"""
Append the scale-x from the center of this instance
updating expression.
Parameters
----------
before_value : ap.Number
Before updating value.
"""
import apysc as ap
with ap.DebugInfo(
callable_=self._append_scale_x_from_center_update_expression,
locals_=locals(),
module_name=__name__, class_=ScaleXFromCenterInterface):
from apysc._type import value_util
before_value_str: str = value_util.get_value_str_for_expression(
value=before_value)
after_value_str: str = value_util.get_value_str_for_expression(
value=self._scale_x_from_center)
expression: str = (
f'{self.variable_name}.scale(1 / {before_value_str}, 1);'
f'\n{self.variable_name}.scale({after_value_str}, 1);'
f'\n{before_value_str} = {after_value_str};'
)
ap.append_js_expression(expression=expression)
_scale_x_from_center_snapshots: Dict[str, float]
def _make_snapshot(self, *, snapshot_name: str) -> None:
"""
Make a value's snapshot.
Parameters
----------
snapshot_name : str
Target snapshot name.
"""
self._initialize_scale_x_from_center_if_not_initialized()
self._set_single_snapshot_val_to_dict(
dict_name='_scale_x_from_center_snapshots',
value=self._scale_x_from_center._value,
snapshot_name=snapshot_name)
def _revert(self, *, snapshot_name: str) -> None:
"""
Revert a value if snapshot exists.
Parameters
----------
snapshot_name : str
Target snapshot name.
"""
if not self._snapshot_exists(snapshot_name=snapshot_name):
return
self._scale_x_from_center._value = \
self._scale_x_from_center_snapshots[snapshot_name]
|
python
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from textwrap import dedent
import pytest
from pants.backend.shell.lint.shfmt.rules import ShfmtFieldSet, ShfmtRequest
from pants.backend.shell.lint.shfmt.rules import rules as shfmt_rules
from pants.backend.shell.target_types import ShellSourcesGeneratorTarget
from pants.backend.shell.target_types import rules as target_types_rules
from pants.core.goals.fmt import FmtResult
from pants.core.goals.lint import LintResult, LintResults
from pants.core.util_rules import config_files, external_tool, source_files
from pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest
from pants.engine.addresses import Address
from pants.engine.fs import CreateDigest, Digest, FileContent
from pants.engine.target import Target
from pants.testutil.rule_runner import QueryRule, RuleRunner
@pytest.fixture
def rule_runner() -> RuleRunner:
return RuleRunner(
rules=[
*shfmt_rules(),
*config_files.rules(),
*external_tool.rules(),
*source_files.rules(),
*target_types_rules(),
QueryRule(LintResults, [ShfmtRequest]),
QueryRule(FmtResult, [ShfmtRequest]),
QueryRule(SourceFiles, [SourceFilesRequest]),
],
target_types=[ShellSourcesGeneratorTarget],
)
GOOD_FILE = "! foo bar >a &\n"
BAD_FILE = "! foo bar >a &\n"
# If config is loaded correctly, shfmt will indent the case statements.
NEEDS_CONFIG_FILE = dedent(
"""\
case foo in
PATTERN_1)
\tbar
\t;;
*)
\tbaz
\t;;
esac
"""
)
FIXED_NEEDS_CONFIG_FILE = dedent(
"""\
case foo in
\tPATTERN_1)
\t\tbar
\t\t;;
\t*)
\t\tbaz
\t\t;;
esac
"""
)
def run_shfmt(
rule_runner: RuleRunner,
targets: list[Target],
*,
extra_args: list[str] | None = None,
) -> tuple[tuple[LintResult, ...], FmtResult]:
rule_runner.set_options(
["--backend-packages=pants.backend.shell.lint.shfmt", *(extra_args or ())],
env_inherit={"PATH"},
)
field_sets = [ShfmtFieldSet.create(tgt) for tgt in targets]
lint_results = rule_runner.request(LintResults, [ShfmtRequest(field_sets)])
input_sources = rule_runner.request(
SourceFiles,
[
SourceFilesRequest(field_set.sources for field_set in field_sets),
],
)
fmt_result = rule_runner.request(
FmtResult,
[
ShfmtRequest(field_sets, prior_formatter_result=input_sources.snapshot),
],
)
return lint_results.results, fmt_result
def get_digest(rule_runner: RuleRunner, source_files: dict[str, str]) -> Digest:
files = [FileContent(path, content.encode()) for path, content in source_files.items()]
return rule_runner.request(Digest, [CreateDigest(files)])
def test_passing(rule_runner: RuleRunner) -> None:
rule_runner.write_files({"f.sh": GOOD_FILE, "BUILD": "shell_sources(name='t')"})
tgt = rule_runner.get_target(Address("", target_name="t", relative_file_path="f.sh"))
lint_results, fmt_result = run_shfmt(rule_runner, [tgt])
assert len(lint_results) == 1
assert lint_results[0].exit_code == 0
assert lint_results[0].stderr == ""
assert fmt_result.stdout == ""
assert fmt_result.output == get_digest(rule_runner, {"f.sh": GOOD_FILE})
assert fmt_result.did_change is False
def test_failing(rule_runner: RuleRunner) -> None:
rule_runner.write_files({"f.sh": BAD_FILE, "BUILD": "shell_sources(name='t')"})
tgt = rule_runner.get_target(Address("", target_name="t", relative_file_path="f.sh"))
lint_results, fmt_result = run_shfmt(rule_runner, [tgt])
assert len(lint_results) == 1
assert lint_results[0].exit_code == 1
assert "f.sh.orig" in lint_results[0].stdout
assert fmt_result.stdout == "f.sh\n"
assert fmt_result.output == get_digest(rule_runner, {"f.sh": GOOD_FILE})
assert fmt_result.did_change is True
def test_multiple_targets(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{"good.sh": GOOD_FILE, "bad.sh": BAD_FILE, "BUILD": "shell_sources(name='t')"}
)
tgts = [
rule_runner.get_target(Address("", target_name="t", relative_file_path="good.sh")),
rule_runner.get_target(Address("", target_name="t", relative_file_path="bad.sh")),
]
lint_results, fmt_result = run_shfmt(rule_runner, tgts)
assert len(lint_results) == 1
assert lint_results[0].exit_code == 1
assert "bad.sh.orig" in lint_results[0].stdout
assert "good.sh" not in lint_results[0].stdout
assert "bad.sh\n" == fmt_result.stdout
assert fmt_result.output == get_digest(rule_runner, {"good.sh": GOOD_FILE, "bad.sh": GOOD_FILE})
assert fmt_result.did_change is True
def test_config_files(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"a/f.sh": NEEDS_CONFIG_FILE,
"a/BUILD": "shell_sources()",
"a/.editorconfig": "[*.sh]\nswitch_case_indent = true\n",
"b/f.sh": NEEDS_CONFIG_FILE,
"b/BUILD": "shell_sources()",
}
)
tgts = [
rule_runner.get_target(Address("a", relative_file_path="f.sh")),
rule_runner.get_target(Address("b", relative_file_path="f.sh")),
]
lint_results, fmt_result = run_shfmt(rule_runner, tgts)
assert len(lint_results) == 1
assert lint_results[0].exit_code == 1
assert "a/f.sh.orig" in lint_results[0].stdout
assert "b/f.sh.orig" not in lint_results[0].stdout
assert fmt_result.stdout == "a/f.sh\n"
assert fmt_result.output == get_digest(
rule_runner, {"a/f.sh": FIXED_NEEDS_CONFIG_FILE, "b/f.sh": NEEDS_CONFIG_FILE}
)
assert fmt_result.did_change is True
def test_passthrough_args(rule_runner: RuleRunner) -> None:
rule_runner.write_files({"f.sh": NEEDS_CONFIG_FILE, "BUILD": "shell_sources(name='t')"})
tgt = rule_runner.get_target(Address("", target_name="t", relative_file_path="f.sh"))
lint_results, fmt_result = run_shfmt(rule_runner, [tgt], extra_args=["--shfmt-args=-ci"])
assert len(lint_results) == 1
assert lint_results[0].exit_code == 1
assert "f.sh.orig" in lint_results[0].stdout
assert fmt_result.stdout == "f.sh\n"
assert fmt_result.output == get_digest(rule_runner, {"f.sh": FIXED_NEEDS_CONFIG_FILE})
assert fmt_result.did_change is True
def test_skip(rule_runner: RuleRunner) -> None:
rule_runner.write_files({"f.sh": BAD_FILE, "BUILD": "shell_sources(name='t')"})
tgt = rule_runner.get_target(Address("", target_name="t", relative_file_path="f.sh"))
lint_results, fmt_result = run_shfmt(rule_runner, [tgt], extra_args=["--shfmt-skip"])
assert not lint_results
assert fmt_result.skipped is True
assert fmt_result.did_change is False
|
python
|
#!/usr/bin/env python
__author__ = "Yaroslav Litvinov"
__copyright__ = "Copyright 2016, Rackspace Inc."
__email__ = "[email protected]"
from os import system
import psycopg2
import argparse
import configparser
from pymongo import DESCENDING
from collections import namedtuple
from datetime import datetime
from mongo_reader.reader import mongo_reader_from_settings
from gizer.psql_requests import PsqlRequests
from gizer.psql_requests import psql_conn_from_settings
from gizer.all_schema_engines import get_schema_engines_as_dict
from gizer.etlstatus_table import PsqlEtlStatusTable
from gizer.etlstatus_table import PsqlEtlStatusTableManager
from gizer.etlstatus_table import STATUS_INITIAL_LOAD
from gizer.etlstatus_table import STATUS_OPLOG_SYNC
from gizer.etlstatus_table import STATUS_OPLOG_APPLY
from gizer.etlstatus_table import STATUS_OPLOG_RESYNC
from gizer.opconfig import psql_settings_from_config
from gizer.opconfig import load_mongo_replicas_from_setting
def getargs():
""" get args from cmdline """
default_request = '{}'
parser = argparse.ArgumentParser()
parser.add_argument("-psql-schema-name", help="", type=str)
parser.add_argument("-psql-table-name-prefix", help="", type=str)
args = parser.parse_args()
return args
def main():
""" main """
parser = argparse.ArgumentParser()
parser.add_argument("--config-file", action="store",
help="Config file with settings",
                        type=argparse.FileType('r'), required=True)
parser.add_argument("-init-load-status", action="store_true",
help="will get exit status=0 if init load not needed,\
or status=-1 if otherwise; Also print 1 - if in progress, 0 - if not.")
parser.add_argument("-init-load-start-save-ts", action="store_true",
help='Save latest oplog timestamp to psql etlstatus table')
parser.add_argument("-init-load-finish",
help='values are: "ok" or "error"', type=str)
args = parser.parse_args()
config = configparser.ConfigParser()
config.read_file(args.config_file)
psql_settings = psql_settings_from_config(config, 'psql')
psql_main = PsqlRequests(psql_conn_from_settings(psql_settings))
oplog_settings = load_mongo_replicas_from_setting(config, 'mongo-oplog')
status_table = PsqlEtlStatusTable(psql_main.cursor,
config['psql']['psql-schema-name'],
sorted(oplog_settings.keys()))
res = 0
if args.init_load_status:
status = status_table.get_recent()
if status:
if (status.status == STATUS_OPLOG_SYNC or \
status.status == STATUS_OPLOG_APPLY or \
status.status == STATUS_INITIAL_LOAD or \
status.status == STATUS_OPLOG_RESYNC) and not status.error:
delta = datetime.now() - status.time_start
                # check whether the operation has been running too long
if status.time_end:
res = 0
elif delta.total_seconds() < 32400: # < 9 hours
res = 0
if not status.time_end:
                    print(1)  # means etl is in progress
else:
                    print(0)  # means etl is not in progress
else:
                # took too much time -> do init load
res = -1
else:
# error status -> do init load
res = -1
else:
# empty status table -> do init load
res = -1
elif args.init_load_start_save_ts:
# create oplog read transport/s to acquire ts
max_ts_dict = {}
        for oplog_name, settings_list in oplog_settings.items():
            print('Fetch timestamp from oplog: %s' % oplog_name)
# settings list is a replica set (must be at least one in list)
reader = mongo_reader_from_settings(settings_list, 'oplog.rs', {})
reader.make_new_request({})
reader.cursor.sort('ts', DESCENDING)
reader.cursor.limit(1)
timestamp = reader.next()
if timestamp:
max_ts_dict[oplog_name] = timestamp['ts']
else:
max_ts_dict[oplog_name] = None
            print('Initload ts: %s, oplog: %s' % (max_ts_dict[oplog_name],
                                                  oplog_name))
status_manager = PsqlEtlStatusTableManager(status_table)
status_manager.init_load_start(max_ts_dict)
elif args.init_load_finish:
status_manager = PsqlEtlStatusTableManager(status_table)
if args.init_load_finish == "ok":
status_manager.init_load_finish(False) # ok
else:
status_manager.init_load_finish(True) # error
return res
if __name__ == "__main__":
exit(main())
|
python
|
# -*- coding: utf-8 -*-
from django.urls import path,re_path
from . import views
app_name = 'ajunivel'
urlpatterns = [
path('', views.index, name='index'),
path('index.html', views.index, name='index'),
path('index', views.index, name='index'),
path('menu', views.menu, name='menu'),
]
|
python
|
import os
import socket
from pathlib import Path
class Config(object):
"""
Basic configuration, like socket default timeout, headers
"""
def __init__(self):
super(Config, self).__init__()
self.socket_timeout = 20
# set socket layer timeout as 20s
socket.setdefaulttimeout(self.socket_timeout)
# self.headers = {'User-Agent': 'Mozilla/5.0'}
self.url = "http://www.tianqihoubao.com/aqi/"
self.headers = {'user-agent': 'my-app/0.0.1'}
self.folder_json = self.makedirs('json')
self.folder_csv = self.makedirs('csv')
self.log_path = self.makedirs('logging')
self.timeout = 500
self.max_retries = 30
def makedirs(self, path):
path = Path.cwd().parent.joinpath(path)
if not path.exists():
os.makedirs(path)
return path
|
python
|
from dataclasses import dataclass
@dataclass
class ModelException:
pass
|
python
|
import numpy as np
from torchmeta.utils.data import Task, MetaDataset
class Relu(MetaDataset):
"""
Parameters
----------
num_samples_per_task : int
Number of examples per task.
num_tasks : int (default: 2)
Overall number of tasks to sample.
noise_std : float, optional
        Amount of noise to include in the targets for each task. If `None`, then
        no noise is included, and the target is a (possibly sign-flipped) ReLU
        function of the input.
transform : callable, optional
A function/transform that takes a numpy array of size (1,) and returns a
transformed version of the input.
target_transform : callable, optional
A function/transform that takes a numpy array of size (1,) and returns a
transformed version of the target.
dataset_transform : callable, optional
A function/transform that takes a dataset (ie. a task), and returns a
transformed version of it. E.g. `torchmeta.transforms.ClassSplitter()`.
"""
def __init__(self, num_samples_per_task, num_tasks=2,
noise_std=None, transform=None, target_transform=None,
dataset_transform=None, seed=None):
super(Relu, self).__init__(meta_split='train',
target_transform=target_transform, dataset_transform=dataset_transform,
seed=seed)
self.num_samples_per_task = num_samples_per_task
self.num_tasks = num_tasks
self.noise_std = noise_std
self.transform = transform
self._input_range = np.array([-5.0, 5.0])
self._signs = None
@property
def signs(self):
if self._signs is None:
            self._signs = np.ones((self.num_tasks,), dtype=int)
self._signs[self.num_tasks // 2:] = -1
self.np_random.shuffle(self._signs)
return self._signs
def __len__(self):
return self.num_tasks
def __getitem__(self, index):
task = ReluTask(index, self.signs[index], self._input_range,
self.noise_std, self.num_samples_per_task, self.transform,
self.target_transform, np_random=self.np_random)
if self.dataset_transform is not None:
task = self.dataset_transform(task)
return task
class ReluTask(Task):
def __init__(self, index, sign, input_range, noise_std,
num_samples, transform=None, target_transform=None,
np_random=None):
super(ReluTask, self).__init__(index, None) # Regression task
self.sign = sign
self.input_range = input_range
self.num_samples = num_samples
self.noise_std = noise_std
self.transform = transform
self.target_transform = target_transform
if np_random is None:
np_random = np.random.RandomState(None)
self._inputs = np_random.uniform(input_range[0], input_range[1],
size=(num_samples, 1))
self._targets = sign * np.maximum(self._inputs, 0)
if (noise_std is not None) and (noise_std > 0.):
self._targets += noise_std * np_random.randn(num_samples, 1)
def __len__(self):
return self.num_samples
def __getitem__(self, index):
input, target = self._inputs[index], self._targets[index]
if self.transform is not None:
input = self.transform(input)
if self.target_transform is not None:
target = self.target_transform(target)
return (input, target)
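# Usage sketch (illustrative, not part of the original module): build the
# meta-dataset and draw one task; indexing a task yields (input, target)
# pairs for a randomly signed ReLU. The ClassSplitter transform mentioned in
# the docstring is optional and omitted here.
if __name__ == "__main__":
    dataset = Relu(num_samples_per_task=10, num_tasks=2, noise_std=0.1)
    task = dataset[0]
    x0, y0 = task[0]
    print(len(dataset), len(task), x0.shape, y0.shape)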
|
python
|
# flake8: noqa
CLOUDWATCH_EMF_SCHEMA = {
"properties": {
"_aws": {
"$id": "#/properties/_aws",
"properties": {
"CloudWatchMetrics": {
"$id": "#/properties/_aws/properties/CloudWatchMetrics",
"items": {
"$id": "#/properties/_aws/properties/CloudWatchMetrics/items",
"properties": {
"Dimensions": {
"$id": "#/properties/_aws/properties/CloudWatchMetrics/items/properties/Dimensions",
"items": {
"$id": "#/properties/_aws/properties/CloudWatchMetrics/items/properties/Dimensions/items",
"items": {
"$id": "#/properties/_aws/properties/CloudWatchMetrics/items/properties/Dimensions/items/items",
"examples": ["Operation"],
"minItems": 1,
"pattern": "^(.*)$",
"title": "DimensionReference",
"type": "string",
},
"maxItems": 9,
"minItems": 1,
"title": "DimensionSet",
"type": "array",
},
"minItems": 1,
"title": "The " "Dimensions " "Schema",
"type": "array",
},
"Metrics": {
"$id": "#/properties/_aws/properties/CloudWatchMetrics/items/properties/Metrics",
"items": {
"$id": "#/properties/_aws/properties/CloudWatchMetrics/items/properties/Metrics/items",
"minItems": 1,
"properties": {
"Name": {
"$id": "#/properties/_aws/properties/CloudWatchMetrics/items/properties/Metrics/items/properties/Name",
"examples": ["ProcessingLatency"],
"minLength": 1,
"pattern": "^(.*)$",
"title": "MetricName",
"type": "string",
},
"Unit": {
"$id": "#/properties/_aws/properties/CloudWatchMetrics/items/properties/Metrics/items/properties/Unit",
"examples": ["Milliseconds"],
"pattern": "^(Seconds|Microseconds|Milliseconds|Bytes|Kilobytes|Megabytes|Gigabytes|Terabytes|Bits|Kilobits|Megabits|Gigabits|Terabits|Percent|Count|Bytes\\/Second|Kilobytes\\/Second|Megabytes\\/Second|Gigabytes\\/Second|Terabytes\\/Second|Bits\\/Second|Kilobits\\/Second|Megabits\\/Second|Gigabits\\/Second|Terabits\\/Second|Count\\/Second|None)$",
"title": "MetricUnit",
"type": "string",
},
},
"required": ["Name"],
"title": "MetricDefinition",
"type": "object",
},
"minItems": 1,
"title": "MetricDefinitions",
"type": "array",
},
"Namespace": {
"$id": "#/properties/_aws/properties/CloudWatchMetrics/items/properties/Namespace",
"examples": ["MyApp"],
"minLength": 1,
"pattern": "^(.*)$",
"title": "CloudWatch " "Metrics " "Namespace",
"type": "string",
},
},
"required": ["Namespace", "Dimensions", "Metrics"],
"title": "MetricDirective",
"type": "object",
},
"title": "MetricDirectives",
"type": "array",
},
"Timestamp": {
"$id": "#/properties/_aws/properties/Timestamp",
"examples": [1565375354953],
"title": "The Timestamp " "Schema",
"type": "integer",
},
},
"required": ["Timestamp", "CloudWatchMetrics"],
"title": "Metadata",
"type": "object",
}
},
"required": ["_aws"],
"title": "Root Node",
"type": "object",
}
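# Validation sketch (illustrative, not part of the original module; assumes
# the third-party `jsonschema` package is installed). A minimal EMF payload
# that should satisfy the schema above:
if __name__ == "__main__":
    import jsonschema

    emf_payload = {
        "_aws": {
            "Timestamp": 1565375354953,
            "CloudWatchMetrics": [
                {
                    "Namespace": "MyApp",
                    "Dimensions": [["Operation"]],
                    "Metrics": [{"Name": "ProcessingLatency", "Unit": "Milliseconds"}],
                }
            ],
        }
    }
    jsonschema.validate(instance=emf_payload, schema=CLOUDWATCH_EMF_SCHEMA)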
|
python
|
import torch
from torchvision import transforms, datasets
import numpy as np
from PIL import Image
from skimage.color import rgb2lab, rgb2gray, lab2rgb
def count_params(model):
'''
returns the number of trainable parameters in some model
'''
return sum(p.numel() for p in model.parameters() if p.requires_grad)
class GrayscaleImageFolder(datasets.ImageFolder):
'''
Custom dataloader for various operations on images before loading them.
'''
def __getitem__(self, index):
path, target = self.imgs[index]
img = self.loader(path)
if self.transform is not None:
img_orig = self.transform(img) # apply transforms
img_orig = np.asarray(img_orig) # convert to numpy array
img_lab = rgb2lab(img_orig) # convert RGB image to LAB
img_ab = img_lab[:, :, 1:3] # separate AB channels from LAB
img_ab = (img_ab + 128) / 255 # normalize the pixel values
# transpose image from HxWxC to CxHxW and turn it into a tensor
img_ab = torch.from_numpy(img_ab.transpose((2, 0, 1))).float()
img_orig = rgb2gray(img_orig) # convert RGB to grayscale
            # add a channel axis to the grayscale image and turn it into a tensor
img_orig = torch.from_numpy(img_orig).unsqueeze(0).float()
if self.target_transform is not None:
target = self.target_transform(target)
return img_orig, img_ab, target
def load_gray(path, max_size=360, shape=None):
'''
load an image as grayscale, change the shape as per input,
    perform transformations and convert it to a model-compatible shape.
'''
img_gray = Image.open(path).convert('L')
if max(img_gray.size) > max_size:
size = max_size
else:
size = max(img_gray.size)
if shape is not None:
size = shape
img_transform = transforms.Compose([
transforms.Resize(size),
transforms.ToTensor()
])
img_gray = img_transform(img_gray).unsqueeze(0)
return img_gray
def to_rgb(img_l, img_ab):
'''
    concatenates the Lightness (grayscale) and AB channels,
and converts the resulting LAB image to RGB
'''
if img_l.shape == img_ab.shape:
img_lab = torch.cat((img_l, img_ab), 1).numpy().squeeze()
else:
img_lab = torch.cat(
(img_l, img_ab[:, :, :img_l.size(2), :img_l.size(3)]),
dim=1
).numpy().squeeze()
img_lab = img_lab.transpose(1, 2, 0) # transpose image to HxWxC
img_lab[:, :, 0] = img_lab[:, :, 0] * 100 # range pixel values from 0-100
img_lab[:, :, 1:] = img_lab[:, :, 1:] * 255 - 128 # un-normalize
img_rgb = lab2rgb(img_lab.astype(np.float64)) # convert LAB image to RGB
return img_rgb
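# End-to-end sketch (illustrative, not part of the original module): load a
# grayscale image, predict its AB channels with a trained colorization model
# (the `model` argument below is hypothetical), and reassemble an RGB image.
def example_colorize(path, model, device='cpu'):
    img_l = load_gray(path)  # 1x1xHxW grayscale tensor
    with torch.no_grad():
        img_ab = model(img_l.to(device)).cpu()  # assumed to return 1x2xHxW AB channels
    return to_rgb(img_l, img_ab)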
|
python
|
from datetime import date
ano = int (input ('Digite o ano de nascimento: '))
idade = date.today().year - ano
if idade <= 9:
print ('Sua idade {}, Até 9 anos: Mirim'.format(idade))
elif idade > 9 and idade <= 14:
print ('Sua idade {}, Até 14 anos: Infantil'.format(idade))
elif idade > 14 and idade <= 19:
print ('Sua idade {}, Até 19 anos: Junior'.format(idade))
elif idade == 20:
print ('Sua idade {}, Até 20 anos: Sênior'.format(idade))
else:
print ('Sua idade {}, Acima de 20 anos: Master'.format(idade))
|
python
|
# -*- coding: utf-8 -*-
"""
"""
from flask import flash, redirect, url_for, render_template, request
from sayhello import app, db
from sayhello.forms import HelloForm
from sayhello.models import Message
@app.route('/', methods=['GET', 'POST'])
def index():
"""
    # TODO: pagination bug not yet fixed
"""
form = HelloForm()
if form.validate_on_submit():
name = form.name.data
body = form.body.data
message = Message(body=body, name=name)
db.session.add(message)
db.session.commit()
flash('添加成功')
return redirect(url_for('index'))
messages = Message.query.order_by(Message.timestamp.desc()).all()
    quotient, remainder = divmod(len(messages), 10)
    total_page = quotient + 1 if remainder else quotient
page_num = request.args.get('page_num') and int(request.args.get('page_num'))
li_list = []
if not page_num:
start_page = 1
end_page = 5
page_num = 0
else:
start_page = int(request.args.get('start_page'))
end_page = int(request.args.get('end_page'))
mid_page = (int(start_page) + int(end_page)) // 2
offset_page = page_num - mid_page
if offset_page > 0:
start_page += offset_page
end_page += offset_page
if end_page > total_page:
end_page = total_page
for i in range(start_page, end_page+1):
standard_li = '<li><a href="/?page_num={0}&start_page={1}&end_page={2}">{0}</a></li>'.format(i,start_page,end_page)
li_list.append(standard_li)
page_block = "".join(li_list)
messages = Message.query.order_by(Message.timestamp.desc()).offset(page_num).limit(100000).all()
return render_template('index.html', form=form, messages=messages,page_block=page_block)
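# Sketch of one possible fix for the pagination TODO above (illustrative only,
# not wired into the view): with Flask-SQLAlchemy the manual offset/limit
# bookkeeping can be replaced by `paginate`, which exposes items and page counts.
#
#     pagination = Message.query.order_by(Message.timestamp.desc()).paginate(
#         page=page_num or 1, per_page=10, error_out=False)
#     messages = pagination.items
#     total_page = pagination.pages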
|
python
|
from aws_google_auth import exit_if_unsupported_python
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import unittest
import sys
import mock
class TestPythonFailOnVersion(unittest.TestCase):
@mock.patch('sys.stdout', new_callable=StringIO)
def test_python26(self, mock_stdout):
with mock.patch.object(sys, 'version_info') as v_info:
v_info.major = 2
v_info.minor = 6
with self.assertRaises(SystemExit):
exit_if_unsupported_python()
self.assertIn("aws-google-auth requires Python 2.7 or higher.", mock_stdout.getvalue())
def test_python27(self):
with mock.patch.object(sys, 'version_info') as v_info:
v_info.major = 2
v_info.minor = 7
try:
exit_if_unsupported_python()
except SystemExit:
self.fail("exit_if_unsupported_python() raised SystemExit unexpectedly!")
def test_python30(self):
with mock.patch.object(sys, 'version_info') as v_info:
v_info.major = 3
v_info.minor = 0
try:
exit_if_unsupported_python()
except SystemExit:
self.fail("exit_if_unsupported_python() raised SystemExit unexpectedly!")
|
python
|
import json
from django.contrib.auth import login, logout, authenticate
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import HttpResponse
from django.http import HttpResponseBadRequest
from django.http import JsonResponse
from django.shortcuts import render, redirect
import re
import logging
from apps.goods.models import SKU
logger = logging.getLogger('django')
# Create your views here.
from django.urls import reverse
from django.views import View
from django_redis import get_redis_connection
from apps.areas.models import Area
from apps.users.models import User, Address
from apps.users.utils import check_active_token, generic_access_token_url
from utils.response_code import RETCODE
class RegisterView(View):
def get(self,request):
return render(request,'register.html')
def post(self,request):
data = request.POST
username = data.get('username')
password=data.get('password')
password2=data.get('password2')
mobile=data.get('mobile')
sms_code = data.get('sms_code')
if not all([username,password,password2,mobile]):
return HttpResponseBadRequest('参数不全')
if not re.match(r'[a-zA-Z0-9]{5,20}',username):
return HttpResponseBadRequest('用户名不满足条件')
if not re.match(r'[a-zA-Z0-9]{8,20}',password):
return HttpResponseBadRequest('密码不符合规则')
if password2 != password:
return HttpResponseBadRequest('密码不一致')
if not re.match(r'^1[3-9]\d{9}$',mobile):
return HttpResponseBadRequest('手机号错误')
redis_conn = get_redis_connection('code')
smskey = 'sms_%s'%mobile
a = redis_conn.get(smskey)
print(a)
print(type(a))
print(sms_code)
if redis_conn.get(smskey).decode() != sms_code:
return HttpResponseBadRequest('验证码错误')
        # (3) Save the new user
user = User.objects.create_user(username=username,
password=password,
mobile=mobile)
login(request, user)
return redirect(reverse('contents:index'))
class isUnique(View):
def get(self, request, username):
count = User.objects.filter(username=username).count()
return JsonResponse({'count': count,'username':username})
class MobileUnique(View):
def get(self,request,mobile):
print(mobile)
count = User.objects.filter(mobile=mobile).count()
return JsonResponse({'count':count,'mobile':mobile})
class LoginView(View):
def get(self,request):
return render(request,'login.html')
    # 1. Response status codes help us diagnose problems.
    # 2. A common interview question:
    #    405 Method Not Allowed means the corresponding request method is not implemented.
    def post(self,request):
        # (1) Receive the data
        username=request.POST.get('username')
password=request.POST.get('pwd')
remembered = request.POST.get('remembered')
        # (2) Validate the data (are all parameters present and well-formed?)
        if not all([username,password]):
            return HttpResponseBadRequest('参数不全')
        # Regex checks for the username and password are omitted here
        # (3) Check that the username and password match
        from django.contrib.auth import authenticate
        # authenticate() returns a User object on success
        # and None on failure
        from django.contrib.auth.backends import ModelBackend
        user = authenticate(username=username,password=password)
if user is None:
return HttpResponseBadRequest('用户名或密码错误')
        # (4) Keep the user logged in (session state)
        login(request,user)
        # (5) Remember the login
        if remembered == 'on':
            # remember the login for two weeks
            request.session.set_expiry(None)
        else:
            # do not remember the login (expire when the browser closes)
            request.session.set_expiry(0)
        # (6) Return the response
        response = redirect(reverse('contents:index'))
        # set the username cookie
response.set_cookie('username',user.username,max_age=3600*24*14)
from apps.carts.utils import merge_cookie_to_redis
response=merge_cookie_to_redis(request,user,response)
return response
class LogoutView(View):
def get(self,request):
logout(request)
response = redirect(reverse('contents:index'))
response.delete_cookie('username')
return response
class UserCenterInfoView(LoginRequiredMixin,View):
def get(self,request):
context = {
'username':request.user.username,
'mobile':request.user.mobile,
'email':request.user.email,
'email_active':request.user.email_active,
}
return render(request,'user_center_info.html',context=context)
class EmailView(View):
def put(self,request):
data = json.loads(request.body.decode())
email = data.get('email')
if not re.match(r'^[a-z0-9][\w\.\-]*@[a-z0-9\-]+(\.[a-z]{2,5}){1,2}$',email):
            return JsonResponse({'code': RETCODE.PARAMERR, 'errmsg': '邮箱不符合规则'})
request.user.email = email
request.user.save()
        # from django.core.mail import send_mail
        #
        # # send_mail(subject, message, from_email, recipient_list, ...)
        # # subject: the email subject
        # subject = '美多商场激活邮件'
        # # message: the plain-text body
        # message = ''
        # # from_email: the sender
        # from_email = '欢乐玩家<[email protected]>'
        # # recipient_list: the list of recipients
        # recipient_list = ['[email protected]']
        #
        # html_message = "<a href='http://www.huyouni.com'>戳我有惊喜</a>"
        #
        # send_mail(subject=subject,
        #           message=message,
        #           from_email=from_email,
        #           recipient_list=recipient_list,
        #           html_message=html_message)
from celery_tasks.email.tasks import send_active_email
send_active_email.delay(request.user.id, email)
        # ⑤ Return the response
return JsonResponse({'code':RETCODE.OK,'errmsg':'ok'})
class EmailActiveView(View):
def get(self,request):
token = request.GET.get('token')
if token is None:
return HttpResponseBadRequest('缺少参数')
data = check_active_token(token)
        if data is None:
return HttpResponseBadRequest('验证失败')
id = data.get('id')
email = data.get('email')
try:
user = User.objects.get(id=id,email=email)
except User.DoesNotExist:
return HttpResponseBadRequest('验证失败')
user.email_active = True
user.save()
return redirect(reverse('users:center'))
class UserCenterSiteView(View):
def get(self,request):
user=request.user
addresses = Address.objects.filter(user=user, is_deleted=False)
address_dict_list = []
for address in addresses:
address_dict = {
"id": address.id,
"title": address.title,
"receiver": address.receiver,
"province": address.province.name,
"province_id":address.province_id,
"city": address.city.name,
"city_id":address.city_id,
"district": address.district.name,
"district_id":address.district_id,
"place": address.place,
"mobile": address.mobile,
"tel": address.tel,
"email": address.email
}
address_dict_list.append(address_dict)
context = {
'default_address_id':user.default_address_id,
'addresses':address_dict_list
}
return render(request,'user_center_site.html',context=context)
class CreateView(View):
def post(self,request):
count = Address.objects.filter(user=request.user,is_deleted=False).count()
if count >= 20:
return JsonResponse({'code': RETCODE.THROTTLINGERR, 'errmsg': '超过地址数量上限'})
data = json.loads(request.body.decode())
receiver=data.get('receiver')
province_id=data.get('province_id')
city_id=data.get('city_id')
district_id=data.get('district_id')
place=data.get('place')
mobile=data.get('mobile')
tel=data.get('tel')
email=data.get('email')
if not all([receiver,province_id,city_id,district_id,place,mobile]):
return HttpResponseBadRequest('参数不全')
if not re.match(r'^1[3-9]\d{9}$', mobile):
return HttpResponseBadRequest('电话号码输入有误')
if tel:
if not re.match(r'^(0[0-9]{2,3}-)?([2-9][0-9]{6,7})+(-[0-9]{1,4})?$', tel):
return HttpResponseBadRequest('参数tel有误')
if email:
if not re.match(r'^[a-z0-9][\w\.\-]*@[a-z0-9\-]+(\.[a-z]{2,5}){1,2}$', email):
return HttpResponseBadRequest('参数email有误')
try:
ads = Address.objects.create(user=request.user,
title=receiver,
receiver=receiver,
province_id=province_id,
city_id=city_id,
district_id=district_id,
place=place,
mobile=mobile,
tel=tel,
email=email)
except Exception as e:
logger.error(e)
return HttpResponseBadRequest('保存失败')
address = {
"receiver": ads.receiver,
"province": ads.province.name,
"city": ads.city.name,
"district": ads.district.name,
"place": ads.place,
"mobile": ads.mobile,
"tel": ads.tel,
"email": ads.email,
"id": ads.id,
"title": ads.title,
}
return JsonResponse({'code': RETCODE.OK, 'errmsg': '新增地址成功', 'address': address})
class DefaultView(View):
def put(self,request,address_id):
try:
default_address = Address.objects.get(id=address_id)
request.user.default_address = default_address
request.user.save()
except Exception as e:
logger.error(e)
return HttpResponseBadRequest('出错')
return JsonResponse({'code':RETCODE.OK,'errmsg': '设置成功'})
class UpdateView(View):
def put(self,request,address_id):
data = json.loads(request.body.decode())
receiver=data.get('receiver')
province_id=data.get('province_id')
city_id=data.get('city_id')
district_id=data.get('district_id')
place=data.get('place')
mobile=data.get('mobile')
tel=data.get('tel')
email=data.get('email')
if not all([receiver,province_id,city_id,district_id,place,mobile]):
return HttpResponseBadRequest('参数不全')
if not re.match(r'^1[3-9]\d{9}$', mobile):
return HttpResponseBadRequest('电话号码输入有误')
if tel:
if not re.match(r'^(0[0-9]{2,3}-)?([2-9][0-9]{6,7})+(-[0-9]{1,4})?$', tel):
return HttpResponseBadRequest('参数tel有误')
if email:
if not re.match(r'^[a-z0-9][\w\.\-]*@[a-z0-9\-]+(\.[a-z]{2,5}){1,2}$', email):
return HttpResponseBadRequest('参数email有误')
try:
update_address = Address.objects.filter(id=address_id)
update_address.update(
user=request.user,
title=receiver,
receiver=receiver,
province_id=province_id,
city_id=city_id,
district_id=district_id,
place=place,
mobile=mobile,
tel=tel,
email=email,
)
except Exception as e:
logger.error(e)
return HttpResponseBadRequest('更新失败')
update_address = Address.objects.get(id=address_id)
address_dict = {
"id": update_address.id,
"title": update_address.title,
"receiver": update_address.receiver,
"province": update_address.province.name,
"city": update_address.city.name,
"district": update_address.district.name,
"place": update_address.place,
"mobile": update_address.mobile,
"tel": update_address.tel,
"email": update_address.email
}
return JsonResponse({'code': RETCODE.OK, 'errmsg': '更新地址成功', 'address': address_dict})
def delete(self,request,address_id):
try:
delete_address = Address.objects.filter(id=address_id)
delete_address.update(is_deleted=True)
except Exception as e:
logger.error(e)
return HttpResponseBadRequest('删除失败')
return JsonResponse({'code': RETCODE.OK, 'errmsg': '删除地址成功'})
class UpdateTitleView(View):
def put(self,request,address_id):
data = json.loads(request.body.decode())
title = data.get('title')
try:
update_title_address = Address.objects.filter(id=address_id)
update_title_address.update(title=title)
except Exception as e:
logger.error(e)
return HttpResponseBadRequest('修改标题失败')
return JsonResponse({'code': RETCODE.OK, 'errmsg': '设置地址标题成功'})
class ChangePassword(View):
def get(self,request):
return render(request,'user_center_pass.html')
def post(self, request):
"""实现修改密码逻辑"""
# 1.接收参数
old_password = request.POST.get('old_password')
new_password = request.POST.get('new_password')
new_password2 = request.POST.get('new_password2')
        # 2. Validate the parameters
if not all([old_password, new_password, new_password2]):
return HttpResponseBadRequest('缺少必传参数')
if not re.match(r'^[0-9A-Za-z]{8,20}$', new_password):
return HttpResponseBadRequest('密码最少8位,最长20位')
if new_password != new_password2:
return HttpResponseBadRequest('两次输入的密码不一致')
        # 3. Check that the old password is correct
if not request.user.check_password(old_password):
return render(request, 'user_center_pass.html', {'origin_password_errmsg': '原始密码错误'})
        # 4. Set the new password
try:
request.user.set_password(new_password)
request.user.save()
except Exception as e:
logger.error(e)
return render(request, 'user_center_pass.html', {'change_password_errmsg': '修改密码失败'})
        # 5. Log out and clear the login state
logout(request)
        # 6. Redirect to the login page
response = redirect(reverse('users:login'))
response.delete_cookie('username')
return response
class UserHistoryView(LoginRequiredMixin,View):
def post(self,request):
user = request.user
data = json.loads(request.body.decode())
sku_id = data.get('sku_id')
try:
sku = SKU.objects.get(id=sku_id)
except SKU.DoesNotExist:
return JsonResponse({'code':RETCODE.NODATAERR,'errmsg':'没有此商品'})
redis_conn = get_redis_connection('history')
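        # Remove any existing occurrence of this sku, push it to the front,
        # then keep only the five most recent entries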
pipeline = redis_conn.pipeline()
pipeline.lrem('history_%s'%user.id,0,sku_id)
pipeline.lpush('history_%s' % user.id, sku_id)
pipeline.ltrim('history_%s'%user.id,0,4)
pipeline.execute()
return JsonResponse({'code':RETCODE.OK,'errmsg':'ok'})
def get(self, request):
"""获取用户浏览记录"""
# 获取Redis存储的sku_id列表信息
redis_conn = get_redis_connection('history')
sku_ids = redis_conn.lrange('history_%s' % request.user.id, 0, -1)
        # Look up the SKU details for each sku_id
skus = []
for sku_id in sku_ids:
sku = SKU.objects.get(id=sku_id)
skus.append({
'id': sku.id,
'name': sku.name,
'default_image_url': sku.default_image.url,
'price': sku.price
})
return JsonResponse({'code': RETCODE.OK, 'errmsg': 'OK', 'skus': skus})
class FindPasswordView(View):
def get(self,request):
return render(request,'find_password.html')
class Form_1_On_Submit(View):
def get(self, request, username, user=None):
data = request.GET
text = data.get('text')
image_code_id = data.get('image_code_id')
if not all([text]):
return HttpResponseBadRequest('参数不全')
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
return HttpResponseBadRequest('用户不存在')
redis_conn = get_redis_connection('code')
        check_code = redis_conn.get('img_%s' % image_code_id)
        if check_code is None or check_code.decode().lower() != text.lower():
            return HttpResponseBadRequest('图片验证码错误')
mobile = user.mobile
access_token = generic_access_token_url(user.username,user.mobile)
return JsonResponse({'mobile':mobile,'access_token':access_token})
class Form_2_On_Submit(View):
def get(self,request,username):
sms_code = request.GET.get('sms_code')
try:
user = User.objects.get(username=username)
mobile = user.mobile
except User.DoesNotExist:
return HttpResponseBadRequest('用户不存在')
redis_conn = get_redis_connection('code')
        code = redis_conn.get('find_sms_%s' % mobile)
        if code is None or sms_code is None or int(sms_code) != int(code):
            return HttpResponseBadRequest('验证码错误')
access_token = generic_access_token_url(user.username, user.mobile)
return JsonResponse({
'user_id': user.id,
'access_token': access_token,
})
class FindChangePasswordView(View):
def post(self,request,userid):
data = json.loads(request.body.decode())
new_password = data.get('password')
re_password = data.get('password2')
access_token = data.get('access_token')
if new_password != re_password:
return HttpResponseBadRequest('输入不一致')
try:
user = User.objects.get(id=userid)
user.set_password(new_password)
user.save()
except Exception:
return HttpResponseBadRequest('失败')
return JsonResponse({'message':'ok'})
|
python
|
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: variable
Author: ken
CreateDate: 5/21/2018 AD
Description:
-------------------------------------------------
"""
__author__ = 'ken'
|
python
|
import chess
from .external_chess_player import ExternalChessPlayer
MAX_RETRIES = 3
class ChessPlayer(object):
def __init__(self, external_player):
"""
:param external_player:
:type external_player: ExternalChessPlayer
"""
self.ext_player = external_player
def end_game(self, board):
self.ext_player.end_game(board)
def send_move_uci(self, uci_move):
self.ext_player.send_move_uci(uci_move)
    def make_move_uci(self, board):
        try:
            return True, self.try_get_uci_move(board)
        except Exception as exc:
            return False, exc
    def try_get_uci_move(self, board):
        tries = MAX_RETRIES
        while tries > 0:
            move = self.ext_player.make_move_uci(board)
            if move in board.legal_moves:
                return move
            tries -= 1
        raise RuntimeError("Too many bad moves")
|
python
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
from meanfield import MeanField
def d_tanh(x):
"""Derivative of tanh."""
return 1. / np.cosh(x)**2
def simple_plot(x, y):
plt.plot(x, y)
plt.xlim(0.5, 3)
plt.ylim(0, 0.25)
    plt.xlabel(r'$\sigma_\omega^2$', fontsize=16)
    plt.ylabel(r'$\sigma_b^2$', fontsize=16)
plt.show()
def plot(x, y):
fontsize = 12
plt.figure(figsize=(4, 3.1))
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.rc('xtick', labelsize=int(fontsize / 1.5))
plt.rc('ytick', labelsize=int(fontsize / 1.5))
# plot critical line
plt.plot(x, y, linewidth=2, color='black')
# plot dashed line for sb= 0.05
x_c = np.interp(0.05, y, x) # 1.7603915227624916
line_dict = dict(linewidth=1.5, linestyle='dashed', color='black')
plt.plot([0.5, x_c], [0.05, 0.05], **line_dict)
plt.plot([x_c, x_c], [0.00, 0.05], **line_dict)
# fill ordered and chaotic phase
plt.fill_betweenx(y, x, 3.0, facecolor='#ffdad3')
plt.fill_betweenx(y, 0.5, x, facecolor='#d3e4ff')
# setting
    plt.xlim(0.5, 3)
    plt.ylim(0, 0.25)
    plt.xlabel(r'$\sigma_w^2$', fontsize=fontsize)
    plt.ylabel(r'$\sigma_b^2$', fontsize=fontsize)
# add text
text_dict = dict(fontsize=fontsize,
horizontalalignment='center',
verticalalignment='center')
plt.text(1.25, 0.15, r'\textbf{Ordered Phase}', **text_dict)
plt.text(1.25, 0.125, r'$\max(\chi_{q^*}, \chi_{c^*}) < 1$', **text_dict)
plt.text(2.475, 0.08, r'\textbf{Chaotic Phase}', **text_dict)
plt.text(2.475, 0.055, r'$\max(\chi_{q^*}, \chi_{c^*}) > 1$', **text_dict)
# show plot
plt.tight_layout()
plt.show()
if __name__ == "__main__":
# run mean field experiment.
mf = MeanField(np.tanh, d_tanh)
qrange = np.linspace(1e-5, 2.25, 50)
sw_sbs = [mf.sw_sb(q, 1.0) for q in qrange]
sw = [sw_sb[0] for sw_sb in sw_sbs]
sb = [sw_sb[1] for sw_sb in sw_sbs]
# for simplified figure
simple_plot(sw, sb)
# for creating the actual figure in the paper.
plot(sw, sb)
|
python
|
from tests.conftest import JiraTestCase
class PrioritiesTests(JiraTestCase):
def test_priorities(self):
priorities = self.jira.priorities()
self.assertEqual(len(priorities), 5)
def test_priority(self):
priority = self.jira.priority("2")
self.assertEqual(priority.id, "2")
self.assertEqual(priority.name, "High")
|
python
|
"""
Please implement a `test` (e.g. pytest - this is up to you) for the method `compute_phenotype_similarity()`
- The details are up to you - use whatever testing framework you prefer.
"""
|
python
|
# Lesson 10 - Challenge 31: Trip cost
# Ask for the distance of a trip, then:
# if the trip is up to 200 km, the ticket costs R$0.50 per km traveled
# if it is longer than 200 km, the ticket costs R$0.45 per km traveled
d = int(input('Informe a distancia em Km da sua viagem: '))
if d <= 200:
print(f'O preço da passagem eh de R${d*0.5:.2f}')
else:
print(f'O preço da passagem eh de R${d*0.45:.2f}')
'''
# Another way
preço = d * 0.5 if d <= 200 else d * 0.45
print(f'O preço da passagem eh de R${preço:.2f}')
'''
|
python
|
import os,shutil
from .ExtensibleFileObject import ExtensibleFileObject
def file_list_dedup(file_list):
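    """Remove duplicates while keeping first-seen order.

    e.g. file_list_dedup(['a.v', 'b.v', 'a.v']) -> ['a.v', 'b.v']
    """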
new_list=list(set(file_list))
new_list.sort(key=file_list.index)
return new_list
def relpath(a,b):
pass
def check_vfile(func):
pass
def refresh_directory(path):
if os.path.exists(path):
shutil.rmtree(path)
os.makedirs(path)
def create_file(path,text):
text = [text] if isinstance(text,str) else list(text)
if os.path.exists(path):
os.remove(path)
fo = ExtensibleFileObject(keyword='UHDL')
fo.write('\n'.join(text))
fo.write_version('1.0.1')
fo.save(path=path)
#with open(path,'w') as fp:
# fp.write('\n'.join(text))
#fp.close()
#fp = open(path,'w')
return path
if __name__ == "__main__":
#ListProcess.relpath('a/b/c','d/e/f')
create_file('./test.v',['456'])
|
python
|
import nuke
t=nuke.menu("Nodes")
u=t.addMenu("Pixelfudger", icon="PxF_Menu.png")
t.addCommand( "Pixelfudger/PxF_Bandpass", "nuke.createNode('PxF_Bandpass')", icon="PxF_Bandpass.png" )
t.addCommand( "Pixelfudger/PxF_ChromaBlur", "nuke.createNode('PxF_ChromaBlur')", icon="PxF_ChromaBlur.png")
t.addCommand( "Pixelfudger/PxF_Distort", "nuke.createNode('PxF_Distort')", icon="PxF_Distort.png")
t.addCommand( "Pixelfudger/PxF_Erode", "nuke.createNode('PxF_Erode')", icon="PxF_Erode.png")
t.addCommand( "Pixelfudger/PxF_Filler", "nuke.createNode('PxF_Filler')", icon="PxF_Filler.png")
t.addCommand( "Pixelfudger/PxF_Grain", "nuke.createNode('PxF_Grain')", icon="PxF_Grain.png")
t.addCommand( "Pixelfudger/PxF_HueSat", "nuke.createNode('PxF_HueSat')", icon="PxF_HueSat.png")
t.addCommand( "Pixelfudger/PxF_IDefocus", "nuke.createNode('PxF_IDefocus')", icon="PxF_IDefocus.png")
t.addCommand( "Pixelfudger/PxF_KillSpill", "nuke.createNode('PxF_KillSpill')", icon="PxF_KillSpill.png")
t.addCommand( "Pixelfudger/PxF_Line", "nuke.createNode('PxF_Line')", icon="PxF_Line.png" )
t.addCommand( "Pixelfudger/PxF_MergeWrap", "nuke.createNode('PxF_MergeWrap')", icon="PxF_MergeWrap.png" )
t.addCommand( "Pixelfudger/PxF_ScreenClean", "nuke.createNode('PxF_ScreenClean')", icon="PxF_ScreenClean.png")
|
python
|
BOT_NAME = 'naver_movie'
SPIDER_MODULES = ['naver_movie.spiders']
NEWSPIDER_MODULE = 'naver_movie.spiders'
ROBOTSTXT_OBEY = False
DOWNLOAD_DELAY = 2
COOKIES_ENABLED = True
DEFAULT_REQUEST_HEADERS = {
"Referer": "https://movie.naver.com/"
}
DOWNLOADER_MIDDLEWARES = {
'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
'scrapy.downloadermiddlewares.retry.RetryMiddleware': None,
'scrapy_fake_useragent.middleware.RandomUserAgentMiddleware': 400,
'scrapy_fake_useragent.middleware.RetryUserAgentMiddleware': 401,
}
RETRY_ENABLED = True
RETRY_TIMES = 2
ITEM_PIPELINES = {
'naver_movie.pipelines.NaverMoviePipeline': 300,
}
|
python
|
import asyncio
import uvloop
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from scrapper import scrap
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
if __name__ == "__main__":
scheduler = AsyncIOScheduler()
scheduler.add_job(scrap, 'interval', seconds=5)
scheduler.start()
try:
asyncio.get_event_loop().run_forever()
except (KeyboardInterrupt, SystemExit):
pass
|
python
|
from ..classes import WorkflowAction
class TestWorkflowAction(WorkflowAction):
label = 'test workflow state action'
def execute(self, context):
context['workflow_instance']._workflow_state_action_executed = True
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Written by Lucas Sinclair and Paul Rougieux.
JRC biomass Project.
Unit D1 Bioeconomy.
"""
# Built-in modules #
import os
# Third party modules #
# First party modules #
from autopaths import Path
from autopaths.auto_paths import AutoPaths
from autopaths.dir_path import DirectoryPath
from plumbing.cache import property_cached
from plumbing.databases.access_database import AccessDatabase
# Internal modules #
from cbmcfs3_runner.pump.dataframes import multi_index_pivot
# Constants #
default_path = "C:/Program Files (x86)/Operational-Scale CBM-CFS3/Admin/DBs/ArchiveIndex_Beta_Install.mdb"
default_path = Path(default_path)
###############################################################################
class AIDB(object):
"""
This class enables us to switch the famous "ArchiveIndexDatabase", between
the Canadian standard and the European standard.
It also provides access to the data within this database.
"""
all_paths = """
/orig/aidb_eu.mdb
"""
def __init__(self, parent):
# Default attributes #
self.parent = parent
# Automatically access paths based on a string of many subpaths #
self.paths = AutoPaths(self.parent.data_dir, self.all_paths)
def __repr__(self):
return "%s object at '%s'" % (self.__class__, self.paths.aidb)
def switch(self):
default_path.remove()
self.paths.aidb.copy(default_path)
@property_cached
def database(self):
database = AccessDatabase(self.paths.aidb)
database.convert_col_names_to_snake = True
return database
@property_cached
def dm_table(self):
"""Main disturbance matrix."""
# Load #
df = self.database['tblDM']
# Rename #
df = df.rename(columns={ "name": "dist_desc_dm",
"description": "dist_desc_long"})
# Return #
return df
@property_cached
def source(self):
"""Name of source pools."""
# Load #
df = self.database['tblSourceName']
# Rename #
df = df.rename(columns={ 'row': 'dm_row',
'description': 'row_pool'})
# Return #
return df
@property_cached
def sink(self):
"""Name of sink pools."""
# Load #
df = self.database['tblSinkName']
# Rename #
df = df.rename(columns={ 'column': 'dm_column',
'description': 'column_pool'})
# Return #
return df
@property_cached
def lookup(self):
"""Proportion by source and sink."""
# Load #
df = self.database['tblDMValuesLookup']
# Return #
return df
@property_cached
def dist_type_default(self):
"""Link between dist_type_id and dist_desc_aidb."""
# Load #
df = self.database['tbldisturbancetypedefault']
# Rename #
df = df.rename(columns = {'dist_type_name': 'dist_desc_aidb'})
# Return #
return df
@property_cached
def dm_assoc_default(self):
"""
Link between default_dist_type_id, default_ec_id, and dmid
Pay attention to the tricky annual_order which might generate
errors in some cases (see also libcbm aidb import efforts)
Shape in the EU AIDB: 110180 rows × 6 columns
"""
# Load #
df = self.database['tbldmassociationdefault']
# Rename #
# TODO, check if dist_type_id is exactly the correct name
df = df.rename(columns = {'default_disturbance_type_id': 'dist_type_id',
'name': 'assoc_name',
'description': 'assoc_desc'})
# Return #
return df
@property_cached
def dm_assoc_default_short(self):
"""Same as above but with any "Annual order" > 1 dropped."""
# Load #
df = self.dm_assoc_default
# Collapse #
df = df.query("annual_order < 2").copy()
# Check that the combination of dist_type_id and dmid
# is unique on dist_type_id
a = len(set(df['dist_type_id']))
b = len(df[['dmid', 'dist_type_id']].drop_duplicates())
assert a == b
# Keep only a couple columns #
df = df[['dmid', 'dist_type_id']].drop_duplicates()
# Return #
return df
@property_cached
def dm_assoc_spu_default(self):
"""
Link between default_dist_type_id, spuid and dmid.
        Warning: it contains only wildfire disturbances in the EU AIDB.
Shape in the EU aidb: 920 rows × 6 columns
"""
# Load #
df = self.database['tbldmassociationspudefault']
# Rename
# TODO check if dist_type_id is exactly the correct name
df = df.rename(columns = {'default_disturbance_type_id': 'dist_type_id',
'name': 'spu_name',
'description': 'spu_desc'})
# Return #
return df
@property_cached
def dist_matrix_long(self):
"""
Recreates the disturbance matrix in long format.
Join lookup and the disturbance matrix table 'tblDM',
Then join source and sink to add description of the origin and destination pools.
To be continued based on /notebooks/disturbance_matrix.ipynb
There is a many-to-one relationship between dist_type_name and dmid
(disturbance matrix id),
        i.e. for each dist_type_name there is one and only one dmid.
The opposite is not true, as there are more dist_type_name than dmid.
Columns are:
['dist_desc_input', 'dist_desc_aidb', 'dist_type_id', 'dmid',
'dm_column', 'dm_structure_id', 'dm_row', 'proportion', 'dist_desc_dm',
'dist_desc_long', 'row_pool', 'column_pool', 'on_off_switch',
'description', 'is_stand_replacing', 'is_multi_year',
'multi_year_count', 'dist_type_name'],
"""
# Load tables from the aidb #
dm_table = self.dm_table
source = self.source
sink = self.sink
lookup = self.lookup
assoc_short = self.dm_assoc_default_short
dist_default = self.dist_type_default
# Load tables from orig_data #
map_disturbance = self.parent.associations.map_disturbance
dist_types = self.parent.orig_data.disturbance_types
# Join lookup and dm_table to add the description for each `dmid` #
dm_lookup = (lookup
.set_index('dmid')
.join(dm_table.set_index('dmid'))
.reset_index())
# Indexes #
index_source = ['dm_row', 'dm_structure_id']
index_sink = ['dm_column', 'dm_structure_id']
# Add source and sink descriptions #
df = (dm_lookup.set_index(index_source)
.join(source.set_index(index_source))
.reset_index()
.set_index(index_sink)
.join(sink.set_index(index_sink))
.reset_index())
# Add 'dist_type_name' corresponding to orig/disturbance_types.csv
df = df.left_join(assoc_short, 'dmid')
df = df.left_join(dist_default, 'dist_type_id')
df = df.left_join(map_disturbance, 'dist_desc_aidb')
df = df.left_join(dist_types, 'dist_desc_input')
# Return #
return df
@property_cached
def dist_matrix(self):
"""
The disturbance matrix is reshaped in the form of a matrix
with source pools in rows and sink pools in columns.
"""
# Load #
df = self.dist_matrix_long.copy()
# Make pool description columns suitable as column names #
# Adds a number at the end of the disturbance name #
df['row_pool'] = (df['row_pool'].str.replace(' ', '_') + '_' +
df['dm_row'].astype(str))
df['column_pool'] = (df['column_pool'].str.replace(' ','_') + '_' +
df['dm_column'].astype(str))
# Filter proportions #
# TODO correct missing name from the index (see HU for example)
index = ['dmid', 'dm_structure_id', 'dm_row', 'name', 'row_pool']
df = (df
.set_index(index)
.query('proportion>0'))
# Pivot #
df = multi_index_pivot(df, columns='column_pool', values='proportion')
# Reorder columns by the last digit number
col_order = sorted(df.columns,
key=lambda x: str(x).replace("_", "0")[-2:])
# Exclude index columns from the re-ordering of columns
df = df.set_index(index)[col_order[:-5]].reset_index()
# Return #
return df
@property_cached
def merch_biom_rem(self):
"""
Retrieve the percentage of merchantable biomass removed
from every different disturbance type used in the silviculture
treatments.
The column "perc_merch_biom_rem" comes from silviculture.csv
The column "proportion" comes from aidb.mdb and multiple joins.
"""
# Load #
df = self.dist_matrix_long
dist_types = self.parent.orig_data.disturbance_types
treats = self.parent.silviculture.treatments
# Filter dist_mat to take only disturbances that are actually used #
selector = df['dist_type_name'].isin(dist_types['dist_type_name'])
df = df[selector].copy()
# Take only products #
df = df.query("column_pool == 'products'")
df = df.query("row_pool == 'Softwood merchantable' or row_pool == 'Hardwood merch'")
# Join #
df = treats.left_join(df, 'dist_type_name')
# Take columns of interest #
cols = ['dist_type_name', 'perc_merch_biom_rem', 'dist_desc_aidb', 'row_pool', 'proportion']
df = df[cols]
# Compute difference #
df['diff']= df['perc_merch_biom_rem'] - df['proportion']
# NaNs appear because of natural disturbances #
df = df.fillna(0)
# Check #
assert all(df['diff'].abs() < 1e-3)
# Return #
return df
@property_cached
def dmid_map(self):
"""Map the dist_type_name to its dmid for the current country.
Only returns the unique available combinations
of dmid and dist_type_name.
Note two dist_type_name can map to the same dmid.
Columns:
['dist_type_name', 'dmid', 'dist_desc_aidb']
"""
# Load #
dist_mat = self.dist_matrix_long
# Keep only two columns #
columns_of_interest = ['dist_type_name', 'dmid', 'dist_desc_aidb']
df = dist_mat[columns_of_interest].drop_duplicates()
# Check #
#assert not any(df['dmid'] == numpy.nan)
# Return #
return df
#-------------------------- Special Methods ------------------------------#
def symlink(self):
# Where is the data, default case #
aidb_repo = DirectoryPath("~/repos/libcbm_aidb/")
# But you can override that with an environment variable #
if os.environ.get("CBMCFS3_AIDB"):
aidb_repo = DirectoryPath(os.environ['CBMCFS3_AIDB'])
# The source #
source = aidb_repo + self.parent.iso2_code + '/orig/aidb_eu.mdb'
# Special case for ZZ #
if self.parent.iso2_code == 'ZZ':
source = aidb_repo + 'LU/orig/aidb_eu.mdb'
# Check the AIDB exists #
assert source
# The destination #
destin = self.paths.aidb
# Remove destination if it already exists #
destin.remove()
# Symlink #
source.link_to(destin)
# Return #
return 'Symlink success for ' + self.parent.iso2_code + '.'
|
python
|
import numpy as np
import interconnect
import copy
# # VARIABLES
N = 3001 # max clock cycles +1
FW = 16 # flit width
FPP = 32 # flits per packet
def get_header(FW=16):
    '''
    generates a random header for a flit-width of FW
    '''
    return np.random.randint(0, 1 << FW)
# data, day = np.load('./videos/traffic_pictures_day.npz'), 1
data, day = np.load('./videos/traffic_pictures_night.npz'), 0
# data = np.load('./videos/traffic_features.npz')
sim = np.load('./res_simulator/sensors_to_memory.npz')
mux = sim['mux_matrices']
DHs = [get_header(16) for i in range(int(N))]
D0s = data['pic1'].astype(int) # pixel samples
D1s = data['pic2'].astype(int)
D2s = data['pic3'].astype(int)
D3s = data['pic4'].astype(int)
D4s = data['pic5'].astype(int)
D5s = data['pic6'].astype(int)
D0s = np.add(D0s[0::2, :], (1 << 8)*D0s[1::2, :])  # pack two 8-bit samples into one 16-bit flit
D1s = np.add(D1s[0::2, :], (1 << 8)*D1s[1::2, :])
D2s = np.add(D2s[0::2, :], (1 << 8)*D2s[1::2, :])
D3s = np.add(D3s[0::2, :], (1 << 8)*D3s[1::2, :])
D4s = np.add(D4s[0::2, :], (1 << 8)*D4s[1::2, :])
D5s = np.add(D5s[0::2, :], (1 << 8)*D5s[1::2, :])
ic2D = interconnect.Interconnect(B=16, wire_spacing=0.3e-6, # 2D IC
wire_width=0.3e-6, wire_length=100e-6)
ic3D = interconnect.Interconnect(16, 0.6e-6, 0.3e-6, wire_length=0, # 3D IC
TSVs=True, TSV_radius=2e-6, TSV_pitch=8e-6)
E3dLink0bitlevel = []
E2dLink1bitlevel = []
E2dLink2bitlevel = []
E3dLink0highlevel = []
E2dLink1highlevel = []
E2dLink2highlevel = []
E3dLink0ref = []
E2dLink1ref = []
E2dLink2ref = []
# # MAIN PART
for coding in range(8):
    # run the simulation for
    # 0: NO CODING; 1: NEG(K0); 2: NEG(K1); 3: NEG(CORR)
    # 4: NEG(K0+CORR); 5: NEG(K1+CORR); 6: NEG(CORR+K0); 7: NEG(CORR+K1)
D_true = []
cD = [] # counter for the different data types
DH = interconnect.DataStream(np.copy(DHs), 16) # headers not coded
    D0 = interconnect.DataStream(D0s.flatten()[:N], 16)  # uncoded data streams
D1 = interconnect.DataStream(D1s.flatten()[:N], 16)
D2 = interconnect.DataStream(D2s.flatten()[:N], 16)
D3 = interconnect.DataStream(D3s.flatten()[:N], 16)
D4 = interconnect.DataStream(D4s.flatten()[:N], 16)
D5 = interconnect.DataStream(D5s.flatten()[:N], 16)
# coding correlated data streams
if coding == 1:
D0, D1 = D0.k0_encoded().invert, D1.k0_encoded().invert
D2, D3 = D2.k0_encoded().invert, D3.k0_encoded().invert
D4, D5 = D4.k0_encoded().invert, D5.k0_encoded().invert
elif coding == 2:
D0, D1 = D0.k1_encoded().invert, D1.k1_encoded().invert
D2, D3 = D2.k1_encoded().invert, D3.k1_encoded().invert
D4, D5 = D4.k1_encoded().invert, D5.k1_encoded().invert
elif coding == 3:
D0, D1 = D0.corr_encoded().invert, D1.corr_encoded().invert
D2, D3 = D2.corr_encoded().invert, D3.corr_encoded().invert
D4, D5 = D4.corr_encoded().invert, D5.corr_encoded().invert
elif coding == 4:
D0 = D0.k0_encoded().corr_encoded().invert
D1 = D1.k0_encoded().corr_encoded().invert
D2 = D2.k0_encoded().corr_encoded().invert
D3 = D3.k0_encoded().corr_encoded().invert
D4 = D4.k0_encoded().corr_encoded().invert
D5 = D5.k0_encoded().corr_encoded().invert
elif coding == 5:
D0 = D0.k1_encoded().corr_encoded().invert
D1 = D1.k1_encoded().corr_encoded().invert
D2 = D2.k1_encoded().corr_encoded().invert
D3 = D3.k1_encoded().corr_encoded().invert
D4 = D4.k1_encoded().corr_encoded().invert
D5 = D5.k1_encoded().corr_encoded().invert
elif coding == 6:
D0 = D0.corr_encoded().k0_encoded().invert
D1 = D1.corr_encoded().k0_encoded().invert
D2 = D2.corr_encoded().k0_encoded().invert
D3 = D3.corr_encoded().k0_encoded().invert
D4 = D4.corr_encoded().k0_encoded().invert
D5 = D5.corr_encoded().k0_encoded().invert
elif coding == 7:
D0 = D0.corr_encoded().k1_encoded().invert
D1 = D1.corr_encoded().k1_encoded().invert
D2 = D2.corr_encoded().k1_encoded().invert
D3 = D3.corr_encoded().k1_encoded().invert
D4 = D4.corr_encoded().k1_encoded().invert
D5 = D5.corr_encoded().k1_encoded().invert
# # #
for i in range(len(sim['links'])):
d_link = [0] # data going over the link (init val 0)
# copy of single data streams as list
h = np.copy(DH.samples).tolist()
d0, d1 = np.copy(D0.samples).tolist(), np.copy(D1.samples).tolist()
d2, d3 = np.copy(D2.samples).tolist(), np.copy(D3.samples).tolist()
d4, d5 = np.copy(D4.samples).tolist(), np.copy(D5.samples).tolist()
d_list = [h, d0, d1, d2, d3, d4, d5]
counter = [0, 0, 0, 0, 0, 0, 0]
seq = sim['true_values'][i].astype(int) # pattern sequence
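        # Replay the mux schedule: seq values 0-6 pick which stream drives the
        # link this cycle; any other value means the link holds its previous flit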
for j in range(1, len(seq)):
if seq[j] < 7:
d_link.append(d_list[seq[j]].pop(0))
counter[seq[j]] += 1
else:
d_link.append(d_link[-1])
cD.append(counter)
D_true.append(interconnect.DataStream(d_link, 16))
D_mux0 = interconnect.DataStreamProb([DH[:cD[0][0]], D0[:cD[0][1]],
D1[:cD[0][2]], D2[:cD[0][3]],
D3[:cD[0][4]], D4[:cD[0][5]],
D5[:cD[0][6]]], mux[0])
D_mux1 = interconnect.DataStreamProb([DH[:cD[1][0]], D0, # D0-D2 not trans
D1, D2,
D3[:cD[1][4]], D4[:cD[1][5]],
D5[:cD[1][6]]], mux[1])
D_mux2 = interconnect.DataStreamProb([DH[:cD[2][0]], D0,
D1, D2, # only D3 transmitted
D3[:cD[2][4]], D4, D5], mux[2])
D_noMux0 = copy.deepcopy(DH[:cD[0][0]])
D_noMux0.append(D0[:cD[0][1]])
D_noMux0.append(D1[:cD[0][2]])
D_noMux0.append(D2[:cD[0][3]])
D_noMux0.append(D3[:cD[0][4]])
D_noMux0.append(D4[:cD[0][5]])
D_noMux0.append(D5[:cD[0][6]])
D_noMux1 = copy.deepcopy(DH[:cD[1][0]])
D_noMux1.append(D3[:cD[1][4]])
D_noMux1.append(D4[:cD[1][5]])
D_noMux1.append(D5[:cD[1][6]])
D_noMux2 = copy.deepcopy(DH[:cD[2][0]])
D_noMux2.append(D3[:cD[2][4]])
# golden values (bit-level sim)
E3dLink0bitlevel.append(ic3D.E(D_true[0]))
E2dLink1bitlevel.append(ic2D.E(D_true[1]))
E2dLink2bitlevel.append(ic2D.E(D_true[2]))
# proposed high-level model
E3dLink0highlevel.append(ic3D.E(D_mux0))
E2dLink1highlevel.append(ic2D.E(D_mux1))
E2dLink2highlevel.append(ic2D.E(D_mux2))
# ref bit level
E3dLink0ref.append(ic3D.E(D_noMux0))
E2dLink1ref.append(ic2D.E(D_noMux1))
E2dLink2ref.append(ic2D.E(D_noMux2))
if day == 0:
E3dLink0bitlevel_night = E3dLink0bitlevel
E2dLink1bitlevel_night = E2dLink1bitlevel
E2dLink2bitlevel_night = E2dLink2bitlevel
E3dLink0highlevel_night = E3dLink0highlevel
E2dLink1highlevel_night = E2dLink1highlevel
E2dLink2highlevel_night = E2dLink2highlevel
E3dLink0ref_night = E3dLink0ref
E2dLink1ref_night = E2dLink1ref
E2dLink2ref_night = E2dLink2ref
else:
E3dLink0bitlevel_day = E3dLink0bitlevel
E2dLink1bitlevel_day = E2dLink1bitlevel
E2dLink2bitlevel_day = E2dLink2bitlevel
E3dLink0highlevel_day = E3dLink0highlevel
E2dLink1highlevel_day = E2dLink1highlevel
E2dLink2highlevel_day = E2dLink2highlevel
E3dLink0ref_day = E3dLink0ref
E2dLink1ref_day = E2dLink1ref
E2dLink2ref_day = E2dLink2ref
if 'E3dLink0ref_day' in locals() and 'E3dLink0ref_night' in locals():
packages = 2*sum(cD[0])/32
E3dLink0ref_tot = (N/packages)*(np.array(E3dLink0ref_day)+np.array(E3dLink0ref_night))
E2dLink1ref_tot = (N/packages)*(np.array(E2dLink1ref_day)+np.array(E2dLink1ref_night))
E2dLink2ref_tot = (N/packages)*(np.array(E2dLink2ref_day)+np.array(E2dLink2ref_night))
E3dLink0bitlevel_tot = (N/packages)*(np.array(E3dLink0bitlevel_day)+np.array(E3dLink0bitlevel_night))
E2dLink1bitlevel_tot = (N/packages)*(np.array(E2dLink1bitlevel_day)+np.array(E2dLink1bitlevel_night))
E2dLink2bitlevel_tot = (N/packages)*(np.array(E2dLink2bitlevel_day)+np.array(E2dLink2bitlevel_night))
E3dLink0highlevel_tot = (N/packages)*(np.array(E3dLink0highlevel_day)+np.array(E3dLink0highlevel_night))
E2dLink1highlevel_tot = (N/packages)*(np.array(E2dLink1highlevel_day)+np.array(E2dLink1highlevel_night))
E2dLink2highlevel_tot = (N/packages)*(np.array(E2dLink2highlevel_day)+np.array(E2dLink2highlevel_night))
|
python
|
from py_db import db
import NSBL_helpers as helper
# Re-computes the team hitting tables
db = db('NSBL')
def process():
print "processed_team_hitting"
db.query("TRUNCATE TABLE `processed_team_hitting_basic`")
db.query("TRUNCATE TABLE `processed_team_hitting_advanced`")
yr_min, yr_max = db.query("SELECT MIN(year), MAX(year) FROM processed_league_averages_pitching")[0]
for year in range(yr_min, yr_max+1):
for _type in ('basic', 'advanced'):
print str(year) + "\thitting\t" + _type
table = 'processed_team_hitting_%s' % (_type)
if _type == 'basic':
entries = process_basic(year)
elif _type == 'advanced':
entries = process_advanced(year)
if entries != []:
db.insertRowDict(entries, table, replace=True, insertMany=True, rid=0)
db.conn.commit()
def process_basic(year):
entries = []
qry = """SELECT
r.team_abb,
SUM(pa), SUM(ab), SUM(h), SUM(2B), SUM(3b), SUM(Hr), SUM(r), SUM(rbi), SUM(hbp), SUM(bb), SUM(k), SUM(sb), SUM(cs)
FROM register_batting_primary r
JOIN processed_compWAR_offensive o USING (player_name, team_abb, YEAR)
JOIN processed_WAR_hitters w USING (pa, player_name, team_abb, YEAR)
WHERE r.year = %s
GROUP BY r.team_abb;"""
query = qry % (year)
res = db.query(query)
for row in res:
team_abb, pa, ab, h, _2, _3, hr, r, rbi, hbp, bb, k, sb, cs = row
entry = {}
entry["year"] = year
entry["team_abb"] = team_abb
_1 = h - _2 - _3 - hr
avg = float(h)/float(ab)
obp = (float(h)+float(bb)+float(hbp))/float(pa)
        slg = (float(_1) + 2*float(_2) + 3*float(_3) + 4*float(hr))/float(ab)
entry["avg"] = avg
entry["obp"] = obp
entry["slg"] = slg
entry["pa"] = pa
entry["ab"] = ab
entry["h"] = h
entry["2b"] = _2
entry["3b"] = _3
entry["hr"] = hr
entry["r"] = r
entry["rbi"] = rbi
entry["hbp"] = hbp
entry["bb"] = bb
entry["k"] = k
entry["sb"] = sb
entry["cs"] = cs
entries.append(entry)
return entries
def process_advanced(year):
entries = []
qry = """SELECT
r.team_abb, SUM(pa), SUM(pf*pa)/SUM(pa), SUM(wOBA*pa)/SUM(pa), SUM(park_wOBA*pa)/SUM(pa), SUM(OPS*pa)/SUM(pa), SUM(OPS_plus*pa)/SUM(pa), SUM(babip*pa)/SUM(pa), SUM(wRC), SUM(wRC_27*pa)/SUM(pa), SUM(wRC_plus*pa)/SUM(pa), SUM(rAA), SUM(w.oWAR)
FROM register_batting_primary r
JOIN processed_compWAR_offensive o USING (player_name, team_abb, YEAR)
JOIN processed_WAR_hitters w USING (pa, player_name, team_abb, YEAR)
WHERE r.year = %s
GROUP BY r.team_abb;"""
query = qry % (year)
res = db.query(query)
for row in res:
team_abb, pa, pf, woba, park_woba, ops, ops_plus, babip, wrc, wrc_27, wrc_plus, raa, owar = row
entry = {}
entry["year"] = year
entry["team_abb"] = team_abb
entry["pa"] = pa
entry["pf"] = pf
entry["wOBA"] = woba
entry["park_wOBA"] = park_woba
entry["OPS"] = ops
entry["OPS_plus"] = ops_plus
entry["babip"] = babip
entry["wRC"] = wrc
entry["wRC_27"] = wrc_27
entry["wRC_plus"] = wrc_plus
entry["rAA"] = raa
entry["oWAR"] = owar
entries.append(entry)
return entries
if __name__ == "__main__":
process()
|
python
|
"""
The MIT License (MIT)
Copyright (c) 2020-Current Skelmis
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, List, AsyncIterable, Dict
from attr import asdict
import orjson as json
from antispam.abc import Cache
from antispam.enums import ResetType
from antispam.exceptions import GuildNotFound, MemberNotFound
from antispam.dataclasses import Message, Member, Guild, Options
if TYPE_CHECKING:
from redis import asyncio as aioredis
from antispam import AntiSpamHandler
log = logging.getLogger(__name__)
class RedisCache(Cache):
"""
A cache backend built to use Redis.
Parameters
----------
handler: AntiSpamHandler
The AntiSpamHandler instance
redis: redis.asyncio.Redis
Your redis connection instance.
"""
def __init__(self, handler: AntiSpamHandler, redis: aioredis.Redis):
self.redis: aioredis.Redis = redis
self.handler: AntiSpamHandler = handler
async def get_guild(self, guild_id: int) -> Guild:
log.debug("Attempting to return cached Guild(id=%s)", guild_id)
resp = await self.redis.get(f"GUILD:{guild_id}")
if not resp:
raise GuildNotFound
as_json = json.loads(resp.decode("utf-8"))
guild: Guild = Guild(**as_json)
# This is actually a dict here
guild.options = Options(**guild.options) # type: ignore
guild_members: Dict[int, Member] = {}
for member_id in guild.members: # type: ignore
member: Member = await self.get_member(member_id, guild_id)
guild_members[member.id] = member
guild.members = guild_members
return guild
async def set_guild(self, guild: Guild) -> None:
log.debug("Attempting to set Guild(id=%s)", guild.id)
# Store members separate
for member in guild.members.values():
await self.set_member(member)
guild.members = [member.id for member in guild.members.values()]
as_json = json.dumps(asdict(guild, recurse=True))
await self.redis.set(f"GUILD:{guild.id}", as_json)
async def delete_guild(self, guild_id: int) -> None:
log.debug("Attempting to delete Guild(id=%s)", guild_id)
await self.redis.delete(f"GUILD:{guild_id}")
async def get_member(self, member_id: int, guild_id: int) -> Member:
log.debug(
"Attempting to return a cached Member(id=%s) for Guild(id=%s)",
member_id,
guild_id,
)
resp = await self.redis.get(f"MEMBER:{guild_id}:{member_id}")
if not resp:
raise MemberNotFound
as_json = json.loads(resp.decode("utf-8"))
member: Member = Member(**as_json)
messages: List[Message] = []
for message in member.messages:
messages.append(Message(**message)) # type: ignore
member.messages = messages
return member
async def set_member(self, member: Member) -> None:
log.debug(
"Attempting to cache Member(id=%s) for Guild(id=%s)",
member.id,
member.guild_id,
)
# Ensure a guild exists
try:
guild = await self.get_guild(member.guild_id)
guild.members = [m.id for m in guild.members.values()]
guild.members.append(member.id)
guild_as_json = json.dumps(asdict(guild, recurse=True))
await self.redis.set(f"GUILD:{guild.id}", guild_as_json)
except GuildNotFound:
guild = Guild(id=member.guild_id, options=self.handler.options)
guild.members = [member.id]
guild_as_json = json.dumps(asdict(guild, recurse=True))
await self.redis.set(f"GUILD:{guild.id}", guild_as_json)
as_json = json.dumps(asdict(member, recurse=True))
await self.redis.set(f"MEMBER:{member.guild_id}:{member.id}", as_json)
async def delete_member(self, member_id: int, guild_id: int) -> None:
log.debug(
"Attempting to delete Member(id=%s) in Guild(id=%s)", member_id, guild_id
)
try:
guild: Guild = await self.get_guild(guild_id)
guild.members.pop(member_id)
await self.set_guild(guild)
        except (GuildNotFound, KeyError):
            pass
await self.redis.delete(f"MEMBER:{guild_id}:{member_id}")
async def add_message(self, message: Message) -> None:
log.debug(
"Attempting to add a Message(id=%s) to Member(id=%s) in Guild(id=%s)",
message.id,
message.author_id,
message.guild_id,
)
try:
member: Member = await self.get_member(message.author_id, message.guild_id)
except (MemberNotFound, GuildNotFound):
member: Member = Member(message.author_id, guild_id=message.guild_id)
member.messages.append(message)
await self.set_member(member)
async def reset_member_count(
self, member_id: int, guild_id: int, reset_type: ResetType
) -> None:
log.debug(
"Attempting to reset counts on Member(id=%s) in Guild(id=%s) with type %s",
member_id,
guild_id,
reset_type.name,
)
try:
member: Member = await self.get_member(member_id, guild_id)
except (MemberNotFound, GuildNotFound):
return
if reset_type == ResetType.KICK_COUNTER:
member.kick_count = 0
else:
member.warn_count = 0
await self.set_member(member)
async def drop(self) -> None:
log.warning("Cache was just dropped")
await self.redis.flushdb(asynchronous=True)
async def get_all_guilds(self) -> AsyncIterable[Guild]:
log.debug("Yielding all cached guilds")
keys: List[bytes] = await self.redis.keys("GUILD:*")
for key in keys:
key = key.decode("utf-8").split(":")[1]
yield await self.get_guild(int(key))
async def get_all_members(self, guild_id: int) -> AsyncIterable[Member]:
log.debug("Yielding all cached members for Guild(id=%s)", guild_id)
        # Raises GuildNotFound if the guild is not cached
        await self.get_guild(guild_id)
keys: List[bytes] = await self.redis.keys(f"MEMBER:{guild_id}:*")
for key in keys:
key = key.decode("utf-8").split(":")[2]
yield await self.get_member(int(key), guild_id)
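# A minimal usage sketch (illustrative only): `handler` is an existing
# AntiSpamHandler and the Redis connection details are placeholders; how the
# cache is attached to the handler follows the library's own documentation.
#
#   from redis import asyncio as aioredis
#
#   redis = aioredis.Redis(host="localhost", port=6379)
#   cache = RedisCache(handler, redis)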
|
python
|
# -*- coding: utf-8 -*-
try:
import mdp
use_mdp = True
except ImportError:
print 'mdp (modular data processing) module not installed. Cannot do PCA'
use_mdp = False
import numpy as np
from neuropype import node
from itertools import imap, repeat
from copy import deepcopy, copy
from neuropype import parameter
import os
from bisect import bisect_left
from neuropype.ressources._common import boxfilter, findextrema, cross_threshold
from neuropype.ressources._common import flatenList, filterValues
import neuropype.ressources.progressbar as pgb
from neuropype.datatypes import Time_list, Sweep
from neuropype.gui.lassoExempl import LassoManager
class DetectSpike(node.Node):
"""Detect events in a sweep
* filter is a list of 4-tuples, (sniptype, property, comp, value)
sniptype can be 'raw' or 'filtered',
    property can be any of the 'props' param or one PCA component,
comp can be 0 -- for < --, 1 -- for > --, 'in' or 'out',
if comp is 0 or 1, value is a float
if comp is 'in' or 'out', value must be a list of 2 floats,
defining the window to keep/exclude"""
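    # An illustrative 'filter' value in the format described above (example
    # values only, not taken from a real analysis): keep events whose raw
    # snippet peak-to-peak amplitude exceeds 0.5 and whose filtered snippet
    # minimum falls inside the window [-1.0, -0.2]:
    #     [('raw', 'ptp', 1, 0.5), ('filtered', 'min', 'in', [-1.0, -0.2])]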
def __init__(self, name, parent):
# Inputs
self.in_sweep = node.Input(['Sweep', 'SweepData'])
self.in_numSweeps = node.Input('int')
self.in_chanNames = node.Input('list')
self.in_origin = node.Input('list')
self.in_tag = node.Input('list')
self.in_sweepInfo = node.Input('SweepInfo')
# Outputs
self.out_time = node.Output('Time_list')
self.out_numSweeps = node.Output('int')
self.out_sweep = node.Output('Sweep')
self.out_chanNames = node.Output('list')
self.out_origin = node.Output('list')
self.out_tagTimeList = node.Output('list')
self.out_sweepInfo = node.Output('SweepInfo')
self.out_numSpikes = node.Output('int')
self.out_snip_tag = node.Output('list')
self.out_snip_sweepInfo = node.Output('SweepInfo')
self.out_snip = node.Output('Sweep')
self.out_snip_origin = node.Output('list')
self.out_snip_chanNames = node.Output('list')
super(DetectSpike, self).__init__(name, parent)
self._inputGroups['sweep'] = {'sweep': 'in_sweep',
'numSweeps': 'in_numSweeps',
'chanNames': 'in_chanNames',
'origin': 'in_origin',
'tag' : 'in_tag',
'sweepInfo': 'in_sweepInfo'}
self._outputGroups = {'time_list': {'time_list': 'out_time',
'numSweeps': 'out_numSweeps',
'tag': 'out_tagTimeList'},
'filteredSweep': {'sweep': 'out_sweep',
'numSweeps': 'out_numSweeps',
'chanNames': 'out_chanNames',
'origin': 'out_origin',
'tag': 'out_tagTimeList',
'sweepInfo': 'out_sweepInfo'},
'snippet': {'sweep': 'out_snip',
'numSweeps': 'out_numSpikes',
'chanNames': 'out_snip_chanNames',
'origin': 'out_snip_origin',
'tag': 'out_snip_tag',
'sweepInfo': 'out_snip_sweepInfo'}}
# Default parameters:
baseline = parameter.combobox('baseline', self, ['fixed', 'floating',
'mean', 'window', None], 'floating')
fixed_baseline = parameter.float_param('fixed_baseline', self, 0, decimals= 9,
singleStep= 1e-3)
createUniv = CreateUniv(self)
self.cU = createUniv
chan = parameter.combobox('chan', self, [], 'None', func = createUniv)
padding = parameter.combobox('padding', self, ['flatPad', 'zeroPad', 'keep'], 'flatPad')
win0 = parameter.float_param('win0', self, 5e-3, minVal= 0, decimals= 9,
singleStep= 1e-3)
win1 = parameter.float_param('win1', self, 1e-3, minVal= 0, decimals= 9,
singleStep= 1e-3)
win2 = parameter.float_param('win2', self, 1.5e-3, minVal= 0, decimals=
9, singleStep= 1e-3)
dt0 = parameter.float_param('dt0', self, 1.5e-3, minVal= 0, decimals=
9, singleStep= 1e-3)
dt1 = parameter.float_param('dt1', self, 1.5e-3, minVal= 0, decimals=
9, singleStep= 1e-3)
pointinterval = parameter.float_param('pointinterval', self, 1e-3, minVal= 0, decimals=
9, singleStep= 0.1e-3)
numWins = parameter.integer('numWins', self, 1, minVal= 1, maxVal= 3)
threshold = parameter.float_param('threshold', self, 0, decimals=
9, singleStep= 1e-3)
maximum = parameter.boolean('maximum', self, Default = True)
upwards = parameter.boolean('upwards', self, Default = True)
cross_threshold_param = parameter.boolean('cross_threshold', self, Default = False)
self._params={'chan': chan,
'maximum': maximum,
'upwards': upwards,
'threshold': threshold,
'cross_threshold': cross_threshold_param,
'pointinterval': pointinterval,
'baseline' : baseline,
'verbose' : 1,
'numWins': numWins,
'win0': win0,
'win1' : win1,
'win2' : win2,
'dt0' : dt0,
'dt1' : dt1,
'baseline_window': ['begin', 'end'],
'fixed_baseline': fixed_baseline,
'memory': 'store',
'snip_window' : [-2e-3,2e-3],
'props' :['sw_ind', 'max', 'min', 'median', 'mean',
'ptp', 'std', 'sum'],
'snip_memory' : ('store', 'all'),
'filter':[],
'padding': padding,
'graphviz':{'style': 'filled', 'fillcolor':
'lightyellow'}}
#connecting outputs:
self.out_time.output = self.time_list
self.out_numSweeps.output = self.numSweeps
self.out_chanNames.output = self.chanNames
self.out_sweep.output = self.filteredSweep
self.out_origin.output = self.origin
self.out_tagTimeList.output = self.tag
self.out_sweepInfo.output = self.sweepInfo
self.out_numSpikes.output = self.numSpikes
self.out_snip_tag.output = self.snip_tag
self.out_snip.output = self.snippet
        self.out_snip_origin.output = self.snippet_origin
        self.out_snip_sweepInfo.output = self.snippet_sweepInfo
self.out_snip_chanNames.output = self.snippet_chanNames
def _ready_trace(self, data, time, debug = 0):
'''do the filtering'''
f0, f1, f2, dt0, dt1 = None, None, None, None, None
dtype = None
if data.dtype == 'int':
dtype = data.dtype
data = np.asarray(data, dtype ='float64')
dt = float(time[1]-time[0])
#substracting baseline:
baseline = self.get_param('baseline')
if baseline == 'window':
if self.get_param('baseline_window')[0] == 'begin':
beg =0
else:
baseline_window0 = float(self.get_param('baseline_window')[0])
beg = bisect_left(time, baseline_window0)
if self.get_param('baseline_window')[1] == 'end':
end = -1
else:
baseline_window1 = float(self.get_param('baseline_window')[1])
end = bisect_left(time,baseline_window1)
baseline = np.mean(data[beg:end])
data-=baseline
elif baseline == 'fixed':
baseline = float(self.get_param('fixed_baseline'))
data-=baseline
elif baseline == 'floating':
numWins = self.get_param('numWins')
cumsum = data.cumsum()
win0 = int(self.get_param('win0')/dt)
biggestWin = win0
f0 = boxfilter(data, win0, cumsum)
if numWins==1:
#simply substract baseline
data-=f0
else:
#kind of first order derivative
dt0=int(float(self.get_param('dt0')/dt))
if dt0 == 0: dt0 = 1
win1 = int(self.get_param('win1')/dt)
biggestWin = max(win1, biggestWin)
f1 = boxfilter(data, win1, cumsum)
if numWins==2:
data = np.zeros_like(data)
data[dt0/2:-dt0/2] = f1[dt0:] - f0[:-dt0]
elif numWins==3:
#kind of 2nd order derivative
win2 = int(self.get_param('win2')/dt)
biggestWin = max(win2, biggestWin)
dt1=int(float(self.get_param('dt1')/dt))
if dt1 == 0: dt1=1
f2 = boxfilter(data, win2, cumsum)
data = np.zeros_like(data)
data[dt1/2:-dt1/2] = f2[dt1:]-2*f1[dt0:-dt1+dt0]-f0[:-dt1]
else:
raise ValueError('wrong numWins: %s'%numWins)
padding = self.get_param('padding')
if padding == 'zeroPad':
data[:biggestWin] = np.zeros(biggestWin)
data[-biggestWin:] = np.zeros(biggestWin)
elif padding == 'flatPad':
data[:biggestWin] = np.ones(biggestWin)*data[biggestWin]
data[-biggestWin:] = np.ones(biggestWin)*data[biggestWin]
elif baseline == 'mean':
baseline=np.mean(data)
data-=baseline
if dtype is not None:
data = np.asarray(data, dtype = dtype)
if debug:
return data, f0, f1, f2, dt0, dt1
return data
def _detect(self, trace, time):
'''Do the detection on trace, delete values in pointinterval and
return the other'''
params = self.params
dt=time[1]-time[0]
pointinterval=int(float(params['pointinterval']/dt))
if not params['cross_threshold']:
temp = findextrema(trace, params['maximum'], params['threshold'],
pointinterval)
else:
temp = cross_threshold(trace, params['upwards'], params['maximum'],
params['threshold'], pointinterval)
out = np.array([time[i] for i in temp])
return out
def _spikeTimes(self, index_sweep):
'''Return the list of spike times detected in sweep 'index_sweep' '''
if self.get_param('memory') == 'write':
data, dataTot = None, None
path = self.get_cache('path')
if path is not None:
dataTot = np.load(path)
if 'Sw_'+str(index_sweep) in dataTot.files:
data = dataTot['Sw_'+str(index_sweep)]
elif self.get_param('memory') == 'store':
if not self._cache.has_key('sp_times'): self._cache['sp_times']={}
data = self._cache['sp_times'].get(index_sweep)
elif self.get_param('memory') is None:
data = None
else:
print 'param %s for memory not '%self.get_param('memory') + \
'recognised, won\'t memorise anything'
data = None
if data is None:
sweep = self._get_input_sweep(index_sweep)
time = self._get_time(index_sweep)
swdata = sweep._data[1] if sweep._data.shape[0]>1 else sweep._data[0]
trace = self._ready_trace(deepcopy(swdata), time)
data = self._detect(trace, time)
if self.get_param('memory') == 'store':
self._cache['sp_times'][index_sweep] = data
elif self.get_param('memory') == 'write':
if dataTot is not None:
temp = {}
for i in dataTot.files:
temp[i]= dataTot[i]
temp['Sw_'+str(index_sweep)] = data
else:
temp = {'Sw_'+str(index_sweep) : data}
path = self.parent.home + self.parent.name + '_' +self.name + \
'_spikeTimes.npz'
np.savez(path, **temp)
self.set_cache('path', self.parent.home + self.parent.name +
'_' + self.name + '_spikeTimes.npz', force = 1)
dataTot.close()
# if self.get_param('snip_memory') is not None:
# snippet = self._extract_snippet(data, sweep._data[1], time)
# self._saveSnip(index_sweep, snippet, 'raw')
# snippet = self._extract_snippet(data, trace, time)
# self._saveSnip(index_sweep, snippet, 'filtered')
return data
def all_times(self, list_sweep=None, groupbysweep=False, keepMasked = False):
out = []
if list_sweep is None:
list_sweep = xrange(self.numSweeps())
if not keepMasked:
mask = self._mask()
for i, sweep in enumerate(list_sweep):
data = self._spikeTimes(sweep)
if not keepMasked:
b0, b1 = self._sweepBorder(sweep)
data = data[mask[b0: b1]]
out.append(data)
if not groupbysweep:
out = flatenList(out)
return out
def ISI(self,list_sweep=None, groupbysweep=False, keepMasked = False):
out = []
if list_sweep is None:
list_sweep = xrange(self.numSweeps())
if not keepMasked:
mask = self._mask()
for i, sweep in enumerate(list_sweep):
data = self._spikeTimes(sweep)
if not keepMasked:
b0, b1 = self._sweepBorder(i)
data = data[mask[b0: b1]]
out.append(np.diff(data))
if not groupbysweep:
out = flatenList(out)
return out
def time_list(self, index_sweep, keepMasked = False):
sp_times = self._spikeTimes(index_sweep)
if not keepMasked:
b0, b1 = self._sweepBorder(index_sweep)
mask = self._mask()[b0:b1]
sp_times = sp_times[mask]
name = 'SpikesOfSweep'+str(index_sweep)
origin = self.in_origin(index_sweep)+ [str(self.get_param('chan'))]
out = Time_list.Time_list(name, sp_times, origin, SweepIndex = index_sweep,
nodeOfSweep = self, title = 'NoTitle', units
= 's')
out.tag = self.tag(index_sweep)
return out
def findOriginFromIndex(self, index, keepMasked = False):
"""return the index of the sweep and the spike in that sweep
corresponding to spike index"""
tempind = 0
func = self._spikeTimes if keepMasked else self.time_list
if not keepMasked:
for i in xrange(self.in_numSweeps()):
length = len(self.time_list(i))
if index >= (tempind + length):
#if it's not in this sweep
tempind += length #note: add only the # of valid spikes
else:
index_trace = i
index_time_list = index - tempind
# index_time_list is the index of the spike in the time_list
# with all its spike (even if partialTraces is Ignore)
return index_trace, index_time_list
raise ValueError('I haven\'t found snippet %s'%index)
else:
borders = self._borders()
sortedSecondIndex = [borders[i][1] for i in range(self.in_numSweeps())]
index_trace = np.searchsorted(sortedSecondIndex, index)
if index_trace == 0:
return index_trace, index
if index_trace >= len(sortedSecondIndex):
raise ValueError('I haven\'t found snippet %s'%index)
            index_time_list = index - sortedSecondIndex[index_trace - 1]
            return index_trace, index_time_list
def _sweepBorder(self, index, borderbefore = None):
"""index of first and last spikes of sweep in numSpikes"""
if not self._cache.has_key('borders'): self._cache['borders'] = {}
bord = self._cache['borders']
out = bord.get(index)
if out is not None:
return out
if borderbefore is None:
out = (self.findSpikeFromSweep(index, True),
self.findSpikeFromSweep(index+1, True))
else:
out = (borderbefore[1], self._spikeTimes(index).size+borderbefore[1])
bord[index] = out
return out
def _borders(self):
borderbefore = None
for i in range(self.numSweeps()):
borderbefore = self._sweepBorder(i, borderbefore)
return self.get_cache('borders')
def numSpikes(self, list_sweep = None, keepMasked = False, verbose =1):
"""Count the number of spikes in list_sweep.
if list_sweep is None, count on all the sweeps,
if countMaskedSpikes, count the spikes before applying filter"""
if list_sweep is None:
list_sweep = xrange(self.numSweeps())
elif not list_sweep:
return 0
if not hasattr(list_sweep, '__iter__'):
list_sweep = [int(list_sweep)]
if keepMasked:
iterator = imap(self._spikeTimes, list_sweep)
else:
iterator = imap(self.time_list, list_sweep, repeat(False))
if verbose:
print 'numSpikes in %s:'%self.name
pbar = pgb.ProgressBar(maxval=len(list_sweep),
term_width = 79).start()
n=0
nspikes=len(iterator.next())
for i in iterator:
n+=1
nspikes += len(i)
if verbose: pbar.update(n)
if verbose: pbar.finish()
return nspikes
def findSpikeFromSweep(self, index, keepMasked = False):
return self.numSpikes(range(index), keepMasked, verbose = 0)
def _extract_snippet(self, sp_times, trace, time):
if not sp_times.size:
return np.array([])
win0, win1 = self.get_param('snip_window')
beg = time.searchsorted(sp_times + win0)
end = time.searchsorted(sp_times + win1)
midl = time.searchsorted(sp_times)
length = int(np.ceil((win1-win0)/(time[1]-time[0])))
# ceil and have the smallest window that include totally the interval
out = np.zeros((len(beg), length), dtype = trace.dtype)
for i ,(s, b,e) in enumerate(zip(midl, beg, end)):
if e - b == length:
out[i] = trace[b:e]
else:
border0 = int(length/2)
if s - b > border0 or border0+e -s > length:
                    raise ValueError('inconsistent snippet window bounds')
out[i, border0 - (s - b): border0 + (e-s)] = trace[b:e]
out[i, :border0 - (s - b)]=trace[b]
out[i, border0 + (e-s):] = trace[max(0,e-1)]
#minus 1 cause e can be len(trace)
return out
def _saveSnip(self, index_sweep, snippet, sniptype):
snip_memory = self.get_param('snip_memory')
if snip_memory[0] != 'store':
return
if snip_memory[1] != 'all':
saved = self.get_cache('snippet_'+sniptype)
if saved is None: saved = {}
if saved.has_key(index_sweep):
return
saved_index = self.get_cache('snippet_index_'+sniptype)
if saved_index is None: saved_index = []
if isinstance(snip_memory[1], int):
                while len(saved_index) >= snip_memory[1]:
first = saved_index.pop(0)
saved.pop(first)
elif snip_memory[1] != 'all':
return
saved_index.append(index_sweep)
saved[index_sweep] = snippet
self.set_cache('snippet_'+sniptype, saved, force =1)
self.set_cache('snippet_index_'+sniptype, saved_index,
force =1)
else:
if not self._cache.has_key('snippet_'+sniptype):
out = np.zeros((self.numSpikes(keepMasked = True),
snippet.shape[1]),
dtype = snippet.dtype)
self._cache['snippet_'+sniptype] = out
if not self._cache.has_key('snippet_index_'+sniptype):
out = np.zeros(self.numSpikes(keepMasked=True), dtype = 'bool')
self._cache['snippet_index_'+sniptype] = out
b0, b1 = self._sweepBorder(index_sweep)
self._cache['snippet_'+sniptype][b0:b1,:] = snippet
self._cache['snippet_index_'+sniptype][b0:b1] = np.ones(b1-b0,
dtype = 'bool')
def _extract_one_sweep(self, index_sweep, sniptype):
time = self._get_time(index_sweep)
spike = self._spikeTimes(index_sweep)
if not spike.size:
return None
sweep = self._get_input_sweep(index_sweep)
sweep = sweep._data[1] if sweep._data.shape[0]>1 else sweep._data[0]
if sniptype == 'filtered':
sweep = self._ready_trace(sweep, time)
elif sniptype != 'raw':
raise ValueError('Unknown sniptype: %s'%sniptype)
snip = self._extract_snippet(spike, sweep, time)
return snip
def _extract_all_snippet(self, sniptype):
print 'extracting %s snippets in %s'%(sniptype, self.name)
pbar = pgb.ProgressBar(maxval=self.numSweeps(), term_width = 79).start()
for index_sweep in range(self.in_numSweeps()):
snip = self._extract_one_sweep(index_sweep, sniptype)
if snip is None:
continue
self._saveSnip(index_sweep, snip, sniptype)
pbar.update(index_sweep)
pbar.finish()
def _getSnip(self, listindex, sniptype):
if not isinstance(listindex, list):
listindex = [int(listindex)]
snip_memory = self.get_param('snip_memory')
if snip_memory is not None and snip_memory[0] == 'store':
if snip_memory[1] == 'last_sweep':
arg_sort = np.argsort(listindex)
saved_ind = self.get_cache('last_sweep_snip_ind_'+sniptype)
saved = self.get_cache('last_sweep_snip_'+sniptype)
if saved is None or saved_ind is None:
saved = []
saved_ind = []
out = []
for arg in arg_sort:
try:
                        ind = saved_ind.index(listindex[arg])
                        out.append(saved[ind])
                    except ValueError:
                        index_sweep, indSpinSw = self.findOriginFromIndex(listindex[arg], keepMasked = 1)
time = self._get_time(index_sweep)
spike = self._spikeTimes(index_sweep)
sweep = self._get_input_sweep(index_sweep)
sweep = sweep._data[1] if sweep._data.shape[0]>1 else sweep._data[0]
if sniptype == 'filtered':
sweep = self._ready_trace(sweep, time)
snip = self._extract_snippet(spike, sweep, time)
snipinds = np.arange(*self._borders()[index_sweep])
self.set_cache('last_sweep_snip_ind_'+sniptype, snipinds)
self.set_cache('last_sweep_snip_'+sniptype, snip)
                        out.append(snip[indSpinSw])
return out
# out = {}
# sweep_saved = self.get_cache('snippet_'+sniptype)
# if not sweep_saved is None:
# for i in listindex:
# swind, spind = self.findOriginFromIndex(i)
# not_saved = []
# for i in list_index:
# sweep
# saved_ind =[i for i in listindex if i in sweep_saved]
elif snip_memory[1] == "all":
inds = self.get_cache('snippet_index_'+sniptype)
if inds is None or any([not inds[i] for i in listindex]):
self._extract_all_snippet(sniptype)
return np.array(self._cache['snippet_'+sniptype][listindex,:])
else:
raise NotImplementedError()
elif snip_memory is not None:
raise NotImplementedError()
out = None
for index_snip in listindex:
index_sweep, indSpinSw = self.findOriginFromIndex(index_snip, keepMasked = 1)
time = self._get_time(index_sweep)
spike = self._spikeTimes(index_sweep)
if not spike.size:
continue
sweep = self._get_input_sweep(index_sweep)
sweep = sweep._data[1] if sweep._data.shape[0]>1 else sweep._data[0]
if sniptype == 'filtered':
sweep = self._ready_trace(sweep, time)
elif sniptype != 'raw':
raise ValueError('Unknown sniptype: %s'%sniptype)
snip = self._extract_snippet(spike, sweep, time)
if out is None:
out = snip[indSpinSw]
else:
out = np.vstack((out, snip[indSpinSw]))
        return out
# not saved
# indices = [self.findOriginFromIndex(i) for i in listindex]
# time = self._get_time(index_sweep)
# time_list =self._time_list(index_sweep)
# spike = time_list._data
# sweep = self._get_input_sweep(index_sweep, dtype =self.get_param(
# 'dtype'))._data[1]
# if sniptype == 'filtered':
# sweep = self._ready_trace(sweep, time)
# elif sniptype != 'raw':
# raise ValueError('Unknown sniptype: %s'%sniptype)
# snip = self._extract_snippet(spike, sweep, time)
# self._saveSnip(index_sweep, snip, sniptype)
def _value_around_pic(self, list_index, props, sniptype):
snip = self._getSnip(list_index, sniptype)
if not isinstance(props, list):
props = [props]
out = np.zeros((len(props),snip.shape[0]), dtype = 'float')
for i, v in enumerate(props):
func = getattr(np, v)
if hasattr(func, '__call__'):
val = func(snip, axis = 1)
else:
                print 'Prop is not callable, it might not be what you wanted'
                continue
out[i] = val
return out
def PCA(self, sniptype):
if not use_mdp:
print 'mdp is not installed'
return
if self._cache.has_key('PCA_'+sniptype):
return self.get_cache('PCA_'+sniptype)
all_props = self.get_param('props')
PCAnode = mdp.nodes.PCANode(input_dim=len(all_props), output_dim=len(all_props)-1)
arr = self._getFilterArray(sniptype)
PCAnode.train(arr)
out = PCAnode(arr)
self.set_cache('PCA_'+sniptype, out)
return out
def _getFilterArray(self, sniptype, list_sweep = None):
if not self._cache.has_key('properties'):
self._cache['properties'] = {}
if self._cache['properties'].has_key(sniptype):
return self.get_cache('properties')[sniptype]
props = copy(self.get_param('props'))
wasNone = False
if list_sweep is None:
wasNone = True
list_sweep = range(self.numSweeps())
elif not isinstance(list_sweep, list):
list_sweep = list(list_sweep)
out = np.zeros((self.numSpikes(list_sweep, keepMasked = True),
len(props)), dtype = 'float')
print 'getting filter array for %s in %s'%(sniptype, self.name)
inds = range(len(props))
try:
indSw = props.index('sw_ind')
props.pop(indSw)
inds.pop(indSw)
N = 0
for i in list_sweep:
n = len(self._spikeTimes(i))
out[N:N+n, indSw] = np.ones(n, dtype = 'float')*i
N+=n
except ValueError:
print 'sw_ind not in prop'
pass
if wasNone:
out[:,inds] = self._value_around_pic(range(out.shape[0]), props, sniptype).T
else:
pbar = pgb.ProgressBar(maxval=len(list_sweep), term_width = 79).start()
for i in list_sweep:
b0, b1 = self._sweepBorder(i)
if b0 != b1:
data = self._value_around_pic(range(b0,b1), props, sniptype)
out[b0:b1,inds] = data.T
pbar.update(i)
self._cache['properties'][sniptype] = out
return out[:]
def _mask(self):
if self._cache.has_key('mask'):
return self._cache['mask']
mask = np.ones(self.numSpikes(keepMasked = True), dtype = bool)
Filter = self.get_param('filter')
if Filter:
for sniptype, prop, comp, value in Filter:
val = np.array(self._getDataToPlot(keepMasked=True, prop=prop,
sniptype=sniptype))
val = filterValues(val, comp, value)
mask = np.logical_and(mask, val)
if self._cache.has_key('lasso'):
Lasso = self.get_cache('lasso')
if Lasso:
for ms in Lasso.values():
mask = np.logical_and(mask,ms)
self._cache['mask'] = mask
return mask
def numSweeps(self):
'''return the number of sweeps'''
return self.in_numSweeps()
def chanNames(self, index = 0):
'''return the name of the channel used for the detection'''
return [self.get_param('chan')]
def origin(self, index):
return self.in_origin(index)
def filteredSweep(self, index_sweep, chan = None):
        '''return the trace on which the detection is done'''
sweep = self._get_input_sweep(index_sweep)
time = self._get_time(index_sweep)
swdata = sweep._data[1] if sweep._data.shape[0]>1 else sweep._data[0]
data = np.array(self._ready_trace(swdata, time), dtype = 'float')
chinf = [getattr(sweep, cname) for cname in sweep.chanNames()]
out = Sweep.Sweep(sweep.name+'_filtered', np.vstack((time, data)), chinf,self.tag(index_sweep))
return out
def snippet_chanNames(self, index = 0):
return [self.get_param('chan')+i for i in ['_raw', '_filtered']]
def snippet_origin(self, index, keepMasked = False):
ind_sw, ind_sp = self.findOriginFromIndex(index, keepMasked = keepMasked)
return self.in_origin(ind_sw)+ ['Spike_'+str(ind_sp)]
def snippet_sweepInfo(self, index, keepMasked = False):
if not keepMasked:
index = self._findNotMaskedFromMaskedIndex(index)
sw_ind, sp_ind = self.findOriginFromIndex(index, keepMasked = True)
sw_inf = self.sweepInfo(sw_ind)
sw_inf.numChans = 2
chInf = [copy(sw_inf.channelInfo[0]) for i in (0,1)]
chInf[0].name = chInf[0].name + '_raw'
chInf[1].name = chInf[1].name + '_filtered'
sw_inf.channelInfo = chInf
dt = sw_inf.dt
win = self.get_param('snip_window')
sw_inf.numPoints = int((win[1] - win[0])/dt)
sw_inf.tend = sw_inf.numPoints
sw_inf.t0 = 0
sw_inf.dt = 1
return sw_inf
def snippet(self, index, chan = None, keepMasked = False):
"""return a snippet
Arguments:
- `index`:
- `chan`:
"""
if not keepMasked:
index = self._findNotMaskedFromMaskedIndex(index)
sw_ind, sp_ind = self.findOriginFromIndex(index, keepMasked = True)
snipRaw = self._getSnip(index, 'raw')[0]
snipFiltered = self._getSnip(index, 'filtered')[0]
data = np.zeros((3, snipRaw.size), dtype = snipRaw.dtype)
data[0] = np.arange(data.shape[1])
data[1] = snipRaw
data[2] = snipFiltered
snipinf = self.snippet_sweepInfo(index, keepMasked)
return Sweep.Sweep('Snippet_'+str(index)+'in_'+self.name, data,
snipinf.channelInfo, tag = self.tag(sw_ind))
def snip_tag(self, index, keepMasked = False):
'''Return the tags of sweep or time_list'''
if not keepMasked:
index = self._findNotMaskedFromMaskedIndex(index)
sw_ind, sp_ind = self.findOriginFromIndex(index)
return self.in_tag(sw_ind)
def tag(self, index):
'''Return the tags of sweep or time_list'''
return self.in_tag(index)
def sweepInfo(self, index):
sw_inf = self.in_sweepInfo(index)
cname = self.get_param('chan')
ind = [i.name for i in sw_inf.channelInfo].index(cname)
sw_inf.channelInfo = [sw_inf.channelInfo[ind]]
sw_inf.numChans = 1
return sw_inf
def _findNotMaskedFromMaskedIndex(self, maskedIndex):
mask = self._mask()
index = np.arange(mask.size)
return index[mask][maskedIndex]
def _get_input_sweep(self, sw_ind, *args, **kwargs):
last = self.get_cache('last')
if last is None or last[0] != sw_ind:
if not kwargs.has_key('chan') and not args:
kwargs['chan'] = self.get_param('chan')
sw = self.in_sweep(sw_ind, *args, **kwargs)
self.set_cache('last', (sw_ind, sw), force = 1)
return copy(sw)
return copy(last[1])
def _get_time(self, index_sweep):
lasttime = self.get_cache('lasttime')
if lasttime is None or lasttime[0] != index_sweep:
node, out = self.inputs['in_sweep']
time = self.in_sweep(index_sweep, self.get_param('chan'))._data[0]
if time.dtype == np.dtype('int16'):
# no time line, need to create time, assume that dt
# is constant on the sweep, does it matter?
swinf = self.sweepInfo(index_sweep)
time = np.arange(swinf.t0, swinf.tend, swinf.dt,dtype = 'float')
self.set_cache('lasttime', (index_sweep, time), force =1)
return copy(time)
return copy(lasttime[1])
def save(self, what = None, path = None, force = 0):
if what is None:
what = ['SpikeTimes', 'Border', 'Mask', 'Prop', 'Lasso']
for i in what:
getattr(self, 'save'+i)(force = force, name = path)
def saveSpikeTimes(self, name = None, force = 0, mode = 'bn', delimiter =
',', keepMasked = True):
'''Save spike times in file 'name' (can only save ALL the spike times at
once)
'name' is absolute or relative to parent.home
if 'force', replace existing file
'mode' can be 'bn', 'csv', 'txt' or 'vert_csv':
'bn': binary, saved in .npz
        'csv' or 'txt': text file, values separated by 'delimiter' (default
        ',') saved in lines
        'vert_csv': text file, values separated by 'delimiter' (default
        ',') saved in columns '''
import os
path = name
if path is None:
path = self.parent.name+'_'+self.name+'_spikeTimes'
if path[0] != '/':
path = self.parent.home + path
data = self.all_times(keepMasked = True, groupbysweep = True)
if mode == 'bn':
path += '.npz'
if not force:
if os.path.isfile(path):
                    print 'File %s already exists, change name or force'%path
return
kwargs = {}
for i, value in enumerate(data):
kwargs['Sw_'+str(i)] = value
np.savez(path, **kwargs)
elif mode == 'vert_csv':
path += '_vertical.csv'
if not force:
if os.path.isfile(path):
                    print 'File %s already exists, change name or force'%path
return
nspike = self.numSpikes()
out = file(path, 'w')
totspike = 0
index_spike = 0
while totspike < nspike:
for index_sweep in range(self.numSweeps()):
timelist = self.time_list(index_sweep)
if len(timelist) > index_spike:
out.write(str(timelist._data[index_spike]))
totspike+=1
out.write(str(delimiter))
out.write('\n')
index_spike += 1
out.close()
elif mode == 'csv' or mode =='txt':
path += '.'+mode
if not force:
if os.path.isfile(path):
                    print 'File %s already exists, change name or force'%path
return
out = file(path, 'w')
for line in data:
out.write(delimiter.join(np.array(line, dtype = 'str'))+'\n')
out.close()
else:
print 'unknown mode %s'%mode
def saveBorder(self, name = None, force =0):
import os
if name is None:
name = self.parent.name+'_'+self.name+'_borders'
        if name[0] != '/':
            path = self.parent.home + name
        else:
            path = name
data = self._borders()
outdata = np.zeros((self.numSweeps(), 2), dtype = 'int')
for i, v in data.iteritems():
outdata[i] = v
path += '.npy'
if not force:
if os.path.isfile(path):
                print 'File %s already exists, change name or force'%path
return
np.save(path, outdata)
def saveMask(self, name = None, force = 0):
if name is None:
name = self.parent.name+'_'+self.name+'_mask'
        if name[0] != '/':
            path = self.parent.home + name
        else:
            path = name
path += '.npy'
if not force:
if os.path.isfile(path):
                print 'File %s already exists, change name or force'%path
return
data = self._mask()
np.save(path, data)
def saveLasso(self, name = None, force = 0):
if name is None:
name = self.parent.name+'_'+self.name+'_lasso'
        if name[0] != '/':
            path = self.parent.home + name
        else:
            path = name
path += '.npz'
if not force:
if os.path.isfile(path):
                print 'File %s already exists, change name or force'%path
return
data = self._lasso()
if data:
            np.savez(path, **data)
def saveProp(self, sniptype = ['raw', 'filtered'], name = None, force = 0):
if not isinstance(sniptype, list):
sniptype = [str(sniptype)]
if name is None:
name = self.parent.name+'_'+self.name+'_prop'+'_'
        if name[0] != '/':
            path = self.parent.home + name
        else:
            path = name
for sntp in sniptype:
p = path+sntp+ '.npy'
if not force:
if os.path.isfile(p):
                    print 'File %s already exists, change name or force'%p
continue
data = self._getFilterArray(sntp)
np.save(p, data)
def set_param(self, *args, **kwargs):
if args:
if len(args) == 2:
kwargs[args[0]] = args[1]
else:
                raise ValueError('set_param accepts 0 or 2 positional arguments')
        if kwargs.has_key('filter'):
            self.dirty('all', selfDirty = False)
            self._params['filter'] = kwargs.pop('filter')
            if self._cache.has_key('mask'):
                self._cache.pop('mask')
        if kwargs.has_key('lasso'):
            self._params['lasso'] = kwargs.pop('lasso')
            if self._cache.has_key('mask'):
                self._cache.pop('mask')
for wn in ['win'+str(i) for i in [0,1,2]]:
if kwargs.has_key(wn):
if kwargs[wn] is None:
kwargs[wn]=1e-5
if not kwargs:
return
# topop = ['snippet_indexraw', 'snippet_indexfiltered', 'snippetraw',
# 'snippetfiltered']
# [self._cache.pop(i) for i in topop if self._cache.has_key(i)]
return super(DetectSpike, self).set_param(**kwargs)
def load(self, force = 0):
self.loadSpikes(force= force)
self.loadMask(force= force)
self.loadBorders(force= force)
try:
self.loadProp(force= force)
self.loadLasso(force= force)
except Exception:
            print 'Could only load spike times, mask and borders'
def loadSpikes(self, path = None, force = 0):
'''load spikes from a .npz file
if memory is write, just load the path of the file
if memory is store, store spike times from the file in cache'''
if path is None:
path = self.parent.home + self.parent.name + '_' + self.name + \
'_spikeTimes.npz'
self.set_cache('sp_times', {}, force = force)
cached = self._cache['sp_times']
if self.get_param('memory') == 'store':
File = np.load(path)
for name in File.files:
cached[int(name[name.rfind('_')+1:])]= File[name]
elif self.get_param('memory') == 'write':
self.set_cache('path', path)
def loadBorders(self, path = None, force = 0):
'''load spikes from a .npz file or a .npy
if memory is write, just load the path of the file
if memory is store, store spike times from the file in cache'''
if path is None:
path = self.parent.home + self.parent.name + '_' + self.name + \
'_borders'
if os.path.isfile(path+'.npy'):
path+='.npy'
elif os.path.isfile(path+'.npz'):
path+='.npz'
else:
raise IOError('no file with npy or npz extension on this path:\n%s'%
path)
self.set_cache('borders', {}, force = force)
cached = self._cache['borders']
if self.get_param('memory') == 'store':
File = np.load(path)
if path.split('.')[-1] == 'npz':
for name in File.files:
cached[int(name[name.rfind('_')+1:])]= File[name]
else:
for i, line in enumerate(File):
cached[i] = line
else:
raise NotImplementedError()
def loadProp(self, sniptype = ['raw', 'filtered'], path = None, force=0):
if path is None:
path = self.parent.home + self.parent.name + '_' + self.name + \
'_prop_'
if not isinstance(sniptype, list):
sniptype = [str(sniptype)]
for sntp in sniptype:
p = path + sntp+'.npy'
File = np.load(p)
if not self._cache.has_key('properties'):
self._cache['properties'] = {}
self._cache['properties'][sntp] = File
def loadMask(self, path = None, force =0):
if path is None:
path = self.parent.home + self.parent.name + '_' + self.name + \
'_mask'+'.npy'
File = np.load(path)
self._cache['mask'] = File
def loadLasso(self, path = None, force =0):
import os
if path is None:
path = self.parent.home + self.parent.name + '_' + self.name + \
'_lasso'+'.npz'
if os.path.isfile(path):
File = np.load(path)
self._cache['lasso'] = dict([(i, File[i]) for i in File.files])
def all_val(self, sniptype, list_sweep = None, groupbysweep = False,
keepMasked = False):
if list_sweep is None:
list_sweep = range(self.numSweeps())
def _getDataToPlot(self, prop, sniptype, keepMasked):
if prop.split('_')[0] == 'PCA':
if not use_mdp:
print 'mdp is not installed'
return
            indPCA = int(prop.split('_')[1])
            data = self.PCA(sniptype=sniptype)[:, indPCA]
else:
props = self.get_param('props')
indprop = props.index(prop)
data = self._getFilterArray(sniptype)[:,indprop]
if not keepMasked:
mask = self._mask()
data = data[mask]
return data
def prop_hist(self, fig, prop = 'props', sniptype = 'raw',
keepMasked = False, **kwargs):
fig.clear()
ax = fig.add_subplot(111)
if prop == 'props':
data = self._getFilterArray(sniptype)
if not keepMasked:
data = data[self._mask(),:]
labels = self.get_param('props')
elif prop == 'PCA':
if not use_mdp:
print 'mdp is not installed'
return
labels = ['PCA_'+str(i) for i in range(len(self.get_param('props')))]
data = self.PCA(sniptype)
if not keepMasked:
data = data[self._mask(),:]
else:
labels = prop
data = None
for i,p in enumerate(prop):
out = self._getDataToPlot(p, sniptype, keepMasked)
if data is None:
data = np.zeros((out.size, len(prop)))
data[:,i] = out
out = ax.hist(data, label = labels, **kwargs)
fig.canvas.draw()
return out
def select_snip(self, sniptype, prop, comp, value, keepMasked = True):
allsnip = self._getSnip(range(self.numSpikes(keepMasked = True)),
sniptype)
p = self._getDataToPlot(prop, sniptype, keepMasked)
toKeep = p >= value
if not comp:
toKeep = np.invert(toKeep)
return allsnip[toKeep,:]
def plot_selectedsnip(self, fig, sniptype, prop=None, comp=None, value=
None, keepMasked = True, maxnum = 5000, **kwargs):
if any([i is None for i in [prop, comp, value]]):
snip = self._getSnip(range(self.numSpikes(keepMasked = True)),
sniptype)
if not keepMasked:
snip = snip[self._mask(),:]
else:
snip = self.select_snip(sniptype,prop,comp, value, keepMasked)
totnum = snip.shape[0]
fig.clear()
ax = fig.add_subplot(111)
if snip.shape[0]> maxnum:
snip = snip[:maxnum,:]
ax.plot(snip.T, **kwargs)
mean = snip.mean(axis = 0)
ax.plot(mean, 'r')
ax.set_title('%s snippets of %s\n(%s/%s plotted)'%(sniptype, self.name,
snip.shape[0],totnum
))
fig.canvas.draw()
return snip, mean
def prop_plot(self, figure, propx= 'min', propy = 'max', sniptype = 'raw',
clear = True, keepMasked = False, **kwargs):
        print 'plotting properties in %s'%self.name
self._fig = figure
self._sniptype = sniptype
self._keepMasked = keepMasked
if clear: self._fig.clear()
ax = self._fig.add_subplot(111)
ax.set_xlabel(propx)
ax.set_ylabel(propy)
X = self._getDataToPlot(propx, sniptype, keepMasked)
Y = self._getDataToPlot(propy, sniptype, keepMasked)
if not kwargs.has_key('marker'):
kwargs['marker']='.'
ax.plot(X, Y, 'k',ls = '', picker = 5, label = '_nolegend_',
**kwargs)
ax.set_title('Properties of spikes from %s \n%s spikes plotted'%(
self.name, X.size))
self._fig.canvas.mpl_connect('pick_event', self._picked)
self._last_event = None
self._fig.canvas.draw()
self._temp = None
return X, Y
def _picked(self, event):
if self._last_event is not None:
if self._last_event.mouseevent is event.mouseevent: return
line = self._last_event.artist
line.set_mfc('k')
line.set_zorder(1)
self._last_event = event
line = self._last_event.artist
line.set_zorder(0)
i = event.ind[0]
x = np.array([line.get_xdata()[i]])
y = np.array([line.get_ydata()[i]])
if self._temp is None:
self._temp, = self._fig.axes[0].plot(x, y, 'yo', ms = 10,
alpha = .5)
self._temp.set_zorder(2)
self._temp.set_label('Spike %s'%i)
else:
self._temp.set_xdata(x)
self._temp.set_ydata(y)
self._temp.set_label('Spike %s'%i)
self._fig.axes[0].legend((line, self._temp),(line.get_label(),
self._temp.get_label()), loc = 2)
self._fig.canvas.draw()
ax = self._fig.add_axes([0.6,0.6,0.25,0.25], facecolor = 'none')
ax.clear()
index_sweep, index_spike = self.findOriginFromIndex(i, keepMasked =
self._keepMasked)
ax.set_title('Spike %s (%s in sweep %s)'%(i, index_spike, index_sweep))
ax.set_xlabel('index')
ax.set_ylabel(self._sniptype + ' snippet')
if not self._keepMasked:
i = self._findNotMaskedFromMaskedIndex(i)
snip = self._getSnip(i, self._sniptype)
line = ax.plot(snip.T, 'k')
self._fig.canvas.draw()
def multi_prop_plot(self, fig, prop, sniptype = 'raw', keepMasked = False,
**kwargs):
"""Plot all prop[i] vs prop[j] combination in one figure
Use prop = PCA to plot all PCA components
prop = props to plot all other properties"""
if prop == 'PCA':
prop = ['PCA_'+str(i) for i in range(len(self.get_param("props"))-2)]
elif prop == 'props':
prop = self.get_param('props')
size = len(prop)
fig.clear()
data = [self._getDataToPlot(keepMasked=keepMasked,sniptype=sniptype, prop = i) for i in prop]
axes =[]
for line in range(1,size):
datay = data[line]
axes.append([fig.add_subplot(size-1,size-1,(line-1)*(size-1)+1+i) for i in range(line)])
[ax.plot(data[j],datay, **kwargs) for j,ax in enumerate(axes[-1])]
[ax[0].set_ylabel(prop[i+1]) for i, ax in enumerate(axes)]
[ax.set_xlabel(prop[i]) for i, ax in enumerate(axes[-1])]
fig.canvas.draw()
return fig
def _lasso(self):
if not self._cache.has_key('lasso'):
self._cache['lasso'] = {}
return self._cache['lasso']
def lasso_prop_plot(self, figure, propx= 'min', propy = 'max', sniptype = 'raw',
clear = True, keepMasked = False, **kwargs):
        print 'plotting properties in %s'%self.name
if clear: figure.clear()
ax = figure.add_subplot(111)
ax.set_xlabel(propx)
ax.set_ylabel(propy)
X = self._getDataToPlot(propx, sniptype, keepMasked)
Y = self._getDataToPlot(propy, sniptype, keepMasked)
self._lassoMask = self._mask()
self._lassoManager = LassoManager(ax, np.vstack((X,Y)).T, sizes = (5,), **kwargs)
ax.set_xlim(X.min(),X.max())
ax.set_ylim(Y.min(),Y.max())
figure.canvas.draw()
return X, Y
def keep_in_lasso(self, name = None):
isinside = self._lassoManager.isinside
mask = np.array(self._lassoMask)
mask[mask] = np.logical_and(mask[mask], isinside)
Lasso = self._lasso()
if name is None:
n = 0
while 'lasso_%s'%n in Lasso.keys():
n+=1
name = 'lasso_%s'%n
self._cache['lasso'][name] = mask
if self._cache.has_key('mask'):
self.set_cache('mask', np.logical_and(self._mask(), mask), force = 1)
return mask
def exlude_lasso(self, name =None):
isinside = self._lassoManager.isinside
mask = np.array(self._lassoMask)
mask[mask] = np.logical_and(mask[mask], np.logical_not(isinside))
Lasso = self._lasso()
if name is None:
n = 0
while 'lasso_%s'%n in Lasso.keys():
n+=1
name = 'lasso_%s'%n
self._cache['lasso'][name] = mask
if self._cache.has_key('mask'):
self.set_cache('mask', np.logical_and(self._mask(), mask), force = 1)
return mask
class CreateUniv:
def __init__(self, node):
self.node = node
def __call__(self):
a = self.node.in_chanNames()
a.append('None')
return a
|
python
|
import json
import codecs
import tldextract
urls = dict()
duplicates = list()
with codecs.open('/home/rkapoor/Documents/ISI/data/Network/intersecting-urls.jsonl', 'r', 'utf-8') as f:
for line in f:
doc = json.loads(line)
url = doc['url']
if url in urls:
urls[url] += 1
else:
urls[url] = 1
# if url == 'http://flint.backpage.com/FemaleEscorts/unforgettable-new-staff-new-attitude/17626747':
# duplicates.append(doc['name'])
# for key, value in sorted(urls.items(), key=lambda x:x[1]):
# if value > 10:
# print("%s: %s" % (key, value))
# print("SIZE:",len(urls))
# # for key, value in urls.items():
# # if(value > 1):
# # print(key,":",value)
# print(duplicates)
DATA_FILE = "/home/rkapoor/Documents/ISI/data/DIG-Nov-Eval/gt-v02-all.jl"
def safe_copy_simple(json_from, json_to, field):
if field in json_from and json_from[field] is not None:
json_to[field] = json_from[field]
def safe_copy(json_from, json_to, field):
try:
if field in json_from and json_from[field] is not None:
distinct_values = set()
for values in json_from[field]:
results = values['result']
if type(results) is list:
for result in results:
distinct_values.add(result['value'])
elif 'value' in results:
distinct_values.add(results['value'])
json_to[field] = list(distinct_values)
except Exception:
print(json_from[field])
def extract_data(json_document, outfile):
extracted_document = {}
extracted_document['high_precision'] = {}
extracted_document['high_recall'] = {}
safe_copy_simple(json_document, extracted_document, 'url')
if 'high_precision' in json_document:
safe_copy(json_document['high_precision'], extracted_document['high_precision'], 'city')
safe_copy(json_document['high_precision'], extracted_document['high_precision'], 'name')
safe_copy(json_document['high_precision'], extracted_document['high_precision'], 'phone')
if 'high_recall' in json_document:
safe_copy(json_document['high_recall'], extracted_document['high_recall'], 'city')
safe_copy(json_document['high_recall'], extracted_document['high_recall'], 'name')
safe_copy(json_document['high_recall'], extracted_document['high_recall'], 'phone')
outfile.write(json.dumps(extracted_document))
outfile.write('\n')
output_file_base = "intersecting.jl"
count = 0
domain = 'backpage.com'
outfile = codecs.open(domain+'/'+output_file_base, 'w', 'utf-8')
with codecs.open(DATA_FILE, 'r', 'utf-8') as infile:
for line in infile:
count += 1
json_document = json.loads(line)
if json_document['url'] in urls:
extract_data(json_document, outfile)
if(count % 100 == 0):
print(count)
outfile.close()
|
python
|
import subprocess
from uuid import uuid1
import yaml
from jinja2 import Environment, PackageLoader
from sanetrain.workflow_builder import generate_training
def test_generate_training():
env = Environment(loader=PackageLoader('sanetrain', 'templates'))
template = env.get_template('test_template.py')
with open('tests/test_model.yaml') as f:
config = yaml.load(f, Loader=yaml.SafeLoader)
train_script = generate_training(template, config)
fname = 'tests/%s.py' % uuid1().hex
with open(fname, 'w+') as fout:
fout.write(train_script)
subprocess.run(["python", fname])
|
python
|
#!/usr/bin/python
import time
fact_arr = [1, 1, 2, 6, 24, 120, 720, 5040, 40320, 362880]
class memoize:
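    # Minimal memoization decorator: as written it caches results keyed on the
    # first positional argument only, so later arguments are assumed not to
    # change the result.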
def __init__(self, function):
self.function = function
self.memoized = {}
def __call__(self, *args):
try:
return self.memoized[args[0]]
except KeyError:
self.memoized[args[0]] = self.function(*args)
return self.memoized[args[0]]
def fact(n):
if n in (0, 1):
return 1
return reduce(lambda x,y: x*y, xrange(2, n+1))
def sumofact(n):
total = 0
while n > 0:
total += fact_arr[n%10]
n /= 10
return total
#total = 0
#for d in str(n):
# total += fact(int(d))
#return total
#return reduce(lambda x,y: fact(int(x))+fact(int(y)), str(n))
@memoize
def lochelper(n, s):
if n in s:
return 0
s.add(n)
return 1 + lochelper(sumofact(n), s)
def lengthochain(n):
return lochelper(n, set([]))
if __name__ == '__main__':
#t = time.clock()
count = 0
for i in xrange(1000000):
if lengthochain(i) == 60:
count += 1
print count
#print time.clock()-t
|
python
|
#! /usr/bin/env python
def condense(w):
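    # e.g. condense("internationalization") -> "i18n": first letter, count of
    # interior letters, last letter (numeronym-style abbreviation).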
return w[0] + str(len(w)-2) + w[-1:]
def expand(w):
length = int(w[1:-1]) + 2
for word in get_words(length):
if word.startswith(w[0]) and word.endswith(w[-1:]):
print word
def get_words(length, filename = '/usr/share/dict/words'):
    return (word.strip() for word in open(filename) if len(word.strip()) == length)
if __name__ == "__main__":
print "Words With Numbers In Them Thing"
while(True):
w = raw_input("Word: ")
print "Condensed: "
print ' '.join(condense(p) for p in w.split())
try:
print "Expanded: "
expand(w)
except:
print "Could not expand " + w
|
python
|
#
#
# Use: genKey(5)
# => "xmckl"
#
#
import math, random
def genKey(n):
alphabet = list("abcdefghijklmnopqrstuvwxyz")
out = ""
for i in range(n):
        out += alphabet[random.randint(0, 25)]
return out
|
python
|
linha1 = input().split(" ")
linha2 = input().split(" ")
cod1, qtde1, valor1 = linha1
cod2, qtde2, valor2 = linha2
total = (int(qtde1) * float(valor1)) + (int(qtde2) * float(valor2))
print("VALOR A PAGAR: R$ %0.2f" %total)
|
python
|
# https://github.com/dannysteenman/aws-toolbox
#
# License: MIT
#
# This script will set a CloudWatch Logs Retention Policy to x number of days for all log groups in the region that you exported in your cli.
import argparse
import boto3
cloudwatch = boto3.client("logs")
def get_cloudwatch_log_groups():
kwargs = {"limit": 50}
cloudwatch_log_groups = []
while True: # Paginate
response = cloudwatch.describe_log_groups(**kwargs)
cloudwatch_log_groups += [log_group for log_group in response["logGroups"]]
if "NextToken" in response:
kwargs["NextToken"] = response["NextToken"]
else:
break
return cloudwatch_log_groups
def cloudwatch_set_retention(args):
retention = vars(args)["retention"]
cloudwatch_log_groups = get_cloudwatch_log_groups()
for group in cloudwatch_log_groups:
print(group)
if "retentionInDays" not in group or group["retentionInDays"] != retention:
print(f"Retention needs to be updated for: {group['logGroupName']}")
cloudwatch.put_retention_policy(
logGroupName=group["logGroupName"], retentionInDays=retention
)
else:
print(
f"CloudWatch Loggroup: {group['logGroupName']} already has the specified retention of {group['retentionInDays']} days."
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Set a retention in days for all your CloudWatch Logs in a single region."
)
parser.add_argument(
"retention",
metavar="RETENTION",
type=int,
choices=[
1,
3,
5,
7,
14,
30,
60,
90,
120,
150,
180,
365,
400,
545,
731,
1827,
3653,
],
help="Enter the retention in days for the CloudWatch Logs.",
)
args = parser.parse_args()
cloudwatch_set_retention(args)
|
python
|
from backend.util.crypto_hash import crypto_hash
HEX_TO_BINARY_CONVERSION_TABLE ={
'0': '0000',
'1': '0001',
'2': '0010',
'3': '0011',
'4': '0100',
'5': '0101',
'6': '0110',
'7': '0111',
'8': '1000',
'9': '1001',
'a': '1010',
'b': '1011',
'c': '1100',
'd': '1101',
'e': '1110',
'f': '1111'
}
def hex_to_binary(hex_string):
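    # Map each hex digit to its 4-bit string via the lookup table above,
    # e.g. '1c' -> '00011100'.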
binary_string = ''
for character in hex_string:
binary_string += HEX_TO_BINARY_CONVERSION_TABLE[character]
return binary_string
def main():
number = 451
hex_number = hex(number)[2:]
print(f'hex_number: {hex_number}')
binary_number = hex_to_binary(hex_number)
print(f'binary_number: {binary_number}')
original_number = int(binary_number,2)
print(f'original_number: {original_number}')
hex_to_binary_crypto_hash = hex_to_binary(crypto_hash('test-data'))
print(f'hex_to_binary_crypto_hash: {hex_to_binary_crypto_hash}')
if __name__ =='__main__':
main()
|
python
|
import numpy as np
from numpy.linalg import inv, cholesky
from scipy.stats import norm, rankdata
from synthpop.method import NormMethod, smooth
class NormRankMethod(NormMethod):
# Adapted from norm by carrying out regression on Z scores from ranks
# predicting new Z scores and then transforming back
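    # Sketch of the transform used below: fit maps responses to z-scores via
    # z = Phi^-1(rank(y) / (n + 1)); predict maps the regression output back to
    # the rank scale with Phi(z) * (n + 1) and indexes into the sorted observed values.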
def fit(self, X_df, y_df):
X_df, y_df = self.prepare_dfs(X_df=X_df, y_df=y_df, normalise_num_cols=True, one_hot_cat_cols=True)
y_real_min, y_real_max = np.min(y_df), np.max(y_df)
self.n_rows, n_cols = X_df.shape
X = X_df.to_numpy()
y = y_df.to_numpy()
z = norm.ppf(rankdata(y).astype(int) / (self.n_rows + 1))
self.norm.fit(X, z)
residuals = z - self.norm.predict(X)
if self.proper:
            # looks like proper is not working quite yet as it produces negative values for a strictly positive column
# Draws values of beta and sigma for Bayesian linear regression synthesis of y given x according to Rubin p.167
# https://link.springer.com/article/10.1007/BF02924688
self.sigma = np.sqrt(np.sum(residuals**2) / np.random.chisquare(self.n_rows - n_cols))
# NOTE: I don't like the use of inv()
V = inv(np.matmul(X.T, X))
self.norm.coef_ += np.matmul(cholesky((V + V.T) / 2), np.random.normal(scale=self.sigma, size=n_cols))
else:
self.sigma = np.sqrt(np.sum(residuals**2) / (self.n_rows - n_cols - 1))
if self.smoothing:
y = smooth(self.dtype, y, y_real_min, y_real_max)
self.y_sorted = np.sort(y)
def predict(self, X_test_df):
X_test_df, _ = self.prepare_dfs(X_df=X_test_df, normalise_num_cols=True, one_hot_cat_cols=True, fit=False)
n_test_rows = len(X_test_df)
X_test = X_test_df.to_numpy()
z_pred = self.norm.predict(X_test) + np.random.normal(scale=self.sigma, size=n_test_rows)
        y_pred_indices = (norm.cdf(z_pred) * (self.n_rows + 1)).astype(int)
y_pred_indices = np.clip(y_pred_indices, 1, self.n_rows)
y_pred = self.y_sorted[y_pred_indices]
return y_pred
|
python
|
from caty.core.spectypes import UNDEFINED
from caty.core.facility import Facility, AccessManager
class MongoHandlerBase(Facility):
am = AccessManager()
|
python
|
nome=input('digite seu nome completo =')
nomeup=nome.upper()
nomelo=nome.lower()
nomese=nome.strip()
dividido=nome.split()
print('em maiusculas = {}'.format(nomeup.strip()))
print('em minusculas = {}'.format(nomelo.strip()))
print('o seu nome tem {} letras'.format(len(nomese)-nomese.count(' ')))
print('o seu primeiro nome tem {}'.format(len(dividido[0])))
|
python
|
from flask import render_template, request
from sqlalchemy import desc
from app.proto import bp
from app.models import Share
@bp.route('/', methods=['GET', 'POST'])
@bp.route('/index', methods=['GET', 'POST'])
def index():
user_id = request.args.get('user_id')
shares = Share.query.filter_by(user_id=user_id).order_by(desc(Share.timestamp)).all()
return render_template('index.html', user_id=user_id, shares=shares)
@bp.route('/register', methods=['GET', 'POST'])
def register():
return render_template('register.html')
@bp.route('/login', methods=['GET', 'POST'])
def login():
return render_template('login.html')
|
python
|
import ProblemFileHandler as handler
import OJTemplate
# Generate a problem file
# Method 1: problem_text_file + test_cases_file
text_file1 = '../resources/OJ/demo_problem1_text.txt'
test_cases_file1 = '../resources/OJ/demo_problem1_test_cases.txt'
output_file1 = '../resources/OJ/Problems/Problem1.plm'
handler.generate_problem(problem_text_file=text_file1,
test_cases_file=test_cases_file1,
output_file=output_file1,
overwrite=True)
# Method 2: problem_text_file + standard_answer_func + test_inputs
# Note: in this case test_cases_file must be None (the default)
text_file2 = '../resources/OJ/demo_problem2_text.txt'
answer_func = OJTemplate.standard_answer
inputs = OJTemplate.test_inputs
output_file2 = '../resources/OJ/Problems/Problem2.plm'
handler.generate_problem(problem_text_file=text_file2,
standard_answer_func=answer_func,
test_inputs=inputs,
output_file=output_file2,
overwrite=True)
# Load a Problem file (.plm); returns a dict with the two keys 'text' and 'test_cases'
problem_dict1 = handler.load_problem_file(output_file1)
problem_dict2 = handler.load_problem_file(output_file2)
print(problem_dict1)
print(problem_dict2)
|
python
|
import random
import os
import time
import server
import discord
import ctypes.util
from discord.ext import commands
from cogs.musiccog import Music
from cogs.funcog import Fun
find_opus = ctypes.util.find_library('opus')
discord.opus.load_opus(find_opus)
TOKEN = os.getenv("DISCORD_TOKEN")
# Silence useless bug reports messages
bot = commands.Bot(command_prefix='!')
bot.add_cog(Music(bot))
bot.add_cog(Fun(bot))
@bot.event
async def on_ready():
print('Logged in as:\n{0.user.name}\n{0.user.id}'.format(bot))
server.server()
bot.run(TOKEN)
|
python
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch.nn as nn
from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer
from maskrcnn_benchmark.layers.non_local import init_nl_module
from .generalized_rcnn import GeneralizedRCNN
import torch
from maskrcnn_benchmark.layers import CoordConv2d
from torch.nn.parameter import Parameter
_DETECTION_META_ARCHITECTURES = {"GeneralizedRCNN": GeneralizedRCNN}
def build_detection_model(cfg):
# Define and load the original model
meta_arch = _DETECTION_META_ARCHITECTURES[cfg.MODEL.META_ARCHITECTURE]
model = meta_arch(cfg)
dummy_checkpointer = DetectronCheckpointer(cfg, model)
dummy_checkpointer.load(cfg.MODEL.WEIGHT)
if cfg.MODEL.BACKBONE.COORDS:
module_dict = {
"input": {"parent": model.backbone.body.stem, "name": "conv1"},
"rpn_input": {"parent": model.rpn.head, "name": "conv"}}
# "rpn_input": {"parent": model.rpn.head, "name": "conv"}}
# }
for identifier in cfg.MODEL.BACKBONE.COORDS:
if identifier not in module_dict.keys():
continue
parent_module = module_dict[identifier]["parent"]
name = module_dict[identifier]["name"]
old_conv = getattr(parent_module, name)
out_ch, in_ch, h, w = old_conv.weight.shape
new_weight = torch.cat([old_conv.weight,
torch.zeros([out_ch, 2, h, w], dtype=torch.float32)], dim=1)
kwargs = {"with_r": False}
for key in ["in_channels", "out_channels", "kernel_size", "stride",
"padding", "dilation", "groups"]:
kwargs[key] = getattr(old_conv, key)
if old_conv.bias is None:
kwargs["bias"] = False
else:
kwargs["bias"] = True
# https://discuss.pytorch.org/t/how-can-i-modify-certain-layers-weight-and-bias/11638/3
new_conv = CoordConv2d(**kwargs)
new_conv.conv.state_dict()["weight"].copy_(new_weight)
if old_conv.bias is not None:
new_conv.conv.state_dict()["bias"].copy_(old_conv.bias.data)
delattr(parent_module, name)
setattr(parent_module, name, new_conv)
print("Replace", old_conv, "to", new_conv)
# insert non-local block just before the last block of res4 (layer3)
# if cfg.MODEL.BACKBONE.NON_LOCAL != "":
# nl_block_type, _ = cfg.MODEL.BACKBONE.NON_LOCAL.split("_")
# layer3_list = list(model.backbone.body.layer3.children())
# in_ch = list(layer3_list[-1].children())[0].in_channels
# layer3_list.insert(
# len(layer3_list) - 1,
# init_nl_module(nl_block_type, in_ch, int(in_ch / 2)))
# model.backbone.body.layer3 = nn.Sequential(*layer3_list)
return model
|
python
|
# Copyright 2017 ForgeFlow S.L.
# Copyright 2018 Carlos Dauden - Tecnativa <[email protected]>
# License AGPL-3.0 or later (https://www.gnu.org/licenses/lgpl.html).
from odoo import _, api, fields, models
from odoo.exceptions import ValidationError
class AccountPaymentMode(models.Model):
_inherit = "account.payment.mode"
show_bank_account = fields.Selection(
selection=[
("full", "Full"),
("first", "First n chars"),
("last", "Last n chars"),
("no", "No"),
],
string="Show bank account",
default="full",
help="Show in invoices partial or full bank account number",
)
show_bank_account_from_journal = fields.Boolean(string="Bank account from journals")
show_bank_account_chars = fields.Integer(
string="# of digits for customer bank account"
)
@api.constrains("company_id")
def account_invoice_company_constrains(self):
for mode in self:
if (
self.env["account.move"]
.sudo()
.search(
[
("payment_mode_id", "=", mode.id),
("company_id", "!=", mode.company_id.id),
],
limit=1,
)
):
raise ValidationError(
_(
"You cannot change the Company. There exists "
"at least one Journal Entry with this Payment Mode, "
"already assigned to another Company."
)
)
@api.constrains("company_id")
def account_move_line_company_constrains(self):
for mode in self:
if (
self.env["account.move.line"]
.sudo()
.search(
[
("payment_mode_id", "=", mode.id),
("company_id", "!=", mode.company_id.id),
],
limit=1,
)
):
raise ValidationError(
_(
"You cannot change the Company. There exists "
"at least one Journal Item with this Payment Mode, "
"already assigned to another Company."
)
)
|
python
|
''' FUNCTIONS
Functions are named blocks of code that perform a specific task.
'''
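# A minimal illustrative sketch (not part of the original file): defining and
# calling a simple function. The name `greet` is hypothetical.
def greet(name):
    """Return a greeting for the given name."""
    return "Hello, " + name + "!"

print(greet("world"))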
|
python
|
from classifiers.base_stance_classifier import BaseStanceClassifier
from classifiers.random_stance_classifier import RandomStanceClassifier
from classifiers.greedy_stance_classifier import MSTStanceClassifier
from classifiers.maxcut_stance_classifier import MaxcutStanceClassifier
|
python
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
#
import threading
import time
from opentelemetry.context import attach, detach, set_value
from opentelemetry.sdk.metrics import Meter
from opentelemetry.sdk.metrics.export import MetricsExportResult
from azure_monitor.sdk.auto_collection.live_metrics import utils
from azure_monitor.sdk.auto_collection.live_metrics.exporter import (
LiveMetricsExporter,
)
from azure_monitor.sdk.auto_collection.live_metrics.sender import (
LiveMetricsSender,
)
from azure_monitor.sdk.auto_collection.metrics_span_processor import (
AzureMetricsSpanProcessor,
)
# Interval for failures threshold reached in seconds
FALLBACK_INTERVAL = 60.0
# Ping interval for successful requests in seconds
PING_INTERVAL = 5.0
# Post interval for successful requests in seconds
POST_INTERVAL = 1.0
# Main process interval (Manager) in seconds
MAIN_INTERVAL = 2.0
class LiveMetricsManager(threading.Thread):
"""Live Metrics Manager
It will start Live Metrics process when instantiated,
responsible for switching between ping and post actions.
"""
daemon = True
def __init__(
self,
meter: Meter,
instrumentation_key: str,
span_processor: AzureMetricsSpanProcessor,
):
super().__init__()
self.thread_event = threading.Event()
self.interval = MAIN_INTERVAL
self._instrumentation_key = instrumentation_key
self._is_user_subscribed = False
self._meter = meter
self._span_processor = span_processor
self._exporter = LiveMetricsExporter(
self._instrumentation_key, self._span_processor
)
self._post = None
self._ping = LiveMetricsPing(self._instrumentation_key)
self.start()
def run(self):
self.check_if_user_is_subscribed()
while not self.thread_event.wait(self.interval):
self.check_if_user_is_subscribed()
def check_if_user_is_subscribed(self):
if self._ping:
if self._ping.is_user_subscribed:
# Switch to Post
self._ping.shutdown()
self._ping = None
self._span_processor.is_collecting_documents = True
self._post = LiveMetricsPost(
self._meter, self._exporter, self._instrumentation_key
)
if self._post:
if not self._post.is_user_subscribed:
# Switch to Ping
self._span_processor.is_collecting_documents = False
self._post.shutdown()
self._post = None
self._ping = LiveMetricsPing(self._instrumentation_key)
def shutdown(self):
if self._ping:
self._ping.shutdown()
if self._post:
self._post.shutdown()
self.thread_event.set()
class LiveMetricsPing(threading.Thread):
"""Ping to Live Metrics service
    Ping to determine if user is subscribed and live metrics need to be sent.
"""
daemon = True
def __init__(self, instrumentation_key):
super().__init__()
self.instrumentation_key = instrumentation_key
self.thread_event = threading.Event()
self.interval = PING_INTERVAL
self.is_user_subscribed = False
self.last_send_succeeded = False
self.last_request_success_time = 0
self.sender = LiveMetricsSender(self.instrumentation_key)
self.start()
def run(self):
self.ping()
while not self.thread_event.wait(self.interval):
self.ping()
def ping(self):
envelope = utils.create_metric_envelope(self.instrumentation_key)
token = attach(set_value("suppress_instrumentation", True))
response = self.sender.ping(envelope)
detach(token)
if response.ok:
if not self.last_send_succeeded:
self.interval = PING_INTERVAL
self.last_send_succeeded = True
self.last_request_success_time = time.time()
if (
response.headers.get(utils.LIVE_METRICS_SUBSCRIBED_HEADER)
== "true"
):
self.is_user_subscribed = True
else:
self.last_send_succeeded = False
if time.time() >= self.last_request_success_time + 60:
self.interval = FALLBACK_INTERVAL
def shutdown(self):
self.thread_event.set()
class LiveMetricsPost(threading.Thread):
"""Post to Live Metrics service
Post to send live metrics data when user is subscribed.
"""
daemon = True
def __init__(self, meter, exporter, instrumentation_key):
super().__init__()
self.instrumentation_key = instrumentation_key
self.meter = meter
self.thread_event = threading.Event()
self.interval = POST_INTERVAL
self.is_user_subscribed = True
self.last_send_succeeded = False
self.last_request_success_time = time.time()
self.exporter = exporter
self.start()
def run(self):
self.post()
while not self.thread_event.wait(self.interval):
self.post()
def post(self):
self.meter.collect()
token = attach(set_value("suppress_instrumentation", True))
result = self.exporter.export(self.meter.batcher.checkpoint_set())
detach(token)
self.meter.batcher.finished_collection()
if result == MetricsExportResult.SUCCESS:
self.last_request_success_time = time.time()
if not self.last_send_succeeded:
self.interval = POST_INTERVAL
self.last_send_succeeded = True
if not self.exporter.subscribed:
self.is_user_subscribed = False
else:
self.last_send_succeeded = False
if time.time() >= self.last_request_success_time + 20:
self.interval = FALLBACK_INTERVAL
def shutdown(self):
self.thread_event.set()
|
python
|
from art import logo
import os
clear = lambda: os.system('cls')
def new_bidder():
global greater_bid
bidder = input("What's your name?: ")
bid = int(input("What's your bid?: "))
new_bidder_dict = {"Bidder": bidder, "Bid": bid}
if bid > greater_bid["Bid"]:
greater_bid = new_bidder_dict
bids_dictionary[len(bids_dictionary)+1] = new_bidder_dict
print(logo)
bids_dictionary = {}
greater_bid = {"Bidder": "Start", "Bid": 0}
while True:
new_bidder()
other_bidder = input("Are there any other bidders? Type 'yes' or 'no': ")
clear()
if other_bidder == "no":
break
print(f'The winner is {greater_bid["Bidder"]} with a bid of ${greater_bid["Bid"]}.')
|
python
|
from geocoder import main
from geocoder import STARTTIME, NUM_DOCS
import re
import os
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
from datetime import datetime #for testing time of script execution
RE_URLS = 'http[s]?:\/\/(?:[a-z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-f][0-9a-f]))+'
RE_AT_MENTIONS = '(?:@[\w_]+)'
RE_HASHTAGS = '#'
RE_EXTRA_WHITE_SPACE = '\s+'
RE_INSIDE_PARENTHESIS = '\([^)]*\)'
RE_SPECIAL_CHARS = "\.|\,|\\|\r|\n|\s|\(|\)|\"|\[|\]|\{|\}|\;|\:|\.|\°|\-|\/|\&|\(|\)|\||\*"
#preserve question marks and exclamation marks for Vader
EMOJI_PATTERN = re.compile("["
u"\U0001F600-\U0001F64F" # emoticons
u"\U0001F300-\U0001F5FF" # symbols & pictographs
u"\U0001F680-\U0001F6FF" # transport & map symbols
u"\U0001F1E0-\U0001F1FF" # flags (iOS)
"]+", flags=re.UNICODE)
EMOJI_PATTERN2 = re.compile(u'('
u'\ud83c[\udf00-\udfff]|'
u'\ud83d[\udc00-\ude4f\ude80-\udeff]|'
u'[\u2600-\u26FF\u2700-\u27BF])+',
re.UNICODE)
class CleanText():
"""
Clean the text of the tweet as well as the user description (for later analysis).
Preserving things like emojis and exclamation marks for Vader Sentiment Analyzer,
since it is able to interpret meaning / emotional value from these symbols.
"""
def clean_tweet(self, text):
text = re.sub(RE_URLS, " ", str(text))
text = re.sub(RE_AT_MENTIONS, " ", text)
text = re.sub(RE_HASHTAGS," ", text)
text = re.sub(RE_SPECIAL_CHARS," ",text)
text = text.strip()
text = re.sub(RE_EXTRA_WHITE_SPACE, " ", text)
return text
def clean_user(self, text):
# print(f'\n\nemoji pattern type: {type(EMOJI_PATTERN)}\n\n')
text = re.sub(RE_URLS, " ", str(text))
text = re.sub(RE_AT_MENTIONS, " ", text)
text = re.sub(RE_INSIDE_PARENTHESIS, " ", text)
text = re.sub(RE_HASHTAGS," ", text)
text = re.sub(EMOJI_PATTERN, " ", text)
text = re.sub(EMOJI_PATTERN2, " ", text)
text = re.sub(RE_SPECIAL_CHARS," ",text)
text = text.strip()
text = re.sub(RE_EXTRA_WHITE_SPACE, " ", text)
return text
class Analyzer():
def __init__(self, df):
self.df = df
def analyze_sentiment(self):
self.df['text'] = self.df['text'].apply(CleanText().clean_tweet)
self.df['user_description'] = self.df['user_description'].apply(CleanText().clean_user)
all_tweets = list(self.df['text'])
analyzer = SentimentIntensityAnalyzer()
"""
Can also include sentiment 'sub-scores' (i.e. negative, neutral, and positive),
but for now only including composite sentiment. Others are commented out.
"""
# neg_sent = []
# neu_sent = []
# pos_sent = []
comp_sent = []
for tw in all_tweets:
vs = analyzer.polarity_scores(tw)
# neg_sent.append(vs['neg'])
# neu_sent.append(vs['neu'])
# pos_sent.append(vs['pos'])
comp_sent.append(vs['compound'])
# self.df['neg. sentiment'] = neg_sent
# self.df['neu. sentiment'] = neu_sent
# self.df['pos. sentiment'] = pos_sent
self.df['comp. sentiment'] = comp_sent
self.df['strong positive'] = self.df['comp. sentiment'].map(lambda x: 1 if x >= 0.8 else 0)
self.df['strong negative'] = self.df['comp. sentiment'].map(lambda x: 1 if x <= -0.8 else 0)
"""CONSIDER FILTERING OUT SENTIMENTS THAT FALL WITHIN 'MIDDLE RANGE'
(e.g. anything between -0.5 -- 0.5 ) """
"""Will eventually return the resulting df, but for now printing it to csv for testing"""
test_filename = 'sandbox/live_demo.csv'
if os.path.exists(test_filename):
print(f'\n\n{test_filename} already exists; removing it first\n')
os.remove(test_filename)
with open(test_filename, 'w') as f:
self.df.to_csv(f, header=True)
print("Successfully printed to csv!\n\n")
return self.df
if __name__ == '__main__':
print(f"\n\nNumber of documents currently in DB: {NUM_DOCS}\n")
df = main()
print(f'Passing dataframe to the sentiment analyzer...')
sentiment_analyzer = Analyzer(df)
sentiment_analyzer.analyze_sentiment()
print(f"Time to completion: {datetime.now() - STARTTIME}")
|
python
|
#Making List l
l = [11, 12, 13, 14]
#Using append function on list
l.append(50)
l.append(60)
print("list after adding 50 & 60:- ", l)
#Using remove function on list
l.remove(11)
l.remove(13)
print("list after removing 11 & 13:- ", l)
#Using the sort function with their parameters changed
#Implementing sorting in a list
l.sort(reverse=False)
print("list after sortinng in ascending order:- ",l)
l.sort(reverse=True)
print("list after sorting in descending order:- ",l)
#Implementing searching in a list
if 13 in l:
print("yes 13 is in the list")
else:
print("no 13 is not in the list")
print("no of elements list have:- ",len(l))
#Implementing traversing in a list
s = 0
oddsum = 0
evensum = 0
primesum = 0
for i in l:
s = s + i
if i % 2 == 0:
evensum = evensum + i
else:
oddsum = oddsum + i
    count = 0
    j = 1
    while j <= i:
        if i % j == 0:
            count = count + 1
        j = j + 1
    if count == 2:
        primesum = primesum + i
print("sum of elements in the list:- ",s)
print("sum of odd elements in the list:- ",oddsum)
print("sum of even elements in the list:- ",evensum)
print("sum of prime elements in the list:- ",primesum)
#Using clear function to delete all the data in list
#Implementing delete functionality in a list by using predefined functions
l.clear()
print("list after using clear function:- ",l)
del l
|
python
|
budget_wanted = float(input())
total = 0
money_full = False
command = input()
while command != "Party!":
drink_name = command
number_of_drinks = int(input())
price = int(len(drink_name))
drinks_price = price * number_of_drinks
if drinks_price % 2 == 1:
drinks_price -= drinks_price * 25 / 100
else:
drinks_price = drinks_price
total += drinks_price
if total >= budget_wanted:
print(f"Target acquired.")
money_full = True
break
command = input()
if command == "Party!" and total < budget_wanted:
diff = budget_wanted - total
print(f"We need {diff:.2f} leva more.")
if money_full or command == "Party!":
print(f"Club income - {total:.2f} leva.")
|
python
|
#!/usr/bin/env python
# coding:utf-8
#DESCRIPTION
#This script was developed to make it easier to create,
#update or delete the main hosts on two or more Zabbix servers.
#The idea is to use this script in environments where events are not synchronized,
#making these processes much easier and faster.
#The integration is done via the Zabbix API
# Author: Vinicius Trancoso Bitencourt - <http:github/viniciustbitencourt>
#
# FileName: altera_hosts.py
import sys
from ConfigParser import SafeConfigParser
from zabbix_api import ZabbixAPI
#Configuration file with the parameters: conf.ini
config = SafeConfigParser()
config.read('conf.ini')
#read the values from the configuration file
host01 = config.get('zabbix01', 'hostname')
usr01 = config.get('zabbix01', 'user')
pwd01 = config.get('zabbix01', 'passwd')
#read the values from the configuration file
host02 = config.get('zabbix02', 'hostname')
usr02 = config.get('zabbix02', 'user')
pwd02 = config.get('zabbix02', 'passwd')
#Zabbix API with the URL of each server
zapi = ZabbixAPI(host01)
zapi2 = ZabbixAPI(host02)
#Log in with the Zabbix API
zapi.login(usr01, pwd01)
zapi2.login(usr02, pwd02)
class AlteraHosts(object):
pass
##DISPLAY SCREEN
print "***************************************************************************"
print "*********** SCRIPT - ALTERA HOSTS EM DOIS SERVIDORES ZABBIX ***************"
print "* FAVOR INSERIR TODOS OS DADOS CORRETAMENTE! *"
print "***************************************************************************"
print'1 - PARA ALTERAR O NOME DO EQUIPAMENTO'
print'2 - PARA ALTERAR O IP DO EQUIPAMENTO'
print'3 - PARA SAIR DESSA TELA !'
#Read the selected option
a = raw_input('Digite a Opção desejada: ')
if a == '1':
host = raw_input('Digite o NOME do HOST: ')
rename = raw_input('Digite o nome que deseja alterar: ')
    #Function that validates the entered data
def valida_dados(host, rename):
if host == "":
print 'Digite corretamente o NOME do HOST corretamente!'
sys.exit(0)
elif rename == "":
print 'Digite o NOME do HOST que deseja alterar corretamente!'
sys.exit(0)
valida_dados(host, rename)
    #Zabbix API - Update in Zabbix
for x in zapi.host.get({'filter': {'name': host}}):
host_id = x['hostid']
altera = zapi.host.update({'hostid': host_id, 'host': rename, 'status': 0})
    #Zabbix API - Update in Zabbix
for y in zapi2.host.get({'filter': {'name':host}}):
host_id2 = y['hostid']
altera2 = zapi2.host.update({'hostid': host_id2, 'host': rename, 'status': 0})
print ('Equipamento - ' + host + ' - alterado NOME para: ' + rename)
elif a == '2':
host = raw_input('Digite o NOME do equipamento: ')
rename = raw_input('Digite o IP que deseja alterar: ')
    #Function that validates the entered data
def valida_dados(host, rename):
if host == "":
print 'Digite corretamente o NOME do equipamento corretamente!'
sys.exit(0)
elif rename == "":
print 'Digite o IP que deseja alterar corretamente!'
sys.exit(0)
valida_dados(host, rename)
    #Zabbix API - Update IP on the first Zabbix server
for x in zapi.host.get({'filter': {'name': host}}):
host_id = x['hostid']
for x in zapi.hostinterface.get({'hostids': host_id}):
host_interface = x['interfaceid']
alteraip = zapi.hostinterface.update({'interfaceid': host_interface, 'ip': rename})
    #Zabbix API - Update IP on the second Zabbix server
for y in zapi2.host.get({'filter': {'name': host}}):
host_id2 = y['hostid']
for y in zapi2.hostinterface.get({'hostids': host_id2}):
host_interface2 = y['interfaceid']
alteraip2 = zapi2.hostinterface.update({'interfaceid': host_interface2, 'ip': rename})
    print ('Host - ' + host + ' - IP changed to: ' + rename)
else:
    print 'INVALID OPTION - EXITING!'
    sys.exit(0)
|
python
|
# ToggleButton examples.
import os
from ocempgui.widgets import *
from ocempgui.widgets.Constants import *
def _create_vframe (text):
frame = VFrame (Label (text))
frame.spacing = 5
frame.align = ALIGN_LEFT
return frame
def create_button_view ():
states = ("STATE_NORMAL", "STATE_ENTERED", "STATE_ACTIVE",
"STATE_INSENSITIVE")
table = Table (2, 3)
table.spacing = 5
table.set_row_align (0, ALIGN_TOP)
table.set_row_align (1, ALIGN_TOP)
# Frame with the states.
frm_states = _create_vframe ("States")
for i, s in enumerate (states):
btn = ToggleButton (s)
if STATE_TYPES[i] == STATE_INSENSITIVE:
btn.sensitive = False
else:
btn.state = STATE_TYPES[i]
frm_states.add_child (btn)
table.add_child (0, 0, frm_states)
# Frame with different padding.
frm_padding = _create_vframe ("Padding")
for i in xrange (5):
btn = ToggleButton ("Padding: %dpx" % (i * 2))
btn.padding = i * 2
frm_padding.add_child (btn)
table.add_child (0, 1, frm_padding)
# Mnemonics.
frm_mnemonic = _create_vframe ("Mnemonics")
btn = ToggleButton ("#Simple Mnemonic")
btn2 = ToggleButton ("#Activate using <ALT><Underlined Key>")
frm_mnemonic.add_child (btn, btn2)
table.add_child (0, 2, frm_mnemonic)
# Multiline labeled buttons
frm_multiline = _create_vframe ("Multiline labels")
strings = ("Single lined ToggleButton", "Two lines on\na ToggleButton",
"Two lines with a\n#mnemonic")
for s in strings:
button = ToggleButton (s)
button.child.multiline = True
frm_multiline.add_child (button)
table.add_child (1, 0, frm_multiline)
# Empty buttons with different minimum sizes
frm_empty = _create_vframe ("Empty Buttons")
for i in xrange (5):
button = ToggleButton ()
button.minsize = (20 * i, 10 * i)
frm_empty.add_child (button)
table.add_child (1, 2, frm_empty)
return table
if __name__ == "__main__":
# Initialize the drawing window.
re = Renderer ()
re.create_screen (530, 400)
re.title = "ToggleButton examples"
re.color = (234, 228, 223)
re.add_widget (create_button_view ())
# Start the main rendering loop.
re.start ()
|
python
|
from sys import stdin
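# Reads line segments of the form "x1,y1 -> x2,y2" from stdin, draws them on a
# grid (horizontal, vertical and diagonal lines), and prints how many grid
# points are covered by at least two segments.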
def print_karte(karte):
for i in range(len(karte)):
liste = [str(x) for x in karte[i]]
print(''.join(liste))
zeilen = []
max_x = 0
max_y = 0
for line in stdin:
eingabe = line.strip()
eingabe = eingabe.split(" ")
eins = [int(x) for x in eingabe[0].split(",")]
zwei = [int(x) for x in eingabe[2].split(",")]
#if eins[0] == zwei[0] or eins[1] == zwei[1]:
zeilen.append([eins[0], eins[1], zwei[0], zwei[1]])
if eins[0] > max_x:
max_x = eins[0]
if eins[1] > max_y:
max_y = eins[1]
if zwei[0] > max_x:
max_x = zwei[0]
if zwei[1] > max_y:
max_y = zwei[1]
max_x += 1
max_y += 1
karte = [["."]*max_x for x in range(max_y)]
for zeile in zeilen:
if zeile[0] == zeile[2]:
if zeile[1] < zeile[3]:
incrementer = 1
zahl = zeile[3]+1
else:
incrementer = -1
zahl = zeile[3]-1
for i in range(zeile[1],zahl,incrementer):
if karte[i][zeile[0]] == ".":
karte[i][zeile[0]] = 1
else:
karte[i][zeile[0]] += 1
elif zeile[1] == zeile[3]:
if zeile[0] < zeile[2]:
incrementer = 1
zahl = zeile[2]+1
else:
incrementer = -1
zahl = zeile[2]-1
for i in range(zeile[0],zahl,incrementer):
if karte[zeile[1]][i] == ".":
karte[zeile[1]][i] = 1
else:
karte[zeile[1]][i] += 1
else:
pos_x = zeile[0]
pos_y = zeile[1]
if karte[pos_y][pos_x] == ".":
karte[pos_y][pos_x] = 1
else:
karte[pos_y][pos_x] += 1
if zeile[0] < zeile[2]:
pos_x += 1
else:
pos_x -= 1
if zeile[1] < zeile[3]:
pos_y += 1
else:
pos_y -= 1
while True:
if karte[pos_y][pos_x] == ".":
karte[pos_y][pos_x] = 1
else:
karte[pos_y][pos_x] += 1
if zeile[0] < zeile[2]:
pos_x += 1
else:
pos_x -= 1
if zeile[1] < zeile[3]:
pos_y += 1
else:
pos_y -= 1
if zeile[0] < zeile[2]:
if pos_x > zeile[2]:
break
else:
if pos_x < zeile[2]:
break
gefahren_punkte = 0
for i in range(max_y):
for ii in range(max_x):
if karte[i][ii] != ".":
if karte[i][ii] > 1:
gefahren_punkte += 1
print(gefahren_punkte)
|
python
|
from django.apps import apps
from .models import State, Workflow
def create_builtin_workflows(sender, **kwargs):
"""
Receiver function to create a simple and a complex workflow. It is
connected to the signal django.db.models.signals.post_migrate during
app loading.
"""
if Workflow.objects.exists():
# If there is at least one workflow, then do nothing.
return
workflow_1 = Workflow(name="Simple Workflow")
workflow_1.save(skip_autoupdate=True)
state_1_1 = State(
name="submitted",
workflow=workflow_1,
allow_create_poll=True,
allow_support=True,
allow_submitter_edit=True,
)
state_1_1.save(skip_autoupdate=True)
state_1_2 = State(
name="accepted",
workflow=workflow_1,
recommendation_label="Acceptance",
css_class="success",
merge_amendment_into_final=1,
)
state_1_2.save(skip_autoupdate=True)
state_1_3 = State(
name="rejected",
workflow=workflow_1,
recommendation_label="Rejection",
css_class="danger",
merge_amendment_into_final=-1,
)
state_1_3.save(skip_autoupdate=True)
state_1_4 = State(
name="not decided",
workflow=workflow_1,
recommendation_label="No decision",
css_class="default",
merge_amendment_into_final=-1,
)
state_1_4.save(skip_autoupdate=True)
state_1_1.next_states.add(state_1_2, state_1_3, state_1_4)
workflow_1.first_state = state_1_1
workflow_1.save(skip_autoupdate=True)
workflow_2 = Workflow(name="Complex Workflow")
workflow_2.save(skip_autoupdate=True)
state_2_1 = State(
name="published",
workflow=workflow_2,
allow_support=True,
allow_submitter_edit=True,
dont_set_identifier=True,
)
state_2_1.save(skip_autoupdate=True)
state_2_2 = State(
name="permitted",
workflow=workflow_2,
recommendation_label="Permission",
allow_create_poll=True,
allow_submitter_edit=True,
)
state_2_2.save(skip_autoupdate=True)
state_2_3 = State(
name="accepted",
workflow=workflow_2,
recommendation_label="Acceptance",
css_class="success",
merge_amendment_into_final=1,
)
state_2_3.save(skip_autoupdate=True)
state_2_4 = State(
name="rejected",
workflow=workflow_2,
recommendation_label="Rejection",
css_class="danger",
merge_amendment_into_final=-1,
)
state_2_4.save(skip_autoupdate=True)
state_2_5 = State(
name="withdrawed",
workflow=workflow_2,
css_class="default",
merge_amendment_into_final=-1,
)
state_2_5.save(skip_autoupdate=True)
state_2_6 = State(
name="adjourned",
workflow=workflow_2,
recommendation_label="Adjournment",
css_class="default",
merge_amendment_into_final=-1,
)
state_2_6.save(skip_autoupdate=True)
state_2_7 = State(
name="not concerned",
workflow=workflow_2,
recommendation_label="No concernment",
css_class="default",
merge_amendment_into_final=-1,
)
state_2_7.save(skip_autoupdate=True)
state_2_8 = State(
name="refered to committee",
workflow=workflow_2,
recommendation_label="Referral to committee",
css_class="default",
merge_amendment_into_final=-1,
)
state_2_8.save(skip_autoupdate=True)
state_2_9 = State(
name="needs review",
workflow=workflow_2,
css_class="default",
merge_amendment_into_final=-1,
)
state_2_9.save(skip_autoupdate=True)
state_2_10 = State(
name="rejected (not authorized)",
workflow=workflow_2,
recommendation_label="Rejection (not authorized)",
css_class="default",
merge_amendment_into_final=-1,
)
state_2_10.save(skip_autoupdate=True)
state_2_1.next_states.add(state_2_2, state_2_5, state_2_10)
state_2_2.next_states.add(
state_2_3, state_2_4, state_2_5, state_2_6, state_2_7, state_2_8, state_2_9
)
workflow_2.first_state = state_2_1
workflow_2.save(skip_autoupdate=True)
def get_permission_change_data(sender, permissions, **kwargs):
"""
Yields all necessary collections if 'motions.can_see' permission changes.
"""
motions_app = apps.get_app_config(app_label="motions")
for permission in permissions:
# There could be only one 'motions.can_see' and then we want to return data.
if (
permission.content_type.app_label == motions_app.label
and permission.codename == "can_see"
):
yield from motions_app.get_startup_elements()
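# Wiring sketch (an assumption, not part of the original module): the docstring of
# create_builtin_workflows says the receiver is connected to post_migrate during
# app loading; one minimal way to do that is, for example:
#
# from django.db.models.signals import post_migrate
# post_migrate.connect(create_builtin_workflows, dispatch_uid="motions_builtin_workflows")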
|
python
|
import os
from codecs import open
from setuptools import setup
import suit_rq
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f:
long_description = f.read()
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-suit-rq',
version=suit_rq.__version__,
author='Ryan Senkbeil',
author_email='[email protected]',
description='Support the django-rq admin when using django-suit',
long_description=long_description,
url='https://github.com/gsmke/django-suit-rq',
license='BSD',
packages=['suit_rq'],
zip_safe=False,
include_package_data=True,
platforms='any',
install_requires=[
'django-suit >=0.2.15, <0.3.0',
'django-rq >=0.8.0, <=1.2.0',
],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Topic :: Internet :: WWW/HTTP',
]
)
|
python
|
import os
import wget
## Verify if the directory exists. Create it if it doesn't exist.
def check_dir(file_path):
directory = os.path.dirname(file_path)
if not os.path.exists(directory):
os.makedirs(directory)
## Download source files from the given URLs
def download_files(urls, out_path='downloads/', silent=False):
for url in urls:
check_dir(out_path)
print('Downloading', url)
wget.download(url, out=out_path)
print()
# os.system('wget %s' % url)
if __name__ == "__main__":
urls=['https://gamepedia.cursecdn.com/darkestdungeon_gamepedia/c/ce/Vo_narr_tut_firstdungeon.ogg']
download_files(urls,'testing/')
# check_dir('testing/')
|
python
|
"""Morse code handling"""
from configparser import ConfigParser
import os
from pathlib import Path
import sys
import warnings
import numpy as np
import sklearn.cluster
import sklearn.exceptions
from .io import read_wave
from .processing import smoothed_power, squared_signal
class MorseCode:
"""Morse code
Attributes:
data (np.ndarray): 1D binary array, representing morse code in time
"""
_morse_to_char: dict = None
def __init__(self, data: np.ndarray, sample_rate: int = None):
"""Initialize code with binary data
Args:
data (np.ndarray): 1D binary array, representing morse code in time
            sample_rate (int): Audio sampling rate. Default: None.
"""
self.data = data
self.sample_rate = sample_rate
@classmethod
def from_wavfile(cls, file: os.PathLike) -> "MorseCode":
"""Construct from wave file
- Read in wave file
        - Calculate signal envelope (smoothing window of 0.01 seconds)
- Apply squaring (threshold: 50% of max smoothed data value)
Args:
file (os.PathLike): path to input WAV file
Returns:
MorseCode: class instance, with 1D binary input data
"""
sample_rate, wave = read_wave(file)
window_size = int(0.01 * sample_rate)
envelope = smoothed_power(wave, window_size)
square_data = squared_signal(envelope)
return cls(square_data)
def decode(self) -> str:
"""Decode data
Returns:
str: Morse code content, in plain language
Raises:
            UserWarning: dash/dot separation cannot be made unambiguously
"""
on_samples, off_samples = self._on_off_samples()
dash_dot_chars = self._dash_dot_characters(on_samples)
char_break_idx, word_space_idx = self._break_spaces(off_samples)
morse_words = self._morse_words(dash_dot_chars, char_break_idx, word_space_idx)
return self._translate(morse_words)
@classmethod
@property
def morse_to_char(cls) -> dict[str, str]:
"""Morse to character dictionary
Read mappings from morse.ini and store them to class variable. Later,
return directly from this class variable.
Returns:
dict[str, str]: Mapping of morse character string to letter
"""
if cls._morse_to_char is not None:
return cls._morse_to_char
config = ConfigParser()
config.read(Path(__file__).parent / "morse.ini")
chars = config["characters"]
cls._morse_to_char = {chars[key]: key.upper() for key in chars}
return cls._morse_to_char
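    # Expected layout of morse.ini (sample entries only; the file itself is not
    # shown in the original): a [characters] section mapping each letter to its
    # dash/dot code, which the dictionary comprehension above inverts.
    #
    # [characters]
    # a = .-
    # b = -...
    # c = -.-.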
def _on_off_samples(self) -> tuple[np.ndarray, np.ndarray]:
"""Calculate signal ON/OFF durations
Locate rising and falling edges in square wave at self.data. Calculate
number of samples in each ON / OFF period.
Returns:
tuple[np.ndarray, np.ndarray]: on_samples, off_samples. Note that
in addition to character and word spaces, off_samples also
includes inter-character spaces.
"""
if len(self.data) == 0:
return np.array([], dtype="int"), np.array([], dtype="int")
square_diff = np.diff(self.data)
rising_idx = np.nonzero(square_diff == 1)[0]
falling_idx = np.nonzero(square_diff == -1)[0]
# Case: data starts with ON - it started one sample before index 0
if falling_idx[0] < rising_idx[0]:
rising_idx = np.insert(rising_idx, 0, -1)
# Case: data ends with ON
if rising_idx[-1] > falling_idx[-1]:
falling_idx = np.insert(falling_idx, len(falling_idx), len(self.data) - 1)
on_samples = falling_idx - rising_idx
off_samples = rising_idx[1:] - falling_idx[: len(falling_idx) - 1]
return on_samples, off_samples
def _dash_dot_characters(self, on_samples: np.ndarray) -> np.ndarray:
"""Convert array of ON sample lengths to array of dashes and dots
        NOTE: It is expected that the signal contains exactly two distinct
        lengths - those of a dash and of a dot. If the keying speed varies,
        or either character does not exist, this method will fail.
        As a workaround, a 20 WPM dot length is used as a guess.
Args:
on_samples (np.ndarray): number of samples in each ON period in
the signal. This comes from `MorseCode._on_off_samples`.
Raises:
UserWarning: if there are no distinct clusters (only dashes
or dots in the input), and self.sample_rate is not set; thus
no guess can be made on dash/dot.
Returns:
np.ndarray: array of dashes and dots, of object (string) type
"""
if len(on_samples) == 0:
return np.array([], dtype="str")
n_clusters = min(2, len(on_samples))
column_vec = on_samples.reshape(-1, 1)
        # Suppress ConvergenceWarning when there are too few distinct clusters; handled below
with warnings.catch_warnings():
warnings.simplefilter("ignore")
clustering = sklearn.cluster.KMeans(
n_clusters=n_clusters, random_state=0
).fit(column_vec)
distinct_clusters = len(set(clustering.labels_))
# It is not clear whether dash or dot -- use (20 wpm dot length) * 1.5 as limit
if distinct_clusters == 1:
if self.sample_rate is None:
raise UserWarning("Cannot determine whether dash or dot")
sys.stderr.write("WARNING: too little data, guessing based on 20 wpm")
sample_length = clustering.cluster_centers_[0]
is_dot = sample_length / (self.sample_rate * 60 / 1000) < 1.5
dot_label = 0 if is_dot else 1
dash_label = 1 if is_dot else 0
else:
cluster_sort_idx = np.argsort(
clustering.cluster_centers_.flatten()
).tolist()
dot_label = cluster_sort_idx.index(0)
dash_label = cluster_sort_idx.index(1)
dash_dot_map = {dot_label: ".", dash_label: "-"}
dash_dot_characters = np.vectorize(dash_dot_map.get)(clustering.labels_)
return dash_dot_characters
@staticmethod
def _break_spaces(off_samples: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
"""Convert array of OFF sample lengths to indices for char/word breaks
        NOTE: It is expected that the signal contains exactly three distinct
        space lengths: inter-character space, character space and word space.
        If the keying speed varies, or word spaces do not exist, this
        method will fail.
Args:
off_samples (np.ndarray): number of samples in each OFF period in
the signal. This comes from `MorseCode._on_off_samples`.
Returns:
tuple[np.ndarray, np.ndarray]: indices for breaking dash/dot
character array from `MorseCode._dash_dot_characters`. First
array contains positions, where character breaks should be.
Second array contains positions, where word spaces should be in
the list of already resolved morse characters.
"""
if len(off_samples) == 0:
return np.array([], dtype="int"), np.array([], dtype="int")
n_clusters = min(3, len(off_samples))
column_vec = off_samples.reshape(-1, 1)
        # Suppress ConvergenceWarning when there are too few distinct clusters; handled below
with warnings.catch_warnings():
warnings.simplefilter("ignore")
clustering = sklearn.cluster.KMeans(
n_clusters=n_clusters, random_state=0
).fit(column_vec)
distinct_clusters = len(set(clustering.labels_))
cluster_sort_idx = np.argsort(clustering.cluster_centers_.flatten()).tolist()
# This index breaks dashes/dots into characters
intra_space_label = cluster_sort_idx.index(0)
char_break_idx = np.nonzero(clustering.labels_ != intra_space_label)[0] + 1
char_or_word_space_arr = clustering.labels_[
clustering.labels_ != intra_space_label
]
# This index breaks character list into word lists
if distinct_clusters == 3:
word_space_label = cluster_sort_idx.index(2)
word_space_idx = (
np.nonzero(char_or_word_space_arr == word_space_label)[0] + 1
)
else:
word_space_idx = np.array([], dtype="int")
return char_break_idx, word_space_idx
@staticmethod
def _morse_words(
raw_dash_dot: np.ndarray,
char_break_idx: np.ndarray,
word_space_idx: np.ndarray,
) -> list[list[str]]:
"""Convert character and space arrays to list of morse words
Args:
raw_dash_dot (np.ndarray): Numpy array of strings, contains
'.' and '-' characters, as processed from self.data
char_break_idx (np.ndarray): Index of locations in raw_dash_dot,
where a character space or word space would exist. The array
raw_dash_dot is first broken into characters with this index.
word_space_idx (np.ndarray): Index for breaking character array
into words. Contains locations of word spaces between natural
language characters.
Returns:
list[list[str]]: Words in morse code. A single word is a list of
dash-dot character combinations.
"""
char_start_idx = [0] + (char_break_idx).tolist()
char_end_idx = (char_break_idx).tolist() + [len(raw_dash_dot)]
morse_characters = [
"".join(raw_dash_dot[i:j].tolist())
for i, j in zip(char_start_idx, char_end_idx)
]
word_start_idx = [0] + (word_space_idx).tolist()
word_end_idx = (word_space_idx).tolist() + [len(morse_characters)]
return [morse_characters[i:j] for i, j in zip(word_start_idx, word_end_idx)]
def _translate(self, morse_words: list[list[str]]) -> str:
"""Translate list of morse-coded words to string
Args:
morse_words (list[list[str]]): List of words, having list of characters.
The characters are in morse-coded dash/dot form, e.g. '.--' for 'w'
Returns:
str: Message contained in input
"""
char_dict = self.morse_to_char
char_lists = [[char_dict.get(j, "") for j in i] for i in morse_words]
return " ".join(["".join(word) for word in char_lists])
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from sklearn.preprocessing import StandardScaler
X = [[0, 15],
[1, -10]]
# scale data according to computed scaling values
print(StandardScaler().fit(X).transform(X))
|
python
|
#!/usr/bin/env python3
#Author: Stefan Toman
if __name__ == '__main__':
print("Hello, World!")
|
python
|
from l_00_inventory import inventory
import json
with open("m02_files/l_00_inventory.json", "w") as json_out:
json_out.write(json.dumps(inventory))
with open("m02_files/l_00_inventory.json", "r") as json_in:
json_inventory = json_in.read()
print("l_00_inventory.json file:", json_inventory)
print("\njson pretty version:")
print(json.dumps(json.loads(json_inventory), indent=4))
|
python
|
import datetime
import time as samay
try:
from pac import voice_io
except ModuleNotFoundError:
import voice_io
def date():
x = datetime.datetime.now().strftime("%d/%m/%Y")
voice_io.show(f"Today's date is {x} (DD/MM/YYYY)")
def time():
#x=datetime.datetime.now().strftime("%H:%M:%S")
localtime = samay.localtime()
x = samay.strftime("%I:%M:%S %p", localtime)
voice_io.show(f"The current time is {x}")
def year():
x=datetime.datetime.now().strftime("%Y")
voice_io.show(f"The current year is {x}")
def month():
x=datetime.datetime.now().strftime("%B")
voice_io.show(f"The current month is {x}")
def day():
x=datetime.datetime.now().strftime("%A")
voice_io.show(f"Today it is a {x}")
|
python
|
from settings import settings
from office365.graph.graph_client import GraphClient
def get_token_for_user(auth_ctx):
"""
Acquire token via user credentials
:type auth_ctx: adal.AuthenticationContext
"""
token = auth_ctx.acquire_token_with_username_password(
'https://graph.microsoft.com',
settings['user_credentials']['username'],
settings['user_credentials']['password'],
settings['client_credentials']['client_id'])
return token
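# Expected shape of the imported `settings` dict (values are placeholders, not
# from the original); the keys mirror the lookups above and in the client setup.
#
# settings = {
#     'tenant': 'contoso.onmicrosoft.com',
#     'user_credentials': {'username': 'user@contoso.onmicrosoft.com', 'password': '...'},
#     'client_credentials': {'client_id': '00000000-0000-0000-0000-000000000000'},
# }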
def enum_folders_and_files(root_folder):
drive_items = root_folder.children
client.load(drive_items)
client.execute_query()
for drive_item in drive_items:
        item_type = "file" if drive_item.folder.is_server_object_null else "folder"
print("Type: {0} Name: {1}".format(item_type, drive_item.name))
if not drive_item.folder.is_server_object_null and drive_item.folder.childCount > 0:
enum_folders_and_files(drive_item)
client = GraphClient(settings['tenant'], get_token_for_user)
root = client.me.drive.root
enum_folders_and_files(root)
|
python
|
import os
import pytest
import responses
from ewhs.client import EwhsClient
@pytest.fixture(scope="function")
def client():
client = EwhsClient("test", "testpassword", "9fc05c82-0552-4ca5-b588-c64d77f117a9", "ewhs")
return client
@pytest.fixture(scope="session")
def authenticated_client():
client = EwhsClient("test", "testpassword", "9fc05c82-0552-4ca5-b588-c64d77f117a9")
client.access_token = "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJpYXQiOjE2Mzg1NDM1NzUsImV4cCI6MTYzODU0NzE3NSwicm9sZXMiOlsiUk9MRV9TQ0FOTkVSIiwiUk9MRV9EQVNIQk9BUkRfUkVBRCIsIlJPTEVfREFTSEJPQVJEX1NUQVRVUyIsIlJPTEVfU1RPQ0tfUkVBRCIsIlJPTEVfU1RPQ0tfSU1QT1JUIiwiUk9MRV9TVE9DS19FWFBPUlQiLCJST0xFX1NUT0NLSElTVE9SWV9SRUFEIiwiUk9MRV9TVE9DS0hJU1RPUllfRVhQT1JUIiwiUk9MRV9NT0RJRklDQVRJT05fUkVBRCIsIlJPTEVfTU9ESUZJQ0FUSU9OX0NSRUFURSIsIlJPTEVfTU9ESUZJQ0FUSU9OX1VQREFURSIsIlJPTEVfTU9ESUZJQ0FUSU9OX0FQUFJPVkUiLCJST0xFX01PRElGSUNBVElPTl9FWFBPUlQiLCJST0xFX1RSQU5TRkVSX1JFQUQiLCJST0xFX1RSQU5TRkVSX0NSRUFURSIsIlJPTEVfVFJBTlNGRVJfVVBEQVRFIiwiUk9MRV9UUkFOU0ZFUl9ERUxFVEUiLCJST0xFX1RSQU5TRkVSX0NBTkNFTCIsIlJPTEVfVFJBTlNGRVJfVU5IT0xEIiwiUk9MRV9UUkFOU0ZFUl9VTkFTU0lHTiIsIlJPTEVfVFJBTlNGRVJfUkVWSUVXIiwiUk9MRV9UUkFOU0ZFUl9FWFBPUlQiLCJST0xFX0FSVElDTEVfUkVBRCIsIlJPTEVfQVJUSUNMRV9DUkVBVEUiLCJST0xFX0FSVElDTEVfVVBEQVRFX0JBUkNPREVTIiwiUk9MRV9BUlRJQ0xFX1VQREFURSIsIlJPTEVfQVJUSUNMRV9ERUxFVEUiLCJST0xFX0FSVElDTEVfSU1QT1JUIiwiUk9MRV9BUlRJQ0xFX1VQREFURV9ET0NVTUVOVFMiLCJST0xFX0FSVElDTEVfRVhQT1JUIiwiUk9MRV9WQVJJQU5UX1FVQVJBTlRJTkUiLCJST0xFX1NVUFBMSUVSX1JFQUQiLCJST0xFX1NVUFBMSUVSX0NSRUFURSIsIlJPTEVfU1VQUExJRVJfVVBEQVRFIiwiUk9MRV9TVVBQTElFUl9ERUxFVEUiLCJST0xFX0lOQk9VTkRfUkVBRCIsIlJPTEVfSU5CT1VORF9DUkVBVEUiLCJST0xFX0lOQk9VTkRfVVBEQVRFIiwiUk9MRV9JTkJPVU5EX0NBTkNFTCIsIlJPTEVfSU5CT1VORF9QUk9DRVNTIiwiUk9MRV9JTkJPVU5EX0NPTVBMRVRFIiwiUk9MRV9JTkJPVU5EX0VYUE9SVCIsIlJPTEVfT1JERVJfUkVBRCIsIlJPTEVfT1JERVJfQ1JFQVRFIiwiUk9MRV9PUkRFUl9VUERBVEUiLCJST0xFX09SREVSX1VQREFURV9QUk9DRVNTSU5HIiwiUk9MRV9PUkRFUl9VUERBVEVfUEFBWkwiLCJST0xFX09SREVSX1BBUlRJQUwiLCJST0xFX09SREVSX1VOSE9MRCIsIlJPTEVfT1JERVJfQ0FOQ0VMIiwiUk9MRV9PUkRFUl9DQU5DRUxfUFJPQ0VTU0lORyIsIlJPTEVfT1JERVJfUFJPQkxFTSIsIlJPTEVfT1JERVJfRVhQT1JUIiwiUk9MRV9PUkRFUl9QUklPUklUSVpFIiwiUk9MRV9PUkRFUl9JTVBPUlQiLCJST0xFX1BJQ0tMSVNUX1JFQUQiLCJST0xFX1BJQ0tMSVNUX0VYUE9SVCIsIlJPTEVfUElDS0xJU1RfVU5BU1NJR04iLCJST0xFX1BJQ0tMSVNUX1BSSU9SSVRJWkUiLCJST0xFX1NISVBNRU5UX1JFQUQiLCJST0xFX1NISVBNRU5UX1BSSU5UIiwiUk9MRV9TSElQTUVOVF9ET1dOTE9BRCIsIlJPTEVfU0hJUE1FTlRfRVhQT1JUIiwiUk9MRV9NQUlMU0hJUE1FTlRfUkVBRCIsIlJPTEVfTUFJTFNISVBNRU5UX1VQREFURSIsIlJPTEVfTUFJTFNISVBNRU5UX1BST0NFU1MiLCJST0xFX1RSQUNLSU5HREFUQV9SRUFEIiwiUk9MRV9UUkFDS0lOR0RBVEFfVVBEQVRFIiwiUk9MRV9UUkFDS0lOR0RBVEFfREVMRVRFIiwiUk9MRV9SRVRVUk5MQUJFTF9SRUFEIiwiUk9MRV9SRVRVUk5MQUJFTF9DUkVBVEUiLCJST0xFX1JFVFVSTkxBQkVMX1VQREFURSIsIlJPTEVfUkVUVVJOTEFCRUxfQ0FOQ0VMIiwiUk9MRV9QUklOVEVSX1JFQUQiLCJST0xFX1BSSU5URVJfVVBEQVRFIiwiUk9MRV9QUklOVEVSX0NSRUFURSIsIlJPTEVfUFJJTlRFUl9ERUxFVEUiLCJST0xFX1BBQ0tJTkdUQUJMRV9SRUFEIiwiUk9MRV9QQUNLSU5HVEFCTEVfQ1JFQVRFIiwiUk9MRV9QQUNLSU5HVEFCTEVfREVMRVRFIiwiUk9MRV9QQUNLSU5HVEFCTEVfVVBEQVRFIiwiUk9MRV9QQUNLSU5HVEFCTEVfVVBEQVRFX0FERFJFU1MiLCJST0xFX1BBQ0tJTkdUQUJMRV9VUERBVEVfU0hJUFBJTkdPUFRJT04iLCJST0xFX0ZJTExJTkdfUkVBRCIsIlJPTEVfRklMTElOR19FWFBPUlQiLCJST0xFX0ZJTExJTkdfSU1QT1JUIiwiUk9MRV9DT0xMT19SRUFEIiwiUk9MRV9DT0xMT19FWFBPUlQiLCJST0xFX0NPTExPX0lNUE9SVCIsIlJPTEVfQ1VTVE9NRVJfUkVBRCIsIlJPTEVfQ1VTVE9NRVJfQ1JFQVRFIiwiUk9MRV9DVVNUT01FUl9VUERBVEUiLCJST0xFX0NVU1RPTUVSX0lNUE9SVCIsIlJPTEVfQ1VTVE9NRVJfRVhQT1JUIiwiUk9MRV9DVVNUT01FUl9ERUxFVEUiLCJST0xFX0NVU1RPTUVSVVNFUl9SRUFEIiwiUk9MRV9DVVNUT01FUlVTRVJfQ1JFQVRFIiwiUk9MRV9DVVNUT01FUlVTRVJfVVBEQVRFIiwiUk9MRV9DVVNUT01FUlVTRVJfREVMRVRFIiwiUk9MRV9DVVNUT01FUlVTRVJfRVhQT1JUIiwiUk9MRV9DVVNUT01FUlVTRVJfSU1QT1JUIiwiUk9MRV9DVVNUT01FUkdST1VQX1JFQUQiLCJST0xFX0NVU1RPTUVSR1JPVVBfQ1JFQVRFIiwiUk9MRV9DVVNUT01FUkdST1VQX1VQREFURSIsIlJPTEVfQ1VTVE9NRVJHUk9VUF9ERUxFVEUiLCJST0xFX0FQSV9SRUFEIiwiUk9MRV9BUElfQ1JFQVRFIiwiUk9MRV9BUElfVVBEQVRFIiwiUk9MRV9BUElfREV
MRVRFIiwiUk9MRV9SRVNUUklDVEVESVBfUkVBRCIsIlJPTEVfUkVTVFJJQ1RFRElQX1VQREFURSIsIlJPTEVfUkVTVFJJQ1RFRElQX0RFTEVURSIsIlJPTEVfRU1QTE9ZRUVfUkVBRCIsIlJPTEVfRU1QTE9ZRUVfQ1JFQVRFIiwiUk9MRV9FTVBMT1lFRV9VUERBVEUiLCJST0xFX0VNUExPWUVFX0RFTEVURSIsIlJPTEVfRU1QTE9ZRUVfRVhQT1JUIiwiUk9MRV9FTVBMT1lFRV9JTVBPUlQiLCJST0xFX0VNUExPWUVFR1JPVVBfUkVBRCIsIlJPTEVfRU1QTE9ZRUVHUk9VUF9DUkVBVEUiLCJST0xFX0VNUExPWUVFR1JPVVBfVVBEQVRFIiwiUk9MRV9FTVBMT1lFRUdST1VQX0RFTEVURSIsIlJPTEVfTE9DQVRJT05fUkVBRCIsIlJPTEVfTE9DQVRJT05fQ1JFQVRFIiwiUk9MRV9MT0NBVElPTl9VUERBVEUiLCJST0xFX0xPQ0FUSU9OX0RFTEVURSIsIlJPTEVfTE9DQVRJT05fSU1QT1JUIiwiUk9MRV9MT0NBVElPTl9FWFBPUlQiLCJST0xFX0xPQ0FUSU9OX1FVQVJBTlRJTkUiLCJST0xFX0xPQ0FUSU9OR1JPVVBfUkVBRCIsIlJPTEVfTE9DQVRJT05HUk9VUF9DUkVBVEUiLCJST0xFX0xPQ0FUSU9OR1JPVVBfVVBEQVRFIiwiUk9MRV9MT0NBVElPTkdST1VQX0RFTEVURSIsIlJPTEVfV0FSRUhPVVNFU19SRUFEIiwiUk9MRV9aT05FX1JFQUQiLCJST0xFX1pPTkVfQ1JFQVRFIiwiUk9MRV9aT05FX1VQREFURSIsIlJPTEVfWk9ORV9ERUxFVEUiLCJST0xFX1pPTkVfRVhQT1JUIiwiUk9MRV9aT05FX0lNUE9SVCIsIlJPTEVfUFJJTlRfQkFSQ09ERSIsIlJPTEVfU0hJUFBJTkdNQVRSSVhfUkVBRCIsIlJPTEVfU0hJUFBJTkdNQVRSSVhfVVBEQVRFIiwiUk9MRV9CVVNJTkVTU1JVTEVNQVRSSVhfUkVBRCIsIlJPTEVfQlVTSU5FU1NSVUxFTUFUUklYX1VQREFURSIsIlJPTEVfU0hJUFBJTkdNRVRIT0RfUkVBRCIsIlJPTEVfU0hJUFBJTkdNRVRIT0RfQ1JFQVRFIiwiUk9MRV9TSElQUElOR01FVEhPRF9VUERBVEUiLCJST0xFX0VYUE9SVF9SRUFEX0ZJTkFOQ0lBTCIsIlJPTEVfRVhQT1JUX1JFQURfQklMTElORyIsIlJPTEVfSVNTVUVfUkVBRCIsIlJPTEVfSVNTVUVfQVNTSUdOIiwiUk9MRV9JU1NVRV9DUkVBVEVfQ09NTUVOVCIsIlJPTEVfSVNTVUVfUkVBRF9DT01NRU5UIiwiUk9MRV9TSElQUElOR1RFTVBMQVRFX1JFQUQiLCJST0xFX1NISVBQSU5HVEVNUExBVEVfVVBEQVRFIiwiUk9MRV9DT05UUkFDVF9SRUFEIiwiUk9MRV9DT05UUkFDVF9DUkVBVEUiLCJST0xFX0NPTlRSQUNUX1VQREFURSIsIlJPTEVfQ1VTVE9NRVJQUklDRV9SRUFEIiwiUk9MRV9DVVNUT01FUlBSSUNFX1VQREFURSIsIlJPTEVfU0VSSUFMTlVNQkVSX1JFQUQiLCJST0xFX1NFUklBTE5VTUJFUl9FWFBPUlQiLCJST0xFX1NISVBQSU5HU09GVFdBUkVfUkVBRCIsIlJPTEVfU0hJUFBJTkdTT0ZUV0FSRV9FWFBPUlQiLCJST0xFX01JRERMRVdBUkVfUkVBRCIsIlJPTEVfSU5TVFJVQ1RJT05TX09WRVJWSUVXX1JFQUQiLCJST0xFX0lOU1RSVUNUSU9OU19ET1dOTE9BRF9BUEtfUkVBRCIsIlJPTEVfSU5TVFJVQ1RJT05TX1NFTEVDVF9XTVNfQVBQX1JFQUQiLCJST0xFX1dFQiIsIlJPTEVfVVNFUiJdLCJ1c2VybmFtZSI6ImZlcnJ5IiwidXNlcl9pZCI6IjlmZDNlZmYwLWZiMjgtMTFlNS05YzMyLWJjNWZmNGY3YWVmNCIsInVzZXJfdHlwZSI6ImVtcGxveWVlIiwiY3VzdG9tZXJfaWRzIjpbIjUzYjVhNTQzLTEyOWEtNDAzYy05YTZlLTNkOWM1MjVmZmE1YiIsImYxOTA5MDBhLWViZmQtNDI2Mi05ZGQ2LTA1ZGRkNjE3MjFiMCIsIjhjZTEwNWYwLWZjMWQtNDIzYS1iMDY2LWQ4NGM1ZWE1N2NhYSIsIjQ4ZTc3YTdhLWUwMzMtNDcxOS05MTkxLTc4YTlhMWI0NjQ5MiJdLCJyZXF1ZXN0X2lwIjoiMTkyLjE2OC4xMjguMSJ9.crcZ-2i9u1u5i3RBhV6tCMo-hrdeuQ91yDDVGT9k6iAFbF48k65RQbVPVkrIwZx9wN6hCvl6mMOOGkiLxFtweSi4nt_hGZeCsuieypQHZxf3MCdwo0zKtb0M8NmBB--D7_AvHWqcz6IEgoXMUtYLOkab4BPVdZlHmegbf7qRtNZlaKRVXPqgn3ReiPVvX_TGdK74VEXZzWPStoTxJwVkFvCFV9RFfYb_b9BgfTaSDJAYGFmSE-QxbW1K4TQBgUjuUAQSRh-y5diw4nuY9VJgcJ2LAD6ZX19do1zFCsc8zq2KUoTppPV9xO8WpOdxlXKGLu3rwfvLV9clhrc9ogmEAYF7UDcJkwgL5nHEfmsAD602T6_NtMjwP1dhTL9OeRz6oJwNRUb3hSe6uG7hvhlE7X-O8GwCafyWX8vgGT0D1NPh5ehwFsh8oc57M-W5PczDwZwQJ99jdHcAFRcsEKMJpKrs1G2LYAqDMS38i6IbZghPqN88Cnc6cpPfWVI6rs1BPZ4DxRBkQkXLWdamAVck6mCpW1QOA-YnNbmLn16d88PeMhzt7TN_jJfi0VAf2BK1DEbdy2sdSoqm3kCWqSzG11hTDLjvbpvJ0rCby7kz4c47qyxzxhyYOCBD4Rns9bNRW2xbE4BSJ0eKMeacaaWNQX0LeUaQy2Q6qPCVPO-hxAo"
client.refresh_token = "91aff30e4f3bb35b923892e525bd848ab88cf68d9669b5ccf07ae0262934b43a67cf7df89ef6213ddbb47c400c1b2c32e4d9178790caa1420e28a94b892addb3"
client.expires_at = 2638547175
return client
class ImprovedRequestsMock(responses.RequestsMock):
"""Wrapper adding a few shorthands to responses.RequestMock."""
def get(self, url, filename, status=200):
"""Setup a mock response for a GET request."""
body = self._get_body(filename)
self.add(responses.GET, url, body=body, status=status, content_type="application/json")
def post(self, url, filename, status=200):
"""Setup a mock response for a POST request."""
body = self._get_body(filename)
self.add(responses.POST, url, body=body, status=status, content_type="application/json")
def delete(self, url, filename, status=204):
"""Setup a mock response for a DELETE request."""
body = self._get_body(filename)
self.add(responses.DELETE, url, body=body, status=status, content_type="application/json")
def patch(self, url, filename, status=200):
"""Setup a mock response for a PATCH request."""
body = self._get_body(filename)
self.add(responses.PATCH, url, body=body, status=status, content_type="application/json")
def _get_body(self, filename):
"""Read the response fixture file and return it."""
file = os.path.join(os.path.dirname(__file__), "responses", f"{filename}.json")
with open(file, encoding="utf-8") as f:
return f.read()
@pytest.fixture
def response():
"""Setup the responses fixture."""
with ImprovedRequestsMock() as mock:
yield mock
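# Usage sketch (the URL and the "ping" fixture file are assumptions, not part of
# the original): the response fixture intercepts outgoing HTTP calls and serves
# the JSON stored under tests/responses/<filename>.json.
def test_response_fixture_serves_fixture_file(response):
    import requests

    response.get("https://api.example.com/ping", "ping")
    reply = requests.get("https://api.example.com/ping")
    assert reply.status_code == 200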
|
python
|
# -*- coding: utf-8 -*-
"""
chanjo.cli
~~~~~~~~~~~
Command line interface (console entry points). Based on Click_.
.. _Click: http://click.pocoo.org/
"""
from __future__ import absolute_import, unicode_literals
from pkg_resources import iter_entry_points
import click
from . import __version__
from ._compat import text_type
from .config import Config, config_file_name, markup
from .store import Store
@click.group()
@click.option(
'-c', '--config',
default=config_file_name,
type=click.File('w', encoding='utf-8'),
help='path to config file')
@click.option('--db', type=text_type, help='path/URI of the SQL database')
@click.option(
'-d', '--dialect',
type=click.Choice(['sqlite', 'mysql']),
help='type of SQL database')
@click.version_option(__version__)
@click.pass_context
def cli(context, config, db, dialect):
"""Clinical sequencing coverage analysis tool."""
# avoid setting global defaults in Click options, do it below when
# updating the config object
context.obj = Config(config, markup=markup)
# global defaults
db_path = db or context.obj.get('db', 'coverage.sqlite3')
db_dialect = dialect or context.obj.get('dialect', 'sqlite')
context.db = Store(db_path, dialect=db_dialect)
# update the context with new defaults from the config file
context.default_map = context.obj
# add subcommands dynamically to the CLI
for entry_point in iter_entry_points('chanjo.subcommands'):
cli.add_command(entry_point.load())
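# Plugin registration sketch (package and command names are hypothetical, not from
# the original): a separate distribution can expose a Click command through the
# 'chanjo.subcommands' entry point group, which the loop above then loads, e.g. in
# its setup.py:
#
#     setup(
#         name='chanjo-myplugin',
#         entry_points={
#             'chanjo.subcommands': ['myplugin = chanjo_myplugin.cli:myplugin'],
#         },
#     )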
|
python
|