content (string, 0-894k chars) | type (string, 2 classes) |
---|---|
DRB1_1385_9 = {0: {'A': -999.0, 'E': -999.0, 'D': -999.0, 'G': -999.0, 'F': -0.004754, 'I': -0.99525, 'H': -999.0, 'K': -999.0, 'M': -0.99525, 'L': -0.99525, 'N': -999.0, 'Q': -999.0, 'P': -999.0, 'S': -999.0, 'R': -999.0, 'T': -999.0, 'W': -0.004754, 'V': -0.99525, 'Y': -0.004754}, 1: {'A': 0.0, 'E': 0.1, 'D': -1.3, 'G': 0.5, 'F': 0.8, 'I': 1.1, 'H': 0.8, 'K': 1.1, 'M': 1.1, 'L': 1.0, 'N': 0.8, 'Q': 1.2, 'P': -0.5, 'S': -0.3, 'R': 2.2, 'T': 0.0, 'W': -0.1, 'V': 2.1, 'Y': 0.9}, 2: {'A': 0.0, 'E': -1.2, 'D': -1.3, 'G': 0.2, 'F': 0.8, 'I': 1.5, 'H': 0.2, 'K': 0.0, 'M': 1.4, 'L': 1.0, 'N': 0.5, 'Q': 0.0, 'P': 0.3, 'S': 0.2, 'R': 0.7, 'T': 0.0, 'W': 0.0, 'V': 0.5, 'Y': 0.8}, 3: {'A': 0.0, 'E': -1.4251, 'D': -1.5135, 'G': -1.5724, 'F': 0.54328, 'I': 0.26645, 'H': 0.26629, 'K': 0.082601, 'M': 0.91659, 'L': 0.78109, 'N': 0.036182, 'Q': 0.0014865, 'P': -1.5914, 'S': -0.64719, 'R': -0.2678, 'T': -0.81058, 'W': 0.22027, 'V': -0.1439, 'Y': -0.18922}, 4: {'A': 0.0, 'E': 0.0, 'D': 0.0, 'G': 0.0, 'F': 0.0, 'I': 0.0, 'H': 0.0, 'K': 0.0, 'M': 0.0, 'L': 0.0, 'N': 0.0, 'Q': 0.0, 'P': 0.0, 'S': 0.0, 'R': 0.0, 'T': 0.0, 'W': 0.0, 'V': 0.0, 'Y': 0.0}, 5: {'A': 0.0, 'E': -1.4081, 'D': -2.3885, 'G': -0.70585, 'F': -1.3969, 'I': 0.69291, 'H': -0.11092, 'K': 1.2687, 'M': -0.90111, 'L': 0.18921, 'N': -0.58393, 'Q': -0.31017, 'P': 0.49539, 'S': -0.090593, 'R': 0.97197, 'T': 0.8083, 'W': -1.3962, 'V': 1.1966, 'Y': -1.3998}, 6: {'A': 0.0, 'E': -1.0872, 'D': -1.7521, 'G': -0.91274, 'F': 0.16734, 'I': 0.090774, 'H': -0.091681, 'K': -0.29398, 'M': 0.48662, 'L': 0.57886, 'N': -0.14347, 'Q': -0.26554, 'P': -0.57386, 'S': -0.69106, 'R': 0.26585, 'T': -0.86328, 'W': -0.041585, 'V': -0.15572, 'Y': -0.14029}, 7: {'A': 0.0, 'E': 0.0, 'D': 0.0, 'G': 0.0, 'F': 0.0, 'I': 0.0, 'H': 0.0, 'K': 0.0, 'M': 0.0, 'L': 0.0, 'N': 0.0, 'Q': 0.0, 'P': 0.0, 'S': 0.0, 'R': 0.0, 'T': 0.0, 'W': 0.0, 'V': 0.0, 'Y': 0.0}, 8: {'A': 0.0, 'E': -0.54182, 'D': -0.78869, 'G': 0.1478, 'F': 0.55352, 'I': 0.43948, 'H': -0.38613, 'K': -0.2285, 'M': 0.82817, 'L': -0.20101, 'N': -0.73258, 'Q': -0.073797, 'P': -0.48481, 'S': 1.0175, 'R': 0.22077, 'T': -0.6178, 'W': -0.99494, 'V': 0.11956, 'Y': 0.066112}} | python |
from linghelper.phonetics.praat import PraatLoader
from linghelper.phonetics.praat.helper import to_time_based_dict
from scipy.interpolate import interp1d
from numpy import vstack,array
def interpolate_pitch(pitch_track):
defined_keys = [k for k in sorted(pitch_track.keys()) if pitch_track[k]['Pitch'] != '--undefined--']
x = array(defined_keys)
y = array([ pitch_track[k]['Pitch'] for k in defined_keys])
if len(x) == 0:
return None
times = list(filter(lambda z: z >= min(x) and z <= max(x),defined_keys))
f = interp1d(x,y)
return f(times)
def get_intensity_spline(intensity_track):
y = array([ intensity_track[k]['Intensity'] for k in sorted(intensity_track.keys()) if intensity_track[k]['Intensity'] != '--undefined--'])
return y
def interpolate_prosody(pitch,intensity):
defined_keys = [k for k in sorted(pitch.keys()) if pitch[k]['Pitch'] != '--undefined--']
x = array(defined_keys)
y = array([ pitch[k]['Pitch'] for k in defined_keys])
if len(x) == 0:
return None
times = list(filter(lambda z: z >= min(x) and z <= max(x),defined_keys))
p = interp1d(x,y)
x = list(sorted(intensity.keys()))
y =[intensity[k]['Intensity'] for k in x]
i = interp1d(x, y)
pitch_spline = p(times)
intensity_spline = i(times)
return vstack((pitch_spline,intensity_spline)).T
def to_pitch(filename,time_step):
p = PraatLoader()
output = p.run_script('pitch.praat', filename,time_step)
try:
pitch = to_time_based_dict(output)
except IndexError:
return None
pitch_spline = interpolate_pitch(pitch)
if pitch_spline is None:
return None
return pitch_spline.T
def to_intensity(filename,time_step):
p = PraatLoader()
output = p.run_script('intensity.praat', filename,time_step)
intensity = to_time_based_dict(output)
intensity_spline = get_intensity_spline(intensity)
return intensity_spline.T
def to_prosody(filename,time_step):
p = PraatLoader()
output = p.run_script('pitch.praat', filename,time_step)
try:
pitch = to_time_based_dict(output)
except IndexError:
return None
output = p.run_script('intensity.praat', filename,time_step)
intensity = to_time_based_dict(output)
prosody = interpolate_prosody(pitch,intensity)
return prosody
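# Minimal usage sketch (illustrative: 'example.wav' and the 0.01 s time step are
# placeholders, and the Praat scripts 'pitch.praat'/'intensity.praat' must be
# available to PraatLoader for this to actually run):
if __name__ == '__main__':
    prosody = to_prosody('example.wav', 0.01)
    if prosody is not None:
        print(prosody.shape)  # (n_samples, 2): pitch in column 0, intensity in column 1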
| python |
import sys
import web_tests.create_test_suite as tests
import web_tests.csv2_runner as csv2_runner
def main(gvar):
# setup to run Chromium tests
runner = csv2_runner.Csv2TestRunner(verbosity=2, gvar=gvar)
suite = tests.chromium_test_suite()
runner.run(suite)
print()
if __name__ == "__main__":
main(None)
| python |
from payment.payment_interface import PaymentInterface
from rest_framework.test import APITestCase
class TestPaymentInterface(APITestCase):
def test_get(self):
res = PaymentInterface.get('https://api.paystack.co/bank')
        self.assertEqual(res.get('status'), True)
def test_get_with_auth(self):
res = PaymentInterface.get_with_auth(
'https://api.paystack.co/bank/resolve?account_number=310484182&bank_code=011')
        self.assertEqual(res.get('status'), False)
| python |
import numpy as np
from multiprocessing import Pool
from multiprocessing import cpu_count
_user_input = None
_item_input = None
_labels = None
_batch_size = None
_index = None
_dataset = None
# input: dataset(Mat, List, Rating, Negatives), batch_choice, num_negatives
# output: [_user_input_list, _item_input_list, _labels_list]
def sampling(dataset, num_negatives):
_user_input, _item_input, _labels = [], [], []
num_users, num_items = dataset.trainMatrix.shape
for (u, i) in dataset.trainMatrix.keys():
# positive instance
_user_input.append(u)
_item_input.append(i)
_labels.append(1)
# negative instances
        for t in range(num_negatives):
            j = np.random.randint(num_items)
            while (u, j) in dataset.trainMatrix:
j = np.random.randint(num_items)
_user_input.append(u)
_item_input.append(j)
_labels.append(0)
return _user_input, _item_input, _labels
def shuffle(samples, batch_size, dataset):
global _user_input
global _item_input
global _labels
global _batch_size
global _index
global _dataset
_user_input, _item_input, _labels = samples
_batch_size = batch_size
_dataset = dataset
    _index = list(range(len(_labels)))
np.random.shuffle(_index)
num_batch = len(_labels) // _batch_size
pool = Pool(cpu_count())
res = pool.map(_get_train_batch, range(num_batch))
pool.close()
pool.join()
user_list = [r[0] for r in res]
num_idx = [r[1] for r in res]
item_list = [r[2] for r in res]
labels_list = [r[3] for r in res]
return user_list, num_idx, item_list, labels_list
def _get_train_batch(i):
user_batch, num_batch, item_batch, labels_batch = [], [], [], []
begin = i * _batch_size
trainList = _dataset.trainList
num_items = _dataset.num_items
for idx in range(begin, begin + _batch_size):
user_idx = _user_input[_index[idx]]
item_idx = _item_input[_index[idx]]
nonzero_row = []
nonzero_row += trainList[user_idx]
num_batch.append(_remove_item(num_items, nonzero_row, item_idx))
user_batch.append(nonzero_row)
item_batch.append(item_idx)
labels_batch.append(_labels[_index[idx]])
return np.array(_add_mask(num_items, user_batch, max(num_batch))), np.array(num_batch), np.array(item_batch), np.array(labels_batch)
def _remove_item(feature_mask, users, item):
flag = 0
for i in range(len(users)):
if users[i] == item:
users[i] = users[-1]
users[-1] = feature_mask
flag = 1
break
return len(users) - flag
def _add_mask(feature_mask, features, num_max):
    # pad every feature list in the batch to a uniform length (num_max + 1)
    for i in range(len(features)):
features[i] = features[i] + [feature_mask] * (num_max + 1 - len(features[i]))
return features
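# Usage sketch (illustrative only; `dataset` stands for whatever object supplies
# trainMatrix, trainList and num_items, which is not defined in this file):
# samples = sampling(dataset, num_negatives=4)
# user_list, num_idx, item_list, labels_list = shuffle(samples, batch_size=256, dataset=dataset)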
| python |
#!/usr/bin/python3
import numpy as np
from os.path import join as pjoin
from os import linesep
from shutil import copyfile
from scipy.io import mmwrite
from scipy.sparse import coo_matrix
import gzip
diri='data/raw'
diro='data/de'
key='celltype'
values=['dysfunctional','naive']
#Load covariate info
dc=np.loadtxt(pjoin(diri,'cov.tsv.gz'),delimiter='\t')
with open(pjoin(diri,'cov.txt'),'r') as f:
namec=f.readlines()
namec=np.array([x.strip() for x in namec])
namecdict=dict(zip(namec,range(len(namec))))
#Select cells for DE
ids=[namecdict[key+'='+x] for x in values]
ids=dc[ids].astype(bool)
assert ids.any(axis=1).all()
ida=ids.any(axis=0)
#Process covariates
namecn_id=np.array([namecdict[x] for x in filter(lambda x:not x.startswith(key+'='),namec)])
dcn=dc[namecn_id][:,ida]
#Remove single-valued covariates
t1=[len(np.unique(x))>1 for x in dcn]
namecn_id=namecn_id[t1]
dcn=dcn[t1]
namecn=namec[namecn_id]
#Output covariates
np.savetxt(pjoin(diro,'0_cov.tsv.gz'),dcn,delimiter='\t',fmt="%.8G")
with open(pjoin(diro,'0_cov.txt'),'w') as f:
f.write(linesep.join(namecn))
del namecn,dcn
#Process cells
with open(pjoin(diri,'cell.txt'),'r') as f:
names=f.readlines()
names=np.array([x.strip() for x in names])
namesn=names[ida]
with open(pjoin(diro,'0_cell.txt'),'w') as f:
f.write(linesep.join(namesn))
#Process transcriptome
dt=np.loadtxt(pjoin(diri,'read.tsv.gz'),delimiter='\t')
dtn=dt[:,ida]
dtn=coo_matrix(dtn)
with gzip.open(pjoin(diro,'0_read.mtx.gz'),'w') as f:
mmwrite(f,dtn,field='integer')
#Process grouping
dg=np.zeros(len(ida),dtype=int)
dg[ids[0]]=1
dg=dg[ida].astype(int)
#Output grouping
np.savetxt(pjoin(diro,'0_group.tsv.gz'),dg,delimiter='\t',fmt="%u")
#Copy genes
copyfile(pjoin(diri,'gene.txt'),pjoin(diro,'0_gene.txt'))
| python |
__version__ = "0.3.2"
__api_version__ = "0.10.1"
| python |
from aiocloudflare.commons.auth import Auth
class Dnssec(Auth):
_endpoint1 = "zones"
_endpoint2 = "dnssec"
_endpoint3 = None
| python |
#reference: https://github.com/val-iisc/capnet/blob/master/src/proj_codes.py
from __future__ import division
import math
import numpy as np
import torch
import utils.network_utils
class Projector(torch.nn.Module):
'''
Project the 3D point cloud to 2D plane
args:
xyz: float tensor, (BS,N_PTS,3); input point cloud
values assumed to be in (-1,1)
az: float tensor, (BS); azimuthal angle of camera in radians
el: float tensor, (BS); elevation of camera in radians
N_PTS: float, (); number of points in point cloud
returns:
grid_val: float, (N_batch,H,W);
output silhouette
'''
def __init__(self, cfg):
super(Projector, self).__init__()
# self.batch_size = cfg.CONST.BATCH_SIZE
self.cfg = cfg
self.n_pts = cfg.CONST.NUM_POINTS
self.grid_h = cfg.PROJECTION.GRID_H
self.grid_w = cfg.PROJECTION.GRID_W
self.sigma_sq_cont = cfg.PROJECTION.SIGMA_SQ_CONT
self.sigma_sq_disc = cfg.PROJECTION.SIGMA_SQ_DISC
def forward(self, xyz, az, el):
# World co-ordinates to camera co-ordinates
batch_size = xyz.size(0)
pcl_out_rot = self.world2cam(xyz, az, el, batch_size=batch_size, N_PTS=self.n_pts)
# Perspective transform
pcl_out_persp = self.perspective_transform(pcl_out_rot, batch_size=batch_size, grid_h=self.grid_h, grid_w=self.grid_w)
if self.cfg.SUPERVISION_2D.PROJ_TYPE == "CONT":
proj_pred = self.cont_proj(pcl_out_persp, grid_h=self.grid_h, grid_w=self.grid_w, sigma_sq=self.sigma_sq_cont)
elif self.cfg.SUPERVISION_2D.PROJ_TYPE == "DISC":
proj_pred = self.cont_proj(pcl_out_persp, grid_h=self.grid_h, grid_w=self.grid_w, sigma_sq=self.sigma_sq_disc)
return proj_pred
def cont_proj(self, pcl, grid_h, grid_w, sigma_sq=0.5):
'''
Continuous approximation of Orthographic projection of point cloud
to obtain Silhouette
args:
pcl: float, (N_batch,N_PTS,3); input point cloud
values assumed to be in (-1,1)
grid_h, grid_w: int, ();
output depth map height and width
returns:
grid_val: float, (N_batch,H,W);
output silhouette
'''
x, y, z = pcl.chunk(3, dim=2) # divide to three parts
pcl_norm = torch.cat([x, y, z], dim=2)
pcl_xy = torch.cat([x,y], dim=2) #(BS, N_PTS, 2)
out_grid = torch.meshgrid(torch.arange(0, grid_h), torch.arange(0, grid_w))
out_grid = [out_grid[0].type(torch.FloatTensor), out_grid[1].type(torch.FloatTensor)]
grid_z = torch.unsqueeze(torch.zeros_like(out_grid[0]), 2) # (H,W,1)
grid_xyz = torch.cat([torch.stack(out_grid, 2), grid_z], dim=2) # (H,W,3)
grid_xy = torch.stack(out_grid, 2) # (H,W,2)
grid_xy = utils.network_utils.var_or_cuda(grid_xy)
grid_diff = torch.unsqueeze(torch.unsqueeze(pcl_xy, 2), 2) - grid_xy # (BS,N_PTS,H,W,2)
grid_val = self.apply_kernel(grid_diff, sigma_sq) # (BS,N_PTS,H,W,2)
grid_val = grid_val[:,:,:,:,0]*grid_val[:,:,:,:,1] # (BS,N_PTS,H,W)
grid_val = torch.sum(grid_val, dim=1) # (BS,H,W)
grid_val = torch.tanh(grid_val)
return grid_val
"""
def disc_proj(self, pcl, grid_h, grid_w):
'''
Discrete Orthographic projection of point cloud
to obtain Silhouette
Handles only batch size 1 for now
args:
pcl: float, (N_batch,N_Pts,3); input point cloud
values assumed to be in (-1,1)
grid_h, grid_w: int, ();
output depth map height and width
returns:
grid_val: float, (N_batch,H,W); output silhouette
'''
x, y, z = pcl.chunk(3, dim=2) # divide to three parts
pcl_norm = torch.cat([x, y, z], dim=2)
pcl_xy = torch.cat([x,y], dim=2)
        # pcl_xy[0] shape: (2048, 2)
xy_indices = pcl_xy[0].long()
xy_values = torch.ones_like(xy_indices)
print(pcl_xy.requires_grad)
print(xy_indices.requires_grad)
print(xy_values.requires_grad)
xy_shape = torch.zeros((grid_h, grid_w), dtype=xy_values.dtype)
xy_shape = utils.network_utils.var_or_cuda(xy_shape)
# xy_shape[xy_indices[:,0], xy_indices[:,1]] = 1.
# out_grid = torch.unsqueeze(xy_shape, 0)
out_grid = xy_shape
out_grid = torch.unsqueeze(xy_shape, 0)
print("grad:", out_grid.requires_grad)
return out_grid
"""
def apply_kernel(self, x, sigma_sq=0.5):
'''
Get the un-normalized gaussian kernel with point co-ordinates as mean and
variance sigma_sq
args:
x: float, (BS,N_PTS,H,W,2); mean subtracted grid input
sigma_sq: float, (); variance of gaussian kernel
returns:
out: float, (BS,N_PTS,H,W,2); gaussian kernel
'''
out = (torch.exp(-(x**2)/(2.*sigma_sq)))
return out
def perspective_transform(self, xyz, batch_size, grid_h, grid_w):
'''
Perspective transform of pcl; Intrinsic camera parameters are assumed to be
known (here, obtained using parameters of GT image renderer, i.e. Blender)
Here, output grid size is assumed to be (64,64) in the K matrix
TODO: use output grid size as argument
args:
xyz: float, (BS,N_PTS,3); input point cloud
values assumed to be in (-1,1)
returns:
xyz_out: float, (BS,N_PTS,3); perspective transformed point cloud
'''
alpha_u = 60. * float(grid_h)/32.
alpha_v = 60. * float(grid_w)/32.
u_0 = float(grid_h)/2.
v_0 = float(grid_w)/2.
K = np.array([
[alpha_u, 0., -u_0],
[0., alpha_v, -v_0],
[0., 0., 1.]]).astype(np.float32)
K = np.expand_dims(K, 0)
K = np.tile(K, [batch_size,1,1])
K = torch.from_numpy(K)
K = utils.network_utils.var_or_cuda(K)
xyz_out = torch.matmul(K, xyz.permute(0, 2, 1))
xy_out = xyz_out[:,:2]/abs(torch.unsqueeze(xyz[:,:,2],1))
xyz_out = torch.cat([xy_out, abs(xyz_out[:,2:])],dim=1)
return xyz_out.permute(0, 2, 1)
def world2cam(self, xyz, az, el, batch_size, N_PTS=1024):
'''
Convert pcl from world co-ordinates to camera co-ordinates,
        the rotation matrix is different from capnet's, in order to
fit the training data orientation.
in capnet: chair face to z axis
here: chair face to x axis
args:
xyz: float tensor, (BS,N_PTS,3); input point cloud
values assumed to be in (-1,1)
az: float tensor, (BS); azimuthal angle of camera in radians
el: float tensor, (BS); elevation of camera in radians
batch_size: int, (); batch size
N_PTS: float, (); number of points in point cloud
returns:
xyz_out: float tensor, (BS,N_PTS,3); output point cloud in camera
co-ordinates
'''
# Camera origin calculation - az,el,d to 3D co-ord
# Rotation
"""
rotmat_az=[
[torch.ones_like(az),torch.zeros_like(az),torch.zeros_like(az)],
[torch.zeros_like(az),torch.cos(az),-torch.sin(az)],
[torch.zeros_like(az),torch.sin(az),torch.cos(az)]
]
"""
# y ---> x
rotmat_az=[
[torch.cos(az),torch.sin(az),torch.zeros_like(az)],
[-torch.sin(az),torch.cos(az),torch.zeros_like(az)],
[torch.zeros_like(az),torch.zeros_like(az), torch.ones_like(az)]
]
rotmat_az = [ torch.stack(x) for x in rotmat_az ]
# z ---> x, in dataloader, az = original az - 90 degree, which means here is actually x ----> -z
rotmat_el=[
[torch.cos(el),torch.zeros_like(az), torch.sin(el)],
[torch.zeros_like(az),torch.ones_like(az),torch.zeros_like(az)],
[-torch.sin(el),torch.zeros_like(az), torch.cos(el)]
]
rotmat_el = [ torch.stack(x) for x in rotmat_el ]
rotmat_az = torch.stack(rotmat_az, 0) # [3,3,B]
rotmat_el = torch.stack(rotmat_el, 0) # [3,3,B]
rotmat_az = rotmat_az.permute(2, 0, 1) # [B,3,3]
rotmat_el = rotmat_el.permute(2, 0, 1) # [B,3,3]
rotmat = torch.matmul(rotmat_el, rotmat_az)
# Transformation(t)
# Distance of object from camera - fixed to 2
d = 2.
# Calculate translation params
tx, ty, tz = [0, 0, d]
tr_mat = torch.unsqueeze(torch.tensor([tx, ty, tz]), 0).repeat(batch_size,1) # [B,3]
tr_mat = torch.unsqueeze(tr_mat,2) # [B,3,1]
tr_mat = tr_mat.permute(0, 2, 1) # [B,1,3]
tr_mat = tr_mat.repeat(1, N_PTS, 1) # [B,1024,3]
tr_mat = utils.network_utils.var_or_cuda(tr_mat) # [B,1024,3]
xyz_out = torch.matmul(rotmat, xyz.permute(0, 2, 1)) - tr_mat.permute(0, 2, 1)
return xyz_out.permute(0, 2, 1)
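# Usage sketch (illustrative; `cfg` stands for whatever config object provides
# CONST.NUM_POINTS, PROJECTION.GRID_H/GRID_W, PROJECTION.SIGMA_SQ_CONT/_DISC and
# SUPERVISION_2D.PROJ_TYPE -- none of those values are defined in this file):
# projector = Projector(cfg)
# silhouette = projector(xyz, az, el)  # xyz: (B, N, 3), az/el: (B,) in radians -> (B, H, W)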
| python |
#!/usr/bin/python
#--2 and 3--
__author__ = "gray"
__date__ = "20171228"
__version__ = "1.0.2"
__aim__ = """
GetData.py for the miseq pipeline (CHSLAB use):
copy files,
rename files,
and unzip files for QC use.
input:
sample sheet
project Dir (Target Dir)
[sample sheet] format
RawSampleName\tNewSampleName[marker]
"""
import sys
import os
import subprocess as sup
def GetData(SampleSheet, TargetDir="./"):
#check SampleSheet
if os.path.exists(SampleSheet):
pass
else:
print("No Find:"+SampleSheet)
sys.exit(1)
#--------
with open(SampleSheet,"r") as Fr:
#no header
content = Fr.readlines()
for line in content:
item = line.strip().split("\t")
Oripath = item[0]
Marker = item[1]
#---cp change name, (with gz file)
Comd = "cp "+Oripath+" "+TargetDir+"/"+Marker+".fastq.gz"
print(Comd)
sup.call(Comd, shell=True)
#
if __name__ == "__main__":
SampleSheet = sys.argv[1]
TargetDir = sys.argv[2]
#check dir
if os.path.exists(TargetDir):
pass
else:
sup.call("mkdir -p "+TargetDir,shell=True)
GetData(SampleSheet, TargetDir)
| python |
# Standard Library
import json
import os
import pstats
import shutil
import time
from multiprocessing.pool import ThreadPool
# Third Party
import boto3
import pandas as pd
import pytest
# First Party
from smdebug.core.access_layer.utils import is_s3
from smdebug.profiler.analysis.python_profile_analysis import PyinstrumentAnalysis, cProfileAnalysis
from smdebug.profiler.profiler_constants import (
CONVERT_TO_MICROSECS,
CPROFILE_NAME,
CPROFILE_STATS_FILENAME,
PYINSTRUMENT_HTML_FILENAME,
PYINSTRUMENT_JSON_FILENAME,
PYINSTRUMENT_NAME,
)
from smdebug.profiler.python_profile_utils import PythonProfileModes, StepPhase
from smdebug.profiler.python_profiler import (
PyinstrumentPythonProfiler,
cProfilePythonProfiler,
cProfileTimer,
)
@pytest.fixture
def test_framework():
return "test-framework"
@pytest.fixture()
def cprofile_python_profiler(out_dir, test_framework):
return cProfilePythonProfiler(out_dir, test_framework, cProfileTimer.TOTAL_TIME)
@pytest.fixture()
def pyinstrument_python_profiler(out_dir, test_framework):
return PyinstrumentPythonProfiler(out_dir, test_framework)
@pytest.fixture()
def framework_dir(out_dir, test_framework):
return "{0}/framework/{1}".format(out_dir, test_framework)
@pytest.fixture(autouse=True)
def reset_python_profiler_dir(framework_dir):
shutil.rmtree(framework_dir, ignore_errors=True)
@pytest.fixture(scope="session")
def bucket_prefix():
return f"s3://smdebug-testing/resources/python_profile/{int(time.time())}"
def pre_step_zero_function():
time.sleep(
0.0011
) # stall long enough to be recorded by pyinstrument, which records every 0.001 seconds
def start_end_step_function():
time.sleep(
0.0011
) # stall long enough to be recorded by pyinstrument, which records every 0.001 seconds
def end_start_step_function():
time.sleep(
0.0011
) # stall long enough to be recorded by pyinstrument, which records every 0.001 seconds
def between_modes_function():
time.sleep(
0.0011
) # stall long enough to be recorded by pyinstrument, which records every 0.001 seconds
def eval_function():
time.sleep(
0.0011
) # stall long enough to be recorded by pyinstrument, which records every 0.001 seconds
def post_hook_close_function():
time.sleep(
0.0011
) # stall long enough to be recorded by pyinstrument, which records every 0.001 seconds
def time_function():
time.sleep(
0.0011
) # stall long enough to be recorded by pyinstrument, which records every 0.001 seconds
def _upload_s3_folder(bucket, key, folder):
s3_client = boto3.client("s3")
filenames = []
for root, _, files in os.walk(folder):
for file in files:
node_id = os.path.basename(os.path.dirname(root))
stats_dir = os.path.basename(root)
full_key = os.path.join(key, node_id, stats_dir, file)
filenames.append((os.path.join(root, file), bucket, full_key))
def upload_files(args):
s3_client.upload_file(*args)
pool = ThreadPool(processes=10)
pool.map(upload_files, filenames)
def _validate_analysis(profiler_name, stats, expected_functions):
function_names = [
pre_step_zero_function.__name__,
start_end_step_function.__name__,
end_start_step_function.__name__,
between_modes_function.__name__,
eval_function.__name__,
post_hook_close_function.__name__,
time_function.__name__,
]
assert stats is not None, "No stats found!"
for analysis_function in function_names:
if profiler_name == CPROFILE_NAME:
function_stats_list = stats.function_stats_list
assert len(function_stats_list) > 0
if analysis_function in expected_functions:
assert any(
[analysis_function in stat.function_name for stat in function_stats_list]
), f"{analysis_function} should be found in function stats!"
else:
assert all(
[analysis_function not in stat.function_name for stat in function_stats_list]
), f"{analysis_function} should not be found in function stats!"
else:
assert len(stats) == 1
actual_functions = map(
lambda x: x["function"], stats[0].json_stats["root_frame"]["children"]
)
assert set(actual_functions) == set(expected_functions)
@pytest.mark.parametrize("use_pyinstrument", [False, True])
@pytest.mark.parametrize("steps", [(1, 2), (1, 5)])
def test_python_profiling(
use_pyinstrument, cprofile_python_profiler, pyinstrument_python_profiler, framework_dir, steps
):
if use_pyinstrument:
python_profiler = pyinstrument_python_profiler
profiler_name = PYINSTRUMENT_NAME
allowed_files = [PYINSTRUMENT_JSON_FILENAME, PYINSTRUMENT_HTML_FILENAME]
else:
python_profiler = cprofile_python_profiler
profiler_name = CPROFILE_NAME
allowed_files = [CPROFILE_STATS_FILENAME]
python_stats_dir = os.path.join(framework_dir, profiler_name)
start_step, end_step = steps
current_step = start_step
while current_step < end_step:
python_profiler.start_profiling(StepPhase.STEP_START, start_step=current_step)
assert python_profiler._start_step == current_step
assert python_profiler._start_phase == StepPhase.STEP_START
python_profiler.stop_profiling(StepPhase.STEP_END, current_step)
current_step += 1
# Test that directory and corresponding files exist.
assert os.path.isdir(python_stats_dir)
for node_id in os.listdir(python_stats_dir):
node_dir_path = os.path.join(python_stats_dir, node_id)
stats_dirs = os.listdir(node_dir_path)
assert len(stats_dirs) == (end_step - start_step)
for stats_dir in stats_dirs:
# Validate that the expected files are in the stats dir
stats_dir_path = os.path.join(node_dir_path, stats_dir)
stats_files = os.listdir(stats_dir_path)
assert set(stats_files) == set(allowed_files)
# Validate the actual stats files
for stats_file in stats_files:
stats_path = os.path.join(stats_dir_path, stats_file)
if stats_file == CPROFILE_STATS_FILENAME:
assert pstats.Stats(stats_path)
elif stats_file == PYINSTRUMENT_JSON_FILENAME:
with open(stats_path, "r") as f:
assert json.load(f)
@pytest.mark.parametrize("use_pyinstrument", [False, True])
@pytest.mark.parametrize("s3", [False, True])
def test_python_analysis(
use_pyinstrument,
cprofile_python_profiler,
pyinstrument_python_profiler,
framework_dir,
test_framework,
bucket_prefix,
s3,
):
"""
This test is meant to test that the cProfile/pyinstrument analysis retrieves the correct step's stats based on the
specified interval. Stats are either retrieved from s3 or generated manually through python profiling.
"""
if use_pyinstrument:
python_profiler = pyinstrument_python_profiler
analysis_class = PyinstrumentAnalysis
profiler_name = PYINSTRUMENT_NAME
num_expected_files = 14
else:
python_profiler = cprofile_python_profiler
analysis_class = cProfileAnalysis
profiler_name = CPROFILE_NAME
num_expected_files = 7
python_stats_dir = os.path.join(framework_dir, profiler_name)
if s3:
# Fetch stats from s3
os.makedirs(python_stats_dir)
python_profile_analysis = analysis_class(
local_profile_dir=python_stats_dir, s3_path=bucket_prefix
)
else:
# Do analysis and use those stats.
# pre_step_zero_function is called in between the start of the script and the start of first step of TRAIN.
python_profiler.start_profiling(StepPhase.START)
pre_step_zero_function()
python_profiler.stop_profiling(
StepPhase.STEP_START, end_mode=PythonProfileModes.TRAIN, end_step=1
)
# start_end_step_function is called in between the start and end of first step of TRAIN.
python_profiler.start_profiling(
StepPhase.STEP_START, start_mode=PythonProfileModes.TRAIN, start_step=1
)
start_end_step_function()
python_profiler.stop_profiling(
StepPhase.STEP_END, end_mode=PythonProfileModes.TRAIN, end_step=1
)
# end_start_step_function is called in between the end of first step and the start of second step of TRAIN.
python_profiler.start_profiling(
StepPhase.STEP_END, start_mode=PythonProfileModes.TRAIN, start_step=1
)
end_start_step_function()
python_profiler.stop_profiling(
StepPhase.STEP_START, end_mode=PythonProfileModes.TRAIN, end_step=2
)
        # between_modes_function is called in between the TRAIN and EVAL modes.
python_profiler.start_profiling(
StepPhase.STEP_END, start_mode=PythonProfileModes.TRAIN, start_step=1
)
between_modes_function()
python_profiler.stop_profiling(
StepPhase.STEP_START, end_mode=PythonProfileModes.EVAL, end_step=1
)
# eval function is called in between the start and end of first step of EVAL.
python_profiler.start_profiling(
StepPhase.STEP_START, start_mode=PythonProfileModes.EVAL, start_step=1
)
eval_function()
python_profiler.stop_profiling(
StepPhase.STEP_END, end_mode=PythonProfileModes.EVAL, end_step=1
)
# post_hook_close_function is called in between the end of the last step of EVAL and the end of the script.
python_profiler.start_profiling(
StepPhase.STEP_END, start_mode=PythonProfileModes.EVAL, start_step=1
)
post_hook_close_function()
python_profiler.stop_profiling(StepPhase.END)
# time function is called in between start and end of second step of TRAIN.
# NOTE: This needs to be profiled last for tests to pass.
python_profiler.start_profiling(
StepPhase.STEP_START, start_mode=PythonProfileModes.TRAIN, start_step=2
)
time_function()
python_profiler.stop_profiling(
StepPhase.STEP_END, end_mode=PythonProfileModes.TRAIN, end_step=2
)
python_profile_analysis = analysis_class(local_profile_dir=python_stats_dir)
_, bucket, prefix = is_s3(bucket_prefix)
key = os.path.join(prefix, "framework", test_framework, profiler_name)
_upload_s3_folder(bucket, key, python_stats_dir)
python_profile_stats_df = python_profile_analysis.list_profile_stats()
assert isinstance(python_profile_stats_df, pd.DataFrame)
assert python_profile_stats_df.shape[0] == num_expected_files
# Test that pre_step_zero_function call is recorded in received stats, but not the other functions.
stats = python_profile_analysis.fetch_pre_step_zero_profile_stats(refresh_stats=False)
_validate_analysis(profiler_name, stats, [pre_step_zero_function.__name__])
# Test that start_end_step_function call is recorded in received stats, but not the other functions.
stats = python_profile_analysis.fetch_profile_stats_by_step(1, refresh_stats=False)
_validate_analysis(profiler_name, stats, [start_end_step_function.__name__])
# Test that end_start_step_function call is recorded in received stats, but not the other functions.
stats = python_profile_analysis.fetch_profile_stats_by_step(
1,
end_step=2,
start_phase=StepPhase.STEP_END,
end_phase=StepPhase.STEP_START,
refresh_stats=False,
)
_validate_analysis(profiler_name, stats, [end_start_step_function.__name__])
    # Test that between_modes_function call is recorded in received stats, but not the other functions.
stats = python_profile_analysis.fetch_profile_stats_between_modes(
PythonProfileModes.TRAIN, PythonProfileModes.EVAL, refresh_stats=False
)
_validate_analysis(profiler_name, stats, [between_modes_function.__name__])
# Test that eval_function call is recorded in received stats, but not the other functions.
stats = python_profile_analysis.fetch_profile_stats_by_step(
1, mode=PythonProfileModes.EVAL, refresh_stats=False
)
_validate_analysis(profiler_name, stats, [eval_function.__name__])
    # Test that post_hook_close_function call is recorded in received stats, but not the other functions.
stats = python_profile_analysis.fetch_post_hook_close_profile_stats(refresh_stats=False)
_validate_analysis(profiler_name, stats, [post_hook_close_function.__name__])
# Test that time_function call is recorded in received stats, but not the other functions.
time_function_step_stats = python_profile_analysis.python_profile_stats[-1]
step_start_time = (
time_function_step_stats.start_time_since_epoch_in_micros / CONVERT_TO_MICROSECS
)
stats = python_profile_analysis.fetch_profile_stats_by_time(
step_start_time, time.time(), refresh_stats=False
)
_validate_analysis(profiler_name, stats, [time_function.__name__])
# Following analysis functions are for cProfile only
if use_pyinstrument:
return
# Test that functions called in TRAIN are recorded in received stats, but not the other functions.
stats = python_profile_analysis.fetch_profile_stats_by_training_phase(refresh_stats=False)[
(PythonProfileModes.TRAIN, PythonProfileModes.TRAIN)
]
_validate_analysis(
profiler_name,
stats,
[
start_end_step_function.__name__,
end_start_step_function.__name__,
time_function.__name__,
],
)
# Test that functions called in training loop are recorded in received stats, but not the other functions.
stats = python_profile_analysis.fetch_profile_stats_by_job_phase(refresh_stats=False)[
"training_loop"
]
_validate_analysis(
profiler_name,
stats,
[
start_end_step_function.__name__,
end_start_step_function.__name__,
between_modes_function.__name__,
eval_function.__name__,
time_function.__name__,
],
)
| python |
#
# Copyright (c) 2009-2015 Tom Keffer <[email protected]>
#
# See the file LICENSE.txt for your full rights.
#
"""Console simulator for the weewx weather system"""
from __future__ import with_statement
from __future__ import absolute_import
from __future__ import print_function
import math
import random
import time
import weewx.drivers
import weeutil.weeutil
DRIVER_NAME = 'Simulator'
DRIVER_VERSION = "3.3"
def loader(config_dict, engine):
start_ts, resume_ts = extract_starts(config_dict, DRIVER_NAME)
station = Simulator(start_time=start_ts, resume_time=resume_ts, **config_dict[DRIVER_NAME])
return station
def extract_starts(config_dict, driver_name):
"""Extract the start and resume times out of the configuration dictionary"""
# This uses a bit of a hack to have the simulator resume at a later
# time. It's not bad, but I'm not enthusiastic about having special
# knowledge about the database in a driver, albeit just the loader.
start_ts = resume_ts = None
if 'start' in config_dict[driver_name]:
# A start has been specified. Extract the time stamp.
start_tt = time.strptime(config_dict[driver_name]['start'], "%Y-%m-%dT%H:%M")
start_ts = time.mktime(start_tt)
# If the 'resume' keyword is present and True, then get the last
# archive record out of the database and resume with that.
if weeutil.weeutil.to_bool(config_dict[driver_name].get('resume', False)):
import weewx.manager
import weedb
try:
# Resume with the last time in the database. If there is no such
# time, then fall back to the time specified in the configuration
# dictionary.
with weewx.manager.open_manager_with_config(config_dict,
'wx_binding') as dbmanager:
resume_ts = dbmanager.lastGoodStamp()
except weedb.OperationalError:
pass
else:
# The resume keyword is not present. Start with the seed time:
resume_ts = start_ts
return start_ts, resume_ts
class Simulator(weewx.drivers.AbstractDevice):
"""Station simulator"""
def __init__(self, **stn_dict):
"""Initialize the simulator
NAMED ARGUMENTS:
loop_interval: The time (in seconds) between emitting LOOP packets.
[Optional. Default is 2.5]
start_time: The start (seed) time for the generator in unix epoch time
[Optional. If 'None', or not present, then present time will be used.]
resume_time: The start time for the loop.
[Optional. If 'None', or not present, then start_time will be used.]
mode: Controls the frequency of packets. One of either:
'simulator': Real-time simulator - sleep between LOOP packets
'generator': Emit packets as fast as possible (useful for testing)
[Required. Default is simulator.]
observations: Comma-separated list of observations that should be
generated. If nothing is specified, then all
observations will be generated.
[Optional. Default is not defined.]
"""
self.loop_interval = float(stn_dict.get('loop_interval', 2.5))
if 'start_time' in stn_dict and stn_dict['start_time'] is not None:
# A start time has been specified. We are not in real time mode.
self.real_time = False
# Extract the generator start time:
start_ts = float(stn_dict['start_time'])
# If a resume time keyword is present (and it's not None),
# then have the generator resume with that time.
if 'resume_time' in stn_dict and stn_dict['resume_time'] is not None:
self.the_time = float(stn_dict['resume_time'])
else:
self.the_time = start_ts
else:
# No start time specified. We are in realtime mode.
self.real_time = True
start_ts = self.the_time = time.time()
# default to simulator mode
self.mode = stn_dict.get('mode', 'simulator')
# The following doesn't make much meteorological sense, but it is
# easy to program!
self.observations = {
'outTemp' : Observation(magnitude=20.0, average= 50.0, period=24.0, phase_lag=14.0, start=start_ts),
'inTemp' : Observation(magnitude=5.0, average= 68.0, period=24.0, phase_lag=12.0, start=start_ts),
'barometer' : Observation(magnitude=1.0, average= 30.1, period=48.0, phase_lag= 0.0, start=start_ts),
'pressure' : Observation(magnitude=1.0, average= 30.1, period=48.0, phase_lag= 0.0, start=start_ts),
'windSpeed' : Observation(magnitude=5.0, average= 5.0, period=48.0, phase_lag=24.0, start=start_ts),
'windDir' : Observation(magnitude=180.0, average=180.0, period=48.0, phase_lag= 0.0, start=start_ts),
'windGust' : Observation(magnitude=6.0, average= 6.0, period=48.0, phase_lag=24.0, start=start_ts),
'windGustDir': Observation(magnitude=180.0, average=180.0, period=48.0, phase_lag= 0.0, start=start_ts),
'outHumidity': Observation(magnitude=30.0, average= 50.0, period=48.0, phase_lag= 0.0, start=start_ts),
'inHumidity' : Observation(magnitude=10.0, average= 20.0, period=24.0, phase_lag= 0.0, start=start_ts),
'radiation' : Solar(magnitude=1000, solar_start=6, solar_length=12),
'UV' : Solar(magnitude=14, solar_start=6, solar_length=12),
'rain' : Rain(rain_start=0, rain_length=3, total_rain=0.2, loop_interval=self.loop_interval),
'txBatteryStatus': BatteryStatus(),
'windBatteryStatus': BatteryStatus(),
'rainBatteryStatus': BatteryStatus(),
'outTempBatteryStatus': BatteryStatus(),
'inTempBatteryStatus': BatteryStatus(),
'consBatteryVoltage': BatteryVoltage(),
'heatingVoltage': BatteryVoltage(),
'supplyVoltage': BatteryVoltage(),
'referenceVoltage': BatteryVoltage(),
'rxCheckPercent': SignalStrength()}
self.trim_observations(stn_dict)
def trim_observations(self, stn_dict):
"""Calculate only the specified observations, or all if none specified"""
if 'observations' in stn_dict and stn_dict['observations'] is not None:
desired = [x.strip() for x in stn_dict['observations'].split(',')]
for obs in list(self.observations):
if obs not in desired:
del self.observations[obs]
def genLoopPackets(self):
while True:
# If we are in simulator mode, sleep first (as if we are gathering
# observations). If we are in generator mode, don't sleep at all.
if self.mode == 'simulator':
# Determine how long to sleep
if self.real_time:
# We are in real time mode. Try to keep synched up with the
# wall clock
sleep_time = self.the_time + self.loop_interval - time.time()
if sleep_time > 0:
time.sleep(sleep_time)
else:
# A start time was specified, so we are not in real time.
# Just sleep the appropriate interval
time.sleep(self.loop_interval)
# Update the simulator clock:
self.the_time += self.loop_interval
# Because a packet represents the measurements observed over the
# time interval, we want the measurement values at the middle
# of the interval.
avg_time = self.the_time - self.loop_interval/2.0
_packet = {'dateTime': int(self.the_time+0.5),
'usUnits' : weewx.US }
for obs_type in self.observations:
_packet[obs_type] = self.observations[obs_type].value_at(avg_time)
yield _packet
def getTime(self):
return self.the_time
@property
def hardware_name(self):
return "Simulator"
class Observation(object):
def __init__(self, magnitude=1.0, average=0.0, period=96.0, phase_lag=0.0, start=None):
"""Initialize an observation function.
magnitude: The value at max. The range will be twice this value
average: The average value, averaged over a full cycle.
period: The cycle period in hours.
phase_lag: The number of hours after the start time when the
observation hits its max
start: Time zero for the observation in unix epoch time."""
if not start:
raise ValueError("No start time specified")
self.magnitude = magnitude
self.average = average
self.period = period * 3600.0
self.phase_lag = phase_lag * 3600.0
self.start = start
def value_at(self, time_ts):
"""Return the observation value at the given time.
time_ts: The time in unix epoch time."""
phase = 2.0 * math.pi * (time_ts - self.start - self.phase_lag) / self.period
return self.magnitude * math.cos(phase) + self.average
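    # Worked example (the numbers follow directly from the cosine above): with
    # magnitude=20, average=50, period=24 h and phase_lag=14 h, value_at()
    # peaks at 70 exactly 14 hours after `start` and bottoms out at 30
    # twelve hours after that.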
class Rain(object):
bucket_tip = 0.01
def __init__(self, rain_start=0, rain_length=1, total_rain=0.1, loop_interval=None):
"""Initialize a rain simulator"""
npackets = 3600 * rain_length / loop_interval
n_rain_packets = total_rain / Rain.bucket_tip
self.period = int(npackets/n_rain_packets)
self.rain_start = 3600* rain_start
self.rain_end = self.rain_start + 3600 * rain_length
self.packet_number = 0
def value_at(self, time_ts):
time_tt = time.localtime(time_ts)
secs_since_midnight = time_tt.tm_hour * 3600 + time_tt.tm_min * 60.0 + time_tt.tm_sec
if self.rain_start < secs_since_midnight <= self.rain_end:
amt = Rain.bucket_tip if self.packet_number % self.period == 0 else 0.0
self.packet_number += 1
else:
self.packet_number = 0
amt = 0
return amt
class Solar(object):
def __init__(self, magnitude=10, solar_start=6, solar_length=12):
"""Initialize a solar simulator
Simulated ob will follow a single wave sine function starting at 0
and ending at 0. The solar day starts at time solar_start and
finishes after solar_length hours.
magnitude: the value at max, the range will be twice
this value
solar_start: decimal hour of day that obs start
                     (6.75=6:45am, 6.20=6:12am)
solar_length: length of day in decimal hours
                     (10.75=10hr 45min, 10.10=10hr 6min)
"""
self.magnitude = magnitude
self.solar_start = 3600 * solar_start
self.solar_end = self.solar_start + 3600 * solar_length
self.solar_length = 3600 * solar_length
def value_at(self, time_ts):
time_tt = time.localtime(time_ts)
secs_since_midnight = time_tt.tm_hour * 3600 + time_tt.tm_min * 60.0 + time_tt.tm_sec
if self.solar_start < secs_since_midnight <= self.solar_end:
amt = self.magnitude * (1 + math.cos(math.pi * (1 + 2.0 * ((secs_since_midnight - self.solar_start) / self.solar_length - 1))))/2
else:
amt = 0
return amt
class BatteryStatus(object):
def __init__(self, chance_of_failure=None, min_recovery_time=None):
"""Initialize a battery status.
        chance_of_failure - likelihood that the battery should fail [0,1]
min_recovery_time - minimum time until the battery recovers, seconds
"""
if chance_of_failure is None:
chance_of_failure = 0.0005 # about once every 30 minutes
if min_recovery_time is None:
            min_recovery_time = random.randint(300, 1800) # 5 to 30 minutes
self.chance_of_failure = chance_of_failure
self.min_recovery_time = min_recovery_time
self.state = 0
self.fail_ts = 0
def value_at(self, time_ts):
if self.state == 1:
# recover if sufficient time has passed
if time_ts - self.fail_ts > self.min_recovery_time:
self.state = 0
else:
# see if we need a failure
if random.random() < self.chance_of_failure:
self.state = 1
self.fail_ts = time_ts
return self.state
class BatteryVoltage(object):
def __init__(self, nominal_value=None, max_variance=None):
"""Initialize a battery voltage."""
if nominal_value is None:
nominal_value = 12.0
if max_variance is None:
max_variance = 0.1 * nominal_value
self.nominal = nominal_value
self.variance = max_variance
def value_at(self, time_ts):
return self.nominal + self.variance * random.random() * random.randint(-1, 1)
class SignalStrength(object):
def __init__(self, minval=0.0, maxval=100.0):
"""Initialize a signal strength simulator."""
self.minval = minval
self.maxval = maxval
self.max_variance = 0.1 * (self.maxval - self.minval)
self.value = self.minval + random.random() * (self.maxval - self.minval)
def value_at(self, time_ts):
newval = self.value + self.max_variance * random.random() * random.randint(-1, 1)
newval = max(self.minval, newval)
newval = min(self.maxval, newval)
self.value = newval
return self.value
def confeditor_loader():
return SimulatorConfEditor()
class SimulatorConfEditor(weewx.drivers.AbstractConfEditor):
@property
def default_stanza(self):
return """
[Simulator]
# This section is for the weewx weather station simulator
# The time (in seconds) between LOOP packets.
loop_interval = 2.5
# The simulator mode can be either 'simulator' or 'generator'.
# Real-time simulator. Sleep between each LOOP packet.
mode = simulator
# Generator. Emit LOOP packets as fast as possible (useful for testing).
#mode = generator
# The start time. Format is YYYY-mm-ddTHH:MM. If not specified, the default
# is to use the present time.
#start = 2011-01-01T00:00
# The driver to use:
driver = weewx.drivers.simulator
"""
if __name__ == "__main__":
station = Simulator(mode='simulator',loop_interval=2.0)
for packet in station.genLoopPackets():
print(weeutil.weeutil.timestamp_to_string(packet['dateTime']), packet)
| python |
from typing import Dict
import psycopg2
import requests
def insert_reading(reading: Dict):
sql = """
INSERT INTO youless_readings (
net_counter,
power,
consumption_high,
consumption_low,
production_high,
production_low,
gas
)
VALUES(
%s,
%s,
%s,
%s,
%s,
%s,
%s
);"""
conn = None
try:
# read database configuration
# connect to the PostgreSQL database
conn = psycopg2.connect(host="postgres", database="fokko", user="fokko", password="fokko")
# create a new cursor
cur = conn.cursor()
# execute the INSERT statement
cur.execute(
sql,
(
reading["net"],
reading["pwr"],
reading["p1"],
reading["p2"],
reading["n1"],
reading["n2"],
reading["gas"],
),
)
# commit the changes to the database
conn.commit()
# close communication with the database
cur.close()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
# "tm": unix-time-format (1489333828 => Sun, 12 Mar 2017 15:50:28 GMT)
# "net": Netto counter, as displayed in the web-interface of the LS-120.
# It seems equal to: p1 + p2 - n1 - n2 Perhaps also includes some user set offset.
# "pwr": Actual power use in Watt (can be negative)
# "p1": P1 consumption counter (low tariff)
# "p2": P2 consumption counter (high tariff)
# "n1": N1 production counter (low tariff)
# "n2": N2 production counter (high tariff)
# "Gas": counter gas-meter (in m^3)
youless_address = "http://192.168.1.158/e?f=j"
output = requests.get(url=youless_address)
reading = output.json()[0]
insert_reading(reading)
| python |
import os
import torch
from torch.autograd import Function
import torch.nn as nn
from typing import *
from torch.utils.cpp_extension import load
ppp_ops = load(name="ppp_ops",
sources=[f"{os.path.dirname(os.path.abspath(__file__))}/pointnetpp_operations.cpp",
f"{os.path.dirname(os.path.abspath(__file__))}/pointnetpp_operations.cu"])
class FurthestPointSampling(Function):
@staticmethod
def forward(ctx, xyz: torch.Tensor, npoint: int) -> torch.Tensor:
"""
Uses iterative furthest point sampling to select a set of npoint features that have the largest minimum distance
:param ctx:
:param xyz: (B, N, 3) tensor where N > npoint
:param npoint: number of features in the sampled set
:return: (B, npoint) tensor containing the set
"""
assert(xyz.is_cuda)
return ppp_ops.furthest_point_sampling_cuda(xyz, npoint)
@staticmethod
def backward(xyz, a=None):
return None, None
class GatherOperation(Function):
@staticmethod
def forward(ctx, features: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
"""
:param ctx:
:param features: (B, C, N) tensor
:param idx: (B, npoint) tensor of the features to gather
:return: (B, C, npoint) tensor
"""
_, C, N = features.size()
ctx.for_backwards = (idx, C, N)
assert (features.is_cuda and idx.is_cuda)
return ppp_ops.gather_points_cuda(features, idx)
@staticmethod
def backward(ctx, grad_out):
idx, C, N = ctx.for_backwards
grad_features = ppp_ops.group_points_grad_cuda(grad_out.contiguous(), idx, N)
return grad_features, None
class ThreeNN(Function):
@staticmethod
def forward(ctx, unknown: torch.Tensor, known: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Find the three nearest neighbors of unknown in known
:param ctx:
        :param unknown: (B, n, 3) tensor of points whose neighbors are sought
        :param known: (B, m, 3) tensor of the known (reference) features
:return: (B, n, 3) l2 distance to the three nearest neighbors; (B, n, 3) index of 3 nearest neighbors
"""
assert(unknown.is_cuda and known.is_cuda)
dist2, idx = ppp_ops.three_nn_cuda(unknown, known)
return torch.sqrt(dist2), idx
@staticmethod
def backward(ctx, a=None, b=None):
return None, None
class ThreeInterpolate(Function):
@staticmethod
def forward(ctx, features: torch.Tensor, idx: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
"""
Performs weight linear interpolation on 3 features
:param ctx:
:param features: (B, c, m) Features descriptors to be interpolated from
:param idx: (B, n, 3) three nearest neighbors of the target features in features
:param weight: (B, n, 3) weights
:return: (B, c, n) tensor of the interpolated features
"""
B, c, m = features.size()
n = idx.size(1)
ctx.three_interpolate_for_backward = (idx, weight, m)
assert(features.is_cuda and idx.is_cuda and weight.is_cuda)
return ppp_ops.three_interpolate_cuda(features, idx, weight)
@staticmethod
def backward(ctx, grad_out: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
:param ctx:
        :param grad_out: (B, c, n) tensor with gradients of outputs
:return: (B, c, m) tensor with gradients of features
"""
idx, weight, m = ctx.three_interpolate_for_backward
grad_features = ppp_ops.three_interpolate_grad_cuda(
grad_out.contiguous(), idx, weight, m
)
return grad_features, None, None
class GroupingOperation(Function):
@staticmethod
def forward(ctx, features: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
"""
:param ctx:
:param features: (B, C, N) tensor of features to group
:param idx: (B, npoint, nsample) tensor containing the indices of features to group with
:return: (B, C, npoint, nsample) tensor
"""
B, nfeatures, nsample = idx.size()
_, C, N = features.size()
ctx.for_backwards = (idx, N)
assert(features.is_cuda and idx.is_cuda)
return ppp_ops.group_points_cuda(features, idx)
@staticmethod
def backward(ctx, grad_out: torch.tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
:param ctx:
:param grad_out: (B, C, npoint, nsample) tensor of the gradients of the output from forward
:return: (B, C, N) gradient of the features
"""
idx, N = ctx.for_backwards
grad_features = ppp_ops.group_points_grad_cuda(grad_out.contiguous(), idx, N)
return grad_features, None
class BallQuery(Function):
@staticmethod
def forward(ctx, radius: float, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor) -> torch.Tensor:
"""
:param ctx:
:param radius: radius of the balls
:param nsample: maximum number of features in the balls
:param xyz: (B, N, 3) xyz coordinates of the features
:param new_xyz: (B, npoint, 3) centers of the ball query
:return: (B, npoint, nsample) tensor with the indices of the features that form the query balls
"""
assert(new_xyz.is_cuda and xyz.is_cuda)
return ppp_ops.ball_query_cuda(new_xyz, xyz, radius, nsample)
@staticmethod
def backward(ctx, a=None):
return None, None, None, None
class QueryAndGroup(nn.Module):
def __init__(self, radius: float, nsample: int):
"""
Groups with a ball query of radius
:param radius: Radius of ball
:param nsample: Maximum number of features to gather in the ball
"""
super(QueryAndGroup, self).__init__()
self.radius, self.nsample = radius, nsample
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor, features: torch.Tensor = None) -> torch.Tensor:
"""
:param xyz: xyz coordinates of the features (B, N, 3)
:param new_xyz: centroids (B, npoint, 3)
:param features: Descriptors of the features (B, N, C)
:return: (B, 3 + C, npoint, nsample) tensor
"""
idx = ball_query(self.radius, self.nsample, xyz, new_xyz)
grouped_xyz = grouping_operation(xyz.transpose(1, 2).contiguous(), idx) # (B, 3, npoint, nsample)
grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)
if features is not None:
grouped_features = grouping_operation(features.transpose(1, 2).contiguous(), idx) # (B, C, npoint, nsample)
new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (B, C + 3, npoint, nsample)
else:
new_features = grouped_xyz
return new_features
class GroupAll(nn.Module):
def __init__(self):
"""
Groups all features
"""
super(GroupAll, self).__init__()
def forward(self, xyz, new_xyz: torch.Tensor, features: torch.Tensor = None) -> torch.Tensor:
"""
:param xyz: xyz coordinates of the features (B, N, 3)
:param new_xyz: Ignored
:param features: Descriptors of the features (B, N, C)
:return: (B, C + 3, 1, N) tensor
"""
grouped_xyz = xyz.transpose(1, 2).unsqueeze(2)
if features is not None:
features = features.transpose(1, 2).contiguous() # (B, C, N)
grouped_features = features.unsqueeze(2)
new_features = torch.cat(
[grouped_xyz, grouped_features], dim=1
) # (B, 3 + C, 1, N)
else:
new_features = grouped_xyz
return new_features
ball_query = BallQuery.apply
furthest_point_sample = FurthestPointSampling.apply
gather_operation = GatherOperation.apply
three_nn = ThreeNN.apply
three_interpolate = ThreeInterpolate.apply
grouping_operation = GroupingOperation.apply
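# Usage sketch (illustrative; requires a CUDA device and a successful build of
# the ppp_ops extension loaded above):
# xyz = torch.rand(2, 1024, 3).cuda()                                 # (B, N, 3) point cloud
# idx = furthest_point_sample(xyz, 128)                               # (B, 128) sampled indices
# new_xyz = gather_operation(xyz.transpose(1, 2).contiguous(), idx)   # (B, 3, 128)
# grouped = QueryAndGroup(radius=0.2, nsample=32)(xyz, new_xyz.transpose(1, 2).contiguous())  # (B, 3, 128, 32)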
| python |
import sys
import time
import pprint
from web3 import Web3
from solcx import compile_source
import os
import numpy as np
import networkx
contract_source_path = os.environ['HOME']+'/765_a3/MyContract.sol'
logs = False
grcpt = False
def compile_source_file(file_path):
with open(file_path, 'r') as f:
source = f.read()
return compile_source(source)
def getReceipt(tx_hash3):
'''Get and wait for receipts given a transaction hash'''
while True:
try: # keep trying until we get a receipt
time.sleep(0.1)
receipt3 = w3.eth.getTransactionReceipt(tx_hash3)
break
except:
continue
receipt3 = w3.eth.getTransactionReceipt(tx_hash3)
if receipt3 is not None and logs:
print("empty:{0}".format(receipt3['gasUsed'])) #print amount of gas used for execution
return
def registerUserTransaction(sort_contract, user_id, gr=False):
'''Wrapper for calling registerUser function in solidity. Returns the hash of the tentative transaction.'''
if logs:
print("Registering User:", user_id)
tx_hash = sort_contract.functions.registerUser(user_id, "YG").transact({'txType':"0x3", 'from':w3.eth.accounts[0], 'gas':2409638})
if gr:
getReceipt(tx_hash)
return tx_hash
def createAccTransaction(sort_contract, user_id_1, user_id_2, gr=False):
'''Wrapper for calling createAcc function in solidity. Returns the hash of the tentative transaction.'''
if logs:
print("Creating Account between:", user_id_1, user_id_2)
amt = int(np.random.exponential(10) * 0.5)
tx_hash = sort_contract.functions.createAcc(user_id_1, user_id_2, amt).transact({'txType':"0x3", 'from':w3.eth.accounts[0], 'gas':2409638})
if gr:
getReceipt(tx_hash)
return tx_hash
def closeAccTransaction(sort_contract, user_id_1, user_id_2, gr=False):
'''Wrapper for calling closeAcc function in solidity. Returns the hash of the tentative transaction.'''
if logs:
print("Closing Account between:", user_id_1, user_id_2)
tx_hash = sort_contract.functions.closeAcc(user_id_1, user_id_2).transact({'txType':"0x3", 'from':w3.eth.accounts[0], 'gas':2409638})
if gr:
getReceipt(tx_hash)
return tx_hash
def sendAmountTransaction(sort_contract, user_id_1, user_id_2, amt, gr=False):
'''Wrapper for calling sendAmount function in solidity. Returns the hash of the tentative transaction.'''
if logs:
print("Attempt to send ", amt, " from ", user_id_1, " to ", user_id_2)
tx_hash = sort_contract.functions.sendAmount(user_id_1, user_id_2, amt).transact({'txType':"0x3", 'from':w3.eth.accounts[0], 'gas':2409638})
if gr:
getReceipt(tx_hash)
return tx_hash
def getSucCountCall(sort_contract):
    '''Wrapper for checking successful transaction count in solidity. Returns the number of successful transactions.'''
tx_hash = sort_contract.functions.getSucCount().call()
print("Number of Successful Transactions:", tx_hash)
return tx_hash
#######################################################################################################################
print("Starting Transaction Submission")
w3 = Web3(Web3.HTTPProvider('http://127.0.0.1:1558')) #start web3 on given port
w3.geth.miner.start(1) #start miner
with open(os.environ['HOME']+'/765_a3/MyContractAddressList') as fp:
for line in fp:
a,b = line.rstrip().split(':', 1)
if a=="empty":
contract_source_path = os.environ['HOME']+'/765_a3/MyContract.sol'
compiled_sol = compile_source_file(contract_source_path) #compile solidity code
contract_id, contract_interface = compiled_sol.popitem()
sort_contract = w3.eth.contract(address=b, abi=contract_interface['abi']) #get contract
N = 100 #number of nodes
T = 1000 #number of transactions
interval = 100 #interval of logging and reporting
t = 0
# Register N users
wait_list = []
for i in range(N):
wait_list.append(registerUserTransaction(sort_contract, i, gr=grcpt))
if not grcpt:
for wl in wait_list:
getReceipt(wl)
#Construct power law degree distribution graph using networkx
power_graph = networkx.barabasi_albert_graph(N, int(0.7*N))
#create accounts according to transactions
wait_list = []
# for i in range(N):
# for j in range(i, N):
# edge = (i, j)
for edge in power_graph.edges:
# print(edge)
wait_list.append(createAccTransaction(sort_contract, edge[0], edge[1], gr=grcpt))
if not grcpt:
for wl in wait_list:
getReceipt(wl)
# get the initial successful transaction count; it should be 0
getSucCountCall(sort_contract)
wait_list = []
while (t<T):
sender = np.random.randint(N)
recvr = np.random.randint(N)
    if (sender==recvr): # skip when sender and receiver are the same
continue
t += 1
    wait_list.append(sendAmountTransaction(sort_contract, sender, recvr, 1, gr=grcpt)) # send amount transaction between sender and receiver
if (t%interval==0):
if not grcpt:
for wl in wait_list:
getReceipt(wl)
wait_list = []
getSucCountCall(sort_contract)
print("Number of Total Transactions:", t)
w3.geth.miner.stop() #stop miner | python |
import xacc
xacc.Initialize()
# Get access to D-Wave QPU and
# allocate some qubits
dwave = xacc.getAccelerator('dwave')
qubits = dwave.createBuffer('q')
# Define the function we'd like to
# off-load to the QPU, here
# we're using the QMI low-level language
@xacc.qpu(accelerator=dwave)
def f(buffer, h, j):
qmi(0,0,h)
qmi(1,1,h)
qmi(0,1,j)
# Execute on D-Wave
f(qubits, 1., 2.)
# Print the buffer, this displays
# solutions and energies
print(qubits)
xacc.Finalize() | python |
# -*- coding: utf-8 -*-
'''
Redis SDB module
================
.. versionadded:: 2019.2.0
This module allows access to Redis using an ``sdb://`` URI.
Like all SDB modules, the Redis module requires a configuration profile to
be configured in either the minion or master configuration file. This profile
requires very little. For example:
.. code-block:: yaml
sdb_redis:
driver: redis
host: 127.0.0.1
port: 6379
password: pass
db: 1
The ``driver`` refers to the Redis module, all other options are optional.
For option details see: https://redis-py.readthedocs.io/en/latest/.
'''
from __future__ import absolute_import, print_function, unicode_literals
try:
import redis
HAS_REDIS = True
except ImportError:
HAS_REDIS = False
__func_alias__ = {
'set_': 'set'
}
__virtualname__ = 'redis'
def __virtual__():
'''
Module virtual name.
'''
if not HAS_REDIS:
return (False, 'Please install python-redis to use this SDB module.')
return __virtualname__
def set_(key, value, profile=None):
'''
Set a value into the Redis SDB.
'''
if not profile:
return False
redis_kwargs = profile.copy()
redis_kwargs.pop('driver')
redis_conn = redis.StrictRedis(**redis_kwargs)
return redis_conn.set(key, value)
def get(key, profile=None):
'''
Get a value from the Redis SDB.
'''
if not profile:
return False
redis_kwargs = profile.copy()
redis_kwargs.pop('driver')
redis_conn = redis.StrictRedis(**redis_kwargs)
return redis_conn.get(key)
def delete(key, profile=None):
'''
Delete a key from the Redis SDB.
'''
if not profile:
return False
redis_kwargs = profile.copy()
redis_kwargs.pop('driver')
redis_conn = redis.StrictRedis(**redis_kwargs)
return redis_conn.delete(key)
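# Minimal usage sketch (assumption: calling these functions directly, outside of
# Salt's SDB loader, with a profile dict like the YAML example above).
if __name__ == '__main__':
    profile = {'driver': 'redis', 'host': '127.0.0.1', 'port': 6379, 'db': 1}
    set_('greeting', 'hello', profile=profile)
    print(get('greeting', profile=profile))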
| python |
import ast
import os
import logging
from contextlib import contextmanager
from pystatic.arg import Arg, Argument
from typing import List, Tuple
from pystatic.target import Target
from pystatic.symid import symid2list
from pystatic.typesys import TypeClassTemp, TypeFuncTemp, TypeIns, TypeTemp, TypeType
from pystatic.symtable import SymTable
logger = logging.getLogger(__name__)
_default_dir = os.path.curdir + os.path.sep + 'out'
_indent_unit = ' ' * 4
IMPORT = 1
FUN = 2
CLS = 3
VAR = 4
def stubgen(targets: List[Target], rt_dir=_default_dir):
if not mkstub_dir(rt_dir):
return
for target in targets:
stub_file = filepath(target, rt_dir)
result = stubgen_main(target)
with open(stub_file, 'w') as f:
f.write(result)
def mkstub_dir(dir: str):
if os.path.exists(dir):
if not os.path.isdir(dir):
r_path = os.path.realpath(dir)
logger.error(f'{r_path} already exists and is a file.')
return False
return True
else:
os.mkdir(dir)
return True
def filepath(target: Target, rt_dir: str):
symidlist = symid2list(target.symid)
cur_dir = rt_dir
for i, name in enumerate(symidlist):
next_dir = os.path.join(cur_dir, name)
if not os.path.exists(next_dir):
if i != len(symidlist) - 1:
os.mkdir(next_dir)
cur_dir = next_dir
return cur_dir + '.pyi'
def stubgen_main(target: Target) -> str:
creator = StubGen(target)
return creator.generate()
class Node:
def __init__(self, symid: str):
self.symid = symid
self.subsymid = {}
self.alias = None
def set_alias(self, alias: str):
self.alias = alias
class NameTree:
def __init__(self, module_symid: str):
self.root = Node('')
self.module_symid = module_symid
def ask(self, temp: TypeTemp) -> str:
module_symid = temp.module_symid
symid = temp.name
symidlist = symid2list(module_symid) + symid2list(symid)
cur_node = self.root
namelist = []
for subname in symidlist:
if subname in cur_node.subsymid:
cur_node = cur_node.subsymid[subname]
if cur_node.alias:
namelist = [cur_node.alias]
else:
namelist.append(subname)
else:
return '.'.join(symidlist)
return '.'.join(namelist)
def add_import(self, module_symid: str, symid: str, asname: str):
symidlist = symid2list(module_symid) + symid2list(symid)
cur_node = self.root
for subname in symidlist:
if not subname:
continue
if subname in cur_node.subsymid:
cur_node = cur_node.subsymid[subname]
else:
cur_node.subsymid[subname] = Node(subname)
if asname:
cur_node.alias = asname
class StubGen:
def __init__(self, target: Target):
self.target = target
self.name_tree = NameTree(target.symid)
self.in_class = False
self.from_typing = set()
self.cur_symid = ''
@property
def module_symid(self):
return self.target.symid
@staticmethod
def scoped_list_to_str(lst: List[Tuple[str, int]]):
if not lst:
return ''
results = [lst[0][0]]
prev_scope = lst[0][1]
for item, scope in lst[1:]:
if prev_scope == scope:
results.append(item)
else:
results.append('\n')
results.append(item)
prev_scope = scope
return ''.join(results)
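    # Illustration: scoped_list_to_str([('a\n', 1), ('b\n', 1), ('c\n', 2)]) gives
    # 'a\nb\n\nc\n': entries sharing a scope stay together and a blank line marks a scope change.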
def generate(self):
src_str = self.stubgen_symtable(self.target.symtable, 0)
sym_local = self.target.symtable.local
typing_list = filter(
lambda name: (name not in sym_local) and name.find('.') < 0,
self.from_typing)
impt_typing = ', '.join(typing_list)
if impt_typing:
return f'from typing import {impt_typing}\n' + src_str
else:
return src_str
@contextmanager
def enter_class(self, clsname: str):
old_symid = self.cur_symid
old_in_class = self.in_class
if not self.cur_symid:
self.cur_symid = f'{clsname}'
else:
self.cur_symid += f'.{clsname}'
yield
self.cur_symid = old_symid
self.in_class = old_in_class
def indent_prefix(self, level: int) -> str:
return _indent_unit * level
def stubgen_symtable(self, symtable: 'SymTable', level: int):
results: List[Tuple[str, int]] = []
impt_stmt = self.stubgen_import(symtable, level)
if impt_stmt:
results.append((impt_stmt, IMPORT))
for name, entry in symtable.local.items():
tpins = entry.get_type()
if not tpins:
                logger.warning(f'{name} has incomplete type.')
continue
temp = tpins.temp
if isinstance(tpins, TypeType):
assert isinstance(temp, TypeClassTemp)
results.append((self.stub_cls_def(name, temp, level), CLS))
elif isinstance(temp, TypeFuncTemp):
results.append((self.stub_fun_def(name, temp, level), FUN))
else:
results.append((self.stub_var_def(name, temp, level), VAR))
return self.scoped_list_to_str(results)
def stubgen_import(self, symtable: 'SymTable', level: int) -> str:
results = []
for impt_node in symtable._import_nodes:
impt_dict = split_import_stmt(impt_node, symtable.glob_symid)
if isinstance(impt_node, ast.Import):
import_stmt = 'import '
import_subitem = []
for symid, infolist in impt_dict.items():
module_name = symid
for asname, origin_name in infolist:
assert not origin_name
if asname == module_name:
top_name = symid2list(asname)[0]
if top_name:
symtable.local.pop(top_name, None)
import_subitem.append(f'{module_name}')
self.name_tree.add_import(module_name, '', '')
else:
symtable.local.pop(asname, None)
import_subitem.append(f'{module_name} as {asname}')
self.name_tree.add_import(module_name, '', asname)
if len(import_subitem) > 5:
import_stmt += '(' + ', '.join(import_subitem) + ')'
else:
import_stmt += ', '.join(import_subitem)
results.append((import_stmt, level))
else:
for symid, infolist in impt_dict.items():
module_name = symid
from_impt: List[str] = []
for asname, origin_name in infolist:
if origin_name == asname:
symtable.local.pop(asname, None)
from_impt.append(f"{asname}")
self.name_tree.add_import(module_name, origin_name,
'')
else:
symtable.local.pop(asname, None)
from_impt.append(f"{origin_name} as {asname}")
self.name_tree.add_import(module_name, origin_name,
asname)
if from_impt:
impt_str = ', '.join(from_impt)
if len(from_impt) > 5:
from_stmt = f'from {module_name} import ({impt_str})'
else:
from_stmt = f'from {module_name} import {impt_str}'
results.append((from_stmt, level))
if not results:
return ''
else:
return '\n'.join(
[_indent_unit * ident + stmt
for stmt, ident in results]) + '\n'
def stub_var_def(self, varname: str, temp: TypeTemp, level: int):
module_symid = temp.module_symid
symid = temp.name
type_str = ''
if module_symid == 'builtins':
type_str = symid
elif module_symid == 'typing':
self.from_typing.add(symid)
type_str = symid
elif module_symid == self.module_symid:
if self.cur_symid and symid.find(
self.cur_symid) == 0 and len(symid) > len(self.cur_symid):
type_str = symid[len(self.cur_symid) + 1:]
else:
type_str = symid
else:
type_str = self.name_tree.ask(temp)
return _indent_unit * level + varname + ': ' + type_str + '\n'
def stub_cls_def(self, clsname: str, temp: TypeClassTemp, level: int):
header = self.stub_cls_def_header(clsname, temp, level)
inner_symtable = temp.get_inner_symtable()
var_strlist = []
with self.enter_class(clsname):
for name, tpins in temp.var_attr.items():
var_strlist.append(
self.stub_var_def(name, tpins.temp, level + 1))
body = self.stubgen_symtable(inner_symtable, level + 1)
if not body or body == '\n':
header += '...\n'
return header
if var_strlist:
body = ''.join(var_strlist) + '\n' + body
return header + '\n' + body
def stub_cls_def_header(self, clsname: str, temp: TypeClassTemp,
level: int) -> str:
return _indent_unit * level + 'class ' + clsname + ': '
def _stub_single_fun(self, name: str, argument: Argument, ret: TypeIns):
"""generate single function type annotations in pyi file"""
def get_arg_str(arg: Arg):
cur_str = arg.name
cur_str += ': ' + str(arg.ann)
if arg.valid:
cur_str += '=...'
return cur_str
arg_strlist = []
for arg in argument.args:
cur_str = get_arg_str(arg)
arg_strlist.append(cur_str)
if argument.vararg:
cur_str = get_arg_str(argument.vararg)
arg_strlist.append(cur_str)
for arg in argument.kwonlyargs:
cur_str = get_arg_str(arg)
arg_strlist.append(cur_str)
if argument.kwarg:
cur_str = get_arg_str(argument.kwarg)
arg_strlist.append(cur_str)
param = '(' + ', '.join(arg_strlist) + ')'
return 'def ' + name + param + ': ...\n'
def stub_fun_def(self,
funname: str,
temp: TypeFuncTemp,
level: int,
is_method=False) -> str:
is_overload = len(temp.overloads) > 1
if is_overload:
self.from_typing.add('overload') # import overload from typing
indent_prefix = self.indent_prefix(level)
fun_pyi = []
for argument, ret in temp.overloads:
fun_res = self._stub_single_fun(funname, argument, ret)
if is_overload:
cur_fun_pyi = indent_prefix + '@overload\n'
else:
cur_fun_pyi = ''
cur_fun_pyi += indent_prefix + fun_res
fun_pyi.append(cur_fun_pyi)
return ''.join(fun_pyi)
| python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2012, Rui Carmo
Description: Docstring utility functions
License: MIT (see LICENSE.md for details)
"""
import os, sys, logging
import inspect
from bottle import app
log = logging.getLogger()
def docs():
"""Gather all docstrings related to routes and return them grouped by module"""
routes = []
modules = {}
for route in app().routes:
doc = inspect.getdoc(route.callback) or inspect.getcomments(route.callback)
if not doc:
doc = ''
module = inspect.getmodule(route.callback).__name__
item = {
'method': route.method,
'route': route.rule,
'function': route.callback.__name__,
'module': module,
'doc': inspect.cleandoc(doc)
}
        if module not in modules:
modules[module] = []
modules[module].append(item)
return modules | python |
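# Minimal usage sketch (assumption: a bottle route registered on the default app;
# docs() then returns its docstring grouped under this module's name).
if __name__ == '__main__':
    from bottle import route
    @route('/hello')
    def hello():
        """Say hello."""
        return 'hello'
    print(docs())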
import pytest
from pydantic import ValidationError
from porcupine.base import Serializer
class User(object):
def __init__(self, name=None, surname=None, age=None):
self.name = name
self.surname = surname
self.age = age
class UserSerializer(Serializer):
name: str
surname: str
age: int = None
@pytest.fixture
def user_full():
user = User('foo', 'bar', 23)
return user
@pytest.fixture
def user_required_only():
user = User('foo', 'bar')
return user
@pytest.fixture
def user_none():
user = User()
return user
class TestSimpleObject:
def test_successful_serialisation(self, user_full):
dictionary = UserSerializer(user_full).dict()
assert dictionary == {'name': 'foo', 'surname': 'bar', 'age': 23}
def test_non_required_attributes(self, user_required_only):
dictionary = UserSerializer(user_required_only).dict()
assert dictionary == {'name': 'foo', 'surname': 'bar', 'age': None}
def test_required_attributes(self, user_none):
expected_errors = [
{'loc': ('name',), 'msg': 'none is not an allowed value', 'type': 'type_error.none.not_allowed'},
{'loc': ('surname',), 'msg': 'none is not an allowed value', 'type': 'type_error.none.not_allowed'}
]
with pytest.raises(ValidationError) as exception:
UserSerializer(user_none).dict()
assert exception.value.errors() == expected_errors
| python |
from pkg_resources import parse_version
from configparser import ConfigParser
import setuptools
assert parse_version(setuptools.__version__)>=parse_version('36.2')
# note: all settings are in settings.ini; edit there, not here
config = ConfigParser(delimiters=['='])
config.read('settings.ini')
cfg = config['DEFAULT']
cfg_keys = 'version description keywords author author_email'.split()
expected = (cfg_keys
+ "lib_name user branch license status min_python audience language".split()
)
for o in expected:
assert o in cfg, "missing expected setting: {}".format(o)
setup_cfg = {o:cfg[o] for o in cfg_keys}
licenses = {
'apache2': (
'Apache Software License 2.0',
'OSI Approved :: Apache Software License'),
}
statuses = [
'1 - Planning',
'2 - Pre-Alpha',
'3 - Alpha',
'4 - Beta',
'5 - Production/Stable',
'6 - Mature',
'7 - Inactive' ]
py_versions = '2.0 2.1 2.2 2.3 2.4 2.5 2.6 2.7 3.0 3.1 3.2 3.3 3.4 3.5 3.6 3.7 3.8'.split()
def parse_requirements(name):
return cfg[name].strip("\n").split("\n")
requirements = parse_requirements("requirements")
o_gpu = parse_requirements("onnxgpu")
o_cpu = parse_requirements("onnxcpu")
interp = parse_requirements("interp")
all_req = parse_requirements("all")
extras = {}
extras["onnx-gpu"] = ['onnxruntime-gpu']
extras["onnx-cpu"] = ['onnxruntime-cpu']
extras["interp"] = ['plotly', 'plotnine', 'shap<0.36.0']
extras["all"] = ['fastai', 'onnxruntime-gpu', 'plotly', 'plotnine', 'shap<0.36.0']
lic = licenses[cfg['license']]
min_python = cfg['min_python']
setuptools.setup(
name = cfg['lib_name'],
license = lic[0],
classifiers = [
'Development Status :: ' + statuses[int(cfg['status'])],
'Intended Audience :: ' + cfg['audience'].title(),
'License :: ' + lic[1],
'Natural Language :: ' + cfg['language'].title(),
]
+ [
'Programming Language :: Python :: '+o
for o in py_versions[py_versions.index(min_python):]
],
url = cfg['git_url'],
packages = setuptools.find_packages(),
include_package_data = True,
install_requires = requirements,
extras_require=extras,
dependency_links = cfg.get('dep_links','').split(),
python_requires = '>=' + cfg['min_python'],
long_description = open('README.md').read(),
long_description_content_type = 'text/markdown',
zip_safe = False,
entry_points = { 'console_scripts': cfg.get('console_scripts','').split() },
**setup_cfg)
| python |
import json
import requests
__version__ = '1.0.2'
class TelenorWeb2SMSException(Exception):
"""A generic exception for all others to extend."""
def __str__(self):
# Use the class docstring if the exception message hasn't been provided
if len(self.args) == 0:
return self.__doc__
return super(TelenorWeb2SMSException, self).__str__()
class AuthenticationFailure(TelenorWeb2SMSException):
"""The given username and password might not be correct."""
class SMSNotSent(TelenorWeb2SMSException):
"""SMS has not been sent."""
class UnsupportedPhoneNumberFormat(TelenorWeb2SMSException):
"""The given phone number isn't in any of the supported formats."""
class TelenorWeb2SMS(object):
auth_url = 'https://www.telenor.rs/portal/index.php'
send_sms_url = 'https://www.telenor.rs/portal/usluge/sendsms.php'
def __init__(self, username, password, auth_url=None):
self.session = requests.session(headers={
'User-Agent': "telenor_web2sms/%s" % __version__,
})
self.auth(username, password, auth_url)
def auth(self, username, password, auth_url=None):
auth_url = auth_url or self.auth_url
r = self.session.post(
url=auth_url,
data={
'brtel': username,
'lozinka': password,
}
)
# Check if we made a bad request
r.raise_for_status()
if r.url == self.auth_url:
raise AuthenticationFailure()
def parse_phone_number(self, phone_number):
if phone_number.startswith('0'):
normalized = phone_number[1:]
elif phone_number.startswith('+381'):
normalized = phone_number[4:]
else:
raise UnsupportedPhoneNumberFormat()
area_code = normalized[:2]
number = normalized[2:]
return area_code, number
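    # For example (illustrative): parse_phone_number('+38163123456') and
    # parse_phone_number('063123456') both return ('63', '123456').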
def send_sms(self, phone_number, message, send_sms_url=None):
send_sms_url = send_sms_url or self.send_sms_url
area_code, number = self.parse_phone_number(phone_number)
r = self.session.post(
url=send_sms_url,
data={
'pozivni': area_code,
'BBroj': number,
'smsporuka': message,
}
)
# Check if we made a bad request
r.raise_for_status()
j = json.loads(r.content)
if j['status'] != 'OK':
raise SMSNotSent("SMS has not been sent, because '%s'." % j['message'])
def __call__(self, phone_number, message, send_sms_url=None):
        self.send_sms(phone_number, message, send_sms_url)
def main():
import argparse
import os
import sys
def env(e):
return os.environ.get(e, '')
parser = argparse.ArgumentParser(
description='Send a SMS through the Telenor WEB2SMS web app'
)
parser.add_argument(
'-u',
'--username',
default = env('TELENOR_WEB2SMS_USERNAME'),
help='Your Telenor WEB2SMS username. Defaults to env[TELENOR_WEB2SMS_USERNAME]'
)
parser.add_argument(
'-p',
'--phone-number',
help='Recipients phone number'
)
# As Telenor WEB2SMS cuts of newlines, there's no point in allowing
# multiline input.
parser.add_argument(
'-m',
'--message',
help='Message to send'
)
parser.add_argument(
'--version',
action='version',
version="%(prog)s " + __version__
)
args = parser.parse_args()
try:
# Authenticate to Telenor WEB2SMS
username = args.username or raw_input('What is your Telenor WEB2SMS username? ')
password = env('TELENOR_WEB2SMS_PASSWORD') or raw_input('What is your Telenor WEB2SMS password? ')
web2sms = TelenorWeb2SMS(username, password)
# Send SMS
phone_number = args.phone_number or raw_input('Who are you sending this SMS to? ')
message = args.message or raw_input('Enter your message: ')
web2sms.send_sms(phone_number, message)
print 'SMS sent successfully.'
except Exception, e:
print >> sys.stderr, "ERROR:", e
sys.exit(1)
if __name__ == '__main__':
main()
| python |
from functools import partial
from typing import Callable, Tuple
import numpy as np
from hmc.core import for_loop, while_loop
from hmc.integrators.terminal import cond
def step(val: Tuple, zo: np.ndarray, step_size: float, vector_field: Callable) -> Tuple:
"""Single step of the implicit midpoint integrator. Computes the midpoint,
evaluates the gradient at the midpoint, takes a step from the initial
position in the direction of the gradient at the midpoint, and measures the
difference between the resulting point and the candidate stationary point.
"""
zncand, _, num_iters = val
zm = (zncand + zo) / 2.
dz = np.hstack(vector_field(*np.split(zm, 2)))
zn = zo + step_size * dz
delta = zn - zncand
return zn, delta, num_iters + 1
def _single_step_implicit_midpoint(
vector_field: Callable,
zo: Tuple[np.ndarray],
step_size: float,
thresh: float,
max_iters: int) -> Tuple:
"""Implements the implicit midpoint integrator. The implicit midpoint
integrator is symmetric, symplectic, and second-order accurate (third-order
local error).
Args:
vector_field: The Hamiltonian vector field.
zo: Tuple containing the position and momentum variables in the original
phase space.
step_size: Integration step_size.
thresh: Convergence tolerance for fixed point iterations.
max_iters: Maximum number of fixed point iterations.
Returns:
qn: The terminal position variable.
pn: The terminal momentum variable.
num_iters: The number of fixed point iterations to find the midpoint.
success: Boolean flag indicating successful integration.
"""
# Initial candidate.
qo, po = zo
zo = np.hstack((qo, po))
# Fixed point iteration.
delta = np.ones_like(zo) * np.inf
dz = np.hstack(vector_field(*np.split(zo, 2)))
zopred = zo + 0.5 * step_size * dz
val = (zopred, delta, 0)
zn, delta, num_iters = while_loop(
partial(cond, thresh=thresh, max_iters=max_iters),
partial(step, zo=zo, step_size=step_size, vector_field=vector_field),
val)
# Determine whether or not the integration was successful.
success = np.all(delta < thresh)
qn, pn = np.split(zn, 2)
return (qn, pn), num_iters, success
def implicit_midpoint(
vector_field: Callable,
zo: Tuple[np.ndarray],
step_size: float,
num_steps: int,
thresh: float=1e-6,
max_iters: int=1000
) -> Tuple:
def step(it: int, val: Tuple):
zo, so = val
zn, _, sn = _single_step_implicit_midpoint(vector_field, zo, step_size, thresh, max_iters)
success = np.logical_and(so, sn)
return zn, success
(qn, pn), success = for_loop(0, num_steps, step, (zo, True))
return (qn, pn), success
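# Minimal usage sketch (assumption: a 1-D harmonic oscillator with Hamiltonian
# H(q, p) = (q**2 + p**2) / 2, whose vector field is dq/dt = p, dp/dt = -q).
if __name__ == '__main__':
    vector_field = lambda q, p: (p, -q)
    zo = (np.array([1.0]), np.array([0.0]))
    (qn, pn), success = implicit_midpoint(vector_field, zo, step_size=0.1, num_steps=100)
    print(qn, pn, success)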
| python |
import pandas as pd
from datetime import datetime
import shlex
import subprocess
import requests
from reportlab.pdfgen import canvas
def generateReport(event_ts, keys):
print('printing report')
directory = "./data/"
csv_name = "result.csv"
csvpath = directory + csv_name
csv = pd.read_csv(csvpath)
    # Query Classification Summary Report
Querry_Classification_Summary_Report = keys['report_header']
    # Counting total no. of queries
total_querry_count = csv.count()
total_querry_count = str(total_querry_count[0])
all_product_count = csv["Category"].value_counts().rename_axis('products').reset_index(name='counts')
total_products = all_product_count.count()
total_products = total_products[0]
indent = 100
indent_next = 250
pdf_name = "result.pdf"
pdfpath = directory + pdf_name
c = canvas.Canvas(pdfpath)
c.drawString(indent, 800, Querry_Classification_Summary_Report)
c.drawString(indent, 750, "Total No. Of querries = ")
c.drawString(indent_next, 750, total_querry_count)
c.drawString(indent, 725, "Product")
c.drawString(indent_next, 725, "Count")
height = 700
for i in range(total_products):
c.drawString(indent, height, str(all_product_count["products"][i]))
c.drawString(indent_next, height, str(all_product_count["counts"][i]))
height = height - 25
c.save()
print('pdf created')
cha = keys['channel_report']
chai = keys['slack_bot_token']
chaii = 'Please find the report attached'
try:
command_line = 'curl -F file=@"./data/result.pdf" -F "initial_comment=%s" -F channels=%s -H "Authorization: Bearer %s" https://slack.com/api/files.upload' % (chaii, cha, chai)
args = shlex.split(command_line)
subprocess.Popen(args)
print(args)
except (AssertionError, AttributeError, EOFError, FloatingPointError, GeneratorExit, ImportError, IndexError, KeyError, KeyboardInterrupt, MemoryError, NameError, NotImplementedError, OSError, OverflowError, ReferenceError, RuntimeError, StopIteration, SyntaxError, IndentationError, TabError, SystemError, SystemExit, TypeError, UnboundLocalError, UnicodeError, UnicodeEncodeError, UnicodeDecodeError, UnicodeTranslateError, ValueError, ZeroDivisionError):
headers = {
'Authorization': keys['slack_bot_token']
}
print(headers)
files = {
'file': ('C:\\Users\\z003ww7c.AD001\\PycharmProjects\\SlackIntegration\\data\\result.csv',
open('C:\\Users\\z003ww7c.AD001\\PycharmProjects\\SlackIntegration\\data\\result.csv', 'rb')),
'initial_comment': 'Please find the report attached',
'channels': keys['channel_report'],
}
url = 'https://slack.com/api/files.upload'
requests.post(url, headers=headers, files=files)
| python |
"""regex utils """
import re
def remove_digits(s: str) -> str:
""" removes digits in a string """
    return re.sub(r"\d+", "", s)
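# Quick illustration: remove_digits("abc123def45") returns "abcdef".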
| python |
'''
Transcribing DNA into RNA
http://rosalind.info/problems/rna/
Problem
An RNA string is a string formed from the alphabet containing 'A', 'C',
'G', and 'U'.
Given a DNA string t corresponding to a coding strand, its transcribed
RNA string u is formed by replacing all occurrences of 'T' in t with 'U'
in u.
Given: A DNA string t having length at most 1000 nt.
Return: The transcribed RNA string of t.
Sample Dataset
GATGGAACTTGACTACGTAAATT
Sample Output
GAUGGAACUUGACUACGUAAAUU
'''
from lib.sequences import DNA
def run_rna(sequence):
''' Converts a DNA string into RNA '''
return DNA(sequence).to_rna().sequence
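# The DNA helper above lives in lib.sequences (not included here). For reference,
# a minimal stand-in for the transcription step is a single substitution:
def run_rna_minimal(sequence):
    ''' Hypothetical helper: transcribe DNA to RNA without lib.sequences '''
    return sequence.replace('T', 'U')
# e.g. run_rna_minimal('GATGGAACTTGACTACGTAAATT') == 'GAUGGAACUUGACUACGUAAAUU'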
| python |
from itertools import product
with open('output.txt') as f:
s = f.read().strip()
for i, j in product(range(10), repeat=2):
try:
bits = '1'*i + s + '1'*j
x = bytes.fromhex(f'{int(bits, 2):x}')
if b'CCTF{' in x:
print(x)
break
except:
pass
| python |
class Solution:
def setZeroes(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: void Do not return anything, modify matrix in-place instead.
"""
        zero_rows = []
        zero_cols = []
        for i in range(len(matrix)):
            for j in range(len(matrix[0])):
                if matrix[i][j] == 0:
                    zero_rows.append(i)
                    zero_cols.append(j)
        for i in range(len(matrix)):
            for j in range(len(matrix[0])):
                if i in zero_rows:
                    matrix[i][j] = 0
                if j in zero_cols:
                    matrix[i][j] = 0
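        # Quick illustration (in-place): for m = [[1, 1, 1], [1, 0, 1], [1, 1, 1]],
        # Solution().setZeroes(m) leaves m == [[1, 0, 1], [0, 0, 0], [1, 0, 1]].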
| python |
import pytest
from numpy.testing import assert_array_almost_equal
from Auto import *
class Test_AutoSample:
@classmethod
def setup_method(cls):
np.random.seed(123)
cls.target = lambda x: np.where(x < 0, 0, np.exp(-x))
cls.shape = (1,)
cls.njobs = 1
cls.algo = AutoSample(target=cls.target, shape=cls.shape, njobs=cls.njobs)
@pytest.mark.filterwarnings("ignore::UserWarning")
@pytest.mark.filterwarnings('ignore::RuntimeWarning')
def test_sample(self):
sample = self.algo.sample(size=1, chains=1)
assert sample.shape == (1, 1)
| python |
#!python
# This generates a java source file by taking each method that has a
# parameters (String s, int off, int end) and generating a copy that
# takes (char[] s, int off, int end).
# Fix emacs syntax highlighting "
src = r"""
// Copyright (C) 2011 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.autoesc;
import java.io.IOException;
import java.io.Writer;
import javax.annotation.Nullable;
/** XML contains utilities for dealing with XML contexts. */
class XML {
  static final ReplacementTable REPLACEMENT_TABLE = new ReplacementTable()
      .add('`', "&#96;")
      .add('<', "&lt;")
      .add('>', "&gt;")
      .add('+', "&#43;")
      .add('\'', "&#39;")
      .add('&', "&amp;")
      .add('"', "&#34;")
      // XML cannot contain NULs even if encoded, so treat NUL as an error case
      // and replace it with U+FFFD, the replacement character.
      .add((char) 0, "\ufffd");
static final ReplacementTable NORM_REPLACEMENT_TABLE
= new ReplacementTable(REPLACEMENT_TABLE)
.add('&', null);
/** escapeOnto escapes for inclusion in XML text. */
static void escapeOnto(@Nullable Object o, Writer out) throws IOException {
String safe = ContentType.Markup.derefSafeContent(o);
if (safe != null) {
out.write(safe);
return;
}
REPLACEMENT_TABLE.escapeOnto(o, out);
}
/** escapeOnto escapes for inclusion in XML text. */
static void escapeOnto(String s, int off, int end, Writer out)
throws IOException {
REPLACEMENT_TABLE.escapeOnto(s, off, end, out);
}
/**
* normalizeOnto escapes for inclusion in XML text but does not break
* existing entities.
*/
static void normalizeOnto(@Nullable Object o, Writer out) throws IOException {
String safe = ContentType.Markup.derefSafeContent(o);
if (safe != null) {
out.write(safe);
return;
}
NORM_REPLACEMENT_TABLE.escapeOnto(o, out);
}
/**
* normalizeOnto escapes for inclusion in XML text but does not break
* existing entities.
*/
static void normalizeOnto(String s, int off, int end, Writer out)
throws IOException {
NORM_REPLACEMENT_TABLE.escapeOnto(s, off, end, out);
}
/**
* escapeCDATAOnto emits the text unchanged assuming it will go inside a
* {@code <![CDATA[...]]>} block unless the string contains {@code "]]>"} or
* starts or ends with a prefix or suffix thereof in which case it splits the
* CDATA section around that chunk and resumes on the other side:
* {@code "foo]]>bar"} → {@code "foo]]]]><![CDATA[>bar"}.
* Any buggy regex based XML parsers that allow CDATA sections to contain
* {@code "]]>"} by using surrounding tags as boundaries (e.g. looking for
* {@code /<tag><!\[CDATA\[(.*?)\]\]><\/tag>/} can simply remove all
* all occurrences of {@code "]]><![CDATA["}.
*/
static void escapeCDATAOnto(String s, int offset, int end, Writer out)
throws IOException {
if (offset >= end) { return; }
int off = offset;
// Elide all NULs which are not strictly allowed in XML.
for (int i = off; i < end; ++i) {
if (s.charAt(i) == 0) {
StringBuilder sb = new StringBuilder(end - off);
for (i = off; i < end; ++i) {
char ch = s.charAt(i);
if (ch != 0) { sb.append(ch); }
}
escapeCDATAOnto(sb.toString(), 0, sb.length(), out);
return;
}
}
// Make sure the start of the string can't combine with any characters
// already on out to break out of the CDATA section.
{
char ch0 = s.charAt(off);
if (ch0 == '>'
|| (ch0 == ']' && off + 1 < end && s.charAt(off + 1) == '>')) {
out.write("]]><![CDATA[");
}
}
for (int i = off; i < end - 2; ++i) {
if (s.charAt(i)== ']' && s.charAt(i + 1) == ']'
&& s.charAt(i + 2) == '>') {
out.write(s, off, i - off);
out.write("]]]]><![CDATA[>");
i += 2;
off = i + 1;
}
}
out.write(s, off, end - off);
// Prevent the next character written to out from combining with trailing
// characters from s to form "]]>".
if (s.charAt(end - 1) == ']') {
out.write("]]><![CDATA[");
}
}
/**
* escapeCDATAOnto escapes for inclusion in an XML {@code <![CDATA[...]]>}
* section.
*/
static void escapeCDATAOnto(@Nullable Object o, Writer out)
throws IOException {
if (o == null) { return; }
if (o instanceof char[]) {
char[] chars = (char[]) o;
escapeCDATAOnto(chars, 0, chars.length, out);
} else {
String s = o.toString();
escapeCDATAOnto(s, 0, s.length(), out);
}
}
}
""" # Fix emacs syntax highlighting "
import dupe_methods
print dupe_methods.dupe(src)
| python |
def print_me(y):
return 10 + y # pragma: no cover
def return_val(val):
val += 1
return val
def return_val2(val):
val += 1
return val
| python |
"""
endpoint schemas for knoweng
"""
| python |
"""Integration tests for dice_roller.py"""
import unittest
import dice_roller
class DiceRollerIntegrationTests(unittest.TestCase):
"""
Integration tests for DiceRoller that check that history() and clear() are working
"""
def test_no_history(self):
"""
test that .history() returns {} when no rolls have been made
"""
dice_roller_instance = dice_roller.DiceRoller()
self.assertEqual(dice_roller_instance.history(), {})
def test_history(self):
"""
test .history() returns the correct output after running .roll()
"""
dice_roller_instance = dice_roller.DiceRoller()
# run 4 rolls, save the results so we can get the roll result for the assert below
result_0 = dice_roller_instance.roll((1, 20))
result_1 = dice_roller_instance.roll((1, 20), (2, 10), (1, 100))
result_2 = dice_roller_instance.roll((1, 20), (2, 10), (1, 100))
result_3 = dice_roller_instance.roll((10, 1))
self.assertEqual(
dice_roller_instance.history(),
{
'roll_0': {'dice': {'dice_0': {'number_of_dice': 1, 'number_of_sides': 20}},
'result': result_0[0], 'min': 1, 'max': 20, 'median': 10.5},
'roll_1': {'dice': {'dice_0': {'number_of_dice': 1, 'number_of_sides': 20},
'dice_1': {'number_of_dice': 2, 'number_of_sides': 10},
'dice_2': {'number_of_dice': 1, 'number_of_sides': 100}},
'result': result_1[0], 'min': 4, 'max': 140, 'median': 72.0},
'roll_2': {'dice': {'dice_0': {'number_of_dice': 1, 'number_of_sides': 20},
'dice_1': {'number_of_dice': 2, 'number_of_sides': 10},
'dice_2': {'number_of_dice': 1, 'number_of_sides': 100}},
'result': result_2[0], 'min': 4, 'max': 140, 'median': 72.0},
'roll_3': {'dice': {'dice_0': {'number_of_dice': 10, 'number_of_sides': 1}},
'result': result_3[0], 'min': 10, 'max': 10, 'median': 10.0}
}
)
def test_history_with_invalid_inputs(self):
"""
test that .history() is not messed up by invalid rolls
"""
dice_roller_instance = dice_roller.DiceRoller()
result_0 = dice_roller_instance.roll((1, 15))
result_1 = dice_roller_instance.roll((1, 30), (2, 10), (1, 100))
try:
dice_roller_instance.roll((0, 5))
except ValueError:
pass
try:
dice_roller_instance.roll((10, 0))
except ValueError:
pass
result_2 = dice_roller_instance.roll((1, 20), (2, 10))
result_3 = dice_roller_instance.roll((5, 4))
result_4 = dice_roller_instance.roll()
self.assertEqual(
dice_roller_instance.history(),
{
'roll_0': {'dice': {'dice_0': {'number_of_dice': 1, 'number_of_sides': 15}},
'result': result_0[0], 'min': 1, 'max': 15, 'median': 8.0},
'roll_1': {'dice': {'dice_0': {'number_of_dice': 1, 'number_of_sides': 30},
'dice_1': {'number_of_dice': 2, 'number_of_sides': 10},
'dice_2': {'number_of_dice': 1, 'number_of_sides': 100}},
'result': result_1[0], 'min': 4, 'max': 150, 'median': 77.0},
'roll_2': {'dice': {'dice_0': {'number_of_dice': 1, 'number_of_sides': 20},
'dice_1': {'number_of_dice': 2, 'number_of_sides': 10}},
'result': result_2[0], 'min': 3, 'max': 40, 'median': 21.5},
'roll_3': {'dice': {'dice_0': {'number_of_dice': 5, 'number_of_sides': 4}},
'result': result_3[0], 'min': 5, 'max': 20, 'median': 12.5},
'roll_4': {'dice': {'dice_0': {'number_of_dice': 1, 'number_of_sides': 20}},
'result': result_4[0], 'min': 1, 'max': 20, 'median': 10.5},
}
)
def test_clear_history(self):
"""
test that .clear() empties the instance's record and that .history() returns {}
"""
dice_roller_instance = dice_roller.DiceRoller()
dice_roller_instance.roll((1, 200))
dice_roller_instance.roll((2, 20), (2, 13), (1, 100))
dice_roller_instance.roll((1, 20), (2, 6), (1, 10))
dice_roller_instance.roll((10, 2))
dice_roller_instance.clear()
self.assertEqual(dice_roller_instance.history(), {})
| python |
from django.http import JsonResponse
from presentation.models import Liked, Author
from django.shortcuts import get_object_or_404
from presentation.Serializers.liked_serializer import LikedSerializer
from presentation.Serializers.author_serializer import AuthorSerializer
from rest_framework import viewsets, status
from rest_framework.response import Response
from urllib.parse import urlparse
from . import urlutil
def getAuthorIDFromRequestURL(request, id):
host = urlutil.getSafeURL(request.build_absolute_uri())
author_id = f"{host}/author/{id}"
return author_id
class LikedViewSet(viewsets.ModelViewSet):
serializer_class = LikedSerializer
queryset = Liked.objects.all()
def list(self, request, *args, **kwargs):
author_id = getAuthorIDFromRequestURL(request, self.kwargs['author_id'])
author_ = get_object_or_404(Author, id=author_id)
queryset = Liked.objects.filter(author=author_id)
if queryset.exists():
items = Liked.objects.filter(author=author_id)
for item in items:
item.id = None
items = list(items.values())
return JsonResponse(items,safe=False)
else:
Liked.objects.create(author=author_id)
return Response({
'type': 'liked',
'author': author_id,
'items': []
})
def retrieve(self, request, *args, **kwargs):
author_id = getAuthorIDFromRequestURL(request, self.kwargs['author_id'])
queryset = Liked.objects.get(author=author_id)
        serializer = LikedSerializer(queryset)
return Response(serializer.data) | python |
import argparse
import sys
import os.path as osp
import os
sys.path.insert(1, osp.abspath(osp.join(os.getcwd(), *('..',)*2)))
from dataset_preprocess import CoraDataset, PlanetoidDataset
from attack.models import *
import torch
import pandas as pd
from tqdm.notebook import tqdm
from attack.GAFNC import GNNAttack
from torch_geometric.utils.loop import add_self_loops, remove_self_loops
import utils
import numpy as np
import pickle
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
def index_to_mask(index, size):
mask = torch.zeros(size, dtype=torch.bool, device=index.device)
mask[index] = 1
return mask
def split_dataset(dataset, new_nodes, train_percent=0.7):
indices = []
_size = dataset.data.num_nodes - new_nodes
y = dataset.data.y[:_size]
for i in range(dataset.num_classes):
index = (y == i).nonzero().view(-1)
index = index[torch.randperm(index.size(0))]
indices.append(index)
train_index = torch.cat([i[:int(len(i) * train_percent)] for i in indices], dim=0)
rest_index = torch.cat([i[int(len(i) * train_percent):] for i in indices], dim=0)
rest_index = rest_index[torch.randperm(rest_index.size(0))]
dataset.data.train_mask = index_to_mask(train_index, size=dataset.data.num_nodes)
dataset.data.val_mask = index_to_mask(rest_index[:len(rest_index) // 2], size=dataset.data.num_nodes)
dataset.data.test_mask = index_to_mask(rest_index[len(rest_index) // 2:], size=dataset.data.num_nodes)
dataset.train_index = train_index[:]
dataset.val_index = rest_index[:len(rest_index) // 2]
dataset.test_index = rest_index[len(rest_index) // 2:]
dataset.data, dataset.slices = dataset.collate([dataset.data])
return dataset
def build_args():
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_name', type=str, default='cora', help='name of dataset_preprocess')
# dataset_name = ['cora', 'citeseer', 'pubmed']
parser.add_argument('--attack_graph', type=str2bool, default=True, help='global attack')
parser.add_argument('--node_idx', type=int, default=None, help='no target idx')
parser.add_argument('--structure_attack', type=str2bool, default=True, help='with structure attack')
parser.add_argument('--feature_attack', type=str2bool, default=False, help='with feature attack')
parser.add_argument('--added_node_num', type=int, default=20, help='num of new nodes')
parser.add_argument('--train_percent', type=float, default=0.7, help='train percent')
parser.add_argument('--fix_sparsity', type=str2bool, default=True, help='control the attack sparsity')
parser.add_argument('--sparsity', type=float, default=0.5, help='sparsity')
parser.add_argument('--feat_sparsity', type=float, default=0.5, help='feat_sparsity')
parser.add_argument('--random_structure', type=str2bool, default=False, help='random mask')
parser.add_argument('--random_feature', type=str2bool, default=False, help='random mask of feature')
parser.add_argument('--edge_size', type=float, default=1e-5, help='edge_size')
parser.add_argument('--edge_ent', type=float, default=1.0, help='edge_ent')
parser.add_argument('--node_feat_size', type=float, default=1e-5, help='edge_size')
parser.add_argument('--node_feat_ent', type=float, default=1.0, help='edge_ent')
parser.add_argument('--train_epochs', type=int, default=300, help='epochs for training a GNN model')
parser.add_argument('--attack_epochs', type=int, default=600, help='epochs for attacking a GNN model')
parser.add_argument('--retrain_epochs', type=int, default=10,
help='epochs for retraining a GNN model with new graph')
parser.add_argument('--seed', type=int, default=42, help='seed')
parser.add_argument('--desired_class', type=int, default=None, help='attack specific node to desired class')
parser.add_argument('--model_name', type=str, default="baseline", help='model variants name')
parser.add_argument('--indirect_level', type=int, default=0, help='target indirect attack level')
args = parser.parse_args()
return args
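# Illustrative invocation (the script name below is a placeholder for wherever this file is saved):
#   python gnn_attack_main.py --dataset_name cora --attack_graph True \
#       --structure_attack True --added_node_num 20 --train_percent 0.7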
def fix_random_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed) # multi gpu
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.enabled = False
np.random.seed(seed)
def eval_all(model, data):
train_loss, train_acc = utils.evaluate(model, data, data.train_mask)
val_loss, val_acc = utils.evaluate(model, data, data.val_mask)
test_loss, test_acc = utils.evaluate(model, data, data.test_mask)
return [train_loss, test_loss, val_loss, train_acc, test_acc, val_acc]
if __name__ == '__main__':
args = build_args()
print("args", args)
fix_random_seed(seed=args.seed)
ADD_ZERO = 0
# step 1: load baseline dataset_preprocess
data_name = args.dataset_name
if data_name in ["cora", 'photo']:
baseline = CoraDataset('./datasets', data_name, added_new_nodes=ADD_ZERO)
else:
# for dataset_preprocess pubmed, and citeseer
baseline = PlanetoidDataset('./datasets', data_name, added_new_nodes=ADD_ZERO)
split_dataset_name = "baseline_"+data_name+"_split"
split_path = osp.join('./datasets', split_dataset_name, 'train_percent', str(args.train_percent), 'added_node', str(ADD_ZERO))
if not osp.isdir(split_path):
dataset = split_dataset(baseline, ADD_ZERO, train_percent=args.train_percent)
os.makedirs(split_path)
torch.save(baseline, osp.join(split_path, 'split_data.pt'))
else:
baseline = torch.load(osp.join(split_path, 'split_data.pt'))
dim_node = baseline.num_node_features
dim_edge = baseline.num_edge_features
num_classes = baseline.num_classes
baseline_model_ckpt_path = osp.join('checkpoints', data_name, str(args.train_percent), 'GCN_2l', 'seed', '0', 'GCN_2l_best.ckpt')
# step 2: attack
# add new nodes to origin dataset_preprocess
added_node_num = args.added_node_num
added_data_name = data_name + "-added"
if data_name in ["cora", 'photo']:
added_dataset = CoraDataset('./datasets', added_data_name, added_new_nodes=added_node_num)
else:
added_dataset = PlanetoidDataset('./datasets', added_data_name, added_new_nodes=added_node_num)
if args.feature_attack:
added_dataset.data.x[-added_node_num:] = 1
print("feature attack ", added_dataset.data.x[-added_node_num:])
added_indices = torch.as_tensor(list(range(baseline.data.num_nodes, baseline.data.num_nodes+added_node_num)))
add_train_index = torch.cat((baseline.train_index, added_indices), dim=0)
added_dataset.data.train_mask = index_to_mask(add_train_index, size=added_dataset.data.num_nodes)
added_dataset.data.val_mask = index_to_mask(baseline.val_index, size=added_dataset.data.num_nodes)
added_dataset.data.test_mask = index_to_mask(baseline.test_index, size=added_dataset.data.num_nodes)
added_dataset.data, added_dataset.slices = added_dataset.collate([added_dataset.data])
# step 2.1: load model
print(" step 2.1: loading base model for attack")
model = GCN_2l(model_level='node', dim_node=dim_node, dim_hidden=16, num_classes=num_classes)
model.to(device)
model.load_state_dict(torch.load(baseline_model_ckpt_path, map_location=device)['state_dict'])
# step 2.2 attack
attack_graph = args.attack_graph
if attack_graph:
print(" args.structure_attack", args.structure_attack)
attacker = GNNAttack(model, new_node_num=added_node_num, epochs=args.attack_epochs, lr=0.005, attack_graph=attack_graph,
mask_features=args.feature_attack, mask_structure=args.structure_attack, indirect_level=args.indirect_level, random_structure=args.random_structure, random_feature=args.random_feature, args=args)
else:
# print(" random choise one id from test part of the datasete")
# print(" test index is", baseline.test_index[0])
# args.node_idx =
print(" node idx is", args.node_idx)
# args.node_idx = baseline.test_index[0].item()
origin_label = baseline.data.y[args.node_idx]
# args.desired_class = 2
print(" target id is ", args.node_idx, " origin label is", origin_label, "desired label is ", args.desired_class)
if args.node_idx == None and args.desired_class == None:
print(" target attack, please input your target node id, and desired class id")
exit(-1)
attacker = GNNAttack(model, new_node_num=added_node_num, epochs=args.attack_epochs, lr=0.005, attack_graph=attack_graph,
mask_features=args.feature_attack, mask_structure=args.structure_attack, indirect_level=args.indirect_level, random_structure=args.random_structure, random_feature=args.random_feature, args=args)
attacker.to(device)
sparsity = args.sparsity
feat_sparsity = args.feat_sparsity
fix_sparsity = args.fix_sparsity
data = added_dataset.data
data.to(device)
print(" input file args is",args)
walks, structure_masks, feature_masks, structure_sp, feature_sp = attacker(data.x, data.edge_index, data.y, fix_sparsity= fix_sparsity,sparsity=sparsity,feat_sparsity=feat_sparsity,\
num_classes=num_classes)
    print(" structure sparsity =", structure_sp, " feature sparsity = ", feature_sp)
# check train dataset predict shift
# model.eval()
# tmp_list = []
# with torch.no_grad():
# output = model(baseline.data.x, baseline.data.edge_index, None)
# pred_class = torch.argmax(output[args.node_idx], dim=0).item()
# path = f'results/{data_name}/target_attack/added_node_{added_node_num}/train_percent_{args.train_percent}/desired_class_{args.desired_class}'
# if not osp.isdir(path):
# os.makedirs(path)
# print(" pred class is, ", pred_class)
# file = f'{path}/train_model_res.csv'
# cols=["ID", "desired_class", "pred_class", "pred_score"]
# tmp_list.append([args.node_idx, args.desired_class, pred_class, output[args.node_idx]])
# df = pd.DataFrame(tmp_list, columns=cols)
# if not os.path.isfile(file):
# df.to_csv(file, index=False)
# else:
# prev_res = pd.read_csv(file)
# final_res = pd.concat([df, prev_res],ignore_index=True)
# final_res.reset_index()
# final_res.to_csv(file, index=False)
# exit(-2)
# step 2.3 apply learned mask to added_dataset
# step 2.3.1 apply structure mask to dataset_preprocess
print("mask dim", added_dataset.data.num_nodes)
print(" edge index", added_dataset.data.edge_index.shape)
    print(" structure mask is", structure_masks)
filter_indices = (structure_masks[0] == float('inf')).nonzero(as_tuple=True)[0]
print(" filter indices = ", filter_indices)
    print(" filter indices", filter_indices.shape)
edge_index_with_loop, _ = add_self_loops(added_dataset.data.edge_index, num_nodes=added_dataset.data.num_nodes)
added_dataset.data.edge_index = edge_index_with_loop
print("dataset_preprocess.data.edge_index", added_dataset.data.edge_index.shape)
added_dataset.data.edge_index = torch.index_select(added_dataset.data.edge_index, 1, filter_indices.to(device))
print("after filter dataset_preprocess.data.edge_index", added_dataset.data.edge_index.shape)
# step 2.3.2 apply feature mask to added_dataset
if attacker.mask_features:
added_dataset.data.x[-added_node_num:] *= feature_masks[0]
# step 3: retrain model in changed dataset_preprocess
del model
model = GCN_2l(model_level='node', dim_node=added_dataset.num_node_features, dim_hidden=16,num_classes=added_dataset.num_classes)
model.to(device)
attack_ckpt_fold = osp.join('attack_checkpoints', data_name, str(added_node_num), 'GCN_2l')
if not osp.isdir(attack_ckpt_fold):
os.makedirs(attack_ckpt_fold)
attack_ckpt_path = osp.join(attack_ckpt_fold, 'GCN_2l_best.ckpt')
utils.train(model, added_dataset.data, attack_ckpt_path, lr=0.005, epochs=args.train_epochs,verbose=True)
# [_, _, _, train_acc, test_acc, val_acc] = eval_all(model, added_dataset.data)
if not args.attack_graph:
path = f'results/target_attack/{data_name}/added_node_{added_node_num}/train_percent_{args.train_percent}/desired_class_{args.desired_class}'
if not osp.isdir(path):
os.makedirs(path)
model.eval()
with torch.no_grad():
output = model(added_dataset.data.x, added_dataset.data.edge_index, None)
success = None
print(" node idx = ", args.node_idx)
print(" output shape ", output.shape, output[args.node_idx], type(output[args.node_idx]))
pred_class = torch.argmax(output[args.node_idx], dim=0).item()
print(" pred class = ", pred_class)
origin = added_dataset.data.y[args.node_idx]
print(" origin", origin, "desired class", args.desired_class)
cols = ["id", "pred_class", "desired_class", "success", "vis_path", "structure_sp", "feature_sp", "pred_score"]
tmp_list = []
vis_file = None
if pred_class == args.desired_class:
success = True
vis_file = f'{path}/target_attack_{str(args.node_idx)}_{str(structure_sp)}_feature_sparsity_{str(feature_sp)}_dataset.pkl'
utils.save_to_file([added_dataset.data.edge_index.to('cpu'), torch.argmax(output.to('cpu'), dim=1), added_dataset.data.x[:]], vis_file)
baseline_fold = osp.join('./results/target_attack', data_name)
baseline_vis_file = f'{baseline_fold}/train_percent_{args.train_percent}_baseline_A_X_res.pkl'
# plot 1-hop and 2-hop figures center by node id
with open(baseline_vis_file, 'rb') as f:
edge_indx, pred, att = pickle.load(f)
utils.viz_k_hop_op(edge_indx, pred, args.node_idx, 1, path, f'origin_center_node_{str(args.node_idx)}_hops_{str(1)}')
utils.viz_k_hop_op(edge_indx, pred, args.node_idx, 2, path, f'origin_center_node_{str(args.node_idx)}_hops_{str(2)}')
with open(vis_file, 'rb') as f:
attack_edge_indx, attack_pred, attack_att = pickle.load(f)
utils.viz_k_hop_op(attack_edge_indx, attack_pred, args.node_idx, 1, path,
f'attack_center_node_{str(args.node_idx)}_hops_{str(1)}')
utils.viz_k_hop_op(attack_edge_indx, attack_pred, args.node_idx, 2, path,
f'attack_center_node_{str(args.node_idx)}_hops_{str(2)}')
else:
success = False
tmp_list.append([args.node_idx, pred_class, args.desired_class, success, vis_file, structure_sp, feature_sp, output[args.node_idx]])
df = pd.DataFrame(tmp_list,columns=cols)
file = f'{path}/res.csv'
if not os.path.isfile(file):
df.to_csv(file, index=False)
else:
prev_res = pd.read_csv(file)
final_res = pd.concat([df, prev_res],ignore_index=True)
final_res.reset_index()
final_res.to_csv(file, index=False) | python |
import sympy
class Curtis:
type = 0
# module for computing zUy and UxU
deodhar = 0
# Bruhat form
bruhat = 0
# the Chevalley group
group = 0
# the Weyl group
weyl = 0
# standard parabolics
para = 0
# distinguished expressions for standard parabolics
dist_expr_p = 0
# Deodhar cells
D = 0
# Deodhar cells DI-form
DI = 0
# Deodhar cells in zUyi form
zUyi = 0
# Deodhar cells in UxU form
UxU = 0
# the toral elements for the basis of the Hecke algebra of a GG-rep
# given explicitly in derived classes
tori = []
# a second list of the same tori with "primed" variables
tori2 = []
# a third list of the same tori with "double primed" variables
tori3 = []
def __init__(self, t):
self.type = t
self.deodhar = self.type.deodhar
self.bruhat = self.type.bruhat
self.group = self.type.group
self.weyl = self.type.weyl
self.para = self.type.parabolics
self.dist_expr_p = self.extract_para_dist_expr()
# needs dist_expr_p:
# self.load_cells()
"""
Selecting those distinguished expressions corresponding to
standard parabolic subgroups
"""
def extract_para_dist_expr(self):
de = self.weyl.dist_expr
w0w = self.para.w0w
result = []
for i in range(len(de)):
e = de[i]
if e[0][0] in w0w and \
e[0][1] in w0w and \
e[0][2] in w0w:
result.append(e + [i])
return result
"""
Select cells corresponding to dist_expr_p
--- needs dist_expr_p
"""
def load_cells(self):
dep = self.dist_expr_p
self.D = []
self.DI = []
self.zUyi = []
self.UxU = []
for e in dep:
pos = e[len(e) - 1]
tmpD = []
tmpDI = []
tmpzUyi = []
tmpUxU = []
for j in range(len(e[1])):
# D and zUyi
uyiu = self.deodhar.cell_UyiU(pos, j)
tmpzUyi.append(uyiu)
# DI and UxU
uxu = self.deodhar.cell_Ux(pos, j)
tmpUxU.append(uxu)
self.D.append(tmpD)
self.DI.append(tmpDI)
self.zUyi.append(tmpzUyi)
self.UxU.append(tmpUxU)
"""
prepare the two forms of the cell
"""
def prepare_zUy_UxU(self, ii, j):
de = self.weyl.dist_expr
x = de[ii][0][0]
y = de[ii][0][1]
z = de[ii][0][2]
nx = self.group.w_to_n(self.weyl.word(x))
ny = self.group.w_to_n(self.weyl.word(y))
nz = self.group.w_to_n(self.weyl.word(z))
ty = self.para.w0w.index(y)
ty = self.tori2[ty]
tyi = self.group.invert(ty)
ytyi = self.group.conjugate_left(ny, tyi)
tz = self.para.w0w.index(z)
tz = self.tori3[tz]
ztz = self.group.conjugate_left(nz, tz)
uyiu = self.deodhar.cell_UyiU(ii, j)
uxu = self.deodhar.cell_Ux(ii, j)
uyiu = self.bruhat.split_strict_Bruhat(uyiu, n_coef=-1)
ytyi0 = ytyi + self.group.invert(uyiu[2])
uxu = self.bruhat.split_strict_Bruhat(uxu)
uxu[0] = self.group.conjugate_left(ztz, uxu[0])
ztzx = self.group.conjugate_right(ztz, nx)
if nx != uxu[1]:
print("curtis.prepare_zUy_UxU: this should not be!")
uxu[3] = uxu[3] + self.group.invert(uyiu[3])
uxu[3] = self.group.conjugate_right(uxu[3], ytyi0)
uxu[2] = uxu[2] + ztzx + ytyi0
uy = uyiu[0] + uyiu[1]
uxu = uxu[0] + uxu[1] + self.group.canonic_th(uxu[2]) + self.group.canonic_u(uxu[3])
for i in range(len(uy)):
uy[i] = [uy[i][0], uy[i][1], sympy.simplify(uy[i][2])]
for i in range(len(uxu)):
uxu[i] = [uxu[i][0], uxu[i][1], sympy.simplify(uxu[i][2])]
return [uy, uxu]
"""
Get condition for toral elements to represent the same cell
--- we need t0 in zUyi*t0
--- we need t00 in Uxt00U
[z*tz][U][(y*ty)^-1]t
= [tz^(z^-1)][z][U][y^-1][(ty^-1)^(y^-1)]
= [tz^(z^-1)][zUyi][t0^-1][(ty^-1)^(y^-1)]
= [tz^(z^-1)][UxU][t0^-1][(ty^-1)^(y^-1)]
= [tz^(z^-1)][U][x][t00][U][t0^-1][(ty^-1)^(y^-1)]
"""
def structure_equation(self, i, j):
x = self.dist_expr_p[i][0][0]
y = self.dist_expr_p[i][0][1]
z = self.dist_expr_p[i][0][2]
        # copy these so we don't modify the originals
zUyi = [list(e) for e in self.zUyi[i][j]]
UxU = [list(e) for e in self.UxU[i][j]]
xx = self.weyl.word(x)
xx = self.group.w_to_n(xx)
yy = self.weyl.word(y)
yy = self.group.w_to_n(yy)
zz = self.weyl.word(z)
zz = self.group.w_to_n(zz)
#
# toral part for y
#
# the order is important
# this is the correct order to get t0 on the right
t0 = yy + zUyi[1] + zUyi[2]
t0 = self.group.canonic_nt(t0)
if not self.group.all_t(t0):
print("curtis.structure_equation: This should not be! (t0)")
#
# toral part for x
#
xxi = self.group.invert(xx)
# the order is important
# this is the correct order to get t0 on the right
t00 = xxi + UxU[1] + UxU[2]
t00 = self.group.canonic_nt(t00)
if not self.group.all_t(t00):
print("curtis.structure_equation: This should not be! (t00)")
#
# tz and ty
#
tz = self.para.w0w.index(z)
# use the second set of variables for z
tz = self.tori2[tz]
ty = self.para.w0w.index(y)
ty = self.tori[ty]
# bring to other form
# left U
zztz = self.group.conjugate_left(zz, tz)
UxU[0] = self.group.conjugate_left(zztz, UxU[0])
xxizztz = self.group.conjugate_right(zztz, xxi)
# right U
t0i = self.group.invert(t0)
UxU[3] = self.group.conjugate_right(UxU[3], t0i)
tyi = self.group.invert(ty)
yytyi = self.group.conjugate_left(yy, tyi)
UxU[3] = self.group.conjugate_right(UxU[3], yytyi)
tt = xxizztz + t00 + t0i + yytyi
tt = self.group.canonic_t(tt)
return [tt, zUyi, UxU]
"""
Truncate the unipotent part
and bring the two forms of the cells in the right form for
the structure constants of the Hecke algebra of a GG-rep
"""
def Hecke_GG_form(self, i, j):
[tt, zUyi, UxU] = self.structure_equation(i, j)
Uyz = self.group.truncate_u_sr(zUyi[0])
#
# just added !!! non-standard
#
# no Uyz=self.group.invert(Uyz)
# no Uyz=self.group.canonic_u(Uyz)
# no Uyz=self.group.truncate_u_sr(Uyz)
Ux_left = self.group.truncate_u_sr(UxU[0])
Ux_right = self.group.truncate_u_sr(UxU[3])
Ux = Ux_left + Ux_right
Ux = self.group.invert(Ux)
Ux = self.group.canonic_u(Ux)
Ux = self.group.truncate_u_sr(Ux)
U = Ux + Uyz
U = self.group.canonic_u(U)
U = self.group.truncate_u_sr(U)
return [tt, zUyi, UxU, U]
"""
Produce a report for the j-th cell in the i-th case
"""
def report(self, i, j):
[uy, uxu] = self.prepare_zUy_UxU(i, j)
uy = self.bruhat.split_strict_Bruhat(uy, n_coef=-1)
uxu = self.bruhat.split_strict_Bruhat(uxu)
de = self.weyl.dist_expr[i]
word = self.weyl.word
latex = self.group.latex
truncate = self.group.truncate_u_sr
print("############################")
print("CASE: ", i, j)
print("CONFIGURATION: ", de[0])
print("DIST EXPR: ", de[1][j])
print("------------------")
print("Z: ", word(de[0][2]))
print("Y: ", word(de[0][1]))
print("X: ", word(de[0][0]))
print("------------------")
print("U in zUyi:")
print("U1: ", latex(truncate(uy[0])))
print("U in UxU:")
print(uxu)
print("U2: ", latex(truncate(uxu[0])))
print("U3: ", latex(truncate(uxu[3])))
print("------------------")
print("Condition on toral element:")
print("A) ", latex(uxu[2]))
print("------------------")
print("U to evaluate psi on:")
Ux_left = truncate(uxu[0])
Ux_right = truncate(uxu[3])
Ux = Ux_left + Ux_right
Ux = self.group.invert(Ux)
Ux = self.group.canonic_u(Ux)
Ux = truncate(Ux)
U = Ux + uy[0]
U = self.group.canonic_u(U)
U = truncate(U)
U = self.group.simplify_params(U)
print(U)
print(latex(U))
print("############################")
"""
Produce a report for the j-th cell in the i-th case
"""
def report_file(self, i, j):
f_name = "data/" + self.type.label + "/reports/" + str(i) + str(j) + ".rep"
f_name = f_name.lower()
f = open(f_name, "w")
# [tt,zUyi,UxU,U]=self.Hecke_GG_form(i,j)
[uy, uxu] = self.prepare_zUy_UxU(i, j)
uy = self.bruhat.split_strict_Bruhat(uy, n_coef=-1)
uxu = self.bruhat.split_strict_Bruhat(uxu)
de = self.weyl.dist_expr[i]
word = self.weyl.word
latex = self.group.latex
truncate = self.group.truncate_u_sr
f.write("############################\n")
f.write("CASE: " + str(i) + str(j) + "\n")
f.write("CONFIGURATION: " + str(de[0]) + "\n")
f.write("DIST EXPR: " + str(de[1][j]) + "\n")
f.write("------------------")
f.write("Z: " + str(word(de[0][2])) + "\n")
# f.write("Y^-1t0: ",zUyi[1]+zUyi[2])
f.write("Y: " + str(word(de[0][1])) + "\n")
# f.write("Xt00: ",UxU[1]+UxU[2])
f.write("X: " + str(word(de[0][0])) + "\n")
f.write("------------------\n")
f.write("U in zUyi:")
f.write("U1: " + latex(truncate(uy[0])) + "\n")
f.write("U2: " + latex(truncate(uxu[0])) + "\n")
f.write("U in UxU:")
f.write("U3: " + latex(truncate(uxu[3])) + "\n")
f.write("------------------\n")
f.write("Condition on toral element:\n")
f.write("A) " + latex(uxu[2]) + "\n")
f.write("------------------\n")
f.write("U to evaluate psi on:\n")
Ux_left = truncate(uxu[0])
Ux_right = truncate(uxu[3])
Ux = Ux_left + Ux_right
Ux = self.group.invert(Ux)
Ux = self.group.canonic_u(Ux)
Ux = truncate(Ux)
U = Ux + uy[0]
U = self.group.canonic_u(U)
U = truncate(U)
U = self.group.simplify_params(U)
f.write(latex(U) + "\n")
f.write("############################\n")
f.close()
"""
Returns the index in the list dist_expr_p of the case c
"""
def index(self, c):
de = self.dist_expr_p
tmp = [i[0] for i in de]
return tmp.index(c)
def latex_dist_expr(self, i, j):
de = self.weyl.dist_expr[i][1][j]
result = "$" + str([i + 1 for i in de[0]]) + "$"
result += " (of type "
t = ""
vari = ""
for k in range(len(de[0])):
if k in de[1][0]:
t += "A"
vari += "$x_{" + str(k + 1) + "}\in k$, "
elif k in de[1][1]:
t += "B"
vari += "$x_{" + str(k + 1) + "}\in k^{\\ast}$, "
elif k in de[1][2]:
t += "C"
vari += "$x_{" + str(k + 1) + "}=1$, "
else:
print("curtis.latex_dist_expr: this should not be!")
return
result += t + ") " + vari
return result
"""
Produce a report for the j-th cell in the i-th case
"""
def report_latex(self, i):
ii = self.dist_expr_p[i][2]
w0w = list(self.para.w0w)
        #
        # note: the entries at indices 2 and 3 of w0w are swapped here
        #
tmp = w0w[3]
w0w[3] = w0w[2]
w0w[2] = tmp
case = [w0w.index(k) for k in self.dist_expr_p[i][0]]
case_str = "".join([str(k) for k in case])
fname = "latex/" + self.type.label + "/" + case_str + ".tex"
f = open(fname, "w+")
f.write("\subsection{" + case_str + "}\n")
f.write("\label{" + case_str + "}\n")
for j in range(len(self.dist_expr_p[i][1])):
f.write(self.latex_dist_expr(ii, j) + ":\n")
self.report_latex_sub(ii, j, f, [self.para.w0w.index(k) for k in self.dist_expr_p[i][0]]) # case)
other_case = case_str[1] + case_str[0] + case_str[2]
f.write("Should equal \eqref{" + other_case + "}\n")
f.close()
def report_latex_sub(self, i, j, f, case):
# [tt,zUyi,UxU,U]=self.Hecke_GG_form(i,j)
[uy, uxu] = self.prepare_zUy_UxU(i, j)
uy = self.bruhat.split_strict_Bruhat(uy, n_coef=-1)
uxu = self.bruhat.split_strict_Bruhat(uxu)
latex = self.group.latex
truncate = self.group.truncate_u_sr
f.write("$$" + latex(self.tori[case[0]]) + "=" + latex(uxu[2]) + "$$\n")
Ux_left = truncate(uxu[0])
Ux_right = truncate(uxu[3])
Ux = Ux_left + Ux_right
Ux = self.group.invert(Ux)
Ux = self.group.canonic_u(Ux)
Ux = truncate(Ux)
U = Ux + uy[0]
U = self.group.canonic_u(U)
U = truncate(U)
U = self.group.simplify_params(U)
f.write("$$\sum\psi(" + latex(U) + ")$$\n")
def report_latex_files(self):
w0w = list(self.para.w0w)
        # note: the entries at indices 2 and 3 of w0w are swapped here
        #
tmp = w0w[3]
w0w[3] = w0w[2]
w0w[2] = tmp
result = []
for i in range(len(self.dist_expr_p)):
case = [w0w.index(k) for k in self.dist_expr_p[i][0]]
case_str = "".join([str(k) for k in case])
result.append("\\input{" + self.type.label + "/" + case_str + ".tex}\n")
return result
def report_latex_all(self):
for i in range(len(self.dist_expr_p)):
self.report_latex(i)
def report_poly(self, ii, j):
i = self.dist_expr_p[ii][2]
[uy, uxu] = self.prepare_zUy_UxU(i, j)
uy = self.bruhat.split_strict_Bruhat(uy, n_coef=-1)
uxu = self.bruhat.split_strict_Bruhat(uxu)
truncate = self.group.truncate_u_sr
result = []
result += [[self.tori[self.para.w0w.index(self.dist_expr_p[ii][0][0])], uxu[2]]]
Ux_left = truncate(uxu[0])
Ux_right = truncate(uxu[3])
Ux = Ux_left + Ux_right
Ux = self.group.invert(Ux)
Ux = self.group.canonic_u(Ux)
Ux = truncate(Ux)
U = Ux + uy[0]
U = self.group.canonic_u(U)
U = truncate(U)
U = self.group.simplify_params(U)
poly = []
for u in U:
poly += [u[2]]
result += [poly]
return result
| python |
from django.http import HttpResponse, Http404
from django.shortcuts import render
from django.template import TemplateDoesNotExist
from django.template.loader import get_template
from django.contrib.auth.views import LoginView, LogoutView
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic.edit import UpdateView, CreateView
from django.contrib.messages.views import SuccessMessageMixin
from django.urls import reverse_lazy
from django.shortcuts import get_object_or_404
from django.views.generic.base import TemplateView
from .models import AdvUser
from .forms import ChangeUserInfoForm
from django.contrib.auth.views import PasswordChangeView
from .forms import RegisterUserForm
from django.core.signing import BadSignature
from .utilities import signer
def index(request):
return render(request, 'main/index.html')
def other_page(request, page):
try:
template = get_template('main/' + page + '.html')
except TemplateDoesNotExist:
raise Http404
return HttpResponse(template.render(request=request))
class BBLoginView(LoginView):
template_name = 'main/login.html'
@login_required
def profile(request):
return render(request, 'main/profile.html')
class BBLogoutView(LoginRequiredMixin, LogoutView):
template_name = 'main/logout.html'
class ChangeUserInfoView(SuccessMessageMixin, LoginRequiredMixin, UpdateView):
model = AdvUser
template_name = 'main/change_user_info.html'
form_class = ChangeUserInfoForm
success_url = reverse_lazy('main:profile')
    success_message = 'User personal data updated'
def dispatch(self, request, *args, **kwargs):
self.user_id = request.user.pk
return super().dispatch(request, *args, **kwargs)
def get_object(self, queryset=None):
if not queryset:
queryset = self.get_queryset()
return get_object_or_404(queryset, pk=self.user_id)
class BBPasswordChangeView(SuccessMessageMixin, LoginRequiredMixin, PasswordChangeView):
template_name = 'main/password_change.html'
success_url = reverse_lazy('main:profile')
    success_message = 'User password changed'
class RegisterUserView(CreateView):
model = AdvUser
template_name = 'main/register_user.html'
form_class = RegisterUserForm
success_url = reverse_lazy('main:register_done')
class RegisterDoneView(TemplateView):
template_name = 'main/register_done.html'
def user_activate(request, sign):
try:
username = signer.unsign(sign)
except BadSignature:
return render(request, 'main/bad_signature.html')
user = get_object_or_404(AdvUser, username=username)
if user.is_activated:
template = 'main/user_is_activated.html'
else:
template = 'main/activation_done.html'
user.is_active = True
user.is_activated = True
user.save()
return render(request, template)
| python |
"""Updating max length of s3_name in account table
Revision ID: 1727fb4309d8
Revises: 51170afa2b48
Create Date: 2015-07-06 12:29:48.859104
"""
# revision identifiers, used by Alembic.
revision = '1727fb4309d8'
down_revision = '51170afa2b48'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.alter_column('account', 's3_name', type_=sa.VARCHAR(64), existing_type=sa.VARCHAR(length=32), nullable=True)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.alter_column('account', 's3_name', type_=sa.VARCHAR(32), existing_type=sa.VARCHAR(length=64), nullable=True)
### end Alembic commands ###
| python |
# building a Fibonacci sequence
# the next number is always the sum of the two previous ones
print('Sequencia de Fibonacci')
print('--'*20)
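# For example, the first five terms of the sequence are 0, 1, 1, 2, 3.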
# ask the user for a number
n = int(input('Quantos termos voce quer mostrar: '))
# first term
t1 = 0
# second term
t2 = 1
# show the first two terms
print(f'{t1} -> {t2}', end='')
# the counter starts at 3 because two terms have already been shown
cont = 3
# keep going until the requested number of terms has been shown
while cont <= n:
    # the next term is the sum of the two previous ones
    t3 = t1 + t2
    # show the next term
    print(f' -> {t3}', end='')
    # increment the counter so the loop eventually ends
    cont += 1
    # shift the terms forward for the next sum
    t1 = t2
    t2 = t3
print(' -> Fim') | python |
from org.transcrypt.stubs.browser import *
import random
array = []
def gen_random_int(number, seed):
my_list = [i for i in range(number)]
random.seed(seed)
random.shuffle(my_list)
result = my_list
return result
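# Illustrative example: gen_random_int(5, 42) returns the numbers 0..4 in a shuffled
# order, and calling it again with the same seed reproduces the same order.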
def generate():
global array
number = 10
seed = 200
    # call gen_random_int() with the given number and seed
    # store it to the global variable array
    array = gen_random_int(number, seed)
# convert the items into one single string
# the number should be separated by a comma
# and a full stop should end the string.
array_str = ','.join([str(i) for i in array]) + '.'
console.log(array, "\n", array_str)
# This line is to placed the string into the HTML
# under div section with the id called "generate"
document.getElementById("generate").innerHTML = array_str
def sortnumber1():
''' This function is used in Exercise 1.
The function is called when the sort button is clicked.
You need to do the following:
- get the list of numbers from the global variable array and
copy it to a new list
- call your sort function, either bubble sort or insertion sort
- create a string of the sorted numbers and store it in array_str
'''
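    # Bubble sort on the global array: repeatedly compare adjacent elements
    # and swap them when they are out of order.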
n = len(array)
for i in range(1, n):
for j in range(1, n):
first_num_index = j - 1
second_num_index = j
if array[first_num_index] > array[second_num_index]:
array[first_num_index], array[second_num_index] = array[second_num_index], array[first_num_index]
array_str = ','.join([str(i) for i in array]) + '.'
document.getElementById("sorted").innerHTML = array_str
def sortnumber2():
''' This function is used in Exercise 2.
The function is called when the sort button is clicked.
You need to do the following:
- Get the numbers from a string variable "value".
- Split the string using comma as the separator and convert them to
a list of numbers
- call your sort function, either bubble sort or insertion sort
- create a string of the sorted numbers and store it in array_str
'''
# The following line get the value of the text input called "numbers"
value = document.getElementsByName("numbers")[0].value
# Throw alert and stop if nothing in the text input
if value == "":
window.alert("Your textbox is empty")
return
else:
value = value.split(",")
# Your code should start from here
# store the final string to the variable array_str
    # convert the comma-separated strings to integers and bubble-sort them
    numbers = [int(v) for v in value]
    n = len(numbers)
    for i in range(1, n):
        for j in range(1, n):
            first_num_index = j - 1
            second_num_index = j
            if numbers[first_num_index] > numbers[second_num_index]:
                numbers[first_num_index], numbers[second_num_index] = numbers[second_num_index], numbers[first_num_index]
    array_str = ','.join([str(i) for i in numbers]) + '.'
document.getElementById("sorted").innerHTML = array_str | python |
import math
import random
import itertools
import collections
import numpy as np
def grouper(lst, num):
args = [iter(lst)]*num
out = itertools.zip_longest(*args, fillvalue=None)
out = list(out)
return out
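# Illustrative doctest-style example:
# >>> grouper([1, 2, 3, 4, 5], 2)
# [(1, 2), (3, 4), (5, None)]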
def get_batch(batch_data, config, rot='_rot'):
"""Given a batch of data, determine the input and ground truth."""
N = len(batch_data['obs_traj_rel'+rot])
P = config.P
if hasattr(config, 'flow_size'):
OF = config.flow_size
returned_inputs = []
traj_obs_gt = np.zeros([N, config.obs_len, P], dtype='float32')
traj_pred_gt = np.zeros([N, config.pred_len, P], dtype='float32')
# --- xy input
for i, (obs_data, pred_data) in enumerate(zip(batch_data['obs_traj_rel'+rot],
batch_data['pred_traj_rel'+rot])):
for j, xy in enumerate(obs_data):
traj_obs_gt[i, j, :] = xy
for j, xy in enumerate(pred_data):
traj_pred_gt[i, j, :] = xy
returned_inputs.append(traj_obs_gt)
# ------------------------------------------------------
# Social component (through optical flow)
if hasattr(config, 'add_social') and config.add_social:
obs_flow = np.zeros((N,config.obs_len, OF),dtype ='float32')
# each batch
for i, flow_seq in enumerate(batch_data['obs_optical_flow']):
for j , flow_step in enumerate(flow_seq):
obs_flow[i,j,:] = flow_step
returned_inputs.append(obs_flow)
# -----------------------------------------------------------
return returned_inputs,traj_pred_gt
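# Shape note (illustrative; P is the number of coordinates per step, e.g. P=2 for x/y):
# returned_inputs[0] is (N, obs_len, P) and traj_pred_gt is (N, pred_len, P); when
# config.add_social is set, returned_inputs also holds an (N, obs_len, flow_size) array.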
| python |
"""This program searches through an email file and returns the sender email and date of sending """
user_input = input('Enter filename: ')
fhand = open(user_input)
for line in fhand:
line = line.rstrip()
if not line.startswith('From '): continue
words = line.split()
# print(words)
print(words[1:5], words[6])
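# Illustrative example with an assumed mbox-style line
#   "From stephen.marquard@uct.ac.za Sat Jan  5 09:14:16 2008"
# the script prints: ['stephen.marquard@uct.ac.za', 'Sat', 'Jan', '5'] 2008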
| python |
import oi
import os
import sys
import logging
from logging.handlers import SysLogHandler
import time
import service
try:
import config
except ImportError:
import example1.config as config
def stop_function():
ctl = oi.CtlProgram('ctl program', config.ctl_url)
ctl.call('stop')
ctl.client.close()
class Service(service.Service):
def __init__(self, *args, **kwargs):
super(Service, self).__init__(*args, **kwargs)
self.syslog_handler = SysLogHandler(
address=service.find_syslog(),
facility=SysLogHandler.LOG_DAEMON
)
formatter = logging.Formatter(
'%(name)s - %(levelname)s - %(message)s')
self.syslog_handler.setFormatter(formatter)
logging.getLogger().addHandler(self.syslog_handler)
def run(self):
try:
from scheduler import setup_scheduler, scheduler
except ImportError:
from example1.scheduler import setup_scheduler, scheduler
while not self.got_sigterm():
logging.info("Starting")
self.program = oi.Program('example1', config.ctl_url)
self.program.logger = self.logger
self.program.add_command('ping', lambda: 'pong')
self.program.add_command('state', lambda: self.program.state)
def restart():
logging.warning('Restarting')
self.program.continue_event.set()
self.program.restart = restart
setup_scheduler(self.program)
if hasattr(config, 'register_hook'):
config.register_hook(
ctx=dict(
locals=locals(),
globals=globals(),
program=self.program
)
)
self.program.run()
logging.warning("Stopping")
scheduler.shutdown()
if not self.program.continue_event.wait(0.1):
break
self.stop()
os.unlink('/tmp/demo.pid')
os.execl(sys.executable, sys.argv[0], 'start')
if self.got_sigterm():
self.program.stop_function()
def main_ctl():
ctl = oi.CtlProgram('ctl program', config.ctl_url)
ctl.run()
def main_d():
program = oi.Program('example1', config.ctl_url)
program.add_command('ping', lambda: 'pong')
program.add_command('state', lambda: program.state)
try:
from scheduler import setup_scheduler, scheduler
except ImportError:
from example1.scheduler import setup_scheduler, scheduler
setup_scheduler(program)
if hasattr(config, 'register_hook'):
config.register_hook(
ctx=dict(
locals=locals(),
globals=globals(),
program=program
)
)
program.run()
scheduler.shutdown()
def main_svc():
import sys
if len(sys.argv) < 2:
sys.exit('Syntax: %s COMMAND' % sys.argv[0])
cmd = sys.argv[1]
sys.argv.remove(cmd)
service = Service('example1', pid_dir='/tmp')
if cmd == 'start':
service.start()
elif cmd == 'stop':
service.stop()
stop_function()
elif cmd == 'restart':
service.stop()
stop_function()
while service.is_running():
time.sleep(0.1)
service.start()
elif cmd == 'status':
if service.is_running():
print "Service is running."
else:
print "Service is not running."
else:
sys.exit('Unknown command "%s".' % cmd)
def main():
prog_name = sys.argv[0].lower()
if prog_name.endswith('.exe'):
prog_name = prog_name[:-4]
if prog_name.endswith('svc'):
main_svc()
elif prog_name.endswith('d'):
main_d()
else:
main_ctl()
if __name__ == '__main__':
if hasattr(config, 'main_hook'):
if not config.main_hook(
ctx=dict(
locals=locals(),
globals=globals()
)
):
main()
else:
main()
| python |
# (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
METRICS = (
'hazelcast.instance.managed_executor_service.completed_task_count',
'hazelcast.instance.managed_executor_service.is_shutdown',
'hazelcast.instance.managed_executor_service.is_terminated',
'hazelcast.instance.managed_executor_service.maximum_pool_size',
'hazelcast.instance.managed_executor_service.pool_size',
'hazelcast.instance.managed_executor_service.queue_size',
'hazelcast.instance.managed_executor_service.remaining_queue_capacity',
'hazelcast.instance.member_count',
'hazelcast.instance.partition_service.active_partition_count',
'hazelcast.instance.partition_service.is_cluster_safe',
'hazelcast.instance.partition_service.is_local_member_safe',
'hazelcast.instance.partition_service.partition_count',
'hazelcast.instance.running',
'hazelcast.mc.license_expiration_time',
'hazelcast.member.accepted_socket_count',
'hazelcast.member.active_count',
'hazelcast.member.active_members',
'hazelcast.member.active_members_commit_index',
'hazelcast.member.async_operations',
'hazelcast.member.available_processors',
'hazelcast.member.backup_timeout_millis',
'hazelcast.member.backup_timeouts',
'hazelcast.member.bytes_read',
'hazelcast.member.bytes_received',
'hazelcast.member.bytes_send',
'hazelcast.member.bytes_transceived',
'hazelcast.member.bytes_written',
'hazelcast.member.call_timeout_count',
'hazelcast.member.client_count',
'hazelcast.member.closed_count',
'hazelcast.member.cluster_start_time',
'hazelcast.member.cluster_time',
'hazelcast.member.cluster_time_diff',
'hazelcast.member.cluster_up_time',
'hazelcast.member.commit_count',
'hazelcast.member.committed_heap',
'hazelcast.member.committed_native',
'hazelcast.member.committed_virtual_memory_size',
'hazelcast.member.completed_count',
'hazelcast.member.completed_migrations',
'hazelcast.member.completed_operation_batch_count',
'hazelcast.member.completed_operation_count',
'hazelcast.member.completed_packet_count',
'hazelcast.member.completed_partition_specific_runnable_count',
'hazelcast.member.completed_runnable_count',
'hazelcast.member.completed_task_count',
'hazelcast.member.completed_tasks',
'hazelcast.member.completed_total_count',
'hazelcast.member.connection_listener_count',
'hazelcast.member.connection_type',
'hazelcast.member.count',
'hazelcast.member.created_count',
'hazelcast.member.daemon_thread_count',
'hazelcast.member.delayed_execution_count',
'hazelcast.member.destroyed_count',
'hazelcast.member.destroyed_group_ids',
'hazelcast.member.elapsed_destination_commit_time',
'hazelcast.member.elapsed_migration_operation_time',
'hazelcast.member.elapsed_migration_time',
'hazelcast.member.error_count',
'hazelcast.member.event_count',
'hazelcast.member.event_queue_size',
'hazelcast.member.events_processed',
'hazelcast.member.exception_count',
'hazelcast.member.failed_backups',
'hazelcast.member.frames_transceived',
'hazelcast.member.free_heap',
'hazelcast.member.free_memory',
'hazelcast.member.free_native',
'hazelcast.member.free_physical',
'hazelcast.member.free_physical_memory_size',
'hazelcast.member.free_space',
'hazelcast.member.free_swap_space_size',
'hazelcast.member.generic_priority_queue_size',
'hazelcast.member.generic_queue_size',
'hazelcast.member.generic_thread_count',
'hazelcast.member.groups',
'hazelcast.member.heartbeat_broadcast_period_millis',
'hazelcast.member.heartbeat_packets_received',
'hazelcast.member.heartbeat_packets_sent',
'hazelcast.member.idle_time_millis',
'hazelcast.member.idle_time_ms',
'hazelcast.member.imbalance_detected_count',
'hazelcast.member.in_progress_count',
'hazelcast.member.invocation_scan_period_millis',
'hazelcast.member.invocation_timeout_millis',
'hazelcast.member.invocations.last_call_id',
'hazelcast.member.invocations.pending',
'hazelcast.member.invocations.used_percentage',
'hazelcast.member.io_thread_id',
'hazelcast.member.last_heartbeat',
'hazelcast.member.last_repartition_time',
'hazelcast.member.listener_count',
'hazelcast.member.loaded_classes_count',
'hazelcast.member.local_clock_time',
'hazelcast.member.local_partition_count',
'hazelcast.member.major_count',
'hazelcast.member.major_time',
'hazelcast.member.max_backup_count',
'hazelcast.member.max_cluster_time_diff',
'hazelcast.member.max_file_descriptor_count',
'hazelcast.member.max_heap',
'hazelcast.member.max_memory',
'hazelcast.member.max_metadata',
'hazelcast.member.max_native',
'hazelcast.member.maximum_pool_size',
'hazelcast.member.member_groups_size',
'hazelcast.member.migration_active',
'hazelcast.member.migration_completed_count',
'hazelcast.member.migration_queue_size',
'hazelcast.member.minor_count',
'hazelcast.member.minor_time',
'hazelcast.member.missing_members',
'hazelcast.member.monitor_count',
'hazelcast.member.nodes',
'hazelcast.member.normal_frames_read',
'hazelcast.member.normal_frames_written',
'hazelcast.member.normal_pending_count',
'hazelcast.member.normal_timeouts',
'hazelcast.member.open_file_descriptor_count',
'hazelcast.member.opened_count',
'hazelcast.member.operation_timeout_count',
'hazelcast.member.owner_id',
'hazelcast.member.packets_received',
'hazelcast.member.packets_send',
'hazelcast.member.park_queue_count',
'hazelcast.member.partition_thread_count',
'hazelcast.member.peak_thread_count',
'hazelcast.member.planned_migrations',
'hazelcast.member.pool_size',
'hazelcast.member.priority_frames_read',
'hazelcast.member.priority_frames_transceived',
'hazelcast.member.priority_frames_written',
'hazelcast.member.priority_pending_count',
'hazelcast.member.priority_queue_size',
'hazelcast.member.priority_write_queue_size',
'hazelcast.member.process_count',
'hazelcast.member.process_cpu_load',
'hazelcast.member.process_cpu_time',
'hazelcast.member.proxy_count',
'hazelcast.member.publication_count',
'hazelcast.member.queue_capacity',
'hazelcast.member.queue_size',
'hazelcast.member.rejected_count',
'hazelcast.member.remaining_queue_capacity',
'hazelcast.member.replica_sync_requests_counter',
'hazelcast.member.replica_sync_semaphore',
'hazelcast.member.response_queue_size',
'hazelcast.member.responses.backup_count',
'hazelcast.member.responses.error_count',
'hazelcast.member.responses.missing_count',
'hazelcast.member.responses.normal_count',
'hazelcast.member.responses.timeout_count',
'hazelcast.member.retry_count',
'hazelcast.member.rollback_count',
'hazelcast.member.running_count',
'hazelcast.member.running_generic_count',
'hazelcast.member.running_partition_count',
'hazelcast.member.scheduled',
'hazelcast.member.selector_i_o_exception_count',
'hazelcast.member.selector_rebuild_count',
'hazelcast.member.selector_recreate_count',
'hazelcast.member.size',
'hazelcast.member.start_count',
'hazelcast.member.started_migrations',
'hazelcast.member.state_version',
'hazelcast.member.sync_delivery_failure_count',
'hazelcast.member.system_cpu_load',
'hazelcast.member.system_load_average',
'hazelcast.member.task_queue_size',
'hazelcast.member.terminated_raft_node_group_ids',
'hazelcast.member.text_count',
'hazelcast.member.thread_count',
'hazelcast.member.total_completed_migrations',
'hazelcast.member.total_elapsed_destination_commit_time',
'hazelcast.member.total_elapsed_migration_operation_time',
'hazelcast.member.total_elapsed_migration_time',
'hazelcast.member.total_failure_count',
'hazelcast.member.total_loaded_classes_count',
'hazelcast.member.total_memory',
'hazelcast.member.total_parked_operation_count',
'hazelcast.member.total_physical',
'hazelcast.member.total_physical_memory_size',
'hazelcast.member.total_registrations',
'hazelcast.member.total_space',
'hazelcast.member.total_started_thread_count',
'hazelcast.member.total_swap_space_size',
'hazelcast.member.unknown_time',
'hazelcast.member.unloaded_classes_count',
'hazelcast.member.uptime',
'hazelcast.member.usable_space',
'hazelcast.member.used_heap',
'hazelcast.member.used_memory',
'hazelcast.member.used_metadata',
'hazelcast.member.used_native',
'hazelcast.member.write_queue_size',
'jvm.buffer_pool.direct.capacity',
'jvm.buffer_pool.direct.count',
'jvm.buffer_pool.direct.used',
'jvm.buffer_pool.mapped.capacity',
'jvm.buffer_pool.mapped.count',
'jvm.buffer_pool.mapped.used',
'jvm.cpu_load.process',
'jvm.cpu_load.system',
'jvm.gc.cms.count',
'jvm.gc.eden_size',
'jvm.gc.old_gen_size',
'jvm.gc.parnew.time',
'jvm.gc.survivor_size',
'jvm.heap_memory',
'jvm.heap_memory_committed',
'jvm.heap_memory_init',
'jvm.heap_memory_max',
'jvm.loaded_classes',
'jvm.non_heap_memory',
'jvm.non_heap_memory_committed',
'jvm.non_heap_memory_init',
'jvm.non_heap_memory_max',
'jvm.os.open_file_descriptors',
'jvm.thread_count',
)
| python |
import time
import webhook_listener
import json
# arduino = serial.Serial(port='COM14', baudrate=115200, timeout=0)
def process_post_request(request, *args, **kwargs):
    # read the raw request body (bytes) and decode it directly instead of
    # round-tripping through str()/eval(), which is fragile and unsafe
    length = int(request.headers.get("Content-Length", 0))
    body = request.body.read(length) if length > 0 else b""
    req = json.loads(body.decode("utf-8")) if body else None
print(req)
# Process the request!
# ...
return
webhooks = webhook_listener.Listener(handlers={"POST": process_post_request})
webhooks.start()
while True:
print("Still alive...")
time.sleep(300) | python |
#!/bin/env python
#
# Copyright 2013-2014 Graham McVicker and Bryce van de Geijn
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
#
"""
This program reads BAM files and counts the number of reads that match
the alternate and reference allele at every SNP position in the provided
SNP HDF5 data files. The read counts are stored in specified HDF5 output
files.
Additionally counts of all reads are stored in another track (at the
left-most position of the reads).
This program does not perform filtering of reads based on mappability.
It is assumed that the input BAM files are filtered appropriately prior to
calling this script.
Reads that overlap known indels are not included in allele-specific
counts.
usage: bam2h5.py OPTIONS BAM_FILE1 [BAM_FILE2 ...]
BAM Files:
Aligned reads are read from one or more BAM files. The provided
BAM files must be sorted and indexed.
Input Options:
--chrom CHROM_TXT_FILE [required]
Path to chromInfo.txt file (may be gzipped) with list of
chromosomes for the relevant genome assembly. Each line
in file should contain tab-separated chromosome name and
chromosome length (in basepairs). chromInfo.txt files can
be downloaded from the UCSC genome browser. For example,
a chromInfo.txt.gz file for hg19 can be downloaded from
http://hgdownload.soe.ucsc.edu/goldenPath/hg19/database/
--snp_index SNP_INDEX_H5_FILE [required]
Path to HDF5 file containing SNP index. The SNP index is
used to convert the genomic position of a SNP to its
corresponding row in the haplotype and snp_tab
HDF5 files.
--snp_tab SNP_TABLE_H5_FILE [required]
Path to HDF5 file to read SNP information from. Each row of SNP
table contains SNP name (rs_id), position, allele1, allele2.
--haplotype HAPLOTYPE_H5_FILE [optional]
Path to HDF5 file to read phased haplotypes from.
If supplied, when read overlaps multiple SNPs counts are randomly
assigned to ONE of the overlapping HETEROZYGOUS SNPs; if not supplied
counts are randomly assigned to ONE of overlapping SNPs (regardless of
their genotype).
--individual INDIVIDUAL [optional]
Identifier for individual, used to determine which
SNPs are heterozygous. Must be provided
if --haplotype argument is provided and must match one of the
samples in the haplotype HDF5 file.
Output Options:
--data_type uint8|uint16
Data type of stored counts; uint8 takes up less disk
space but has a maximum value of 255 (default=uint16).
--ref_as_counts REF_AS_COUNT_H5_FILE [required]
Path to HDF5 file to write counts of reads that match reference allele.
Allele-specific counts are stored at the position of the SNP.
--alt_as_counts ALT_AS_COUNT_H5_FILE [required]
Path to HDF5 file to write counts of reads that match alternate allele.
Allele-specific counts are stored at the position of the SNP.
--other_as_counts OTHER_AS_COUNT_H5_FILE [required]
Path to HDF5 file to write counts of reads that match neither reference
nor alternate allele. Allele-specific counts are stored at the position
of the SNP.
--read_counts READ_COUNT_H5_FILE [required]
Path to HDF5 file to write counts of all reads, regardless of whether
they overlap a SNP. Read counts are stored at the left-most position
of the mapped read.
--txt_counts COUNTS_TXT_FILE [optional]
Path to text file to write ref, alt, and other counts of reads. The
text file will have columns:
<chromosome> <snp_position> <ref_allele> <alt_allele> <genotype>
<ref_allele_count> <alt_allele_count> <other_count>
"""
import sys
import os
import gzip
import warnings
import tables
import argparse
import numpy as np
import pysam
import chromosome
import chromstat
import util
sys.path.insert(0, os.path.dirname(os.path.realpath(__file__))+"/../mapping/")
import snptable
# codes used by pysam for aligned read CIGAR strings
BAM_CMATCH = 0 # M
BAM_CINS = 1 # I
BAM_CDEL = 2 # D
BAM_CREF_SKIP = 3 # N
BAM_CSOFT_CLIP = 4 # S
BAM_CHARD_CLIP = 5 # H
BAM_CPAD = 6 # P
BAM_CEQUAL = 7 # =
BAM_CDIFF = 8 # X
BAM_CIGAR_DICT = {0 : "M",
1 : "I",
2 : "D",
3 : "N",
4 : "S",
5 : "H",
6 : "P",
7 : "=",
8 : "X"}
SNP_UNDEF = -1
MAX_UINT8_COUNT = 255
MAX_UINT16_COUNT = 65535
unimplemented_CIGAR = [0, set()]
def create_carray(h5f, chrom, data_type):
if data_type == "uint8":
atom = tables.UInt8Atom(dflt=0)
elif data_type == "uint16":
atom = tables.UInt16Atom(dflt=0)
else:
raise NotImplementedError("unsupported datatype %s" % data_type)
zlib_filter = tables.Filters(complevel=1, complib="zlib")
# create CArray for this chromosome
shape = [chrom.length]
carray = h5f.create_carray(h5f.root, chrom.name,
atom, shape, filters=zlib_filter)
return carray
def get_carray(h5f, chrom):
return h5f.get_node("/%s" % chrom)
def is_indel(snp):
    # a SNP whose alleles are not single bases is an indel
    return (len(snp['allele1']) != 1) or (len(snp['allele2']) != 1)
def dump_read(f, read):
cigar_str = " ".join(["%s:%d" % (BAM_CIGAR_DICT[c[0]], c[1])
for c in read.cigar])
f.write("pos: %d\n"
"aend: %d\n"
"alen (len of aligned portion of read on genome): %d\n"
"qstart: %d\n"
"qend: %d\n"
"qlen (len of aligned qry seq): %d\n"
"rlen (read len): %d\n"
"tlen (insert size): %d\n"
"cigar: %s\n"
"seq: %s\n"
% (read.pos, read.aend, read.alen, read.qstart, read.qend,
read.qlen, read.rlen, read.tlen, cigar_str, read.seq))
def get_sam_iter(samfile, chrom):
try:
sam_iter = samfile.fetch(reference=chrom.name,
start=1, end=chrom.length)
except ValueError as ve:
sys.stderr.write("%s\n" % str(ve))
# could not find chromosome, try stripping leading 'chr'
# E.g. for drosophila, sometimes 'chr2L' is used but
# othertimes just '2L' is used. Annoying!
chrom_name = chrom.name.replace("chr", "")
sys.stderr.write("WARNING: %s does not exist in BAM file, "
"trying %s instead\n" % (chrom.name, chrom_name))
try:
sam_iter = samfile.fetch(reference=chrom_name,
start=1, end=chrom.length)
except ValueError:
# fetch can fail because chromosome is missing or because
# BAM has not been indexed
sys.stderr.write("WARNING: %s does not exist in BAM file, "
"or BAM file has not been sorted and indexed.\n"
" Use 'samtools sort' and 'samtools index' to "
"index BAM files before running bam2h5.py.\n"
" Skipping chromosome %s.\n" %
(chrom.name, chrom.name))
sam_iter = iter([])
return sam_iter
def choose_overlap_snp(read, snp_tab, snp_index_array, hap_tab, ind_idx):
"""Picks out a single SNP from those that the read overlaps.
Returns a tuple containing 4 elements: [0] the index of the SNP in
the SNP table, [1] the offset into the read sequence, [2] flag
indicating whether the read was 'split' (i.e. was a spliced
read), [3] flag indicating whether read overlaps known indel.
If there are no overlapping SNPs or the read cannot be processed,
(None, None, is_split, overlap_indel) is returned instead.
"""
read_offsets = []
snp_idx = []
read_start_idx = 0
genome_start_idx = read.pos
n_match_segments = 0
is_split = False
overlap_indel = False
for cig in read.cigar:
op = cig[0]
op_len = cig[1]
if op == BAM_CMATCH:
# this is a block of match/mismatch in read alignment
read_end = read_start_idx + op_len
genome_end = genome_start_idx + op_len
# get offsets of any SNPs that this read overlaps
idx = snp_index_array[genome_start_idx:genome_end]
is_def = np.where(idx != SNP_UNDEF)[0]
read_offsets.extend(read_start_idx + is_def)
snp_idx.extend(idx[is_def])
read_start_idx = read_end
genome_start_idx = genome_end
n_match_segments += 1
elif op == BAM_CREF_SKIP:
# spliced read, skip over this region of genome
genome_start_idx += op_len
is_split = True
elif op == BAM_CSOFT_CLIP:
# end of read is soft-clipped, which means it is
# present in read, but not used in alignment
read_start_idx += op_len
elif op == BAM_CINS:
# Dealing with insertion
read_start_idx += op_len
elif op == BAM_CDEL:
# Dealing with deletion
genome_start_idx += op_len
elif op == BAM_CHARD_CLIP:
# end of read is hard-clipped, so not present
# in read and not used in alignment
pass
else:
unimplemented_CIGAR[0] += 1
unimplemented_CIGAR[1].add(BAM_CIGAR_DICT[op])
# sys.stderr.write("skipping because contains CIGAR code %s "
# " which is not currently implemented\n" %
# BAM_CIGAR_DICT[op])
return (None, None, is_split, overlap_indel)
# are any of the SNPs indels? If so, discard.
for i in snp_idx:
if is_indel(snp_tab[i]):
overlap_indel = True
return (None, None, is_split, overlap_indel)
n_overlap_snps = len(read_offsets)
if n_overlap_snps == 0:
# no SNPs overlap this read
return (None, None, is_split, overlap_indel)
if hap_tab:
# genotype info is provided by haplotype table
# pull out subset of overlapping SNPs that are heterozygous
# in this individual
het_read_offsets = []
het_snp_idx = []
for (i, read_offset) in zip(snp_idx, read_offsets):
haps = hap_tab[i, (ind_idx*2):(ind_idx*2 + 2)]
if ind_idx*2 > hap_tab.shape[1]:
raise ValueError("index of individual (%d) is >= number of "
"individuals in haplotype_tab (%d)."
% (ind_idx, hap_tab.shape[1]/2))
if haps[0] != haps[1]:
# this is a het
het_read_offsets.append(read_offset)
het_snp_idx.append(i)
n_overlap_hets = len(het_read_offsets)
if n_overlap_hets == 0:
# none of the overlapping SNPs are hets
return (None, None, is_split, overlap_indel)
if n_overlap_hets == 1:
# only one overlapping SNP is a het
return (het_snp_idx[0], het_read_offsets[0], is_split, overlap_indel)
# choose ONE overlapping HETEROZYGOUS SNP randomly to add counts to
# we don't want to count same read multiple times
r = np.random.randint(0, n_overlap_hets)
return (het_snp_idx[r], het_read_offsets[r], is_split, overlap_indel)
else:
# We don't have haplotype tab, so we don't know which SNPs are
# heterozygous in this individual. But we can still tell
# whether read sequence matches reference or non-reference
# allele. Choose ONE overlapping SNP randomly to add counts to
if n_overlap_snps == 1:
return (snp_idx[0], read_offsets[0], is_split, overlap_indel)
else:
r = np.random.randint(0, n_overlap_snps)
return (snp_idx[r], read_offsets[r], is_split, overlap_indel)
def add_read_count(read, chrom, ref_array, alt_array, other_array,
read_count_array, snp_index_array, snp_tab, hap_tab,
warned_pos, max_count, ind_idx):
# pysam positions start at 0
start = read.pos+1
end = read.aend
if start < 1 or end > chrom.length:
sys.stderr.write("WARNING: skipping read aligned past end of "
"chromosome. read: %d-%d, %s:1-%d\n" %
(start, end, chrom.name, chrom.length))
return
if read.qlen != read.rlen:
sys.stderr.write("WARNING skipping read: handling of "
"partially mapped reads not implemented\n")
return
# look for SNPs that overlap mapped read position, and if there
# are more than one, choose one at random
snp_idx, read_offset, is_split, overlap_indel = \
choose_overlap_snp(read, snp_tab, snp_index_array, hap_tab, ind_idx)
if overlap_indel:
return
# store counts of reads at start position
if read_count_array[start-1] < max_count:
read_count_array[start-1] += 1
else:
if not start in warned_pos:
sys.stderr.write("WARNING read count at position %d "
"exceeds max %d\n" % (start, max_count))
warned_pos[start] = True
if snp_idx is None:
return
snp = snp_tab[snp_idx]
allele1 = snp['allele1'].decode("utf-8")
allele2 = snp['allele2'].decode("utf-8")
base = read.seq[read_offset]
snp_pos = snp['pos']
if base == allele1:
# matches reference allele
if ref_array[snp_pos-1] < max_count:
ref_array[snp_pos-1] += 1
elif not snp_pos in warned_pos:
sys.stderr.write("WARNING ref allele count at position %d "
"exceeds max %d\n" % (snp_pos, max_count))
warned_pos[snp_pos] = True
elif base == allele2:
# matches alternate allele
if alt_array[snp_pos-1] < max_count:
alt_array[snp_pos-1] += 1
elif not snp_pos in warned_pos:
sys.stderr.write("WARNING alt allele count at position %d "
"exceeds max %d\n" % (snp_pos, max_count))
warned_pos[snp_pos] = True
else:
# matches neither
if other_array[snp_pos-1] < max_count:
other_array[snp_pos-1] += 1
elif not snp_pos in warned_pos:
sys.stderr.write("WARNING other allele count at position %d "
"exceeds max %d\n" % (snp_pos, max_count))
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--chrom",
help="Path to chromInfo.txt file (may be gzipped) "
"with list of chromosomes for the relevant genome "
"assembly. Each line in file should contain "
"tab-separated chromosome name and chromosome length "
"(in basepairs). chromInfo.txt files can be "
"downloaded from the UCSC genome browser. For "
"example, a chromInfo.txt.gz file for hg19 can "
"be downloaded from "
"http://hgdownload.soe.ucsc.edu/goldenPath/hg19/database/",
metavar="CHROM_TXT_FILE",
required=True)
parser.add_argument("--test_chrom",
help="Run only on this chromosome",
metavar="CHROM_NAME",
required=False)
parser.add_argument("--snp_index",
help="Path to HDF5 file containing SNP index. The "
"SNP index is used to convert the genomic position "
"of a SNP to its corresponding row in the haplotype "
"and snp_tab HDF5 files.",
metavar="SNP_INDEX_H5_FILE",
required=True)
parser.add_argument("--snp_tab",
help="Path to HDF5 file to read SNP information "
"from. Each row of SNP table contains SNP name "
"(rs_id), position, allele1, allele2.",
metavar="SNP_TABLE_H5_FILE",
required=True)
parser.add_argument("--haplotype",
help=" Path to HDF5 file to read phased haplotypes "
"from. If supplied, when read overlaps multiple SNPs "
"counts are randomly assigned to ONE of the "
"overlapping HETEROZYGOUS SNPs; if not supplied "
"counts are randomly assigned to ONE of overlapping "
"SNPs (regardless of their genotype).",
metavar="HAPLOTYPE_H5_FILE",
default=None)
parser.add_argument("--individual",
help="Identifier for individual, used to determine "
"which SNPs are heterozygous. Must be provided if "
"--haplotype argument is provided and must match one "
"of the samples in the haplotype HDF5 file.",
metavar="INDIVIDUAL",
default=None)
parser.add_argument("--data_type",
help="Data type of counts stored in HDF5 files. "
"uint8 requires less disk space but has a "
"maximum value of 255."
"(default=uint8)", choices=("uint8", "uint16"),
default="uint16")
parser.add_argument("--ref_as_counts",
help="Path to HDF5 file to write counts of reads "
"that match reference allele. Allele-specific counts "
"are stored at the position of the SNP."
"that match reference",
metavar="REF_AS_COUNT_H5_FILE",
required=True)
parser.add_argument("--alt_as_counts",
help="Path to HDF5 file to write counts of reads "
"that match alternate allele. Allele-specific counts "
"are stored at the position of the SNP.",
metavar="ALT_AS_COUNT_H5_FILE",
required=True)
parser.add_argument("--other_as_counts",
help="Path to HDF5 file to write counts of reads "
"that match neither reference nor alternate allele. "
"Allele-specific counts are stored at the position "
"of the SNP.",
metavar="OTHER_COUNT_H5_FILE",
required=True)
parser.add_argument("--read_counts",
help="Path to HDF5 file to write counts of all "
"reads, regardless of whether they overlap a SNP. "
"Read counts are stored at the left-most position "
"of the mapped read.",
metavar="READ_COUNT_H5_FILE",
required=True)
parser.add_argument("--txt_counts",
help="Path to text file to write ref, alt, and other "
"counts of reads. The text file will have columns: "
"<chromosome> <snp_position> <ref_allele> <alt_allele>"
" <genotype> <ref_allele_count> <alt_allele_count> "
"<other_count>",
metavar="COUNTS_TXT_FILE",
default=None)
parser.add_argument("bam_filenames", action="store", nargs="+",
help="BAM file(s) to read mapped reads from. "
"BAMs must be sorted and indexed.")
args = parser.parse_args()
if args.haplotype and (args.individual is None):
parser.error("--indidivual argument "
"must also be provided when --haplotype argument "
"is provided")
return args
def write_txt_file(out_file, chrom, snp_tab, hap_tab, ind_idx,
ref_array, alt_array, other_array):
i = 0
# get out genotypes for this individual
hap = hap_tab[:, (ind_idx*2, ind_idx*2+1)]
for row in snp_tab:
if (hap[i,0] > -1) and (hap[i,1] > -1):
# genotype is defined
geno = "%d|%d" % (hap[i,0], hap[i,1])
else:
geno = "NA"
pos = row['pos']
out_file.write(" ".join([chrom.name,
"%d" % pos,
row['allele1'].decode("utf-8"),
row['allele2'].decode("utf-8"),
geno,
"%d" % ref_array[pos-1],
"%d" % alt_array[pos-1],
"%d" % other_array[pos-1]]) + "\n")
i += 1
def main():
args = parse_args()
sys.stderr.write("command line: %s\n" % " ".join(sys.argv))
sys.stderr.write("python version: %s\n" % sys.version)
sys.stderr.write("pysam version: %s\n" % pysam.__version__)
sys.stderr.write("pytables version: %s\n" % tables.__version__)
util.check_pysam_version()
util.check_pytables_version()
# disable warnings that come from pytables when chromosome
# names are like 1, 2, 3 (instead of chr1, chr2, chr3)
warnings.filterwarnings('ignore', category=tables.NaturalNameWarning)
snp_tab_h5 = tables.open_file(args.snp_tab, "r")
snp_index_h5 = tables.open_file(args.snp_index, "r")
if args.haplotype:
hap_h5 = tables.open_file(args.haplotype, "r")
else:
hap_h5 = None
ref_count_h5 = tables.open_file(args.ref_as_counts, "w")
alt_count_h5 = tables.open_file(args.alt_as_counts, "w")
other_count_h5 = tables.open_file(args.other_as_counts, "w")
read_count_h5 = tables.open_file(args.read_counts, "w")
output_h5 = [ref_count_h5, alt_count_h5, other_count_h5, read_count_h5]
chrom_dict = {}
# initialize every chromosome in output files
chrom_list = chromosome.get_all_chromosomes(args.chrom)
for chrom in chrom_list:
for out_file in output_h5:
create_carray(out_file, chrom, args.data_type)
chrom_dict[chrom.name] = chrom
count = 0
dtype = None
if args.data_type == "uint8":
max_count = MAX_UINT8_COUNT
dtype = np.uint8
elif args.data_type == "uint16":
max_count = MAX_UINT16_COUNT
dtype = np.uint16
else:
raise NotImplementedError("unsupported datatype %s" % args.data_type)
# create a txt file to also holds the counts
if args.txt_counts is not None:
if os.path.splitext(args.txt_counts)[1] == ".gz":
txt_counts = gzip.open(args.txt_counts, 'wt+')
else:
txt_counts = open(args.txt_counts, 'w+')
for chrom in chrom_list:
sys.stderr.write("%s\n" % chrom.name)
if args.test_chrom:
if chrom.name != args.test_chrom:
sys.stderr.write("skipping because not test chrom\n")
continue
warned_pos = {}
# fetch SNP info for this chromosome
if chrom.name not in snp_tab_h5.root:
# no SNPs for this chromosome
sys.stderr.write("skipping %s because chromosome with this name "
"not found in SNP table\n" % chrom.name)
continue
sys.stderr.write("fetching SNPs\n")
snp_tab = snp_tab_h5.get_node("/%s" % chrom.name)
snp_index_array = snp_index_h5.get_node("/%s" % chrom.name)[:]
if hap_h5:
hap_tab = hap_h5.get_node("/%s" % chrom.name)
ind_dict, ind_idx = snptable.SNPTable().get_h5_sample_indices(
hap_h5, chrom.name, [args.individual])
if len(ind_idx) == 1:
ind_idx = ind_idx[0]
sys.stderr.write("index for individual %s is %d\n" %
(args.individual, ind_idx))
else:
raise ValueError("got sample indices for %d individuals, "
"but expected to get index for one "
"individual (%s)" % (len(ind_idx),
args.individual))
else:
hap_tab = None
ind_idx = None
# initialize count arrays for this chromosome to 0
ref_carray = get_carray(ref_count_h5, chrom)
alt_carray = get_carray(alt_count_h5, chrom)
other_carray = get_carray(other_count_h5, chrom)
read_count_carray = get_carray(read_count_h5, chrom)
ref_array = np.zeros(chrom.length, dtype)
alt_array = np.zeros(chrom.length, dtype)
other_array = np.zeros(chrom.length, dtype)
read_count_array = np.zeros(chrom.length, dtype)
# loop over all BAM files, pulling out reads
# for this chromosome
for bam_filename in args.bam_filenames:
sys.stderr.write("reading from file %s\n" % bam_filename)
samfile = pysam.Samfile(bam_filename, "rb")
for read in get_sam_iter(samfile, chrom):
count += 1
if count == 10000:
sys.stderr.write(".")
count = 0
add_read_count(read, chrom, ref_array, alt_array,
other_array, read_count_array,
snp_index_array, snp_tab, hap_tab,
warned_pos, max_count, ind_idx)
# store results for this chromosome
ref_carray[:] = ref_array
alt_carray[:] = alt_array
other_carray[:] = other_array
read_count_carray[:] = read_count_array
sys.stderr.write("\n")
# write data to numpy arrays, so that they can be written to a txt
# file later
# columns are:
# chrom, pos, ref, alt, genotype, ref_count, alt_count, other_count
if args.txt_counts is not None:
write_txt_file(txt_counts, chrom, snp_tab, hap_tab, ind_idx,
ref_array, alt_array, other_array)
samfile.close()
if args.txt_counts:
# close the open txt file handler
txt_counts.close()
# check if any of the reads contained an unimplemented CIGAR
if unimplemented_CIGAR[0] > 0:
sys.stderr.write("WARNING: Encountered " + str(unimplemented_CIGAR[0])
+ " instances of CIGAR codes: "
+ str(unimplemented_CIGAR[1]) + ". Reads with these "
"CIGAR codes were skipped because they "
"are currently unimplemented.\n")
# set track statistics and close HDF5 files
sys.stderr.write("setting statistics for each chromosome\n")
for h5f in output_h5:
chromstat.set_stats(h5f, chrom_list)
h5f.close()
snp_tab_h5.close()
snp_index_h5.close()
if hap_h5:
hap_h5.close()
sys.stderr.write("done\n")
main()
| python |
#!/bin/python
# -*- coding: utf-8 -*-
import requests
CITY = "787657"
API_KEY = "yourapikey(can be registered on openweathermap.org)"
UNITS = "Metric"
LANG = "en"
REQ = requests.get("http://api.openweathermap.org/data/2.5/weather?id={}&lang={}&appid={}&units={}".format(CITY, LANG, API_KEY, UNITS))
try:
if REQ.status_code == 200:
CURRENT = REQ.json()["weather"][0]["description"].capitalize()
TEMP = int(float(REQ.json()["main"]["temp"]))
print("{}°".format(TEMP))
else:
print("Error: BAD HTTP STATUS CODE " + str(REQ.status_code))
except (ValueError, IOError):
print("Error: Unable print the data")
| python |
#Build In
import os
import sys
import pickle
import copy
import random
# Installed
import numpy as np
from scipy.spatial.transform import Rotation as R
from pathlib import Path
import torch
import spconv
from argoverse.data_loading.argoverse_tracking_loader import ArgoverseTrackingLoader
# Local
from pcdet.utils import box_utils, object3d_utils, calibration, common_utils
from pcdet.ops.roiaware_pool3d import roiaware_pool3d_utils
from pcdet.config import cfg
from pcdet.datasets.data_augmentation.dbsampler import DataBaseSampler
from pcdet.datasets import DatasetTemplate
def shuffle_log(subset, log:ArgoverseTrackingLoader):
index = np.arange(log.num_lidar_frame)
random.shuffle(index)
for idx in index:
lidar = log.get_lidar(idx)
label = log.get_label_object(idx)
yield idx, subset, lidar, label, log
class BaseArgoDataset(DatasetTemplate):
def __init__(self, root_path, subsets:list):
super().__init__()
self.root_path = root_path
self.atls = {subset:ArgoverseTrackingLoader(Path(self.root_path) / subset) for subset in subsets}
self._len = 0
pass
def __len__(self):
        if self._len == 0:
for atl in self.atls.values():
for log in iter(atl):
self._len += log.num_lidar_frame
return self._len
def __iter__(self):
for subset, atl in self.atls.items():
for log in iter(atl):
for idx in range(atl.num_lidar_frame):
lidar = log.get_lidar(idx)
label = log.get_label_object(idx)
yield idx, subset, lidar, label, log
pass
def shuffle(self, seed=0):
        random.seed(seed)
        generators = [shuffle_log(subset, log) for subset, atl in self.atls.items() for log in iter(atl)]
random.shuffle(generators)
has_next = True
while has_next:
has_next = False
for generator in generators:
item = next(generator, False)
if item is not False:
has_next = True
yield item
def create_gt_parts(self, root=None):
if root is None:
root = Path(self.root_path)
for idx, subset, lidar, label, log in iter(self):
save_path = root / subset / log.current_log / 'gt_parts'
save_path.mkdir(parents=True, exist_ok=True)
gt_boxes = np.zeros((len(label), 7))
for i, obj in enumerate(label):
loc = obj.translation
quat = obj.quaternion
dim = (obj.width, obj.length, obj.height)
rot = R.from_quat(quat).as_euler('zyx')
gt_boxes[i] = np.hstack((loc, dim, rot[0]))
point_indices = roiaware_pool3d_utils.points_in_boxes_cpu(torch.from_numpy(lidar[:, :3]), torch.from_numpy(gt_boxes)).numpy()
for i, obj in enumerate(label):
filename = save_path / '{}_{}_{}.bin'.format(idx, obj.label_class, obj.track_id)
gt_points = lidar[point_indices[i] > 0]
if len(gt_points) >= 10:
gt_points -= gt_points.mean(axis=0)
with open(filename, 'wb') as f:
gt_points.tofile(f)
class ArgoDataset(BaseArgoDataset):
def __init__(self, root_path, subsets:list, class_names:dict, training=True):
"""
:param root_path: ARGO AI data path
:param split:
"""
super().__init__(root_path, subsets)
self.class_names = class_names
self.training = training
self.mode = 'TRAIN' if self.training else 'TEST'
# Support spconv 1.0 and 1.1
try:
VoxelGenerator = spconv.utils.VoxelGeneratorV2
        except AttributeError:
VoxelGenerator = spconv.utils.VoxelGenerator
vg_cfg = cfg.DATA_CONFIG.VOXEL_GENERATOR
self.voxel_generator = VoxelGenerator(
voxel_size=vg_cfg.VOXEL_SIZE,
            point_cloud_range=cfg.DATA_CONFIG.POINT_CLOUD_RANGE,
max_num_points=vg_cfg.MAX_POINTS_PER_VOXEL,
max_voxels=cfg.DATA_CONFIG[self.mode].MAX_NUMBER_OF_VOXELS
)
pass
def __getitem__(self, index):
def create_input_dict(log, subset, idx):
label = []
for obj in log.get_label_object(idx):
if obj.label_class in self.class_names.keys():
obj.class_id = self.class_names[obj.label_class]
label.append(obj)
points = log.get_lidar(idx)
gt_boxes = np.zeros((len(label), 7))
occluded = np.zeros(len(label), dtype=int)
for i, obj in enumerate(label):
loc = obj.translation
quat = obj.quaternion
dim = (obj.width, obj.length, obj.height)
rot = R.from_quat(quat).as_euler('zyx')
gt_boxes[i] = np.hstack((loc, dim, rot[0], obj.class_id))
occluded[i] = obj.occlusion
voxel_grid = self.voxel_generator.generate(points)
if isinstance(voxel_grid, dict):
voxels = voxel_grid["voxels"]
coordinates = voxel_grid["coordinates"]
num_points = voxel_grid["num_points_per_voxel"]
else:
voxels, coordinates, num_points = voxel_grid
voxel_centers = (coordinates[:, ::-1] + 0.5) * self.voxel_generator.voxel_size + self.voxel_generator.point_cloud_range[:3]
return {
'voxels': voxels,
                'voxel_centers': voxel_centers,
'coordinates': coordinates,
'num_points': num_points,
'points': points,
'subset': subset,
'sample_idx': idx,
'occluded': occluded,
'gt_names': np.array([obj.label_class for obj in label]),
'gt_box2d': None,
'gt_boxes': gt_boxes
}
for subset, atl in self.atls.items():
for log in iter(atl):
if index < log.num_lidar_frame:
input_dict = create_input_dict(log, subset, index)
break
else:
index -= log.num_lidar_frame
return input_dict
def create_argo_infos(data_path, save_path, subsets, workers=4):
dataset = BaseArgoDataset(data_path, subsets)
#print('---------------Start to generate data infos---------------')
#for subset in subsets:
# filename = save_path / subset / 'argo_infos.pkl'
#
# argo_infos = dataset.get_infos(num_workers=workers, has_label=True, count_inside_pts=True)
# with open(filename, 'wb') as f:
# pickle.dump(argo_infos, f)
# print('ArgoAI info {} file is saved to {}'.format(subset, filename))
print('---------------Start create groundtruth database for data augmentation---------------')
dataset.create_gt_parts(save_path)
print('---------------Data preparation Done---------------')
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser(description='Generates a database of Parts')
parser.add_argument('data_path', help='root path of the dataset')
parser.add_argument('--save_path', default=None, help='path for saving the parts')
parser.add_argument('--subsets', nargs='+', default=['train1','train2','train3','train4'], help='List of database subsets')
args = parser.parse_args()
if args.save_path is None:
args.save_path = args.data_path
create_argo_infos(Path(args.data_path), Path(args.save_path), args.subsets) | python |
import pytest
from app.core.enums import CaseStatus
from app.entities import RecordOnAppeal, Court
def test_roa_from_district_case(simple_case) -> None:
'''
    It should create a record on appeal for this case and set the original_case_id.
'''
court = Court.from_id('ca9')
roa = simple_case.create_record_on_appeal(court)
assert isinstance(roa, RecordOnAppeal)
assert roa.original_case_id == simple_case.id
assert roa.receiving_court == 'ca9'
assert roa.court == simple_case.court
def test_roa_from_district_case_no_appellate_court(simple_case) -> None:
'''
It should not set the receiving court automatically.
'''
roa = simple_case.create_record_on_appeal()
assert roa.receiving_court == None
assert roa.court == simple_case.court
def test_district_case_status_roa(simple_case) -> None:
'''
It should change status of original case to submitted_for_appeal.
'''
_ = simple_case.create_record_on_appeal()
assert simple_case.status == CaseStatus.submitted_for_appeal
def test_validates_roa(simple_case) -> None:
'''
    It should raise an exception if a record on appeal is created when one already exists.
'''
_ = simple_case.create_record_on_appeal()
assert simple_case.status == CaseStatus.submitted_for_appeal
with pytest.raises(ValueError):
_ = simple_case.create_record_on_appeal()
def test_send_roa(simple_case) -> None:
'''
    It should set the receiving court on the record on appeal.
'''
roa = simple_case.create_record_on_appeal()
roa.send_to_court(Court.from_id('ca9'))
assert roa.receiving_court == 'ca9'
| python |
import asyncio
# get the event loop
import time
loop = asyncio.get_event_loop()
async def main():
await asyncio.sleep(10)
print("main coroutine running")
print(time.time_ns())
# run a coroutine function to completion
loop.run_until_complete(main())
print(time.time_ns())
# run a coroutine function in a thread pool
# loop.run_in_executor()
# run the event loop forever
loop.run_forever() | python |
"""
ga2vcf cli
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import ga4gh.converters.cli as cli
import ga4gh.converters.converters as converters
import ga4gh.common.cli as common_cli
import ga4gh.client.cli as cli_client
class Ga2VcfRunner(cli_client.SearchVariantsRunner):
"""
Runner class for the ga2vcf
"""
def __init__(self, args):
super(Ga2VcfRunner, self).__init__(args)
self._outputFile = args.outputFile
self._binaryOutput = False
if args.outputFormat == "bcf":
self._binaryOutput = True
def run(self):
variantSet = self._client.get_variant_set(self._variantSetId)
iterator = self._client.search_variants(
start=self._start, end=self._end,
reference_name=self._referenceName,
variant_set_id=self._variantSetId,
call_set_ids=self._callSetIds)
# do conversion
vcfConverter = converters.VcfConverter(
variantSet, iterator, self._outputFile, self._binaryOutput)
vcfConverter.convert()
def getGa2VcfParser():
parser = common_cli.createArgumentParser((
"GA4GH VCF conversion tool. Converts variant information "
"stored in a GA4GH repository into VCF format."))
cli_client.addClientGlobalOptions(parser)
cli.addOutputFileArgument(parser)
cli_client.addUrlArgument(parser)
parser.add_argument("variantSetId", help="The variant set to convert")
parser.add_argument(
"--outputFormat", "-O", choices=['vcf', 'bcf'], default="vcf",
help=(
"The format for object output. Currently supported are "
"'vcf' (default), which is a text-based format and "
"'bcf', which is the binary equivalent"))
cli_client.addReferenceNameArgument(parser)
cli_client.addCallSetIdsArgument(parser)
cli_client.addStartArgument(parser)
cli_client.addEndArgument(parser)
cli_client.addPageSizeArgument(parser)
return parser
def ga2vcf_main():
parser = getGa2VcfParser()
args = parser.parse_args()
if "baseUrl" not in args:
parser.print_help()
else:
runner = Ga2VcfRunner(args)
runner.run()
| python |
#MenuTitle: Angularizzle
# -*- coding: utf-8 -*-
__doc__="""
Creates angular versions of glyphs made up of cubic paths.
"""
import math
import vanilla
import copy
import GlyphsApp
f = Glyphs.font
masterlen = len(f.masters)
# Script name by Type Overlord Florian Horatio Runge of Flensborre @FlorianRunge
class Angela( object ):
def __init__( self ):
windowWidth = 222
windowHeight = 130
self.w = vanilla.FloatingWindow(
( windowWidth, windowHeight ),
"Angularizzle Yo",
autosaveName = "com.LNP.Angela.mainwindow"
)
self.w.titlesize = vanilla.TextBox((20, 20, -10, 17), "Min plane:")
self.w.inputSize = vanilla.EditText( (100, 20, 100, 20), "80", sizeStyle = 'small')
self.w.checkBox = vanilla.CheckBox((20, 50, -10, 17), "Keep detail", value=True)
self.w.cancelButton = vanilla.Button((20, 80, 85, 30), "Cancel", sizeStyle='regular', callback=self.CloseApp )
self.w.runButton = vanilla.Button((120, 80, 85, 30), "Process", sizeStyle='regular', callback=self.DoIt )
self.w.setDefaultButton (self.w.runButton)
# Load Settings: Save/Load settings by Toschi Omagari
if not self.LoadP():
pass
#print "Could not load preferences. Will resort to defaults"
self.w.open()
self.w.makeKey()
global font
font = Glyphs.font
global selectedGlyphs
selectedGlyphs = [ l.parent for l in font.selectedLayers ]
# if single glyph save state
if len(selectedGlyphs)==1:
thisgl = font.selectedLayers[0]
global GlyphStartPaths
GlyphStartPaths = copy.deepcopy(thisgl.paths)
def CloseApp(self, sender):
thisgl = font.selectedLayers[0]
self.ClearScreen(thisgl)
for p in GlyphStartPaths: thisgl.paths.append(p)
self.w.close()
def SaveP( self, sender ):
try:
Glyphs.defaults["com.LNP.Angela.inputSize"] = self.w.inputSize.get()
Glyphs.defaults["com.LNP.Angela.checkBox"] = self.w.checkbox.get()
except:
return False
return True
def LoadP( self ):
try:
self.w.inputSize.set( Glyphs.defaults["com.LNP.Angela.inputSize"] )
			self.w.checkBox.set( Glyphs.defaults["com.LNP.Angela.checkBox"] )
except:
return False
return True
def MainAngela( self, asize, detail ):
		if asize.isdigit():
global stepnum, tStepSize
asize = int(asize)
stepnum=130
tStepSize = 1.0/stepnum # !impt
font = Glyphs.font
angsize = int(asize)
font.disableUpdateInterface()
for glyph in selectedGlyphs:
thisgl = font.glyphs[glyph.name].layers[0]
			if len(thisgl.paths)==0:
continue
thisgl.color = 8 #purple
if len(selectedGlyphs)>1:
ang = self.ReturnNodesAlongPath(thisgl.paths, angsize)
else:
ang = self.ReturnNodesAlongPath(GlyphStartPaths, angsize)
if detail==False:
ang = self.StripDetail(ang, asize)
if ang:
#thisgl = font.selectedLayers[0]
self.ClearScreen(thisgl)
for n in ang:
pts = n[2]
isclosed = n[1]
outline = self.ListToPath(pts, isclosed)
thisgl.paths.append( outline )
font.enableUpdateInterface()
if not self.SaveP( self ):
pass
#print "Could not save preferences."
if len(selectedGlyphs)>1:
self.w.close()
def StripDetail (self, nlist, asize):
newList = list()
for s in nlist:
newnodes = list()
length = s[0]
isclosed = s[1]
nlist = s[2]
p1x = nlist[0][0]
p1y = nlist[0][1]
for n in range(1, len(nlist)-1):
p2x = nlist[n][0]
p2y = nlist[n][1]
dist = math.hypot(p2x - p1x, p2y - p1y)
if dist > asize:
newnodes.append([p1x, p1y])
p1x = p2x
p1y = p2y
else:
continue
nl = [length, isclosed, newnodes]
newList.append(nl)
return newList
def DoIt( self, sender ):
asize = self.w.inputSize.get()
detail = self.w.checkBox.get()
		if asize.isdigit() and int(asize) > 4:
self.MainAngela(asize, detail)
else:
pass
# Remove any duplicate points from list
def RemoveDuplicatePts(self, ptlist):
ptl = []
for i in ptlist:
if i not in ptl:
ptl.append(i)
ptl.append(ptlist[-1])
return ptl
	# Evaluate the cubic Bezier segment defined by p0,p1,p2,p3 at parameter t (De Casteljau construction)
def GetPoint(self, p0, p1, p2, p3, t):
ax = self.lerp( [p0[0], p1[0]], t )
ay = self.lerp( [p0[1], p1[1]], t )
bx = self.lerp( [p1[0], p2[0]], t )
by = self.lerp( [p1[1], p2[1]], t )
cx = self.lerp( [p2[0], p3[0]], t )
cy = self.lerp( [p2[1], p3[1]], t )
dx = self.lerp( [ax, bx], t )
dy = self.lerp( [ay, by], t )
ex = self.lerp( [bx, cx], t )
ey = self.lerp( [by, cy], t )
pointx = self.lerp( [dx, ex], t )
pointy = self.lerp( [dy, ey], t )
calc = [pointx,pointy]
return calc
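	# Quick sanity check (illustrative control points, not taken from a real glyph):
	# evaluating the cubic segment (0,0),(0,100),(100,100),(100,0) at t=0.5 with
	# GetPoint returns the on-curve point [50.0, 75.0].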
# Put all the xy coords of linear t GetPoint() increments in list
def CreatePointList(self,p0,p1,p2,p3):
pl = list()
tmp=0
while tmp<1:
t = tmp
calc = self.GetPoint(p0,p1,p2,p3,tmp)
pl.append(calc)
tmp = tmp + tStepSize
return pl
#Clear layer except components
def ClearScreen(self, clearlayer):
for i in range( len( clearlayer.paths ))[::-1]:
del clearlayer.paths[i]
def lerp(self, v, d):
return v[0] * (1 - d) + v[1] * d
	# Create a distance look-up list from pointlist so we can determine a % position along the spline;
	# each item is the cumulative distance from the beginning of the segment
def CreateDistList(self, pointlist):
lookup = list()
totallength = 0
for tp in range (0,len(pointlist)-1):
p1x = pointlist[tp][0]
p1y = pointlist[tp][1]
p2x = pointlist[tp+1][0]
p2y = pointlist[tp+1][1]
dist = math.hypot(p2x - p1x, p2y - p1y)
totallength += dist
lookup.append(totallength)
lookup.insert(0,0)
return lookup
#find at which index the desired length matches to determine nearest t step value
#return new precise t value between the two indexes desiredlen falls
def FindPosInDistList(self, lookup, newlen): #newlen = length along curve
for s in range (0,len(lookup)-1):
b1 = lookup[s]
b2 = lookup[s+1]
if b1 <= newlen <= b2:
if b1==0:
newt=0
else:
percentb = ( 100 / (b2 - b1) ) * (newlen - b1)
newt = (s*tStepSize) + ( tStepSize * (percentb/100) )
return (newt)
# Draw new angular path from list
	def ListToPath(self, ptlist, isclosed):
		np = GSPath()
		if isclosed and len(ptlist)>2: del ptlist[-1]
		if len(ptlist)>2: #so counters don't devolve completely
			for pt in ptlist:
				newnode = GSNode()
				newnode.type = GSLINE
				newnode.position = (pt[0], pt[1])
				np.nodes.append( newnode )
			np.closed = isclosed
return np
def PointToPointSteps(self, tp0, tp1, spacebetween):
n1x, n1y, n2x, n2y = tp0[0], tp0[1], tp1[0], tp1[1]
tmplist = list()
dist = math.hypot(n2x - n1x, n2y - n1y)
currentx = n1x
currenty = n1y
psteps = int(math.ceil(dist/spacebetween))
stepx = (n2x-n1x) / psteps
stepy = (n2y-n1y) / psteps
for n in range(psteps):
tmplist.append([currentx, currenty])
currentx+=stepx
currenty+=stepy
return tmplist
# returns nodes along a curve at intervals of space between
def ReturnNodesAlongPath(self, GlyphStartPaths, spacebetween):
allPaths = list()
for path in GlyphStartPaths:
pathTotalLength = 0
allpointslist = []
scount=0
if path.closed==False:
continue
for segment in path.segments:
nodenum = len(segment)
scount+=1
if segment.type=="move":
continue
# if straight segment
if nodenum==2:
if scount<1: continue
tp0 = (segment[0].x, segment[0].y)
tp1 = (segment[1].x, segment[1].y)
dist = math.hypot(tp1[0] - tp0[0], tp1[1] - tp0[1])
pathTotalLength+=dist
straightlinepts = self.PointToPointSteps(tp0,tp1,spacebetween)
for sl in straightlinepts: allpointslist.append(sl)
# if bezier curve segment
if nodenum==4:
tp0 = (segment[0].x, segment[0].y)
tp1 = (segment[1].x, segment[1].y)
tp2 = (segment[2].x, segment[2].y)
tp3 = (segment[3].x, segment[3].y)
pointlist = self.CreatePointList(tp0, tp1, tp2, tp3)
lookup = self.CreateDistList(pointlist)
totallength = lookup[-1]
pathTotalLength += totallength
# check that the distance of curve segment is at least as big as spacebetween jump
if totallength > spacebetween:
						steps = int(math.floor(totallength/spacebetween))
						stepinc = totallength / steps
dlen=0 # distance to check in list of distances
for s in range(0,steps+1):
if s==0:
newt=0
elif s==steps:
newt=1
else:
newt = self.FindPosInDistList(lookup,dlen)
calc = self.GetPoint(tp0,tp1,tp2,tp3,newt)
allpointslist.append(calc)
dlen+=stepinc
else:
allpointslist.append([tp0[0],tp0[1]])
allpointslist.append([tp3[0],tp3[1]])
if allpointslist:
allpointslist = self.RemoveDuplicatePts(allpointslist)
pathdata = [pathTotalLength, path.closed, allpointslist]
allPaths.append(pathdata)
return allPaths
Angela() | python |
from django.contrib import admin
from .models import ContactQuery
# Register your models here.
admin.site.register(ContactQuery)
| python |
######### Third-party software locations #########
hmmer_dir = "./hmmer_linux/bin/"
phobius_dir = "./phobius/"
# These can be overridden by the --hmerdir, --phobiusdir and -wp options
phobius_url = "https://phobius.sbc.su.se/cgi-bin/predict.pl"
######### Profile HMM locations #########
PTKhmm_dir = "./pHMMs/"
JM_dir = "./pHMMs/JM/"
Pfam_dir = "./pHMMs/Pfam"
### DO NOT CHANGE THEM!!!!!!!!!!!
| python |
import logging
from quarkchain.evm.slogging import get_logger, configure_logging
"""
the slogging module used by ethereum is configured via a comma-separated string,
and each named logger will receive a custom level (defaults to INFO)
examples:
':info'
':info,p2p.discovery:debug'
because of the way that configure_logging() is written, we cannot call configure_logging() after cluster_config is loaded;
so the best way to configure slogging is to change SLOGGING_CONFIGURATION here
"""
SLOGGING_CONFIGURATION = ":info"
configure_logging(SLOGGING_CONFIGURATION)
if __name__ == "__main__":
logging.basicConfig()
log = get_logger("test")
log.warn("miner.new_block", block_hash="abcdef123", nonce=2234231)
| python |
"""
The module opens the camera capture a point cloud and:
- mesh the point cloud and give back a water-tight mesh
"""
import copy
import sys
if sys.version_info[0] == 2:  # the tkinter library changed its name from Python 2 to 3.
import Tkinter
tkinter = Tkinter #I decided to use a library reference to avoid potential naming conflicts with people's programs.
else:
import tkinter
from PIL import Image
from PIL import ImageTk
import pymeshlab # keep on top as first import (why?)
import pyzed.sl as sl
import numpy as np
import open3d as o3d
import tifffile
from sklearn.cluster import KMeans
from scipy.spatial import ConvexHull
import threading
## Imports for function: convert_roi_meter_pixel
import os
import yaml
from util import terminal
import distance_map
sys.path.append('/usr/local/lib/python3.8/dist-packages')
import cv2
#TODO: set wall scanning 1.5 x 0.7 m dimension area
ROI = [0.7,1.5]
CENTER = [250,750]
# CENTER = [360,680]
# Number of frames taken for the point cloud acquisition.
NUMBER_OF_AVERAGE_FRAMES = 1
# Scaling factor when cropping the live stream cloud on keypoints
CLUSTER_REDUCTION_FACTOR = 0.4
def rotationMatrix(r):
"""
Simple 3D Matrix rotation function, obtained from following sources:
https://en.wikipedia.org/wiki/Rodrigues'_rotation_formula
Args:
-r: a rotation vector, with rotation value in x, y and z direction.
"""
## Parameter for the rotationmatrix function
rotationAngleDegThreshold = 0.00001
# its length is the rotation angle
rotationAngleDeg = np.linalg.norm(r)
if rotationAngleDeg > rotationAngleDegThreshold:
# its direction is the rotation axis.
rotationAxis = r / rotationAngleDeg
# positive angle is clockwise
K = np.array([[ 0, -rotationAxis[2], rotationAxis[1]],
[ rotationAxis[2], 0, -rotationAxis[0]],
[-rotationAxis[1], rotationAxis[0], 0 ]])
# Note the np.dot is very important.
R = np.eye(3) + (np.sin(np.deg2rad(rotationAngleDeg)) * K) + \
((1.0 - np.cos(np.deg2rad(rotationAngleDeg))) * np.dot(K, K))
else:
R = np.eye(3)
return R
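# Usage sketch (illustrative values, not from the calibration data): a rotation
# vector of length 90 along z is a 90-degree rotation about the z axis, so it
# maps the x unit vector onto the y unit vector.
#
#   R = rotationMatrix(np.array([0.0, 0.0, 90.0]))
#   np.allclose(R.dot([1.0, 0.0, 0.0]), [0.0, 1.0, 0.0])   # -> True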
def load_transformation_matrix():
_root_file = os.path.dirname(__file__)
_calib_information_path = os.path.join(_root_file, "calib/utils/calibration_info.yaml")
	# Check that the file exists, as it should if the calib routine ran completely
if not os.path.exists(_calib_information_path):
terminal.error_print(
f"No Calibration Data has been found in: {_calib_information_path}"
)
exit()
else:
## Load the transformation matrix
# Opening YAML file
with open(_calib_information_path) as yaml_file:
data = yaml.load(yaml_file, Loader=yaml.FullLoader)
# extracting information
matrix_data = data["3D_2D_Matrix"]
s, f, u0, v0, dX, dY, dZ, m_x, m_y, gamma, r0, r1, r2 = (
matrix_data["s"],
matrix_data["f"],
matrix_data["u0"],
matrix_data["v0"],
matrix_data["dX"],
matrix_data["dY"],
matrix_data["dZ"],
matrix_data["m_x"],
matrix_data["m_y"],
matrix_data["gamma"],
matrix_data["r0"],
matrix_data["r1"],
matrix_data["r2"],
)
Rt = np.zeros((4, 4))
R = rotationMatrix(np.array([r0, r1, r2]))
Rt[0:3, 0:3] = R
Rt[:, -1] = np.array([dX, dY, dZ, 1])
K = np.array([[f*m_x, gamma, u0, 0], [0, f*m_y, v0, 0], [0, 0, 1, 0]])
transformation_matrix = np.dot(K,Rt)/s
return transformation_matrix
def convert_roi_meter_pixel(roi,center):
"""
	This function returns a rectangular Region Of Interest as pixel slices, centered on the given center point.
	It takes as input an array with the width and the length of the ROI in meters.
	:param roi: array of the width and the length of the ROI in meters.
	:param center: center of the ROI in pixels.
"""
_root_file = os.path.dirname(__file__)
_calib_information_path = os.path.join(_root_file, "calib/utils/calibration_info.yaml")
	# Check that the file exists, as it should if the calib routine ran completely
if not os.path.exists(_calib_information_path):
terminal.error_print(f"No Calibration Data has been found in: {_calib_information_path}")
exit()
else:
# Opening YAML file
with open(_calib_information_path) as yaml_file:
data = yaml.load(yaml_file,Loader=yaml.FullLoader)
roi_info = data["ROI_info"]
distance_m = roi_info["Distance_m"]
distance_px = roi_info["Distance_px"]
convert_m_px = distance_px/distance_m
roi_px = np.array(roi) * convert_m_px
## We suppose the camera used is the zed camera, with an image acquisition of 1280x720 pixels
## the center is (360,640)
slice_roi = [slice(int(center[0]-roi_px[0]/2),int(center[0]+roi_px[0]/2)),
slice(int(center[1]-roi_px[1]/2),int(center[1]+roi_px[1]/2))]
return slice_roi
def set_up_zed():
"""
This function is setting up the zed camera for depth capture
return: The initialized camera, and the zed point cloud format/host
"""
# Set ZED params
init = sl.InitParameters(camera_resolution=sl.RESOLUTION.HD720, # HD720 | 1280*720
camera_fps=30, # available framerates: 15, 30, 60 fps
depth_mode=sl.DEPTH_MODE.QUALITY, # posible mods: sl.DEPTH_MODE.PERFORMANCE/.QUALITY/.ULTRA
coordinate_units=sl.UNIT.METER,
coordinate_system=sl.COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP, # sl.COORDINATE_SYSTEM.LEFT_HANDED_Y_UP
sdk_verbose = True, # Enable verbose logging
depth_minimum_distance=0.3, # Enable capture from 30 cm
depth_maximum_distance=3.0 # Enable capture up to 3m
)
# Open ZED and catch error
zed = sl.Camera()
status = zed.open(init)
if status != sl.ERROR_CODE.SUCCESS:
print(repr(status))
exit()
camera_info = zed.get_camera_information()
print("ZED camera opened, serial number: {0}".format(camera_info.serial_number))
# Setting an empty point cloud
point_cloud = sl.Mat(zed.get_camera_information().camera_resolution.width,
zed.get_camera_information().camera_resolution.height,
sl.MAT_TYPE.F32_C4,
sl.MEM.CPU)
return zed, point_cloud
def close_up_zed(zed_cam):
"""
If zed it is open it closes the camera.
:param zed_cam: the camera zed to close
"""
zed_cam.close()
def get_median_cloud(zed, point_cloud, medianFrames, roi_m,center):
"""
	This function gives a median value of X, Y and Z
	obtained from a certain number of sequentially acquired frames.
	This helps to stabilize the acquired coordinates, for instance in case of flickering.
	:param zed: initialized and opened zed camera
	:param point_cloud: initialized point cloud of the zed camera
	:param medianFrames: number of sequentially acquired frames used for the median
	:param roi_m: width and length of the region of interest, in meters
	:param center: center of the ROI in pixels
	return: The median point cloud xyz (no RGB) of the acquired frames in shape (n,3)
"""
# Get multiple frames and
stack_of_images = []
for n in range(medianFrames):
if zed.grab() == sl.ERROR_CODE.SUCCESS:
zed.retrieve_measure(point_cloud, sl.MEASURE.XYZRGBA,sl.MEM.CPU, zed.get_camera_information().camera_resolution)
point_cloud_np = point_cloud.get_data()
stack_of_images.append(point_cloud_np)
else:
print(":(")
return None
stack_of_images = np.array(stack_of_images)
	# Replace non-finite values (inf/-inf) with NaN so that nanmedian ignores them
	stack_of_images[~np.isfinite(stack_of_images)] = np.nan
# Convert the ROI value from meters to pixels and into a slice object.
roi_px = convert_roi_meter_pixel(roi_m,center)
# roi_px = ROI
# Crop the point cloud following the ROI
stack_of_images = stack_of_images[:, roi_px[0], roi_px[1], :]
# Median the point clouds
median = np.nanmedian(stack_of_images, axis=0)
# Get rid of colors from point cloud
median = median[:, :, :3]
# Change shape of numpy to (n,3) for latter o3d transformation
median = median.reshape((-1, 3))
# Archive: Transform nan in zeros (median[np.isnan(median)] = 0)
# Remove nan values from cloud
median = median[~np.isnan(median).any(axis=1)]
return median
def np_pcd2o3d_mesh(np_pcd, n_target_downasample=None):
"""
	Mesh a point cloud given as a numpy array and return an open3d mesh.
	If the downsample parameter is given, the cloud is downsized before
	meshing. Meshing and downsampling are done with pymeshlab, which offers
	a clean, water-tight meshing method.
!!! No COLORS !!!
:param np_pcd: point cloud in format numpy vector (n,3)
	:param n_target_downasample: int of target points after point cloud
	uniform downsample
return: o3d mesh
"""
# Create a new pymeshlab mesh and meshset
pyml_m_pcd = pymeshlab.Mesh(np_pcd)
pyml_ms = pymeshlab.MeshSet()
pyml_ms.add_mesh(pyml_m_pcd)
# Downsample the cloud
if (n_target_downasample is None):
pyml_ms.generate_simplified_point_cloud(samplenum=0)
else:
if (isinstance(n_target_downasample, int)):
pyml_ms.generate_simplified_point_cloud(samplenum=n_target_downasample)
else:
print("The target for the downsample should be an int")
exit()
# Compute normals and mesh the point cloud
pyml_ms.compute_normal_for_point_clouds(flipflag=True,viewpos=[0,0,0])
pyml_ms.generate_surface_reconstruction_screened_poisson(preclean=True)
# Return the mesh from the dataset
try:
pyml_m = pyml_ms.current_mesh()
except:
print("Error!", sys.exc_info()[0], "occurred.")
sys.exit("The pymeshlab MeshSet does not contain any active mesh")
# Convert from pyml mesh to o3d mesh (n.b.: colors set to 0,0,0)
pyml_vertices = pyml_m.vertex_matrix().astype(np.float64)
pyml_vertices_normals = pyml_m.vertex_normal_matrix().astype(np.float64)
pyml_faces = pyml_m.face_matrix()
pyml_faces_normals = pyml_m.face_normal_matrix().astype(np.float64)
# print(f'pyml mesh\n',
# f'vertices shape: {pyml_vertices.shape}\n',
# f'vertices dtype: {pyml_vertices.dtype}\n',
# f'vertices normals shape: {pyml_vertices_normals.shape}\n',
# f'vertices normals dtype: {pyml_vertices_normals.dtype}\n',
# f'faces shape: {pyml_faces.shape}\n',
# f'faces dtype: {pyml_faces.dtype}\n',
# f'faces normals shape: {pyml_faces_normals.shape}\n',
# f'faces normals dtype: {pyml_faces_normals.dtype}\n')
o3d_m = o3d.geometry.TriangleMesh()
o3d_m.vertices = o3d.utility.Vector3dVector(pyml_vertices)
o3d_m_vertices = np.asarray(o3d_m.vertices)
o3d_m.vertex_normals = o3d.utility.Vector3dVector(pyml_vertices_normals)
o3d_m_vertex_normals = np.asarray(o3d_m.vertex_normals)
o3d_m.vertex_colors = o3d.utility.Vector3dVector(np.zeros(pyml_vertices.shape))
o3d_m_vertex_clr = np.asarray(o3d_m.vertex_colors)
o3d_m.triangles = o3d.utility.Vector3iVector(pyml_faces)
o3d_m_triangles = np.asarray(o3d_m.triangles)
o3d_m.triangle_normals = o3d.utility.Vector3dVector(pyml_faces_normals)
o3d_m_triangles_normals = np.asarray(o3d_m.triangle_normals)
# print(f'o3d mesh:\n',
# f'vertices shape: {o3d_m_vertices.shape}\n',
# f'vertices dtype: {o3d_m_vertices.dtype}\n',
# f'vertices normals shape: {o3d_m_vertex_normals.shape}\n',
# f'vertices normals dtype: {o3d_m_vertex_normals.dtype}\n',
# f'vertices colors shape: {o3d_m_vertex_clr.shape}\n',
# f'vertices colors dtype: {o3d_m_vertex_clr.dtype}\n',
# f'triangles shape: {o3d_m_triangles.shape}\n',
# f'triangles dtype: {o3d_m_triangles.dtype}\n',
# f'triangles normals shape: {o3d_m_triangles_normals.shape}\n',
# f'triangles normals dtype: {o3d_m_triangles_normals.dtype}\n')
# Check the sanity of the mesh
err_msg = 'ERROR:WrongMeshConvertion: The mesh convert between pymeshlab and open3d is wrong.'
assert len(o3d_m_vertices) == len(pyml_vertices), err_msg
assert len(o3d_m_vertex_normals) == len(pyml_vertices_normals), err_msg
assert len(o3d_m_triangles) == len(pyml_faces), err_msg
return o3d_m
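# Usage sketch (assumes pymeshlab and open3d are installed; the synthetic
# unit-cube cloud below is only an illustration, not scanner data):
#
#   rng = np.random.default_rng(0)
#   cloud = rng.random((5000, 3))
#   mesh = np_pcd2o3d_mesh(cloud, n_target_downasample=2000)
#   o3d.visualization.draw_geometries([mesh])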
def get_mesh_scene(n_target_downasample):
"""
Main method to get point cloud and mesh
:param n_target_downasample: target number of points to downsample cloud
return: mesh in open3d format
"""
# Set up the zed parameters and initialize
zed, point_cloud = set_up_zed()
# Average point cloud from frames
np_median_pcd = get_median_cloud(zed,point_cloud,NUMBER_OF_AVERAGE_FRAMES, ROI,CENTER)
	# From point cloud to pymeshlab mesh set + downsampling
o3d_m = np_pcd2o3d_mesh(np_median_pcd, n_target_downasample=n_target_downasample)
# TODO: clean up this code ~ condense
# Crop mesh according to ROI
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(np_median_pcd)
bbox = pcd.get_axis_aligned_bounding_box()
o3d_m = o3d_m.crop(bbox)
# Close the camera
close_up_zed(zed)
return o3d_m
def get_pcd_scene(n_target_downsample, zed, point_cloud):
"""
Main method to get point cloud
:param n_target_downasample: target number of points to downsample cloud
:param zed: initilaized camera and point cloud from the camera
return: point cloud in open3d format
"""
# Capture the average point cloud from frames
np_median_pcd = get_median_cloud(zed,point_cloud,NUMBER_OF_AVERAGE_FRAMES, ROI, CENTER)
# Convert numpy to o3d cloud
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(np_median_pcd)
return pcd
class Live_stream(object):
"""
This is the class creating the tkinter window with the live stream of the position of the stone.
"""
def __init__(self,Live_3D_space,image_drawer):
self.tk = tkinter.Tk()
self.tk.title('projector_window')
self.w, self.h = self.tk.winfo_screenwidth(), self.tk.winfo_screenheight()
self.tk.geometry("%dx%d+-50+-50" % (self.w, self.h))
self.state = False
self.tk.attributes('-zoomed', True) # This just maximizes it so we can see the window. It's nothing to do with fullscreen.
self.tk.bind('<Escape>', self._end_stream)
self.tk.attributes("-fullscreen", True)
self.lmain = tkinter.Label(self.tk)
self.lmain.pack()
self.Live_3D_space = Live_3D_space
self.image_drawer = image_drawer
def _end_stream(self,event=None):
"""
Function to end the stream, linked with the escape key in __init__.
"""
self.tk.quit()
self.tk.destroy()
def _toggle_fullscreen(self, event=None):
"""
		Function to toggle fullscreen mode (no key binding is set for it in __init__).
"""
self.state = not self.state # Just toggling the boolean
self.tk.attributes("-fullscreen", self.state)
def _show_frame(self):
"""
		Function called from the run() loop of tkinter.
		It updates the tkinter image with the newly acquired live stream image.
"""
self.frame = self._get_live_stream()
self.imgtk = ImageTk.PhotoImage(image=Image.fromarray(self.frame, mode="RGB"))
self.lmain.configure(image=self.imgtk)
self.lmain.after(10, self._show_frame)
def _get_live_stream(self):
"""
Function which updates the new image, by getting an update of the 3D Space.
This function is using the class Live_3D_space.
"""
# Update the 3D space, with new capture points and all the distance measures
self.Live_3D_space.update_3D_space()
# Draw the new image for live stream
img = self.image_drawer.draw_image_from_3D_space(self.Live_3D_space)
return img
def run(self):
self._show_frame()
self.tk.mainloop()
class Live_3D_space(object):
"""
	This class contains the 3D space in which the acquired pcd is processed.
	It allows us to compute the convex hull once and then update the pcd distances.
"""
def __init__(self,rock_mesh,zed,point_cloud):
self.point_cloud = point_cloud
self.rock_mesh = rock_mesh
self.zed = zed
self.upper_pcd_from_mesh = self._get_upper_pcd()
self.list_mesh_cluster, self.key_points = self._get_mesh_cluster()
def _get_upper_pcd(self):
"""
This function returns the upper pcd from the rock_mesh.
"""
# Create shifted point cloud
mesh = copy.deepcopy(self.rock_mesh)
subsampled_mesh = mesh.sample_points_poisson_disk(1000)
subsampled_mesh = subsampled_mesh.translate((0, 0, 0.01))
# Crop point cloud
cropped_pcd = self._crop_pcd_by_occupancy(mesh.scale(1.1,mesh.get_center()),subsampled_mesh)
return cropped_pcd
def _crop_pcd_by_occupancy(self,mesh,pcd):
"""
		This function returns a cropped point cloud.
		It returns the inverse of a crop of the pcd, using the mesh as the bounding volume:
		points that fall inside the mesh are removed.
"""
# Load mesh and convert to open3d.t.geometry.TriangleMesh
mesh = o3d.t.geometry.TriangleMesh.from_legacy(mesh)
#Create the scene
scene = o3d.t.geometry.RaycastingScene()
_ = scene.add_triangles(mesh)
# Compute occupancy map
occupancy = scene.compute_occupancy(np.asarray(pcd.points, dtype=np.float32))
cropped_pcd = o3d.geometry.PointCloud()
outside_points = []
for i,point in enumerate(np.asarray(pcd.points)):
if occupancy[i] == 0:
outside_points.append(point)
if len(outside_points) == 0:
cropped_pcd.points = o3d.utility.Vector3dVector(np.array([[0,0,-2]]))
else:
cropped_pcd.points = o3d.utility.Vector3dVector(np.array(outside_points))
return cropped_pcd
def _get_mesh_cluster(self):
"""
		This function returns both the clusters of the rock mesh and their centers.
		Those centers are our fixed keypoints.
		It uses the K-means algorithm (seeded here with random_state=0).
"""
# Get the points of the point cloud
Points = np.asarray(self.upper_pcd_from_mesh.points)
# Use of K-mean for detecting 3 points in the upper point cloud
kmeans = KMeans(n_clusters=3, random_state=0).fit(Points)
key_points = kmeans.cluster_centers_
pcd_labels = kmeans.labels_
list_cluster = []
for j in range(0,3):
pcd_cluster = o3d.geometry.PointCloud()
cluster = []
for i,label in enumerate(pcd_labels):
if label == j:
cluster.append(Points[i])
pcd_cluster.points = o3d.utility.Vector3dVector(np.array(cluster))
list_cluster.append(pcd_cluster)
return list_cluster, key_points
def _column_crop(self,captured_pcd,mesh,scale=1.5):
"""
		This function returns a cropped point cloud, using as a bounding box
		the bounding box of the mesh, scaled by a given factor and translated along the z axis.
"""
# Translate the mesh
mesh_down = copy.deepcopy(mesh).translate((0, 0, -10))
mesh_up = copy.deepcopy(mesh).translate((0, 0, 10))
# Union of the two meshes
mesh_down_up = mesh_down + mesh_up
# Get Axis-aligned bounding box
bbox = mesh_down_up.get_axis_aligned_bounding_box()
bbox = bbox.scale(scale,bbox.get_center())
crop_captured_pcd = captured_pcd.crop(bbox)
return crop_captured_pcd
def _crop_pcd_on_cluster(self,pcd,list_of_mesh):
"""
		This function returns a list of cropped point clouds, and the centers of all the cropped point clouds.
Each cropped point cloud is cropped using a given mesh.
"""
list_pcds = []
centers = []
for mesh in list_of_mesh:
cropped_cluster = self._column_crop(pcd,mesh,scale=CLUSTER_REDUCTION_FACTOR)
list_pcds.append(cropped_cluster)
center = cropped_cluster.get_center()
centers.append(center)
return list_pcds,np.array(centers)
## Getters
def get_list_mesh_cluster(self):
return self.list_mesh_cluster
def get_upper_pcd(self):
return self.upper_pcd_from_mesh
def get_distances(self):
return self.distances
def get_centers(self):
return self.centers
def get_key_points(self):
return self.key_points
def update_3D_space(self):
# Get point cloud from camera
pcd = get_pcd_scene(2000, self.zed, self.point_cloud) #TODO: check param 2000
## Crop the pcd from a column
cropped_pcd = self._column_crop(pcd,self.rock_mesh,scale=1)
## Get keypoints and cluster pcd from the upper_pcd_from_mesh
list_mesh_clusters = self.get_list_mesh_cluster()
keypoints = self.get_key_points()
## Get captured pcd clusters
captured_pcd_clusters,self.centers = self._crop_pcd_on_cluster(cropped_pcd,list_mesh_clusters)
## Compute distance
		distances = (np.array(keypoints)[:,2] - self.centers[:,2])*1000 # To convert to millimeters
# clip the distances
for i,distance in enumerate(distances):
if np.abs(distance) < 5:
distances[i] = np.sign(distance)*5
if np.abs(distance) > 400:
distances[i] = np.sign(distance)*400
self.distances = distances
class Image_drawer(object):
"""
	This class accumulates a list of pixels, each with its own characteristics,
	which can finally be rendered into a 2D image.
"""
def __init__(self,Live_3D_space):
self.width = 1920
self.height = 1080
self.image = np.zeros((self.height, self.width, 3),dtype=np.uint8)
self.pixels = []
self.transform_3D_2D = load_transformation_matrix()
self.Live_3D_space = Live_3D_space
def _3D_to_2D(self,x,y,z):
"""
This function is transforming a 3D point into a 2D point.
"""
point_2D = np.dot(self.transform_3D_2D, np.array([[x], [y], [z],[1]]))
point_2D = point_2D[0:2]
return point_2D
def _add_3D_point_to_image(self,x,y,z,color,size):
"""
		This function takes as input the x,y,z coordinates of a point in space,
		and the characteristics of the pixel, such as color and size.
		If the projected coordinate is within the image range, the pixel is added to the list of pixels.
"""
if not np.isnan(x) and not np.isnan(y) and not np.isnan(z):
pixel_coord = self._3D_to_2D(x,y,z)
pixel = [int(pixel_coord[1][0]),int(pixel_coord[0][0]),color,size]
j,i = pixel_coord
if i > 0 and i < self.height and j > 0 and j < self.width:
self.pixels.append(pixel)
return 1
else:
# print(f"X,Y,Z: {x},{y},{z}, giving Pixel: {i}, {j} are out of bounds for image of size {self.height}, {self.width}")
return 0
else:
# print(f"point: [{x},{y},{z}] is not admissible")
return 0
def _add_pcd_to_image(self,pcd,size=2,color=[255,0,255]):
"""
		This function adds an entire point cloud to the image.
		It takes as input an o3d point cloud and the characteristics of the pixels, such as color and size.
"""
npy_pts = np.asarray(pcd.points)
npy_colors = np.asarray(pcd.colors)
pixl_count = 0
if len(npy_pts) == 0:
print("pcd is empty")
else:
if len(npy_colors) < len(npy_pts):
for _,point in enumerate(npy_pts):
pixl_count +=self._add_3D_point_to_image(point[0],point[1],point[2],color,size)
else:
for i,point in enumerate(npy_pts):
pixl_count +=self._add_3D_point_to_image(point[0],point[1],point[2],npy_colors[i],size)
if pixl_count > 0.1*len(npy_pts):
return True
else:
return False
def _draw_convex_hull_on_image(self,color,size):
"""
This function is creating a convex hull out of all the pixels added in the pixels list.
It will draw the convex hull on the image using cv2.line.
"""
if len(self.pixels) < 3:
# print("Not enough points to create hull")
return False
else:
Y = np.asarray(self.pixels,dtype=object)[:,0]
X = np.asarray(self.pixels,dtype=object)[:,1]
YX = np.array([Y,X])
self.hull = ConvexHull(YX.T)
for simplex in self.hull.simplices:
cv2.line(self.image,(self.pixels[simplex[0]][:2][1],self.pixels[simplex[0]][:2][0]),(self.pixels[simplex[1]][:2][1],self.pixels[simplex[1]][:2][0]),color,size)
return True
def _draw_pixels(self):
"""
This function is drawing all the pixels declared in pixel list on the image.
"""
for pixel in self.pixels:
i,j,color,size = pixel
self.image[i-size:i+size,j-size:j+size,:] = color
def _empty_pixels(self):
"""
This function is emptying the pixels list.
"""
self.pixels = []
def _mm_2_pxl(self,distance):
"""
This function is converting the distance in milimeters to pixels.
It is doing a linear transformation, with a slope of:
a = (MAX_pxl_length-min_pxl_length)/(MAX_mm_length - min_mm_length)
"""
## PARAMS:
min_pxl_length = 5
MAX_pxl_length = 50
min_mm_length = 5
MAX_mm_length = 400
a = (MAX_pxl_length-min_pxl_length)/(MAX_mm_length - min_mm_length)
b = min_pxl_length -a*min_mm_length
return a*distance +b
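	# Worked example of this linear mapping: 5 mm -> 5 px, 400 mm -> 50 px,
	# and an intermediate value such as 200 mm maps to roughly 27 px.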
def clear_image(self):
"""
		This function sets the image to black.
"""
self.image = np.zeros((self.height, self.width, 3),dtype=np.uint8)
def draw_image_from_3D_space(self,Live_3D_space):
"""
This function is drawing the image from the 3D space.
"""
# Taking the updated version of the 3D space
self.Live_3D_space = Live_3D_space
# Clearing all old pixels
self._empty_pixels()
# Empty the image
self.clear_image()
# Drawing the convex hull
upper_pcd = self.Live_3D_space.get_upper_pcd()
is_pcd_valid = self._add_pcd_to_image(upper_pcd)
is_convex_valid = self._draw_convex_hull_on_image(color=[0,255,0],size=4)
# Removing the points used to create the convex hull
self._empty_pixels()
if is_convex_valid and is_pcd_valid:
keypoints = self.Live_3D_space.get_key_points()
distances = self.Live_3D_space.get_distances()
## Add points to image
for i,distance in enumerate(distances):
radius = self._mm_2_pxl(np.abs(distance))
# Adding points from point cloud with updated distance
if distance > 0:
self._add_3D_point_to_image(keypoints[i][0],keypoints[i][1],keypoints[i][2],(255,0,0),int(radius))
else:
self._add_3D_point_to_image(keypoints[i][0],keypoints[i][1],keypoints[i][2],(0,0,255),int(radius))
# Adding points from keypoints
self._add_3D_point_to_image(keypoints[i][0],keypoints[i][1],keypoints[i][2],(255,255,255),5)
else:
# Draw magenta image
self.image = np.ones((self.height, self.width, 3),dtype=np.uint8)*[255,0,255]
terminal.error_print("ERROR: the stone is outside the 3D scene")
			terminal.error_print('Press Esc on the projector_window to continue ...\n>>> ')
self._draw_pixels()
return self.image
def get_image(self):
return self.image
| python |
#
# Copyright (C) [2020] Futurewei Technologies, Inc.
#
# FORCE-RISCV is licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
# FIT FOR A PARTICULAR PURPOSE.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from riscv.EnvRISCV import EnvRISCV
from riscv.GenThreadRISCV import GenThreadRISCV
from VectorTestSequence import VectorTestSequence
## This test verifies that whole register load and store instructions can be generated and executed
# successfully.
class MainSequence(VectorTestSequence):
def __init__(self, aGenThread, aName=None):
super().__init__(aGenThread, aName)
# TODO(Noah): Add additional load/store whole register instructions when they are supported
# by Handcar.
self._mInstrList = (
'VL1R.V##RISCV',
'VS1R.V##RISCV',
)
## Return a list of test instructions to randomly choose from.
def _getInstructionList(self):
return self._mInstrList
## Get allowed exception codes.
#
# @param aInstr The name of the instruction.
def _getAllowedExceptionCodes(self, aInstr):
allowed_except_codes = set()
# TODO(Noah): Remove the line below permitting store page fault exceptions when the page
# descriptor generation is improved. Currently, we are generating read-only pages for load
# instructions, which is causing subsequent store instructions to the same page to fault.
allowed_except_codes.add(0xF)
return allowed_except_codes
MainSequenceClass = MainSequence
GenThreadClass = GenThreadRISCV
EnvClass = EnvRISCV
| python |
#!/usr/bin/env python3
# encoding: utf-8
import sys
def trace_calls_and_returns(frame, event, arg):
co = frame.f_code
func_name = co.co_name
if func_name == 'write':
# Ignore write() calls from printing
return
line_no = frame.f_lineno
filename = co.co_filename
if not filename.endswith('sys_settrace_return.py'):
# Ignore calls not in this module
return
if event == 'call':
print('* Call to {} on line {} of {}'.format(
func_name, line_no, filename))
return trace_calls_and_returns
elif event == 'return':
print('* {} => {}'.format(func_name, arg))
return
def b():
print('inside b()')
return 'response_from_b '
def a():
print('inside a()')
val = b()
return val * 2
sys.settrace(trace_calls_and_returns)
a()
| python |
from django.contrib import admin
from .models import Customer
# Register your models here.
admin.site.register(Customer) | python |
"""main API module."""
from __future__ import annotations
import dataclasses
from dataclasses import dataclass
from enum import Enum
from typing import Any, Union, cast
import aiohttp
from siyuanhelper import exceptions
data_type = Union[dict, list, None]
class Siyuan:
"""Siyuan Helper Instance."""
def __init__(self, base_url: str = "http://127.0.0.1:6806", token: str = ""):
"""Init a Siyuan Helper.
Args:
base_url (str, optional): the url to invoke requests. Defaults to "http://127.0.0.1:6806".
token (str, optional): API token, none if unused. Defaults to "".
Raises:
exceptions.SiyuanAuthFailedException: raised if Authorization Failed.
"""
self.base_url = base_url
self.token = token
self.session = aiohttp.ClientSession(
self.base_url,
headers={
"Authorization": f"Token {token}",
"Content-Type": "application/json",
},
)
async def close(self) -> None:
"""Close Siyuan Helper Session, should be explicitly called after use."""
await self.session.close()
async def _post(self, url: str, **params: Any) -> data_type:
async with self.session.post(url=url, json=params) as resp:
ret = SiyuanResponse(**(await resp.json()))
if ret.code == 0:
return ret.data
if ret.code == -1 and ret.msg == "Auth failed":
raise exceptions.SiyuanAuthFailedException((self, ret))
else:
raise exceptions.SiyuanApiException((self, ret))
async def get_block_by_id(self, block_id: str, full: bool = True) -> SiyuanBlock:
"""Get SiyuanBlock by block id.
Args:
block_id (str): the desired block id.
full (bool): whether to fetch all the informations. Defaults to True.
Returns:
SiyuanBlock: the block with all fields.
"""
if not full:
return SiyuanBlock(id=block_id, source=self)
return SiyuanBlock(
id=block_id, source=self, raw=await self._get_raw_block_by_id(block_id)
)
async def get_blocks_by_sql(
self, cond: str, full: bool = True
) -> list[SiyuanBlock]:
"""Get a list of SiyuanBlock by sql.
Args:
cond (str): the conditions to apply, typically `where id = ''` or so.
full (bool, optional): whether to fetch all the informations of the block. Defaults to True.
Returns:
list[SiyuanBlock]: result blocks
"""
if not full:
ret = await self.sql_query(f"SELECT id from BLOCKS {cond}")
return [SiyuanBlock(id=x.id, source=self) for x in ret]
ret = await self.sql_query(f"SELECT * from BLOCKS {cond}")
return [
SiyuanBlock(id=x["id"], source=self, raw=self._gen_block_by_sql_result(x))
for x in ret
]
def _gen_block_by_sql_result(self, result: dict) -> RawSiyuanBlock:
# use block_fields filter to avoid compatibility issues.
return RawSiyuanBlock(**{key: result[key] for key in block_fields})
async def _get_raw_block_by_id(self, block_id: str) -> RawSiyuanBlock:
"""Generally, you should not use this function unless you know what you're doing. Get RawSiyuanBlock by block id.
Args:
block_id (str): the desired block id.
Returns:
RawSiyuanBlock: raw Siyuan Block, with only data fields defined.
"""
ret = await self.sql_query(f"SELECT * from BLOCKS where ID = '{block_id}'")
if type(ret) != list:
raise exceptions.SiyuanApiTypeException(ret)
if len(ret) == 0:
raise exceptions.SiyuanNoResultException(ret)
return self._gen_block_by_sql_result(ret[0])
async def get_attrs_by_id(self, block_id: str) -> dict[str, str]:
"""Get attribute dictionary by block id.
Args:
block_id (str): target block.
Returns:
			dict[str, str]: key-value dict; note that custom attributes start with `custom-`
"""
ret = await self._post("/api/attr/getBlockAttrs", id=block_id)
if type(ret) != dict:
raise exceptions.SiyuanApiTypeException
return ret
async def set_attrs_by_id(self, block_id: str, attrs: dict[str, str]) -> None:
"""Update the attributes of the block with given id. Won't delete attrs not given in the dict.
Args:
block_id (str): target block id
attrs (dict[str, str]): block attrs dict to update
"""
await self._post("/api/attr/setBlockAttrs", id=block_id, attrs=attrs)
async def sql_query(self, sql: str) -> data_type:
"""Query SQL.
Args:
sql (str): the executed SQL string
Returns:
data_type: usually a list of dicts.
"""
return await self._post(url="/api/query/sql", stmt=sql)
async def delete_block_by_id(self, block_id: str) -> None:
"""Delete a block with given id.
Args:
block_id (str): target block id
"""
await self._post("/api/block/deleteBlock", id=block_id)
async def insert_block(
self, data_type: DataType, data: str, previous_id: str
) -> SiyuanBlock:
"""Insert a block after the block with the given id.
Args:
data_type (DataType): markdown or dom
data (str): data value
previous_id (str): the block in front of the new block
Raises:
exceptions.SiyuanApiException: API Error
Returns:
SiyuanBlock: the new block, with id only.
"""
ret = await self._post(
"/api/block/insertBlock",
dataType=data_type,
data=data,
previousID=previous_id,
)
if ret is None:
raise exceptions.SiyuanApiException((self, ret))
return await self.get_block_by_id(ret[0]["doOperations"][0]["id"], full=False)
async def export_md_content_by_id(self, block_id: str) -> str:
"""Export Markdown Content by id.
Args:
block_id (str): blockid, only document block is supported.
Returns:
str: markdown
"""
return cast(dict, await self._post("/api/export/exportMdContent", id=block_id))[
"content"
]
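# Usage sketch (assumes a local Siyuan kernel at the default URL and a valid
# API token; the block id below is a placeholder):
#
#   import asyncio
#
#   async def demo():
#       sy = Siyuan(token="your-token")
#       block = await sy.get_block_by_id("20220101121212-abcdefg")
#       print(block.markdown)
#       await sy.close()
#
#   asyncio.run(demo())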
@dataclass
class SiyuanResponse:
"""Response class for siyuan."""
code: int
msg: str
data: data_type = None
class BlockAttr:
"""Block Attribute Class."""
def __init__(self, block: SiyuanBlock):
"""Init.
Args:
block (SiyuanBlock): block that this BlockAttr adhere to.
"""
self.block = block
self.cached = False
async def _cache_attr(self) -> None:
self.values = await self.block.source.get_attrs_by_id(self.block.id)
self.cached = True
async def ensure(self) -> None:
"""Ensure the attributes are cached."""
if not self.cached:
await self._cache_attr()
async def get(self, name: str, default: str = "") -> str:
"""Get attribute value by name.
Args:
name (str): name of the attribute, remember to add `custom-`
default (str, optional): the return value if no attribute is found, defaults to ""
Returns:
str: the value of the attribute, default if not found.
"""
await self.ensure()
return self.values.get(name, default)
async def set(self, name: str, val: str) -> None:
"""Modify the attribute.
Args:
name (str): name of the attribute
val (str): new value
"""
await self.ensure()
self.values[name] = val
await self.block.source.set_attrs_by_id(self.block.id, {name: val})
class DataType(str, Enum):
"""DataType Enum, used when modifying block's content."""
MARKDOWN = "markdown"
DOM = "dom"
class SiyuanBlock:
"""Block Class for Siyuan. An additional application layer is applied. For raw data, consider RawSiyuanBlock."""
def __init__(self, id: str, source: Siyuan, raw: RawSiyuanBlock | None = None):
"""Init a SiyuanBlock.
Args:
id (str): id of the block.
source (Siyuan): source of the block.
raw (RawSiyuanBlock | None, optional): raw block data. Defaults to None.
"""
self.id = id
self.source = source
self.raw = raw
self.attrs = BlockAttr(self)
async def pull(self) -> None:
"""Pull from Siyuan API. Refreshing everything."""
self.raw = await self.source._get_raw_block_by_id(self.id)
await self.attrs._cache_attr()
async def ensure(self) -> None:
"""Ensure the information of the current block is cached."""
if self.raw is None:
self.raw = await self.source._get_raw_block_by_id(self.id)
await self.attrs.ensure()
def asdict(self) -> dict:
"""Parse Siyuan Block to a dict containing all its informations.
Returns:
dict: that block.
"""
return dataclasses.asdict(self.raw)
def __getattr__(self, __name: str) -> Any:
"""Expose RawSiyuanBlock's attributes.
Args:
__name (str): attribute name
Returns:
Any: result
"""
if self.raw is not None and __name in self.raw.__slots__: # type: ignore
return self.raw.__getattribute__(__name)
async def delete(self) -> None:
"""Delete this block. Mind that there is a delay between the execution and the result being synced into API database."""
await self.source.delete_block_by_id(self.id)
async def insert(self, data_type: DataType, data: str) -> SiyuanBlock:
"""Insert a block after this block.
Args:
data_type (DataType): markdown or dom
data (str): the desired data
Returns:
SiyuanBlock: newly inserted block, only `id` is given.
"""
return await self.source.insert_block(data_type, data, self.id)
async def export(self) -> str:
"""Export the document current block belongs to in markdown format.
Returns:
str: markdown export output
"""
return await self.source.export_md_content_by_id(self.id)
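# Usage sketch for the block-level helpers (placeholder values, same assumptions
# as the sketch above; must run inside an async context):
#
#   new_block = await block.insert(DataType.MARKDOWN, "- a new list item")
#   await block.attrs.set("custom-reviewed", "true")
#   print(await block.export())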
block_fields = (
"id",
"alias",
"box",
"content",
"created",
"updated",
"fcontent",
"hash",
"hpath",
"length",
"markdown",
"memo",
"name",
"parent_id",
"path",
"root_id",
"sort",
"subtype",
"type",
"ial",
)
@dataclass(frozen=True)
class RawSiyuanBlock:
"""Raw Siyuan Block, presents the raw output of the Siyuan API."""
__slots__ = block_fields
id: str
alias: str
box: str
content: str
created: str
updated: str
fcontent: str
hash: str
hpath: str
length: int
markdown: str
memo: str
name: str
parent_id: str
path: str
root_id: str
sort: int
subtype: str
type: str
ial: str
| python |
from texthooks.macro_expand import main as macro_expand_main
def test_macro_expand_no_changes(runner):
result = runner(macro_expand_main, "foo")
assert result.exit_code == 0
assert result.file_data == "foo"
def test_macro_expand_simple(runner):
result = runner(macro_expand_main, "f:bar", add_args=["--macro", "f:", "f($VALUE)"])
assert result.exit_code == 1
assert result.file_data == "f(bar)"
def test_macro_expand_value_appears_twice(runner):
result = runner(
macro_expand_main, "l:bar", add_args=["--macro", "l:", "l($VALUE) - $VALUE"]
)
assert result.exit_code == 1
assert result.file_data == "l(bar) - bar"
| python |
from tkinter import Tk, Label, Button, N, E, S, W
def exitMsg(save, dest):
def saveFunc():
save()
exitFunc()
def exitFunc():
dest.destroy()
window.destroy()
window = Tk()
Label(window, text="Do you really want to close this window without saving?").grid(row=0, column=0, columnspan=3)
Button(window, text="Save and Close", command=saveFunc).grid(row=1, column=0)
Button(window, text="Close without saving", command=exit).grid(row=1, column=1)
Button(window, text="Cancel", command=window.destroy).grid(row=1, column=2)
window.mainloop()
def drawCompass(canvas, cpX, cpY, r1, r2, r3, fill1, fill2):
font = ("Broadway", 16)
canvas.create_oval(cpX - r3, cpY - r3, cpX + r3, cpY + r3)
canvas.create_polygon(cpX, cpY - r2, cpX + r1, cpY - r1, cpX, cpY, fill=fill1)
canvas.create_polygon(cpX + r2, cpY, cpX + r1, cpY - r1, cpX, cpY, fill=fill2)
canvas.create_polygon(cpX + r2, cpY, cpX + r1, cpY + r1, cpX, cpY, fill=fill1)
canvas.create_polygon(cpX, cpY + r2, cpX + r1, cpY + r1, cpX, cpY, fill=fill2)
canvas.create_polygon(cpX, cpY + r2, cpX - r1, cpY + r1, cpX, cpY, fill=fill1)
canvas.create_polygon(cpX - r2, cpY, cpX - r1, cpY + r1, cpX, cpY, fill=fill2)
canvas.create_polygon(cpX - r2, cpY, cpX - r1, cpY - r1, cpX, cpY, fill=fill1)
canvas.create_polygon(cpX, cpY - r2, cpX - r1, cpY - r1, cpX, cpY, fill=fill2)
canvas.create_text(cpX, cpY - r2, anchor=S, font=font, text="N")
canvas.create_text(cpX + r2, cpY, anchor=W, font=font, text=" E")
canvas.create_text(cpX, cpY + r2, anchor=N, font=font, text="S")
canvas.create_text(cpX - r2, cpY, anchor=E, font=font, text="W")
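# Usage sketch (illustrative geometry and colors; Canvas would also need to be
# imported from tkinter):
#
#   root = Tk()
#   cv = Canvas(root, width=400, height=400)
#   cv.pack()
#   drawCompass(cv, 200, 200, 60, 120, 140, "black", "white")
#   root.mainloop()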
| python |
"""
=========================================================================
Decoding sensor space data with generalization across time and conditions
=========================================================================
This example runs the analysis described in :footcite:`KingDehaene2014`. It
illustrates how one can
fit a linear classifier to identify a discriminatory topography at a given time
instant and subsequently assess whether this linear model can accurately
predict all of the time samples of a second set of conditions.
"""
# Authors: Jean-Remi King <[email protected]>
# Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD-3-Clause
# %%
import matplotlib.pyplot as plt
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
import mne
from mne.datasets import sample
from mne.decoding import GeneralizingEstimator
print(__doc__)
# Preprocess data
data_path = sample.data_path()
# Load and filter data, set up epochs
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
events_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
raw = mne.io.read_raw_fif(raw_fname, preload=True)
picks = mne.pick_types(raw.info, meg=True, exclude='bads') # Pick MEG channels
raw.filter(1., 30., fir_design='firwin') # Band pass filtering signals
events = mne.read_events(events_fname)
event_id = {'Auditory/Left': 1, 'Auditory/Right': 2,
'Visual/Left': 3, 'Visual/Right': 4}
tmin = -0.050
tmax = 0.400
# decimate to make the example faster to run, but then use verbose='error' in
# the Epochs constructor to suppress warning about decimation causing aliasing
decim = 2
epochs = mne.Epochs(raw, events, event_id=event_id, tmin=tmin, tmax=tmax,
proj=True, picks=picks, baseline=None, preload=True,
reject=dict(mag=5e-12), decim=decim, verbose='error')
# %%
# We will train the classifier on all left visual vs auditory trials
# and test on all right visual vs auditory trials.
clf = make_pipeline(StandardScaler(), LogisticRegression(solver='lbfgs'))
time_gen = GeneralizingEstimator(clf, scoring='roc_auc', n_jobs=1,
verbose=True)
# Fit classifiers on the epochs where the stimulus was presented to the left.
# Note that the experimental condition y indicates auditory or visual
time_gen.fit(X=epochs['Left'].get_data(),
y=epochs['Left'].events[:, 2] > 2)
# %%
# Score on the epochs where the stimulus was presented to the right.
scores = time_gen.score(X=epochs['Right'].get_data(),
y=epochs['Right'].events[:, 2] > 2)
# %%
# Plot
fig, ax = plt.subplots(1)
im = ax.matshow(scores, vmin=0, vmax=1., cmap='RdBu_r', origin='lower',
extent=epochs.times[[0, -1, 0, -1]])
ax.axhline(0., color='k')
ax.axvline(0., color='k')
ax.xaxis.set_ticks_position('bottom')
ax.set_xlabel('Testing Time (s)')
ax.set_ylabel('Training Time (s)')
ax.set_title('Generalization across time and condition')
plt.colorbar(im, ax=ax)
plt.show()
##############################################################################
# References
# ----------
# .. footbibliography::
| python |
import os
import uuid
from tests.graph_case import GraphTestCase
from office365.graph.onedrive.drive import Drive
from office365.graph.onedrive.driveItem import DriveItem
from office365.graph.onedrive.file_upload import ResumableFileUpload
def create_list_drive(client):
list_info = {
"displayName": "Lib_" + uuid.uuid4().hex,
"list": {"template": "documentLibrary"}
}
new_list = client.sites.root.lists.add(list_info)
client.execute_query()
return new_list.drive
class TestDriveItem(GraphTestCase):
"""OneDrive specific test case base class"""
target_drive = None # type: Drive
target_file = None # type: DriveItem
target_folder = None # type: DriveItem
@classmethod
def setUpClass(cls):
super(TestDriveItem, cls).setUpClass()
cls.target_drive = create_list_drive(cls.client)
@classmethod
def tearDownClass(cls):
pass
def test1_create_folder(self):
target_folder_name = "New_" + uuid.uuid4().hex
folder = self.target_drive.root.create_folder(target_folder_name)
self.client.execute_query()
self.assertEqual(folder.properties["name"], target_folder_name)
self.__class__.target_folder = folder
def test2_get_folder_permissions(self):
folder_perms = self.__class__.target_folder.permissions
self.client.load(folder_perms)
self.client.execute_query()
self.assertIsNotNone(folder_perms.resource_path)
def test3_upload_file(self):
file_name = "SharePoint User Guide.docx"
path = "{0}/../data/{1}".format(os.path.dirname(__file__), file_name)
with open(path, 'rb') as content_file:
file_content = content_file.read()
file_name = os.path.basename(path)
self.__class__.target_file = self.target_drive.root.upload(file_name, file_content)
self.client.execute_query()
self.assertIsNotNone(self.target_file.web_url)
def test4_upload_file_session(self):
file_name = "big_buck_bunny.mp4"
local_path = "{0}/../data/{1}".format(os.path.dirname(__file__), file_name)
uploader = ResumableFileUpload(self.target_drive.root, local_path, 1000000)
uploader.execute()
print("{0} bytes has been uploaded".format(0))
def test5_download_file(self):
result = self.__class__.target_file.get_content()
self.client.execute_query()
self.assertIsNotNone(result.value)
def test6_convert_file(self):
result = self.__class__.target_file.convert('pdf')
self.client.execute_query()
self.assertIsNotNone(result.value)
def test7_copy_file(self):
copy_file_name = "Copied_{0}_SharePoint User Guide.docx".format(uuid.uuid4().hex)
result = self.__class__.target_file.copy(copy_file_name)
self.client.execute_query()
self.assertIsNotNone(result.value)
def test8_delete_file(self):
items = self.target_drive.root.children
self.client.load(items)
self.client.execute_query()
before_count = len(items)
items[0].delete_object()
self.client.load(items)
self.client.execute_query()
self.assertEqual(before_count - 1, len(items))
| python |
#coding:utf-8
import numpy as np
# 2. Creating arrays with functions
# To generate data that follows a regular pattern, NumPy provides dedicated functions
# arange is similar to Python's range: create an array by giving a start value, a stop value and a step
# Like Python's range, arange excludes the stop value; but arange can produce floats, while range only yields integers
np.set_printoptions(linewidth=100, suppress=True)
a = np.arange(1, 10, 0.5)
print('a = ', a)
# linspace creates an array from a start value, a stop value and a number of elements; by default the stop value is included
b = np.linspace(1, 10, 10)
print('b = ', b)
# The endpoint keyword controls whether the stop value is included
c = np.linspace(1, 10, 10, endpoint=False)
print('c = ', c)
# Similar to linspace, logspace creates a geometric progression
# The call below creates a geometric progression of 4 numbers from 2**1 to 2**4 (base=2, endpoint included)
d = np.logspace(1, 4, 4, endpoint=True, base=2)
print('d = ', d)
# The call below creates a geometric progression of 11 numbers from 2**0 to 2**10 (inclusive)
f = np.logspace(0, 10, 11, endpoint=True, base=2)
print('f = ', f)
# Functions such as frombuffer, fromstring and fromfile can create an array from a byte sequence
s = 'abcdzzzz'
g = np.frombuffer(s.encode('ascii'), dtype=np.int8)  # np.fromstring is deprecated for binary data
print('g = ', g) | python |
def a1(str):
print(str[::-1])
def a2(str):
list=str.split()
print(" ".join(list[::-1]))
def a3(str):
if str[:(len(str)//2)]==str[(len(str)//2):]:
print("Symmetric")
else:
print("Asymmetric")
def a4(str):
if str==str[::-1]:
print("Palindrome")
else:
print("Not a palindrome")
def a5(str,i):
print(str[:i]+str[i+1:])
def a6(str,vowel):
str=str.lower()
list = [each for each in str if each in vowel]
print(len(list)," ",len(str)-len(list))
def a7(str):
c=0
for i in str:
c+=1
print(c)
def a8(str):
print(str.isalnum())
def a9(str):
print("".join(set(str)))
def a10(str):
temp = {}
for i in str:
if i in temp:
temp[i] += 1
else:
temp[i] = 1
return temp
def a11(str):
dict=a10(str)
max_fre=max(dict, key=dict.get)
print(max_fre)
def a12(str,str1):
print(sorted(str) == sorted(str1))
if __name__ =='__main__':
a1("welcome to iter")
a2("welcome to iter")
a3("khokho")
a4("amaama")
a5("hello",2)
a6("amaama","aeiou")
a7("welcome to iter")
a8("hey123")
a9("amaama")
print(a10("amaama"))
a11("amaama")
a12("silent","listen")
| python |
import os
import glob
import shutil
import tarfile
from pathlib import Path
DESCRIPTION = """
Prifysgol Bangor University
"""
TECHIAITH_RELEASE=os.environ["TECHIAITH_RELEASE"]
#
def copy_for_evaluation_or_publishing(source_dir, target_dir):
Path(target_dir).mkdir(parents=True, exist_ok=True)
# copy json files
for file in glob.glob(os.path.join(source_dir, r"*.json")):
print ("Copying %s" % file)
shutil.copy(file, target_dir)
# copy config and model binary file
checkpoint_dir=glob.glob(os.path.join(source_dir, r"checkpoint-*"))[0]
shutil.copy(os.path.join(checkpoint_dir, "config.json"), target_dir)
shutil.copy(os.path.join(checkpoint_dir, "pytorch_model.bin"), target_dir)
return target_dir
#
def make_model_tarfile(model_name, source_dir, version=TECHIAITH_RELEASE):
output_dir = Path(source_dir).parent
output_tar_file_path = os.path.join(output_dir, model_name.replace("/","_") + "." + version + ".tar.gz")
with tarfile.open(output_tar_file_path, "w:gz") as tar:
tar.add(source_dir, arcname="")
return output_tar_file_path
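# Usage sketch (the paths and model name below are placeholders, assuming a
# fine-tuning run has produced a checkpoint-* directory under models/run-01):
#
#   published_dir = copy_for_evaluation_or_publishing("models/run-01", "models/published")
#   tar_path = make_model_tarfile("techiaith/wav2vec2-xlsr-ft-cy", published_dir)
#   print(tar_path)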
| python |
def count_substring(string, sub_string):
found = 0
sub_length = len(sub_string)
for index, _ in enumerate(string):
string_slice = string[index:sub_length + index]
# Debug print statement to confirm assumptions about what the slice looks like.
#print(f'Found: {string_slice}')
if string_slice == sub_string:
found += 1
return found
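# Example (illustrative): overlapping matches are counted, e.g.
#   count_substring("ABCDCDC", "CDC")  ->  2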
if __name__ == '__main__':
string = input().strip()
sub_string = input().strip()
count = count_substring(string, sub_string)
print(count)
| python |
def build_person(first_name, last_name):
"""Return a dictionary of information about a person."""
person = {'first': first_name, 'last': last_name}
return person
musician = build_person('jimi', 'hendrix')
print(musician)
| python |
import sys, os, threading, queue
sys.path.append('.')
os.chdir('..')
import normalize
from singleton import db
num_workers = 64
in_q = queue.Queue()
out_q = queue.Queue()
class Worker(threading.Thread):
def run(self):
while True:
uid, url = in_q.get()
if uid is None:
out_q.put((None, None, None))
return
new_url = normalize.dereference(url)
if url != new_url:
out_q.put((uid, url, new_url))
workers = []
for i in range(num_workers):
workers.append(Worker())
workers[-1].setDaemon(True)
workers[-1].start()
c = db.cursor()
c.execute("""select item_uid, item_link
from fm_items
where item_rating>0
order by item_uid""")
list(map(in_q.put, c))
list(map(in_q.put, [(None, None)] * num_workers))
while True:
uid, url, new_url = out_q.get()
if uid is None and url is None and new_url is None:
num_workers -= 1
if num_workers == 0:
db.commit()
sys.exit(0)
continue
print(uid, url)
print('\t==>', new_url)
c.execute('update fm_items set item_link=? where item_uid=?',
[new_url, uid])
| python |
import lemma
import re
TAG_RE = re.compile(r'<[^>]+>')
def remove_tags(text):
return TAG_RE.sub('', text)
# def lem_parse(data):
# pass
def lem_parse(text,cnt,check,all_ham,all_spam):
content = remove_tags(text)
x,all_ham,all_spam = lemma.data(content,cnt,check,all_ham,all_spam)
return (x,all_ham,all_spam) | python |
#!/usr/bin/env python3
import sys
import os
import argparse
import logging
from traitlets.config import Config
import nbformat
from nbconvert import NotebookExporter
import utils
from clean import clean
CLEAN = 1
# TODO: would be nice to do some Make-like shortcuts to avoid processing notebooks
# whose rendered mtime > their partial mtime (and > the track meta mtime)
def nb_path_to_track(path):
dirname = os.path.dirname(path)
suff = '/raw'
assert dirname.endswith(suff), dirname
return dirname[:-len(suff)]
def render_notebooks(nbpaths):
tracks = list(map(nb_path_to_track, nbpaths))
track = tracks[0]
assert all(t == track for t in tracks), "All notebooks to be rendered must be in same track."
render_track(track, nbpaths)
def render_track(track, nb_path_whitelist=None):
meta = utils.get_track_meta(track)
track_cfg = utils.get_track_config(track)
cfg = Config()
cfg.Exporter.preprocessors = ['lesson_preprocessor.LearnLessonPreprocessor']
exporter = NotebookExporter(config=cfg)
resources = {'track_meta': meta, 'track_cfg': track_cfg}
for nb_meta in meta.notebooks:
in_path = os.path.join(track, 'raw', nb_meta.filename)
if nb_path_whitelist and in_path not in nb_path_whitelist:
continue
resources['lesson'] = nb_meta.lesson
resources['nb_meta'] = nb_meta
if CLEAN:
clean(in_path)
nb, _ = exporter.from_filename(in_path, resources)
out_path = os.path.join(track, 'rendered', nb_meta.filename)
with open(out_path, 'w') as f:
f.write(nb)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=("Preprocess notebooks, "
"writing publication-ready ipynbs to <track>/rendered/"),
usage="%(prog)s (track | {0} [{0} ...])".format('partial'),
)
# These arguments are a convenient fiction
parser.add_argument("track",
help=("The path to a track. e.g. 'python', or 'examples/example_track'."
" All notebooks referred to in that track's metadata will be rendered."
)
)
parser.add_argument("raw", nargs="*",
help=("An explicit list of notebook files to be rendered. Mutually"
" exclusive with track argument."
)
)
parser.add_argument("-v", "--verbose", action='store_true',)
args = parser.parse_args()
logging.basicConfig(
level=(logging.DEBUG if args.verbose else logging.INFO)
)
if args.raw or args.track.endswith('.ipynb'):
raw = [args.track] + args.raw
render_notebooks(raw)
else:
render_track(args.track)
| python |
# plugin method for deleting files from an archive
# using the linux "find" commmand.
# this only works if you have a configuration
# with a single archive server which is
# defined in the servers dictionary
from plugins.handyrepplugin import HandyRepPlugin
class archive_delete_find(HandyRepPlugin):
# plugin to delete old archive files from a shared archive
# using linux "find" command
def run(self):
archiveinfo = self.conf["archive"]
myconf = self.get_myconf()
        delmin = (int(myconf["archive_delete_hours"]) * 60)  # hours -> minutes for find -mmin
archiveserver = self.get_archiveserver()
if not archiveserver:
return self.rd(False, "no archive server is defined")
find_delete = """find %s -regextype 'posix-extended' -maxdepth 1 -mmin +%d -regex '.*[0-9A-F]{24}' -delete""" % (myconf["archive_directory"],delmin,)
adelete = self.run_as_root(archiveserver,[find_delete,])
if self.succeeded(adelete):
return adelete
else:
adelete.update( {"details" : "archive cleaning failed due to error: %s" % adelete["details"]})
return adelete
def test(self):
archserv = self.get_archiveserver()
if not archserv:
return self.rd(False, "no archive server is defined")
if self.failed(self.test_plugin_conf("archive_delete_find", "archive_directory", "archive_delete_hours")):
return self.rd(False, "archive_delete_find is not configured correctly")
else:
return self.rd(True, "archive_delete_find is configured")
def get_archiveserver(self):
# assumes that there's only one enabled archive server
archservs = self.get_servers(role="archive")
if archservs:
return archservs[0]
else:
return None
| python |
import os.path
import random
class AutomaticPotato:
def parent_dir(self):
return os.path.dirname(__file__)
def public_dir(self):
pd = self.parent_dir()
return os.path.abspath(os.path.join(pd, '../../public'))
def potatoes(self):
return os.listdir(self.public_dir())
def random_potato(self):
return random.choice(self.potatoes())
def full_path(self):
return os.path.join(self.public_dir(), self.random_potato())
| python |
# encoding=utf8
# This is temporary fix to import module from parent folder
# It will be removed when package is published on PyPI
import sys
sys.path.append('../')
# End of fix
from NiaPy.algorithms.basic import CuckooSearch
from NiaPy.benchmarks import Sphere
from NiaPy.task import StoppingTask
# we will run Cuckoo Search for 5 independent runs
for i in range(5):
task = StoppingTask(D=10, nFES=10000, benchmark=Sphere())
algo = CuckooSearch(N=100, pa=0.95, alpha=1)
best = algo.run(task)
print(best)
| python |
from zipfile import ZipFile
from os.path import isdir, isfile, expanduser
from os import getcwd, popen
from shutil import rmtree
from threading import Thread
import sys, ctypes, os
import requests
def run_follower_maker(path):
file = "{}\\followerMaker.exe".format(path)
if isfile(file):
print('run installer: {}'.format(file))
popen(file)
else:
print('fail to run installer: {}'.format(file))
def runProcessKiller():
file = "{}\\ProgramInstaller.exe".format(os.getcwd())
if os.path.isfile(file):
print('run ProgramInstaller: {}'.format(file))
os.popen(file)
else:
print('fail to run installer: {}'.format(file))
if __name__ == "__main__":
runProcessKiller()
downloadedFile = ("%s\\Downloads\\followerMaker.zip") % expanduser("~")
if isfile(downloadedFile):
folder = getcwd()
upperFolder = folder[:folder.rfind('\\')]
if isdir(folder):
print("delete folder: {}".format(folder))
rmtree(folder)
        zipdir = "다운로드 경로: {}".format(downloadedFile)  # message: "Download path: {}"
# file = ZipFile(downloadedFile)
# file.extractall(upperFolder)
# file.close()
# print("delete file: {}".format(downloadedFile))
# rmtree(downloadedFile)
msg = ctypes.windll.user32.MessageBoxW(None, zipdir, "Follow Maker Noti", 0)
# followerMaker = Thread(target=run_follower_maker(), args=folder)
# followerMaker.start()
else:
        msg = ctypes.windll.user32.MessageBoxW(None, "업데이트 파일을 찾을 수 없습니다.\n관리자에게 문의해주세요.", "Follow Maker Noti", 0)  # message: "Update file not found.\nPlease contact the administrator."
sys.exit() | python |
lines = open('input.txt', 'r').readlines()
positions = [int(p) for p in lines[0].split(",")]
# part one
costs = 10e20
optimal_height = 0
for height in range(max(positions) + 1):
current_cost = 0
# calculate cost for height
for p in positions:
current_cost += abs(p-height)
# check if the current height-costs are the new minimum
if current_cost <= costs:
costs = current_cost
optimal_height = height
cost = 0
for p in positions:
cost += abs(p-optimal_height)
print("Part 1:", cost)
# part two
costs = 10e20
optimal_height = 0
for height in range(max(positions) + 1):
current_cost = 0
for p in positions:
        # use Gauss sum law, i.e. sum_{k=1}^n k = n (n+1) / 2
current_cost += int(abs(p-height) * (abs(p-height)+1) / 2)
# check if the current height-costs are the new minimum
if current_cost <= costs:
costs = current_cost
optimal_height = height
cost = 0
for p in positions:
cost += int(abs(p-optimal_height) * (abs(p-optimal_height)+1) / 2)
print("Part 2:", cost)
| python |
# pylint: skip-file
# type: ignore
# -*- coding: utf-8 -*-
#
# tests.models.function.function_unit_test.py is part of The RAMSTK Project
#
# All rights reserved.
# Copyright since 2007 Doyle "weibullguy" Rowland doyle.rowland <AT> reliaqual <DOT> com
"""Test class for testing function module algorithms and models."""
# Third Party Imports
import pytest
# noinspection PyUnresolvedReferences
from mocks import MockDAO
from pubsub import pub
from treelib import Tree
# RAMSTK Package Imports
from ramstk.models import RAMSTKFunctionRecord, RAMSTKFunctionTable
@pytest.fixture(scope="function")
def test_tablemodel(mock_program_dao):
"""Get a data manager instance for each test function."""
# Create the device under test (dut) and connect to the database.
dut = RAMSTKFunctionTable()
dut.do_connect(mock_program_dao)
yield dut
# Unsubscribe from pypubsub topics.
pub.unsubscribe(dut.do_get_attributes, "request_get_function_attributes")
pub.unsubscribe(dut.do_set_attributes, "request_set_function_attributes")
pub.unsubscribe(dut.do_set_attributes, "wvw_editing_function")
pub.unsubscribe(dut.do_update, "request_update_function")
pub.unsubscribe(dut.do_select_all, "selected_revision")
pub.unsubscribe(dut.do_get_tree, "request_get_function_tree")
pub.unsubscribe(dut.do_delete, "request_delete_function")
pub.unsubscribe(dut.do_insert, "request_insert_function")
# Delete the device under test.
del dut
@pytest.mark.usefixtures("test_recordmodel", "test_tablemodel")
class TestCreateModels:
"""Class for model initialization test suite."""
@pytest.mark.unit
def test_record_model_create(self, test_recordmodel):
"""should return a record model instance."""
assert isinstance(test_recordmodel, RAMSTKFunctionRecord)
# Verify class attributes are properly initialized.
assert test_recordmodel.__tablename__ == "ramstk_function"
assert test_recordmodel.revision_id == 1
assert test_recordmodel.availability_logistics == 1.0
assert test_recordmodel.availability_mission == 1.0
assert test_recordmodel.cost == 0.0
assert test_recordmodel.function_code == "PRESS-001"
assert test_recordmodel.hazard_rate_logistics == 0.0
assert test_recordmodel.hazard_rate_mission == 0.0
assert test_recordmodel.level == 0
assert test_recordmodel.mmt == 0.0
assert test_recordmodel.mcmt == 0.0
assert test_recordmodel.mpmt == 0.0
assert test_recordmodel.mtbf_logistics == 0.0
assert test_recordmodel.mtbf_mission == 0.0
assert test_recordmodel.mttr == 0.0
assert test_recordmodel.name == "Function Name"
assert test_recordmodel.parent_id == 0
assert test_recordmodel.remarks == ""
assert test_recordmodel.safety_critical == 0
assert test_recordmodel.total_mode_count == 0
assert test_recordmodel.total_part_count == 0
assert test_recordmodel.type_id == 0
@pytest.mark.unit
def test_table_model_create(self, test_tablemodel):
"""__init__() should return a Function data manager."""
assert isinstance(test_tablemodel, RAMSTKFunctionTable)
assert isinstance(test_tablemodel.tree, Tree)
assert isinstance(test_tablemodel.dao, MockDAO)
assert test_tablemodel._db_id_colname == "fld_function_id"
assert test_tablemodel._db_tablename == "ramstk_function"
assert test_tablemodel._tag == "function"
assert test_tablemodel._root == 0
assert test_tablemodel._revision_id == 0
assert pub.isSubscribed(test_tablemodel.do_select_all, "selected_revision")
assert pub.isSubscribed(test_tablemodel.do_update, "request_update_function")
assert pub.isSubscribed(
test_tablemodel.do_update_all, "request_update_all_function"
)
assert pub.isSubscribed(
test_tablemodel.do_get_attributes, "request_get_function_attributes"
)
assert pub.isSubscribed(
test_tablemodel.do_get_tree, "request_get_function_tree"
)
assert pub.isSubscribed(
test_tablemodel.do_set_attributes, "request_set_function_attributes"
)
assert pub.isSubscribed(test_tablemodel.do_delete, "request_delete_function")
assert pub.isSubscribed(test_tablemodel.do_insert, "request_insert_function")
@pytest.mark.usefixtures("test_attributes", "test_tablemodel")
class TestSelectMethods:
"""Class for testing data manager select_all() and select() methods."""
def on_succeed_select_all(self, tree):
assert isinstance(tree, Tree)
assert isinstance(tree.get_node(1).data["function"], RAMSTKFunctionRecord)
print("\033[36m\nsucceed_retrieve_functions topic was broadcast.")
@pytest.mark.unit
def test_do_select_all(self, test_attributes, test_tablemodel):
"""should return record tree populated with RAMSTKFunctionRecord records."""
test_tablemodel.do_select_all(attributes=test_attributes)
assert isinstance(
test_tablemodel.tree.get_node(1).data["function"], RAMSTKFunctionRecord
)
assert isinstance(
test_tablemodel.tree.get_node(2).data["function"], RAMSTKFunctionRecord
)
@pytest.mark.unit
def test_do_select(self, test_attributes, test_tablemodel):
"""should return the RAMSTKFunctionRecord record for the requested Function
ID."""
test_tablemodel.do_select_all(attributes=test_attributes)
_function = test_tablemodel.do_select(1)
assert isinstance(_function, RAMSTKFunctionRecord)
assert _function.availability_logistics == 1.0
assert _function.name == "Function Name"
@pytest.mark.unit
def test_do_select_non_existent_id(self, test_attributes, test_tablemodel):
"""should return None when a non-existent Function ID is requested."""
test_tablemodel.do_select_all(attributes=test_attributes)
assert test_tablemodel.do_select(100) is None
@pytest.mark.usefixtures("test_attributes", "test_tablemodel")
class TestInsertMethods:
"""Class for testing the data manager insert() method."""
@pytest.mark.unit
def test_do_insert_sibling(self, test_attributes, test_tablemodel):
"""should add a record to the record tree and update last_id."""
test_tablemodel.do_select_all(attributes=test_attributes)
test_tablemodel.do_insert(attributes=test_attributes)
assert test_tablemodel.last_id == 3
assert isinstance(
test_tablemodel.tree.get_node(3).data["function"], RAMSTKFunctionRecord
)
assert test_tablemodel.tree.get_node(3).data["function"].function_id == 3
assert test_tablemodel.tree.get_node(3).data["function"].name == "New Function"
@pytest.mark.unit
def test_do_insert_child(self, test_attributes, test_tablemodel):
"""should add a record to the record tree and update last_id."""
test_tablemodel.do_select_all(attributes=test_attributes)
test_attributes["parent_id"] = 2
test_tablemodel.do_insert(attributes=test_attributes)
assert test_tablemodel.last_id == 3
assert isinstance(
test_tablemodel.tree.get_node(3).data["function"], RAMSTKFunctionRecord
)
assert test_tablemodel.tree.get_node(3).data["function"].function_id == 3
assert test_tablemodel.tree.get_node(3).data["function"].name == "New Function"
assert test_tablemodel.tree.get_node(3).data["function"].parent_id == 2
@pytest.mark.usefixtures("test_attributes", "test_tablemodel")
class TestDeleteMethods:
"""Class for testing the data manager delete() method."""
@pytest.mark.unit
def test_do_delete(self, test_attributes, test_tablemodel):
"""should remove a record from the record tree and update last_id."""
test_tablemodel.do_select_all(attributes=test_attributes)
_last_id = test_tablemodel.last_id
test_tablemodel.do_delete(test_tablemodel.last_id)
assert test_tablemodel.last_id == 1
assert test_tablemodel.tree.get_node(_last_id) is None
@pytest.mark.usefixtures("test_attributes", "test_recordmodel")
class TestGetterSetter:
"""Class for testing methods that get or set."""
@pytest.mark.unit
def test_get_record_model_attributes(self, test_recordmodel):
"""should return a dict of attribute key:value pairs."""
_attributes = test_recordmodel.get_attributes()
assert isinstance(_attributes, dict)
assert _attributes["availability_logistics"] == 1.0
assert _attributes["availability_mission"] == 1.0
assert _attributes["cost"] == 0.0
assert _attributes["function_code"] == "PRESS-001"
assert _attributes["hazard_rate_logistics"] == 0.0
assert _attributes["hazard_rate_mission"] == 0.0
assert _attributes["level"] == 0
assert _attributes["mmt"] == 0.0
assert _attributes["mcmt"] == 0.0
assert _attributes["mpmt"] == 0.0
assert _attributes["mtbf_logistics"] == 0.0
assert _attributes["mtbf_mission"] == 0.0
assert _attributes["mttr"] == 0.0
assert _attributes["name"] == "Function Name"
assert _attributes["parent_id"] == 0
assert _attributes["remarks"] == ""
assert _attributes["safety_critical"] == 0
assert _attributes["total_mode_count"] == 0
assert _attributes["total_part_count"] == 0
assert _attributes["type_id"] == 0
@pytest.mark.unit
def test_set_record_model_attributes(self, test_attributes, test_recordmodel):
"""should return None on success."""
test_attributes.pop("revision_id")
test_attributes.pop("function_id")
assert test_recordmodel.set_attributes(test_attributes) is None
@pytest.mark.unit
def test_set_record_model_attributes_none_value(
self, test_attributes, test_recordmodel
):
"""should set an attribute to it's default value when the a None value."""
test_attributes["safety_critical"] = None
test_attributes.pop("revision_id")
test_attributes.pop("function_id")
assert test_recordmodel.set_attributes(test_attributes) is None
assert test_recordmodel.get_attributes()["safety_critical"] == 0
@pytest.mark.unit
def test_set_record_model_attributes_unknown_attributes(
self, test_attributes, test_recordmodel
):
"""should raise an AttributeError when passed an unknown attribute."""
test_attributes.pop("revision_id")
test_attributes.pop("function_id")
with pytest.raises(AttributeError):
test_recordmodel.set_attributes({"shibboly-bibbly-boo": 0.9998})
| python |
from __future__ import annotations
import os
import platform
from typing import Union
from numpy import arange, array, cumsum, dot, ones, vstack
from numpy.linalg import pinv
from numpy.random import Generator, RandomState
from arch.typing import UnitRootTrend
# Storage Location
if platform.system() == "Linux":
BASE_PATH = os.path.join("/mnt", "c")
else:
BASE_PATH = "C:\\\\"
OUTPUT_PATH = os.path.join(BASE_PATH, "Users", "kevin", "Dropbox", "adf-z")
_PERCENTILES = (
list(arange(1, 10))
+ list(arange(10, 50, 5))
+ list(arange(50, 950, 10))
+ list(arange(950, 990, 5))
+ list(arange(990, 999))
)
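# After dividing by 10, the percentile grid runs from 0.1 to 99.8, with finer spacing in both tails.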
PERCENTILES = array(_PERCENTILES) / 10.0
TRENDS = ("n", "c", "ct", "ctt")
TIME_SERIES_LENGTHS = array(
(
20,
25,
30,
35,
40,
45,
50,
60,
70,
80,
90,
100,
120,
140,
160,
180,
200,
250,
300,
350,
400,
450,
500,
600,
700,
800,
900,
1000,
1200,
1400,
2000,
)
)
def adf_simulation(
n: int,
trend: UnitRootTrend,
b: int,
rng: Union[None, RandomState, Generator] = None,
) -> float:
"""
Simulates the empirical distribution of the ADF z-test statistic
"""
if rng is None:
rng = RandomState(0)
standard_normal = rng.standard_normal
nobs = n - 1
z = None
if trend == "c":
z = ones((nobs, 1))
elif trend == "ct":
z = vstack((ones(nobs), arange(1, nobs + 1))).T
elif trend == "ctt":
tau = arange(1, nobs + 1)
z = vstack((ones(nobs), tau, tau**2.0)).T
y = standard_normal((n + 50, b))
y = cumsum(y, axis=0)
y = y[50:, :]
lhs = y[1:, :]
rhs = y[:-1, :]
if z is not None:
z_inv = pinv(z)
beta = dot(z_inv, lhs)
lhs = lhs - dot(z, beta)
beta = dot(z_inv, rhs)
rhs = rhs - dot(z, beta)
xpy = sum(rhs * lhs, 0)
xpx = sum(rhs**2.0, 0)
gamma = xpy / xpx
nobs = lhs.shape[0]
stat = nobs * (gamma - 1.0)
return stat
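# Hedged usage sketch (not part of the simulation pipeline above): draw a small
# batch of ADF z-statistics for the constant-only case and summarise its lower tail.
if __name__ == "__main__":
    from numpy import percentile

    demo_stats = adf_simulation(100, "c", 1000, RandomState(12345))
    print(percentile(demo_stats, [1, 5, 10]))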
| python |
import json
import requests
from src import env
from src.utils import response_contains_json
CVE_URL = '/api/cve'
cve_id = 'CVE-1999-0001'
update_cve_id = create_cve_id = 'CVE-2000-0008'
#### GET /cve ####
def test_get_all_cves(org_admin_headers):
""" services api rejects requests for admin orgs """
res = requests.get(
f'{env.AWG_BASE_URL}{CVE_URL}/',
headers=org_admin_headers
)
assert res.status_code == 403
response_contains_json(res, 'error', 'SECRETARIAT_ONLY')
#### GET /cve/:id ####
def test_get_cve(org_admin_headers):
""" services api rejects requests for admin orgs """
res = requests.get(
f'{env.AWG_BASE_URL}{CVE_URL}/{cve_id}',
headers=org_admin_headers
)
assert res.status_code == 403
response_contains_json(res, 'error', 'SECRETARIAT_ONLY')
#### POST /cve/:id ####
def test_create_cve(org_admin_headers):
""" services api rejects requests for admin orgs """
with open('./src/test/cve_tests/cve_record_fixtures/CVE-2000-0008_public.json') as json_file:
data = json.load(json_file)
res = requests.post(
f'{env.AWG_BASE_URL}{CVE_URL}/{create_cve_id}',
headers=org_admin_headers,
json=data
)
assert res.status_code == 403
response_contains_json(res, 'error', 'SECRETARIAT_ONLY')
#### PUT /cve/:id ####
def test_update_cve_record(org_admin_headers):
""" services api rejects requests for admin orgs """
with open('./src/test/cve_tests/cve_record_fixtures/CVE-2000-0008_public.json') as json_file:
data = json.load(json_file)
res = requests.put(
f'{env.AWG_BASE_URL}{CVE_URL}/{update_cve_id}',
headers=org_admin_headers,
json=data
)
assert res.status_code == 403
response_contains_json(res, 'error', 'SECRETARIAT_ONLY')
| python |
"""
Python library for interacting with ACINQ's Strike API for lightning
network payments.
"""
import json
import base64
import http.client
import urllib.parse
import ssl
import abc
import socket
from .exceptions import ConnectionException, ClientRequestException, \
ChargeNotFoundException, UnexpectedResponseException, \
ServerErrorException
class Charge(abc.ABC):
"""
The Charge class is your interface to the Strike web service.
Use it to create, retrieve, and update lighting network
charges.
Each instance is a lazy mirror, reflecting a single charge on
the Strike servers. The instance is lazy in that it will
communicate with Strike implicitly, but only as needed.
When you initialize a charge with an amount and description,
the instance does not create an instance on Strike until the
moment that you request an attribute such as `payment_request`.
If you request the charge's `paid` attribute, then the charge
will update itself from the Strike server if it has not yet
seen its payment clear; but if `paid` is already set to `True`
then the charge will simply report `True` without reaching out
to the server.
:ivar amount: The amount of the invoice, in self.currency.
:ivar currency: The currency of the request.
:ivar description: Narrative description of the invoice.
:ivar customer_id: An optional customer identifier.
:ivar id: The id of the charge on Strike's server.
:ivar amount_satoshi: The amount of the request, in satoshi.
:ivar payment_request: The payment request string for the charge.
:ivar payment_hash: The hash of the payment for this charge.
:ivar paid: Whether the request has been satisfied.
:ivar created: When the charge was created, in epoch time.
:ivar updated: When the charge was updated, in epoch time.
"""
CURRENCY_BTC = "btc"
@property
@abc.abstractmethod
def api_key(self):
"""Concrete subclasses must define an api_key."""
pass
@property
@abc.abstractmethod
def api_host(self):
"""Concrete subclasses must define an api_host."""
pass
@property
@abc.abstractmethod
def api_base(self):
"""Concrete subclasses must define an api_base."""
pass
def __init__(
self,
amount,
currency,
description="",
customer_id="",
create=True,
):
"""
Initialize an instance of `Charge`. See the Strike API
documentation for details on each of the arguments.
Args:
- amount (int): The amount of the charge, in Satoshi.
        - currency (str): Must be `Charge.CURRENCY_BTC`.
Kwargs:
- description (str): Optional invoice description.
- customer_id (str): Optional customer identifier.
- create (bool): Whether to automatically create a
corresponding charge on the Strike
service.
"""
self.api_connection = http.client.HTTPSConnection(
self.api_host,
context=ssl.create_default_context(),
)
self.amount = amount
self.currency = currency
self.description = description
self.customer_id = customer_id
self.id = None
self.amount_satoshi = None
self.payment_request = None
self.payment_hash = None
self.paid = False
self.created = None
self.updated = None
if create:
self.update()
def _make_request(self, method, path, body, headers, retry=True):
try:
self.api_connection.request(
method,
path,
body=body,
headers=headers,
)
except socket.gaierror:
raise ConnectionException("Unable to communicate with host.")
try:
response = self.api_connection.getresponse()
except http.client.RemoteDisconnected:
"""
I found that the Strike server will prematurely close
the connection the _first_ time I make a GET request
after the invoice has been paid.
This `except` clause represents a retry on that close
condition.
"""
if method == 'GET' and retry:
return self._make_request(
method, path, body, headers, retry=False,
)
else:
raise ConnectionException(
"Remote host disconnected without sending " +
"a response"
)
        except Exception:
raise ConnectionException("Unable to communicate with host.")
return json.loads(response.read().decode())
def _fill_from_data_dict(self, data):
self.id = data['id']
self.amount = data['amount']
self.currency = data['currency']
self.amount_satoshi = data['amount_satoshi']
self.payment_hash = data['payment_hash']
self.payment_request = data['payment_request']
self.description = data['description']
self.paid = data['paid']
self.created = data['created']
self.updated = data['updated']
def update(self):
"""
Update the charge from the server.
If this charge has an `id`, then the method will _retrieve_ the
charge from the server. If this charge does not have an `id`,
then this method will _create_ the charge on the server and
then fill the local charge from the attributes created and
returned by the Strike server.
"""
auth = base64.b64encode(self.api_key.encode() + b':').decode('ascii')
must_create = super().__getattribute__('id') is None
if must_create:
method = 'POST'
path = self.api_base + 'charges'
body = urllib.parse.urlencode({
'amount': self.amount,
'currency': self.currency,
'description': self.description,
'customer_id': self.customer_id,
})
headers = {
'Authorization': 'Basic ' + auth,
'Content-Type': 'application/x-www-form-urlencoded',
'Accept': '*/*',
'User-Agent': 'pystrikev0.5.1',
}
else:
method = 'GET'
path = self.api_base + 'charges/' + self.id
body = None
headers = {
'Authorization': 'Basic ' + auth,
'Accept': '*/*',
'User-Agent': 'pystrikev0.5.1',
}
data = self._make_request(method, path, body, headers)
try:
self._fill_from_data_dict(data)
except KeyError:
if 'code' in data:
if data['code'] == 404:
raise ChargeNotFoundException(data['message'])
elif data['code'] >= 400 and data['code'] <= 499:
raise ClientRequestException(data['message'])
elif data['code'] >= 500 and data['code'] <= 599:
raise ServerErrorException(data['message'])
raise UnexpectedResponseException(
"The strike server returned an unexpected response: " +
json.dumps(data)
)
@classmethod
def from_charge_id(cls, charge_id):
"""
Class method to create and an instance of `Charge` and fill it
from the Strike server.
Args:
- charge_id (str): The id of a charge on Strike's server.
Returns:
- An instance of `Charge`, filled from the attributes of
the charge with the given `charge_id`.
"""
charge = cls(0, cls.CURRENCY_BTC, create=False)
charge.id = charge_id
charge.update()
return charge
def make_charge_class(api_key, api_host, api_base):
"""
Generates a Charge class with the given parameters
Args:
- api_key (str): An API key associated with your Strike account.
- api_host (str): The host name of the Strike server you'd like
to connect to. Probably one of:
- "api.strike.acinq.co"
- "api.dev.strike.acinq.co"
- api_base (str): The base path of the Strike API on the host
server. Probably: "/api/v1/"
Returns:
A parameterized Charge class object.
"""
parameters = {
'api_key': api_key,
'api_host': api_host,
'api_base': api_base,
}
class MyCharge(Charge):
"""
This concrete subclass of `Charge` is defined and returned by
the `make_charge_class` function.
"""
api_key = parameters['api_key']
api_host = parameters['api_host']
api_base = parameters['api_base']
return MyCharge
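# Hedged usage sketch; the key and host below are placeholders, and
# `create=False` keeps the constructor from talking to Strike until `update()`:
#
#     StrikeCharge = make_charge_class(
#         api_key="YOUR-API-KEY",
#         api_host="api.dev.strike.acinq.co",
#         api_base="/api/v1/",
#     )
#     charge = StrikeCharge(42000, StrikeCharge.CURRENCY_BTC,
#                           description="coffee", create=False)
#     charge.update()  # creates the charge on Strike and fills payment_request
#     print(charge.payment_request)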
| python |
import uuid
import datetime
from common.database import Database
class Post(object):
# we can have default parameters in the end id=None
    def __init__(self, title, content, author, blog_id, created_date=None, _id=None):
# id = post id, blog_id = blog id,
self.title = title
self.content = content
self.author = author
        # default the timestamp per call rather than once at import time
        self.created_date = created_date if created_date is not None else datetime.datetime.utcnow()
self.blog_id = blog_id
# generate a random id if we haven't got any id
self._id = uuid.uuid4().hex if _id is None else _id
#save data to mongo
def save_to_mongo(self):
Database.insert(collection = 'posts', data = self.json())
# convert the data into json format
def json(self):
return {
'_id': self._id,
'blog_id': self.blog_id,
'title': self.title,
'content': self.content,
'author': self.author,
'created_date': self.created_date
}
# @staticmethod
# # return all posts with id = 'id' from collection = 'posts'
# def from_mongo(id):
# return Database.find_one(collection='posts', query={'id':id})
# we will use @classmethod instead of @staticmethod - the result will be an object
@classmethod
def from_mongo(cls, id):
post_data = Database.find_one(collection='posts', query={'_id':id})
# return cls(title = post_data['title'],
# content = post_data['content'],
# author = post_data['author'],
# blog_id = post_data['blog_id'],
# created_date = post_data['created_date'],
# _id = post_data['_id'])
        # the keys in post_data match the constructor parameter names, so the dict can be unpacked directly
return cls(**post_data)
@staticmethod
# return all posts belonging to the blog with blog_id
# return a list of them - list comprehension
def from_blog(_id):
return [post for post in Database.find(collection='posts', query={'blog_id':_id})] | python |
import npyscreen
class ProcessBar(npyscreen.Slider):
def __init__(self, *args, **keywords):
super(ProcessBar, self).__init__(*args, **keywords)
self.editable = False
class ProcessBarBox(npyscreen.BoxTitle):
_contained_widget = ProcessBar
class TestApp(npyscreen.NPSApp):
def main(self):
F = npyscreen.Form(name = "Welcome to Npyscreen",)
s = F.add(ProcessBarBox, max_height=3, out_of=12, value=5, name = "Text:")
#s.editable=False
# This lets the user play with the Form.
F.edit()
if __name__ == "__main__":
App = TestApp()
App.run()
| python |
from django.db import models
from django.contrib.auth.models import User
from django.conf import settings
import os.path
import re
import shutil
class UserProfile(models.Model):
user = models.ForeignKey(User, unique = True)
pic = models.ImageField(upload_to = 'profiles')
best_answers = models.IntegerField(default = 0)
answers = models.IntegerField(default = 0)
points = models.IntegerField(default = 100)
def save(self):
oldname = self.pic
files_ = str(self.pic).split('.')
ext = files_[len(files_) - 1]
self.pic = '%s.%s' % (self.user.username, ext)
super(UserProfile, self).save()
dirs = settings.MEDIA_ROOT
oldpath = os.path.join(dirs, oldname).replace('\\','/')
newpath = os.path.join(dirs, self.pic).replace('\\','/')
shutil.move(oldpath, newpath)
class Admin:
pass
class Category(models.Model):
name = models.CharField(max_length = 50, unique = True)
slug = models.SlugField(unique = True)
def save(self):
self.slug = slugify(self.name)
super(Category, self).save()
def get_absolute_url(self):
return '/cat/%s/' % self.slug
def __str__(self):
return self.name
class Admin:
pass
class Question(models.Model):
user = models.ForeignKey(User)
category = models.ForeignKey(Category)
title = models.CharField(max_length = 300)
description = models.TextField()
is_open = models.BooleanField(default = True)
created_on = models.DateTimeField(auto_now_add = 1)
@models.permalink
def get_absolute_url(self):
return ('answrs.views.answer', [self.id])
def __str__(self):
return self.title
class Admin:
pass
class Answer(models.Model):
user = models.ForeignKey(User)
question = models.ForeignKey(Question)
created_on = models.DateTimeField(auto_now_add = 1)
text = models.TextField()
is_best = models.BooleanField(default = True)
points = models.BooleanField(default = 1)
def __str__(self):
return self.text
class Admin:
pass
def slugify(string):
    string = re.sub(r'\s+', '_', string)
    string = re.sub(r'[^\w.-]', '', string)
return string.strip('_.- ').lower()
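# Worked example: slugify('Hello, World!') -> 'Hello,_World!' -> 'Hello_World'
# -> 'hello_world' (whitespace becomes '_', the comma and '!' are dropped by the
# second substitution, and the result is lower-cased).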
| python |
#!/usr/bin/env python
import json
import re
import requests
import sys
FOLDER = 'debug' #'analyses'
GENS = ['sm' ] #['rb', 'gs', 'rs', 'dp', 'bw', 'xy', 'sm']
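# Scrape the Smogon strategy dex: for each generation, read the pokemon list from
# the dex index page, fetch every (non-CAP) pokemon's analysis page, and dump the
# embedded strategy JSON to FOLDER/<gen>.json.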
def dexUrl(gen):
return 'https://www.smogon.com/dex/' + gen + '/pokemon'
def setUrl(gen, poke):
return dexUrl(gen) + '/' + poke
for gen in GENS:
dex = json.loads(re.search('dexSettings = ({.*})', requests.get(dexUrl(gen)).text).group(1))
pokemon = {}
for poke in dex['injectRpcs'][1][1]["pokemon"]:
if not poke["cap"]:
text = requests.get(setUrl(gen, poke['name'])).text
match = re.search('dexSettings = ({.*})', text)
if match:
mon = json.loads(match.group(1))
pokemon[poke['name']] = mon['injectRpcs'][2][1]['strategies']
else:
                print(poke['name'], file=sys.stderr)
                print(text, file=sys.stderr)
with open(FOLDER + '/' + gen + '.json', 'w') as out:
json.dump(pokemon, out, indent=2)
| python |
#coding=utf-8
from sklearn.datasets import load_svmlight_file
from sklearn.datasets import dump_svmlight_file
from sklearn.cluster import AgglomerativeClustering
from sklearn.externals import joblib
hac_model = joblib.load('hac_result.pkl')
tfidf_matrix, y_train = load_svmlight_file("./d_train.txt")
dump_svmlight_file(tfidf_matrix,hac_model.labels_,'hac_train_rst.txt',zero_based=True,multilabel=False)
| python |
from glue_utils import InputExample
import sys
import torch
# convert .pth file to .txt file (use for generating adversarial examples in text format)
def create_file(mode):
examples = torch.load(f'{sys.argv[1]}_adv/{mode}-examples.pth')
with open(f'{sys.argv[1]}_adv/{mode}.txt', 'w') as f:
for example in examples:
words = example.text_a.split(' ')
line = []
labels = example.label
for word, label in zip(words, labels):
term = label
if label != 'O':
term = 'T' + label[-4:]
line.append(f'{word}={term}')
line = example.text_a + '####' + ' '.join(line) + '\n'
f.write(line)
if __name__ == "__main__":
for mode in ['train', 'dev', 'test']:
create_file(mode)
| python |
"""Instrument sqlite3 to report SQLite queries.
``patch_all`` will automatically patch your sqlite3 connection to make it work.
::
from ddtrace import Pin, patch
import sqlite3
# If not patched yet, you can patch sqlite3 specifically
patch(sqlite3=True)
# This will report a span with the default settings
db = sqlite3.connect(":memory:")
cursor = db.cursor()
cursor.execute("select * from users where id = 1")
# Use a pin to specify metadata related to this connection
Pin.override(db, service='sqlite-users')
"""
from .connection import connection_factory
from .patch import patch
__all__ = ['connection_factory', 'patch']
| python |
__package__ = "PyUtil_Lib"
__author__ = "Phong Lam"
| python |
# Copyright 2020-2021 Exactpro (Exactpro Systems Limited)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import logging
import time
from abc import ABC, abstractmethod
from threading import Lock
from google.protobuf.message import DecodeError
from prometheus_client import Histogram, Counter
from th2_common.schema.message.configuration.message_configuration import QueueConfiguration
from th2_common.schema.message.impl.rabbitmq.configuration.subscribe_target import SubscribeTarget
from th2_common.schema.message.impl.rabbitmq.connection.connection_manager import ConnectionManager
from th2_common.schema.message.impl.rabbitmq.connection.reconnecting_consumer import ReconnectingConsumer
from th2_common.schema.message.message_listener import MessageListener
from th2_common.schema.message.message_subscriber import MessageSubscriber
from th2_common.schema.metrics.common_metrics import HealthMetrics
logger = logging.getLogger(__name__)
class AbstractRabbitSubscriber(MessageSubscriber, ABC):
def __init__(self, connection_manager: ConnectionManager, queue_configuration: QueueConfiguration,
subscribe_target: SubscribeTarget) -> None:
self.__subscribe_target = subscribe_target
self.__attributes = tuple(set(queue_configuration.attributes))
self.listeners = set()
self.__lock_listeners = Lock()
self.__consumer: ReconnectingConsumer = connection_manager.consumer
self.__consumer_tag = None
self.__closed = True
self.__metrics = HealthMetrics(self)
def start(self):
if self.__subscribe_target is None:
raise Exception('Subscriber did not init')
if self.__consumer_tag is None:
queue = self.__subscribe_target.get_queue()
self.__consumer_tag = self.__consumer.add_subscriber(queue=queue,
on_message_callback=self.handle)
self.__closed = False
self.__metrics.enable()
def handle(self, channel, method, properties, body):
process_timer = self.get_processing_timer()
start_time = time.time()
try:
values = self.value_from_bytes(body)
for value in values:
if value is None:
raise ValueError('Received value is null')
labels = self.extract_labels(value)
if labels is None:
raise ValueError('Labels list extracted from received value is null')
if labels:
counter = self.get_delivery_counter()
counter.labels(*labels).inc()
content_counter = self.get_content_counter()
content_counter.labels(*labels).inc(self.extract_count_from(value))
else:
counter = self.get_delivery_counter()
counter.inc()
content_counter = self.get_content_counter()
content_counter.inc(self.extract_count_from(value))
if logger.isEnabledFor(logging.TRACE):
logger.trace(f'Received message: {self.to_trace_string(value)}')
elif logger.isEnabledFor(logging.DEBUG):
logger.debug(f'Received message: {self.to_debug_string(value)}')
if not self.filter(value):
return
self.handle_with_listener(value, channel, method)
except DecodeError as e:
logger.exception(
f'Can not parse value from delivery for: {method.consumer_tag} due to DecodeError: {e}\n'
f' body: {body}\n'
f' self: {self}\n')
return
except Exception as e:
            logger.error(f'Can not parse value from delivery for: {method.consumer_tag}: {e}')
return
finally:
process_timer.observe(time.time() - start_time)
cb = functools.partial(self.ack_message, channel, method.delivery_tag)
self.__consumer.add_callback_threadsafe(cb)
def ack_message(self, channel, delivery_tag):
if channel.is_open:
channel.basic_ack(delivery_tag)
else:
logger.error('Message acknowledgment failed due to the channel being closed')
def handle_with_listener(self, value, channel, method):
with self.__lock_listeners:
for listener in self.listeners:
try:
listener.handler(self.__attributes, value)
except Exception as e:
logger.warning(f"Message listener from class '{type(listener)}' threw exception {e}")
def add_listener(self, message_listener: MessageListener):
if message_listener is None:
return
with self.__lock_listeners:
self.listeners.add(message_listener)
def is_close(self) -> bool:
return self.__closed
def close(self):
with self.__lock_listeners:
for listener in self.listeners:
listener.on_close()
self.listeners.clear()
self.__consumer.remove_subscriber(self.__consumer_tag)
self.__closed = True
self.__metrics.disable()
@staticmethod
@abstractmethod
def value_from_bytes(body):
pass
@abstractmethod
def filter(self, value) -> bool:
pass
@abstractmethod
def get_delivery_counter(self) -> Counter:
pass
@abstractmethod
def get_content_counter(self) -> Counter:
pass
@abstractmethod
def get_processing_timer(self) -> Histogram:
pass
@abstractmethod
def extract_count_from(self, batch):
pass
@abstractmethod
def extract_labels(self, batch):
pass
@abstractmethod
def to_trace_string(self, value):
pass
@abstractmethod
def to_debug_string(self, value):
pass
| python |
# pylint: disable=unused-argument
"""Testing Module nlp.pdflib_dcr."""
import os
import cfg.glob
import pytest
import dcr
# -----------------------------------------------------------------------------
# Constants & Globals.
# -----------------------------------------------------------------------------
# pylint: disable=W0212
# @pytest.mark.issue
# -----------------------------------------------------------------------------
# Test RUN_ACTION_TEXT_FROM_PDF - normal - keep.
# -----------------------------------------------------------------------------
def test_run_action_extract_text_from_pdf_normal_keep(fxtr_rmdir_opt, fxtr_setup_empty_db_and_inbox):
"""Test RUN_ACTION_TEXT_FROM_PDF - normal - keep."""
cfg.glob.logger.debug(cfg.glob.LOGGER_START)
# -------------------------------------------------------------------------
pytest.helpers.copy_files_4_pytest_2_dir(
[
("pdf_text_ok_protected", "pdf"),
],
cfg.glob.setup.directory_inbox,
)
# -------------------------------------------------------------------------
values_original = pytest.helpers.backup_config_params(
cfg.glob.setup._DCR_CFG_SECTION,
[
(cfg.glob.setup._DCR_CFG_DELETE_AUXILIARY_FILES, "false"),
(cfg.glob.setup._DCR_CFG_TETML_LINE, "true"),
(cfg.glob.setup._DCR_CFG_TETML_WORD, "true"),
],
)
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_PROCESS_INBOX])
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_TEXT_FROM_PDF])
pytest.helpers.restore_config_params(
cfg.glob.setup._DCR_CFG_SECTION,
values_original,
)
# -------------------------------------------------------------------------
cfg.glob.logger.info("=========> test_run_action_extract_text_from_pdf_normal_keep <=========")
pytest.helpers.verify_content_of_directory(
cfg.glob.setup.directory_inbox,
[],
[],
)
pytest.helpers.verify_content_of_directory(
cfg.glob.setup.directory_inbox_accepted,
[],
[
"pdf_text_ok_protected_1.pdf",
"pdf_text_ok_protected_1.line.xml",
"pdf_text_ok_protected_1.word.xml",
],
)
pytest.helpers.verify_content_of_directory(
cfg.glob.setup.directory_inbox_rejected,
[],
[],
)
# -------------------------------------------------------------------------
cfg.glob.logger.debug(cfg.glob.LOGGER_END)
# -----------------------------------------------------------------------------
# Test RUN_ACTION_TEXT_FROM_PDF - normal - keep - only page.
# -----------------------------------------------------------------------------
def test_run_action_extract_text_from_pdf_normal_keep_only_page(fxtr_rmdir_opt, fxtr_setup_empty_db_and_inbox):
"""Test RUN_ACTION_TEXT_FROM_PDF - normal - keep - only page."""
cfg.glob.logger.debug(cfg.glob.LOGGER_START)
# -------------------------------------------------------------------------
pytest.helpers.copy_files_4_pytest_2_dir(
[
("pdf_text_ok_protected", "pdf"),
],
cfg.glob.setup.directory_inbox,
)
# -------------------------------------------------------------------------
values_original = pytest.helpers.backup_config_params(
cfg.glob.setup._DCR_CFG_SECTION,
[
(cfg.glob.setup._DCR_CFG_DELETE_AUXILIARY_FILES, "false"),
(cfg.glob.setup._DCR_CFG_TETML_LINE, "false"),
(cfg.glob.setup._DCR_CFG_TETML_PAGE, "true"),
],
)
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_PROCESS_INBOX])
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_TEXT_FROM_PDF])
pytest.helpers.restore_config_params(
cfg.glob.setup._DCR_CFG_SECTION,
values_original,
)
# -------------------------------------------------------------------------
cfg.glob.logger.info("=========> test_run_action_extract_text_from_pdf_normal_keep_only_page <=========")
pytest.helpers.verify_content_of_directory(
cfg.glob.setup.directory_inbox,
[],
[],
)
pytest.helpers.verify_content_of_directory(
cfg.glob.setup.directory_inbox_accepted,
[],
[
"pdf_text_ok_protected_1.pdf",
],
)
pytest.helpers.verify_content_of_directory(
cfg.glob.setup.directory_inbox_rejected,
[],
[],
)
# -------------------------------------------------------------------------
cfg.glob.logger.debug(cfg.glob.LOGGER_END)
# -----------------------------------------------------------------------------
# Test RUN_ACTION_TEXT_FROM_PDF - rej_file_open - line.
# -----------------------------------------------------------------------------
def test_run_action_extract_text_from_pdf_rej_file_open_line(fxtr_rmdir_opt, fxtr_setup_empty_db_and_inbox):
"""Test RUN_ACTION_TEXT_FROM_PDF - rej_file_open - line."""
cfg.glob.logger.debug(cfg.glob.LOGGER_START)
# -------------------------------------------------------------------------
pytest.helpers.copy_files_4_pytest_2_dir(
[
("case_03_pdf_image_small_route_inbox_pdf2image_tesseract_pdflib", "pdf"),
],
cfg.glob.setup.directory_inbox,
)
# -------------------------------------------------------------------------
values_original = pytest.helpers.backup_config_params(
cfg.glob.setup._DCR_CFG_SECTION,
[
(cfg.glob.setup._DCR_CFG_DELETE_AUXILIARY_FILES, "false"),
(cfg.glob.setup._DCR_CFG_TETML_LINE, "true"),
(cfg.glob.setup._DCR_CFG_TETML_PAGE, "false"),
],
)
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_PROCESS_INBOX])
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_PDF_2_IMAGE])
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_IMAGE_2_PDF])
os.remove(
os.path.join(
cfg.glob.setup.directory_inbox_accepted,
"case_03_pdf_image_small_route_inbox_pdf2image_tesseract_pdflib_1_1.pdf",
)
)
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_TEXT_FROM_PDF])
pytest.helpers.restore_config_params(
cfg.glob.setup._DCR_CFG_SECTION,
values_original,
)
# -------------------------------------------------------------------------
cfg.glob.logger.info("=========> test_run_action_extract_text_from_pdf_rej_file_open_line <=========")
pytest.helpers.verify_content_of_directory(
cfg.glob.setup.directory_inbox,
[],
[],
)
pytest.helpers.verify_content_of_directory(
cfg.glob.setup.directory_inbox_accepted,
[],
[
"case_03_pdf_image_small_route_inbox_pdf2image_tesseract_pdflib_1.pdf",
"case_03_pdf_image_small_route_inbox_pdf2image_tesseract_pdflib_1_1.jpeg",
],
)
pytest.helpers.verify_content_of_directory(
cfg.glob.setup.directory_inbox_rejected,
[],
[],
)
# -------------------------------------------------------------------------
cfg.glob.logger.debug(cfg.glob.LOGGER_END)
# -----------------------------------------------------------------------------
# Test RUN_ACTION_TEXT_FROM_PDF - rej_file_open - page.
# -----------------------------------------------------------------------------
def test_run_action_extract_text_from_pdf_rej_file_open_page(fxtr_rmdir_opt, fxtr_setup_empty_db_and_inbox):
"""Test RUN_ACTION_TEXT_FROM_PDF - rej_file_open - page."""
cfg.glob.logger.debug(cfg.glob.LOGGER_START)
# -------------------------------------------------------------------------
pytest.helpers.copy_files_4_pytest_2_dir(
[
("case_03_pdf_image_small_route_inbox_pdf2image_tesseract_pdflib", "pdf"),
],
cfg.glob.setup.directory_inbox,
)
# -------------------------------------------------------------------------
values_original = pytest.helpers.backup_config_params(
cfg.glob.setup._DCR_CFG_SECTION,
[
(cfg.glob.setup._DCR_CFG_DELETE_AUXILIARY_FILES, "false"),
(cfg.glob.setup._DCR_CFG_TETML_LINE, "false"),
(cfg.glob.setup._DCR_CFG_TETML_PAGE, "true"),
],
)
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_PROCESS_INBOX])
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_PDF_2_IMAGE])
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_IMAGE_2_PDF])
os.remove(
os.path.join(
cfg.glob.setup.directory_inbox_accepted,
"case_03_pdf_image_small_route_inbox_pdf2image_tesseract_pdflib_1_1.pdf",
)
)
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_TEXT_FROM_PDF])
pytest.helpers.restore_config_params(
cfg.glob.setup._DCR_CFG_SECTION,
values_original,
)
# -------------------------------------------------------------------------
cfg.glob.logger.info("=========> test_run_action_extract_text_from_pdf_rej_file_open_page <=========")
pytest.helpers.verify_content_of_directory(
cfg.glob.setup.directory_inbox,
[],
[],
)
pytest.helpers.verify_content_of_directory(
cfg.glob.setup.directory_inbox_accepted,
[],
[
"case_03_pdf_image_small_route_inbox_pdf2image_tesseract_pdflib_1.pdf",
"case_03_pdf_image_small_route_inbox_pdf2image_tesseract_pdflib_1_1.jpeg",
],
)
pytest.helpers.verify_content_of_directory(
cfg.glob.setup.directory_inbox_rejected,
[],
[],
)
# -------------------------------------------------------------------------
cfg.glob.logger.debug(cfg.glob.LOGGER_END)
| python |
import json
import os
import random
from bonsai_common import SimulatorSession, Schema
import dotenv
from microsoft_bonsai_api.simulator.client import BonsaiClientConfig
from microsoft_bonsai_api.simulator.generated.models import SimulatorInterface
from sim import extrusion_model as em
from sim import units
# time step (seconds) between state updates
Δt = 1
class ExtruderSimulation(SimulatorSession):
def reset(
self,
ω0_s: float = 1e-6,
Δω0_s: float = 0,
f0_c: float = 1e-6,
Δf0_c: float = 0,
T: float = units.celsius_to_kelvin(190),
L0: float = 1 * 12 * units.METERS_PER_INCH,
ε: float = 0.1 * units.METERS_PER_INCH,
):
"""
Extruder model for simulation.
Parameters
----------
ω0_s : float, optional
Initial screw angular speed (radians / second).
Δω0_s : float, optional
Initial change in screw angular speed (radians / second^2).
f0_c : float, optional
Initial cutter frequency (hertz).
Δf0_c : float, optional
Initial change in cutter frequency (1 / second^2).
T : float, optional
Initial temperature (Kelvin).
L0 : float, optional
Initial product length (meters).
ε : float, optional
Product tolerance (meters).
"""
# angular speed of the extruder screw (radians / second)
self.ω_s = ω0_s
# change in angular speed of the extruder screw (radians / second^2)
self.Δω_s = Δω0_s
self.Δω_eff = self.Δω_s
# frequency of the cutter (hertz)
self.f_c = f0_c
# change in cutter frequency (1 / second^2)
self.Δf_c = Δf0_c
self.Δf_eff = self.Δf_c
# temperature (Kelvin)
self.T = T
self.L0 = L0
self.ε = ε
model = em.ExtrusionModel(
ω=self.ω_s, Δω=self.Δω_s, f_c=self.f_c, T=self.T, Δt=Δt
)
self.T += model.ΔT
# material flow rate (meters^3 / second)
self.Q = model.Q_op
# product length (meters)
self.L = model.L
# manufacturing yield, defined as the number of good parts
# per iteration (dimensionless)
self.yield_ = model.yield_
def episode_start(self, config: Schema) -> None:
self.reset(
ω0_s=config.get("initial_screw_angular_speed"),
Δω0_s=config.get("initial_screw_angular_acceleration"),
f0_c=config.get("initial_cutter_frequency"),
Δf0_c=config.get("initial_cutter_acceleration"),
T=config.get("initial_temperature"),
)
def step(self):
# add a small amount of random noise to the actions to avoid
# the trivial solution of simply applying zero acceleration
# on each iteration
σ_max = 0.0001
σ_s = random.uniform(-σ_max, σ_max)
σ_c = random.uniform(-σ_max, σ_max)
self.Δω_eff = self.Δω_s * (1 + σ_s)
self.ω_s += Δt * self.Δω_eff
self.Δf_eff = self.Δf_c * (1 + σ_c)
self.f_c += Δt * self.Δf_eff
model = em.ExtrusionModel(
ω=self.ω_s, Δω=self.Δω_eff, f_c=self.f_c, T=self.T, Δt=Δt
)
self.T += model.ΔT
# material flow rate (meters^3 / second)
self.Q = model.Q_op
# product length (meters)
self.L = model.L
# manufacturing yield, defined as the number of good parts
# per iteration (dimensionless)
self.yield_ = model.yield_
def episode_step(self, action: Schema) -> None:
self.Δω_s = action.get("screw_angular_acceleration")
self.Δf_c = action.get("cutter_acceleration")
self.step()
def get_state(self):
return {
"screw_angular_speed": self.ω_s,
"screw_angular_acceleration": self.Δω_eff,
"cutter_frequency": self.f_c,
"cutter_acceleration": self.Δf_eff,
"temperature": self.T,
"product_length": self.L,
"flow_rate": self.Q,
"yield": self.yield_,
}
def halted(self) -> bool:
return False
def get_interface(self) -> SimulatorInterface:
"""Register sim interface."""
with open("interface.json", "r") as infile:
interface = json.load(infile)
return SimulatorInterface(
name=interface["name"],
timeout=interface["timeout"],
simulator_context=self.get_simulator_context(),
description=interface["description"],
)
def main():
workspace = os.getenv("SIM_WORKSPACE")
access_key = os.getenv("SIM_ACCESS_KEY")
# values in `.env`, if they exist, take priority over environment variables
dotenv.load_dotenv(".env", override=True)
if workspace is None:
raise ValueError("The Bonsai workspace ID is not set.")
if access_key is None:
raise ValueError("The access key for the Bonsai workspace is not set.")
config = BonsaiClientConfig(workspace=workspace, access_key=access_key)
extruder_sim = ExtruderSimulation(config)
extruder_sim.reset()
while extruder_sim.run():
continue
if __name__ == "__main__":
main()
| python |
# flake8: noqa pylint: skip-file
"""Tests for the TelldusLive config flow."""
import asyncio
from unittest.mock import Mock, patch
import pytest
from homeassistant import data_entry_flow
from homeassistant.components.tellduslive import (
APPLICATION_NAME, DOMAIN, KEY_SCAN_INTERVAL, SCAN_INTERVAL,
config_flow)
from homeassistant.const import CONF_HOST
from tests.common import MockConfigEntry, MockDependency, mock_coro
def init_config_flow(hass, side_effect=None):
"""Init a configuration flow."""
flow = config_flow.FlowHandler()
flow.hass = hass
if side_effect:
flow._get_auth_url = Mock(side_effect=side_effect)
return flow
@pytest.fixture
def supports_local_api():
"""Set TelldusLive supports_local_api."""
return True
@pytest.fixture
def authorize():
"""Set TelldusLive authorize."""
return True
@pytest.fixture
def mock_tellduslive(supports_local_api, authorize):
"""Mock tellduslive."""
with MockDependency('tellduslive') as mock_tellduslive_:
mock_tellduslive_.supports_local_api.return_value = supports_local_api
mock_tellduslive_.Session().authorize.return_value = authorize
mock_tellduslive_.Session().access_token = 'token'
mock_tellduslive_.Session().access_token_secret = 'token_secret'
mock_tellduslive_.Session().authorize_url = 'https://example.com'
yield mock_tellduslive_
async def test_abort_if_already_setup(hass):
"""Test we abort if TelldusLive is already setup."""
flow = init_config_flow(hass)
with patch.object(hass.config_entries, 'async_entries', return_value=[{}]):
result = await flow.async_step_user()
assert result['type'] == data_entry_flow.RESULT_TYPE_ABORT
assert result['reason'] == 'already_setup'
with patch.object(hass.config_entries, 'async_entries', return_value=[{}]):
result = await flow.async_step_import(None)
assert result['type'] == data_entry_flow.RESULT_TYPE_ABORT
assert result['reason'] == 'already_setup'
async def test_full_flow_implementation(hass, mock_tellduslive):
"""Test registering an implementation and finishing flow works."""
flow = init_config_flow(hass)
result = await flow.async_step_discovery(['localhost', 'tellstick'])
assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
assert result['step_id'] == 'user'
assert len(flow._hosts) == 2
result = await flow.async_step_user()
assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
assert result['step_id'] == 'user'
result = await flow.async_step_user({'host': 'localhost'})
assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
assert result['step_id'] == 'auth'
assert result['description_placeholders'] == {
'auth_url': 'https://example.com',
'app_name': APPLICATION_NAME,
}
result = await flow.async_step_auth('')
assert result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result['title'] == 'localhost'
assert result['data']['host'] == 'localhost'
assert result['data']['scan_interval'] == 60
assert result['data']['session'] == {'token': 'token', 'host': 'localhost'}
async def test_step_import(hass, mock_tellduslive):
"""Test that we trigger auth when configuring from import."""
flow = init_config_flow(hass)
result = await flow.async_step_import({
CONF_HOST: DOMAIN,
KEY_SCAN_INTERVAL: 0,
})
assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
assert result['step_id'] == 'auth'
async def test_step_import_add_host(hass, mock_tellduslive):
"""Test that we add host and trigger user when configuring from import."""
flow = init_config_flow(hass)
result = await flow.async_step_import({
CONF_HOST: 'localhost',
KEY_SCAN_INTERVAL: 0,
})
assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
assert result['step_id'] == 'user'
async def test_step_import_no_config_file(hass, mock_tellduslive):
"""Test that we trigger user with no config_file configuring from import."""
flow = init_config_flow(hass)
result = await flow.async_step_import({ CONF_HOST: 'localhost', KEY_SCAN_INTERVAL: 0, })
assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
assert result['step_id'] == 'user'
async def test_step_import_load_json_matching_host(hass, mock_tellduslive):
"""Test that we add host and trigger user when configuring from import."""
flow = init_config_flow(hass)
with patch('homeassistant.components.tellduslive.config_flow.load_json',
return_value={'tellduslive': {}}), \
patch('os.path.isfile'):
result = await flow.async_step_import({ CONF_HOST: 'Cloud API', KEY_SCAN_INTERVAL: 0, })
assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
assert result['step_id'] == 'user'
async def test_step_import_load_json(hass, mock_tellduslive):
"""Test that we create entry when configuring from import."""
flow = init_config_flow(hass)
with patch('homeassistant.components.tellduslive.config_flow.load_json',
return_value={'localhost': {}}), \
patch('os.path.isfile'):
result = await flow.async_step_import({ CONF_HOST: 'localhost', KEY_SCAN_INTERVAL: SCAN_INTERVAL, })
assert result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result['title'] == 'localhost'
assert result['data']['host'] == 'localhost'
assert result['data']['scan_interval'] == 60
assert result['data']['session'] == {}
@pytest.mark.parametrize('supports_local_api', [False])
async def test_step_disco_no_local_api(hass, mock_tellduslive):
"""Test that we trigger when configuring from discovery, not supporting local api."""
flow = init_config_flow(hass)
result = await flow.async_step_discovery(['localhost', 'tellstick'])
assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
assert result['step_id'] == 'auth'
assert len(flow._hosts) == 1
async def test_step_auth(hass, mock_tellduslive):
"""Test that create cloud entity from auth."""
flow = init_config_flow(hass)
await flow.async_step_auth()
result = await flow.async_step_auth(['localhost', 'tellstick'])
assert result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result['title'] == 'Cloud API'
assert result['data']['host'] == 'Cloud API'
assert result['data']['scan_interval'] == 60
assert result['data']['session'] == {
'token': 'token',
'token_secret': 'token_secret',
}
@pytest.mark.parametrize('authorize', [False])
async def test_wrong_auth_flow_implementation(hass, mock_tellduslive):
"""Test wrong auth."""
flow = init_config_flow(hass)
await flow.async_step_auth()
result = await flow.async_step_auth('')
assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
assert result['step_id'] == 'auth'
assert result['errors']['base'] == 'auth_error'
async def test_not_pick_host_if_only_one(hass, mock_tellduslive):
"""Test not picking host if we have just one."""
flow = init_config_flow(hass)
result = await flow.async_step_user()
assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
assert result['step_id'] == 'auth'
async def test_abort_if_timeout_generating_auth_url(hass, mock_tellduslive):
"""Test abort if generating authorize url timeout."""
flow = init_config_flow(hass, side_effect=asyncio.TimeoutError)
result = await flow.async_step_user()
assert result['type'] == data_entry_flow.RESULT_TYPE_ABORT
assert result['reason'] == 'authorize_url_timeout'
async def test_abort_no_auth_url(hass, mock_tellduslive):
"""Test abort if generating authorize url returns none."""
flow = init_config_flow(hass)
flow._get_auth_url = Mock(return_value=False)
result = await flow.async_step_user()
assert result['type'] == data_entry_flow.RESULT_TYPE_ABORT
assert result['reason'] == 'authorize_url_fail'
async def test_abort_if_exception_generating_auth_url(hass, mock_tellduslive):
"""Test we abort if generating authorize url blows up."""
flow = init_config_flow(hass, side_effect=ValueError)
result = await flow.async_step_user()
assert result['type'] == data_entry_flow.RESULT_TYPE_ABORT
assert result['reason'] == 'authorize_url_fail'
async def test_discovery_already_configured(hass, mock_tellduslive):
"""Test abort if alredy configured fires from discovery."""
MockConfigEntry(
domain='tellduslive',
data={'host': 'some-host'}
).add_to_hass(hass)
flow = init_config_flow(hass)
result = await flow.async_step_discovery(['some-host', ''])
assert result['type'] == data_entry_flow.RESULT_TYPE_ABORT
assert result['reason'] == 'already_setup'
| python |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.common.utils import data_utils
from openstackclient.tests.functional.identity.v3 import common
class IdentityProviderTests(common.IdentityTests):
    # Functional test cases for the 'identity provider' command.
def test_idp_create(self):
self._create_dummy_idp()
def test_idp_delete(self):
identity_provider = self._create_dummy_idp(add_clean_up=False)
raw_output = self.openstack('identity provider delete %s'
% identity_provider)
self.assertEqual(0, len(raw_output))
def test_idp_multi_delete(self):
idp_1 = self._create_dummy_idp(add_clean_up=False)
idp_2 = self._create_dummy_idp(add_clean_up=False)
raw_output = self.openstack(
'identity provider delete %s %s' % (idp_1, idp_2))
self.assertEqual(0, len(raw_output))
def test_idp_show(self):
identity_provider = self._create_dummy_idp(add_clean_up=True)
raw_output = self.openstack('identity provider show %s'
% identity_provider)
items = self.parse_show(raw_output)
self.assert_show_fields(items, self.IDENTITY_PROVIDER_FIELDS)
def test_idp_list(self):
self._create_dummy_idp(add_clean_up=True)
raw_output = self.openstack('identity provider list')
items = self.parse_listing(raw_output)
self.assert_table_structure(items, self.IDENTITY_PROVIDER_LIST_HEADERS)
def test_idp_set(self):
identity_provider = self._create_dummy_idp(add_clean_up=True)
new_remoteid = data_utils.rand_name('newRemoteId')
raw_output = self.openstack('identity provider set '
'%(identity-provider)s '
'--remote-id %(remote-id)s '
% {'identity-provider': identity_provider,
'remote-id': new_remoteid})
self.assertEqual(0, len(raw_output))
raw_output = self.openstack('identity provider show %s'
% identity_provider)
updated_value = self.parse_show_as_object(raw_output)
self.assertIn(new_remoteid, updated_value['remote_ids'])
| python |
import unittest
from translator import english_to_french, french_to_english
class TestE2F(unittest.TestCase):
def test1(self):
self.assertEqual(english_to_french(""), "API Exception") # test null
self.assertEqual(english_to_french("Hello"), "Bonjour") # test positive
self.assertNotEqual(english_to_french("Hello"), "Hello") # test negative
class TestF2E(unittest.TestCase):
def test1(self):
self.assertEqual(french_to_english(""), "API Exception") # test null
self.assertEqual(french_to_english("Bonjour"), "Hello") # test positive
self.assertNotEqual(french_to_english("Bonjour"), "Bonjour") # test negative
if __name__ == '__main__':
    unittest.main()
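# A dictionary-backed stand-in for the translator module exercised above -- a
# sketch only, grounded in nothing but the three assertions in these tests
# (empty input -> "API Exception", "Hello" <-> "Bonjour"). The real module
# presumably calls an external translation service; the underscore-prefixed
# names below are hypothetical and would replace english_to_french /
# french_to_english in translator.py.
_EN_TO_FR = {"Hello": "Bonjour"}
_FR_TO_EN = {fr: en for en, fr in _EN_TO_FR.items()}
def _stub_english_to_french(english_text):
    # Empty or missing input mirrors the tested "API Exception" behaviour.
    if not english_text:
        return "API Exception"
    return _EN_TO_FR.get(english_text, english_text)
def _stub_french_to_english(french_text):
    if not french_text:
        return "API Exception"
    return _FR_TO_EN.get(french_text, french_text)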
import errno
import logging
import os
from typing import TYPE_CHECKING, Optional
from .errors import ObjectFormatError
if TYPE_CHECKING:
from dvc.fs.base import FileSystem
from dvc.hash_info import HashInfo
from dvc.types import AnyPath
from .db.base import ObjectDB
logger = logging.getLogger(__name__)
class HashFile:
def __init__(
self,
fs_path: Optional["AnyPath"],
fs: Optional["FileSystem"],
hash_info: "HashInfo",
name: Optional[str] = None,
):
self.fs_path = fs_path
self.fs = fs
self.hash_info = hash_info
self.name = name
def __len__(self):
return 1
def __str__(self):
return f"object {self.hash_info}"
def __bool__(self):
return bool(self.hash_info)
def __eq__(self, other):
if not isinstance(other, HashFile):
return False
return (
self.fs_path == other.fs_path
and self.fs == other.fs
and self.hash_info == other.hash_info
)
def __hash__(self):
return hash(
(
self.hash_info,
self.fs_path,
self.fs.scheme if self.fs else None,
)
)
def check(self, odb: "ObjectDB", check_hash: bool = True):
if not check_hash:
assert self.fs
if not self.fs.exists(self.fs_path):
raise FileNotFoundError(
errno.ENOENT, os.strerror(errno.ENOENT), self.fs_path
)
else:
return None
self._check_hash(odb)
def _check_hash(self, odb):
from .stage import get_file_hash
_, actual = get_file_hash(
self.fs_path, self.fs, self.hash_info.name, odb.state
)
logger.trace(
"cache '%s' expected '%s' actual '%s'",
self.fs_path,
self.hash_info,
actual,
)
assert actual.name == self.hash_info.name
if actual.value.split(".")[0] != self.hash_info.value.split(".")[0]:
raise ObjectFormatError(f"{self} is corrupted")
| python |
"""Generators - Small
=====================
Some small graphs
"""
import pytest
from networkx.generators.tests.test_small import TestGeneratorsSmall
from graphscope.framework.errors import UnimplementedError
from graphscope.nx.utils.compat import with_graphscope_nx_context
@pytest.mark.usefixtures("graphscope_session")
@with_graphscope_nx_context(TestGeneratorsSmall)
class TestGeneratorsSmall:
def test_properties_named_small_graphs(self):
pass
| python |
import random
def bogoSort(a):
    # Keep shuffling until the list happens to come out sorted.
    while not is_sorted(a):
        shuffle(a)
def is_sorted(a):
    # Return True if every element is <= its successor.
    n = len(a)
    for i in range(n - 1):
        if a[i] > a[i + 1]:
            return False
    return True
def shuffle(a):
    # Randomly permute the list in place.
    n = len(a)
    for i in range(n):
        r = random.randint(0, n - 1)
        a[i], a[r] = a[r], a[i]
lst = list(map(int, input('Enter a number list to be sorted: ').split()))
bogoSort(lst)
print(lst)
| python |
"""add ride resync date
Revision ID: 21518d40552c
Revises: d4be89cbab08
Create Date: 2020-02-01 08:53:33.632416
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '21518d40552c'
down_revision = 'd4be89cbab08'
def upgrade():
    op.add_column('rides', sa.Column('resync_date', sa.DateTime, nullable=True))
    # We do not know which rides have partial efforts fetched, so schedule them
    # all for resync spread over the next few days.
    op.execute(
        'update rides set efforts_fetched = false, resync_count = 1, '
        'resync_date = now() + interval floor(rand() * 72) hour'
    )
def downgrade():
    op.drop_column('rides', 'resync_date')
| python |
from flask import Flask, render_template, request
from wtforms import Form, DecimalField, validators
app = Flask(__name__)
class EntryForm(Form):
x_entry = DecimalField('x:',
places=10,
validators=[validators.NumberRange(-1e10, 1e10)])
y_entry = DecimalField('y:',
places=10,
validators=[validators.NumberRange(-1e10, 1e10)])
@app.route('/')
def index():
form = EntryForm(request.form)
return render_template('entry.html', form=form, z='')
@app.route('/results', methods=['POST'])
def results():
form = EntryForm(request.form)
z = ''
if request.method == 'POST' and form.validate():
x = request.form['x_entry']
y = request.form['y_entry']
z = float(x) + float(y)
return render_template('entry.html', form=form, z=z)
if __name__ == '__main__':
app.run(debug=True) | python |
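# A quick way to exercise the form logic above without a browser or the
# entry.html template (a sketch; a werkzeug MultiDict is what wtforms accepts
# as bound form data). Run in a Python shell with this module imported:
#
#   >>> from werkzeug.datastructures import MultiDict
#   >>> form = EntryForm(MultiDict([('x_entry', '1.5'), ('y_entry', '2.5')]))
#   >>> form.validate()
#   True
#   >>> float(form.x_entry.data) + float(form.y_entry.data)
#   4.0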