seq_id (string, len 7–11) | text (string, len 156–1.7M) | repo_name (string, len 7–125) | sub_path (string, len 4–132) | file_name (string, len 4–77) | file_ext (string, 6 classes) | file_size_in_byte (int64, 156–1.7M) | program_lang (string, 1 class) | lang (string, 38 classes) | doc_type (string, 1 class) | stars (int64, 0–24.2k, ⌀ = null) | dataset (string, 1 class) | pt (string, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|---|---|
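Each record below pairs a raw source file (the text column) with its provenance metadata. As a minimal sketch of how rows with this schema could be consumed, assuming the dump corresponds to a Hugging Face-hosted dataset (the actual dataset id is not stated here, so DATASET_NAME is a placeholder, not a real identifier):

# Minimal sketch: stream rows with the schema above and keep English-language Python files.
# DATASET_NAME is a placeholder -- the real dataset identifier is not given in this dump.
from datasets import load_dataset

rows = load_dataset("DATASET_NAME", split="train", streaming=True)
for row in rows:
    # Each row carries the raw file in `text` plus provenance columns.
    if row["file_ext"] == "py" and row["lang"] == "en":
        print(row["repo_name"], row["sub_path"], row["file_size_in_byte"], row["stars"])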
43965087140
|
#Question 1 Concurrent
import concurrent.futures
import time
start=time.perf_counter()
#define functions
def add(x, y):
print("This worked")
return x + y
print("This worked")
def subtract(x, y):
return x-y
def multiply(x, y):
return x * y
def divide(x, y):
if y==0:
raise ValueError('Cannot divide by zero!')
return x / y
def modulus(x, y):
return x % y
if __name__=='__main__':
with concurrent.futures.ProcessPoolExecutor() as exec:
proc1=exec.submit(add,10,5)
proc2=exec.submit(subtract,10,5)
proc3=exec.submit(multiply,10,5)
proc4=exec.submit(divide,10,5)
proc5=exec.submit(modulus,10,5)
print(proc1.result())
print(proc2.result())
print(proc3.result())
print(proc4.result())
print(proc5.result())
finish=time.perf_counter()
print("Finished in", round(finish-start,2), "second(s)")
| mpadill6/UCSD-Intro-to-Programming | Week 4/Question 1/Hw4_Concurrent.py | Hw4_Concurrent.py | py | 974 | python | en | code | 0 | github-code | 6 |
73826380346
|
# This Python file has the entire code without particular libraries.
# Made by: Feltrim
# Instagram: instagram.com/vfeltrim_
# Libraries
from math import factorial, sqrt
from time import sleep
# Interface functions:
def line(size=42):
"""_summary_
Args:
size (int, optional): Defaults to 42.
Returns:
_type_: _Return '-'_
"""
return '-' * size
def header(txt):
"""_summary_
Args:
txt (_type_): _txt = text provided to create headers_
"""
print(line())
print(txt.center(42))
print(line())
def read_int(msg):
"""_summary_
Args:
msg (_type_): _msg = integer number_
Returns:
_type_: _if n == int, return n_
"""
while True:
try:
n = int(input(msg))
except (ValueError, TypeError):
print('\033[31mERROR: please enter a valid integer.\033[m')
continue
except KeyboardInterrupt:
print('\n\033[31mUser chose not to enter this number.\033[m')
return 0
else:
return n
def menu(options):
"""_summary_
Args:
options (_type_): options = text provided to create a menu with options_
Returns:
_type_: _a menu with all the options provided during coding_
"""
header('Select the operation')
c = 1
for item in options:
print(f'\033[34m{c}\033[m - \033[33m{item}\033[m')
c += 1
print(line())
opt = read_int('\033[32mYour option\n>>>>> \033[m')
return opt
# Operations functions:
def add(a=0, b=0):
"""_summary_
Args:
a (int, optional): _integer number provided by the user_. Defaults to 0.
b (int, optional): _another integer number provided by the user_. Defaults to 0.
Returns:
_type_: _calculation result_
"""
res = a + b
return header(f'>>>>> \033[1;33m{a} + {b}\033[m = \033[1;34m{res}\033[m')
def sub(a=0, b=0):
"""_summary_
Args:
a (int, optional): _integer number provided by the user_. Defaults to 0.
b (int, optional): _another integer number provided by the user_. Defaults to 0.
Returns:
_type_: _calculation result_
"""
res = a - b
return header(f'>>>>> \033[1;33m{a} - {b}\033[m = \033[1;34m{res}\033[m')
def mult(a=0, b=0):
"""_summary_
Args:
a (int, optional): _integer number provided by the user_. Defaults to 0.
b (int, optional): _another integer number provided by the user_. Defaults to 0.
Returns:
_type_: _calculation result_
"""
res = a * b
return header(f'>>>>> \033[1;33m{a} x {b}\033[m = \033[1;34m{res}\033[m')
def div(a=0, b=0):
"""_summary_
Args:
a (int, optional): _integer number provided by the user_. Defaults to 0.
b (int, optional): _another integer number provided by the user_. Defaults to 0.
Returns:
_type_: _calculation result_
"""
try:
res = a / b
if res.is_integer():
return header(f'>>>>> \033[1;33m{a} ÷ {b}\033[m = \033[1;34m{res:.0f}\033[m')
else:
return header(f'>>>>> \033[1;33m{a} ÷ {b}\033[m = \033[1;34m{res}\033[m')
except ZeroDivisionError:
while b == 0:
print('\033[31mERROR: A number can not be divided by 0 (zero).\033[m')
b = read_int(f'>>>>> {a} ÷ ')
res = a / b
if res.is_integer():
return header(f'>>>>> \033[1;33m{a} ÷ {b}\033[m = \033[1;34m{res:.0f}\033[m')
else:
return header(f'>>>>> \033[1;33m{a} ÷ {b}\033[m = \033[1;34m{res}\033[m')
def exp(a=0, b=0):
"""_summary_
Args:
a (int, optional): _integer number provided by the user_. Defaults to 0.
b (int, optional): _another integer number provided by the user_. Defaults to 0.
Returns:
_type_: _calculation result_
"""
res = a ** b
return header(f'>>>>> \033[1;33m{a}^{b}\033[m = \033[1;34m{res}\033[m')
def square_root(a=0):
"""_summary_
Args:
a (int, optional): _integer number provided by the user_. Defaults to 0.
Returns:
_type_: _calculation result_
"""
res = sqrt(a)
if res.is_integer():
return header(f'>>>>> \033[1;33m√{a}\033[m = \033[1;34m{res:.0f}\033[m')
else:
return header(f'>>>>> \033[1;33m√{a}\033[m = \033[1;34m{res}\033[m')
def fac(a=0):
"""_summary_
Args:
a (int, optional): _integer number provided by the user_. Defaults to 0.
Returns:
_type_: _calculation result_
"""
res = factorial(a)
return header(f'>>>>> \033[1;33m{a}!\033[m = \033[1;34m{res}\033[m')
# Main code, where we put all the functions together:
header('Python Calculator')
while True:
n1 = read_int('Enter an integer\n>>>>> ')
opt1 = menu(['Addition', 'Subtraction', 'Multiplication',
'Division', 'More Options', 'Close the Program'])
if opt1 == 1:
n2 = read_int(f'>>>>> {n1} + ')
add(n1, n2)
elif opt1 == 2:
n2 = read_int(f'>>>>> {n1} - ')
sub(n1, n2)
elif opt1 == 3:
n2 = read_int(f'>>>>> {n1} x ')
mult(n1, n2)
elif opt1 == 4:
n2 = read_int(f'>>>>> {n1} ÷ ')
div(n1, n2)
elif opt1 == 5:
opt2 = menu(['Exponentiation', 'Square Root',
'Factorial', 'Close the Program'])
if opt2 == 1:
n2 = read_int(f'>>>>> {n1}^')
exp(n1, n2)
elif opt2 == 2:
square_root(n1)
elif opt2 == 3:
fac(n1)
elif opt2 == 4:
header('Finishing....')
sleep(1.5)
header('''Thanks for using my program!!
Developed by:\033[1;96m Feltrim\033[m''')
sleep(5)
break
else:
print('\033[31mPlease, select a valid option.\033[m')
elif opt1 == 6:
header('Finishing....')
sleep(1.5)
header('''Thanks for using my program!!
Developed by:\033[1;96m Feltrim\033[m''')
sleep(5)
break
else:
print('\033[31mPlease, select a valid option.\033[m')
| Feltrim/Calculator | Codes/main_code.py | main_code.py | py | 6,153 | python | en | code | 0 | github-code | 6 |
74773385148
|
import numpy as np
import os
import torch
from typing import List, Tuple
from tqdm import tqdm
from datetime import datetime, timedelta
import pickle
import matplotlib.pyplot as plt
# -------------------- Colorize ------------------------------------------
"""A set of common utilities used within the environments. These are
not intended as API functions, and will not remain stable over time.
"""
import numpy as np
import matplotlib.colors as colors
color2num = dict(gray=30,
red=31,
green=32,
yellow=33,
blue=34,
magenta=35,
cyan=36,
white=37,
crimson=38)
def colorize(string, color, bold=False, highlight=False):
"""Return string surrounded by appropriate terminal color codes to
print colorized text. Valid colors: gray, red, green, yellow,
blue, magenta, cyan, white, crimson
"""
# Import six here so that `utils` has no import-time dependencies.
# We want this since we use `utils` during our import-time sanity checks
# that verify that our dependencies (including six) are actually present.
import six
attr = []
num = color2num[color]
if highlight:
num += 10
attr.append(six.u(str(num)))
if bold:
attr.append(six.u('1'))
attrs = six.u(';').join(attr)
return six.u('\x1b[%sm%s\x1b[0m') % (attrs, string)
def calc_iou(times_gt, time):
a_s, a_e = times_gt
b_s, b_e = time
if b_s > a_e or a_s > b_e:
return 0
else:
o_s = max(a_s,b_s)
o_e = min(a_e,b_e)
intersection = o_e - o_s
u_s = min(a_s,b_s)
u_e = max(a_e,b_e)
union = u_e - u_s
return intersection/float(union)
def green(s):
return colorize(s, 'green', bold=True)
def blue(s):
return colorize(s, 'blue', bold=True)
def red(s):
return colorize(s, 'red', bold=True)
def magenta(s):
return colorize(s, 'magenta', bold=True)
def colorize_mat(mat, hsv):
"""
Colorizes the values in a 2D matrix MAT
to the color as defined by the color HSV.
The values in the matrix modulate the 'V' (or value) channel.
H,S (hue and saturation) are held fixed.
HSV values are assumed to be in range [0,1].
Returns an uint8 'RGB' image.
"""
mat = mat.astype(np.float32)
m, M = np.min(mat), np.max(mat)
v = (mat - m) / (M - m)
h, s = hsv[0] * np.ones_like(v), hsv[1] * np.ones_like(v)
hsv = np.dstack([h, s, v])
rgb = (255 * colors.hsv_to_rgb(hsv)).astype(np.uint8)
return rgb
# -------------------- / Colorize ------------------------------------------
def gpu_initializer(gpu_id):
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
global device
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('Using device: ', device)
return device
| hannahbull/slrtp2022_t3 | utils.py | utils.py | py | 2,912 | python | en | code | 3 | github-code | 6 |
32766695147
|
from django.shortcuts import render, reverse, redirect
from django.views.generic import View
from django.views.generic.edit import CreateView
import requests
import re
count = 6
# Create your views here.
def home(request):
template_name = 'home.html'
return render(request, template_name=template_name)
def getPainting(request):
template_name = 'arts.html'
prelink = "https://drive.google.com/uc?export=view&id="
if request.method == "POST":
global count
# request.POST['id']
imgid = count + 1
name = request.POST['name']
link = request.POST['link']
linkid = re.search(r"\bd\/\w+[^/]([A-Za-z0-9-_])*", link)
link = prelink + linkid.group()[2:]
requests.post('https://kvdvse6qr3.execute-api.ap-south-1.amazonaws.com/img/image',
json = {'imgId':f'{imgid}',
'altText': f'{name}',
'imgUrl': f'{link}'})
allImages = requests.get("https://kvdvse6qr3.execute-api.ap-south-1.amazonaws.com/img/images")
return render(request, template_name=template_name, context = { 'images': allImages.json()['images'] })
# class getPaintingView(View):
# template_name = 'arts.html'
# def get(self, request):
# return render(request, self.template_name)
# def post(self, request):
# print(request)
# class addPaintingView(CreateView):
# template_name = 'addArt.html'
# def get(self, request):
# return render(request, self.template_name)
| SaahilS468/Serverless-API | image/views.py | views.py | py | 1,598 | python | en | code | 0 | github-code | 6 |
28830646732
|
"""Constants for the WiHeat Climate integration."""
import logging
API_URL = 'https://wi-heat.com/'
ID = 'home-assistant'
SESSION = 'A2B3C4D5E6'
DOMAIN = "wiheat"
CONF_CODE_FORMAT = "code_format"
CONF_CODE = "code"
CONF_TEMP = "temp"
UPDATE_INTERVAL = "timesync"
MIN_SCAN_INTERVAL = 60
API_ENDPOINT = {
'getUserDetails': f'{API_URL}usr_API_2.php',
'getData': f'{API_URL}API_2.php'
}
HEADERS = {
'host': 'wi-heat.com',
'accept': '*/*',
'content-type': 'application/x-www-form-urlencoded',
'accept-encoding': 'gzip, deflate, br',
}
QUERY = {
'login': 'login',
'getVpHwid': 'getVPhwid'
}
POWER_STATE = {
'11': 'on',
'21': 'off'
}
FAN_SPEED = {
'2': 'auto',
'3': 'minimum',
'5': 'medium',
'7': 'maximum'
}
PLASMACLUSTER = {
'F4': 'on',
'F0': 'off'
}
LOGGER = logging.getLogger(__package__)
| kimjohnsson/wiheat | custom_components/wiheat/const.py | const.py | py | 861 | python | en | code | 0 | github-code | 6 |
5555757977
|
import copy
from typing import Dict, List, Tuple
import torch
from data.low_res import SingleDomain
from data.geography import frequency_encoded_latitude
import numpy as np
from data.vars import FIELD_MASK, FORCING_MASK, get_var_mask_name
import xarray as xr
from utils.xarray_oper import tonumpydict
def determine_ndoms(*args,**kwargs):
arglens = [1]
for i in range(len(args)):
if isinstance(args[i],list):
arglens.append(len(args[i]))
for key,_ in kwargs.items():
if isinstance(kwargs[key],list):
arglens.append(len(kwargs[key]))
return int(np.amax(arglens))
class MultiDomain(SingleDomain):
def __init__(self,*args,**kwargs):
super().__init__(*args,**kwargs)
self.var_grouping = kwargs.pop('var_grouping')
def get_lat_features(self,lats):
posdict = self.locate(lats[0],lats[-1],lat = True)
(n0,_),n = posdict['locs'],posdict["len"]
slc = slice(n0,len(lats)+n0)
abslat,signlat = frequency_encoded_latitude(n,self.half_spread*2+1)
return np.cos(abslat[slc]),np.cos(signlat[slc])
def append_lat_features(self,outs):
key = list(outs.keys())[0]
lats = outs[key].u.lat.values
abslat,signlat = self.get_lat_features(lats)
n = len(outs[key].u.lon)
abslat = abslat.reshape(-1,1)@np.ones((1,n))
signlat = signlat.reshape(-1,1)@np.ones((1,n))
latfeats = xr.Dataset(
data_vars = dict(
abslat = (["lat","lon"],abslat),
signlat = (["lat","lon"],signlat),
),
coords = dict(
lon = outs[key].u.lon,
lat = outs[key].u.lat
)
)
outs['lat_feats'] = latfeats
return outs
class MultiDomainDataset(MultiDomain):
def __init__(self,*args,scalars = None,latitude = False,temperature = False,torch_flag = False, **kwargs):
self.scalars = scalars
self.latitude = latitude
self.temperature = temperature
self.torch_flag = torch_flag
self.input_kwargs = kwargs
super().__init__(*args,**kwargs)
@property
def sslice(self,):
return slice(self.half_spread,-self.half_spread)
def pad(self,data_vars:dict,coords:dict):
forcing_mask_names = [get_var_mask_name(fn) for fn in self.forcing_names]
for name in data_vars.keys():
dims,vals = data_vars[name]
if 'lat' not in dims or 'lon' not in dims:
continue
pad = (0,0)
if name in self.forcing_names + forcing_mask_names and self.half_spread>0:
vrshp = list(vals.shape)
vals = vals.reshape([-1]+ vrshp[-2:])
vals = vals[:,self.sslice,self.sslice]
vals = vals.reshape(vrshp[:-2] + list(vals.shape[-2:]))
# print(f'{vrshp}->{vals.shape}' )
padtuple = (len(vals.shape)-2)*[(0,0)] + [(0,pad[0]),(0,pad[1])]
vals = np.pad(vals,pad_width = tuple(padtuple),constant_values = np.nan)
data_vars[name] = (dims,vals)
def pad_coords(coords,slice_flag = False):
lat = coords['lat']
pad = 0
coords['lat_pad'] = pad
lat = np.pad(lat,pad_width = ((0,pad),),constant_values = 0)
if slice_flag:
lat = lat[self.sslice]
coords['lat'] = lat
lon = coords['lon']
pad = 0
coords['lon_pad'] = pad
lon = np.pad(lon,pad_width = ((0,pad),),constant_values = 0)
if slice_flag:
lon = lon[self.sslice]
coords['lon'] = lon
return coords
forcing_coords = pad_coords(copy.deepcopy(coords),slice_flag=self.half_spread>0)
coords = pad_coords(coords,slice_flag=False)
return data_vars,coords,forcing_coords
def add_lat_features(self,data_vars,coords):
lats = coords['lat']
lons = coords['lon']
abslat,signlat = self.get_lat_features(lats)
data_vars['abs_lat'] = (['lat','lon'], abslat.reshape([-1,1]) @ np.ones((1,len(lons))))
data_vars['sign_lat'] = (['lat','lon'],signlat.reshape([-1,1]) @ np.ones((1,len(lons))))
return data_vars
def group_variables(self,data_vars):
groups = []
for vargroup in self.var_grouping:
valdict = {}
for varname in vargroup:
if varname not in data_vars:
continue
valdict[varname] = data_vars[varname]
# for suff in '_mean _std'.split():
for suff in '_scale '.split():
nvarname = varname + suff
if nvarname in data_vars:
valdict[nvarname] = data_vars[nvarname]
groups.append(valdict)
return tuple(groups)
def group_np_stack(self,vargroups):
return tuple([self._np_stack(vars) for vars in vargroups])
def _np_stack(self,vals:Dict[str,Tuple[List[str],np.ndarray]]):
v = []
for _,val in vals.values():
v.append(val)
if len(v) == 0:
return np.empty(0)
else:
return np.stack(v,axis =0)
def group_to_torch(self,vargroups):
return tuple([self._to_torch(vars) for vars in vargroups])
def _to_torch(self,vals:np.array,dtype = torch.float32):
# vals = vals[:,300:-280,300:-280]
return torch.from_numpy(vals).type(dtype)
def normalize(self,data_vars,coords):
keys_list = tuple(data_vars.keys())
for key in keys_list:
dims,vals = data_vars[key]
if 'lat' not in dims or 'lon' not in dims:
continue
shp = {d:len(coords[d]) for d in dims}
newdims = {key:None for key in shp}
if 'lon' in shp:
shp['lon'] = 1
newdims.pop('lon')
if 'lat' in shp:
shp['lat'] = 1
newdims.pop('lat')
shp0 = [shp[key] for key in newdims]
shp1 = list(shp.values())
newdims = list(newdims.keys())
a = np.ones(shp0)
if self.scalars is not None:
if f"{key}_scale" in self.scalars:
a = self.scalars[f"{key}_scale"].values
a = a.reshape(shp0)
if not self.torch_flag:
data_vars[f"{key}_scale"] = (newdims,a)
# data_vars[f"{key}_mean"] = (newdims,a)
# data_vars[f"{key}_std"] = (newdims,b)
# vals = (vals - a.reshape(shp1))/b.reshape(shp1)
vals = vals/a.reshape(shp1)
data_vars[key] = (dims,vals)
return data_vars,coords
def mask(self,data_vars):
keys_list = tuple(data_vars.keys())
for key in keys_list:
dims,f = data_vars[key]
if not ('lat' in dims and 'lon' in dims):
continue
mask = f==f
f[~mask] = 0
mask_found = False
for group,group_mask in zip([self.field_names,self.forcing_names],[FIELD_MASK,FORCING_MASK]):
if key in group:
mask = data_vars[group_mask][1]
mask_found =True
break
if mask_found:
varmask = get_var_mask_name(key)
data_vars[varmask] = (dims,mask)
if not self.torch_flag:
data_vars[f"{varmask}_normalization"] = (['normalization'],np.array([0,1]))
return data_vars
def __getitem__(self, i):
ds = super().__getitem__(i)
# print(f'MultiDomainDataset - {[f"{key}-{val.shape}" for key,val in ds.coords.items()]}')
per_region = []
requested_boundaries = ([None]*4,) if self.requested_boundaries is None else self.requested_boundaries
# print(f'requested_boundaries = {requested_boundaries}')
for lat0,lat1,lon0,lon1 in requested_boundaries:
if lat0 is not None:
subds = ds.sel(lat = slice(lat0,lat1),lon= slice(lon0,lon1))
else:
subds = ds
single_dom_out = self.single_domain(subds)
if not self.torch_flag:
return single_dom_out
per_region.append(single_dom_out)
cropped_per_region = []
def get_slice(length: int, length_to: int):
d_left = max(0, (length - length_to) // 2)
d_right = d_left + max(0, (length - length_to)) % 2
return slice(d_left, length - d_right)
for var_inputs in zip(*per_region):
shps = []
for var_in in var_inputs:
shps.append(np.array(var_in.shape))
shps = np.stack(shps,axis = 0)
shps = np.amin(shps,axis =0)
# shps = np.amax(shps,axis =0)
group = []
for var_in in var_inputs:
slcs = [get_slice(shp,_shp) for shp,_shp in zip(var_in.shape,shps)]
var_in = var_in[slcs[0],slcs[1],slcs[2]]
# var_in = var_in[:shps[0],:shps[1],:shps[2]]
group.append(var_in)
# zer =torch.zeros(*shps)
# shps_ = var_in.shape
# zer[:shps_[0],:shps_[1],:shps_[2]] = var_in
# group.append(zer)
group = torch.stack(group,dim = 0)
cropped_per_region.append(group)
min_gpu_reject_size = 200
max_shape = np.stack([np.array(group.shape[2:]) for group in cropped_per_region],axis = 0)
max_shape = np.amax(max_shape,axis = 0)
pad_shape = np.maximum(min_gpu_reject_size - max_shape,0)
if True:#np.all(pad_shape == 0) or not torch.cuda.is_available():
return tuple(cropped_per_region)
cropped_per_region_ = []
for group in cropped_per_region:
shp = group.shape
padded_shape = np.array(shp)
padded_shape[2:] += pad_shape
z = torch.zeros(*padded_shape)
z[:,:,:shp[2],:shp[3]] = group
cropped_per_region_.append(z)
return tuple(cropped_per_region_)
def single_domain(self,outs):
data_vars,coords = tonumpydict(outs)
# for key,(dim,val) in data_vars.items():
# print(f'{key}-{dim}: {val.shape}')
for ik,iv in self.input_kwargs.items():
if ik not in coords:
if np.isscalar(iv) or isinstance(iv,str):
coords[ik] = np.array([iv])
# print('\n'.join([f'{key} : {type(coords[key])}' for key in coords]))
# print('\n'.join([f'{key} : {data_vars[key][1].shape}' for key in data_vars]))
# raise Exception
if self.latitude:
data_vars = self.add_lat_features(data_vars,coords)
data_vars,coords = self.normalize(data_vars,coords)
data_vars = self.mask(data_vars)
data_vars,coords,forcing_coords = self.pad(data_vars,coords)
# dropkeys = []
# for key in data_vars:
# if 'normalization' in key or 'scale' in key:
# dropkeys.append(key)
# continue
# if 'S' not in key:
# dropkeys.append(key)
# continue
# for dk in dropkeys:
# data_vars.pop(dk)
# selkeys = 'Su Sv Stemp'.split()
# data_vars = {key:data_vars[key] for key in selkeys}
# ds = xr.Dataset(data_vars,forcing_coords)
# ds = np.log10(np.abs(ds))
# print(ds)
# plot_ds(ds,'ds.png',ncols = 1)
# raise Exception
grouped_vars = self.group_variables(data_vars)
if self.torch_flag:
grouped_vars = self.group_np_stack(grouped_vars)
return self.group_to_torch(grouped_vars)
else:
grouped_vars = list(grouped_vars)
grouped_vars.append(coords)
grouped_vars.append(forcing_coords)
return tuple(grouped_vars)
| CemGultekin1/cm2p6 | data/low_res_dataset.py | low_res_dataset.py | py | 12,208 | python | en | code | 0 | github-code | 6 |
9772532153
|
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 11 15:49:56 2020
@author: lnajt
"""
#import pyximport; pyximport.install()
import fib
import primes
def uncompiled_fib(n):
"""Print the Fibonacci series up to n."""
a, b = 0, 1
while b < n:
print(b, end=' ')
a, b = b, a + b
print()
#number = 1000000000000000000000000000000000
#fib.fib(number)
#uncompiled_fib(number)
def uncompiled_primes(nb_primes):
p = []
if nb_primes > 1000:
nb_primes = 1000
len_p = 0 # The current number of elements in p.
n = 2
while len_p < nb_primes:
# Is n prime?
for i in p[:len_p]:
if n % i == 0:
break
# If no break occurred in the loop, we have a prime.
else:
p.append(n)
len_p += 1
n += 1
# Let's return the result in a python list:
result_as_list = [prime for prime in p[:len_p]]
return result_as_list
print(primes.primes(1000))
print(uncompiled_primes(1000))
| ElleNajt/TinyProjects | Learning_Cython/use.py | use.py | py | 1,023 | python | en | code | 4 | github-code | 6 |
5824663901
|
"""
Flask app for testing the SMART on FHIR OAuth stuff
Build from this tutorial: http://docs.smarthealthit.org/tutorials/authorization/
And using requests-oauthlib: http://requests-oauthlib.readthedocs.io/en/latest/index.html
"""
from flask import Flask, redirect, request, session
from requests_oauthlib import OAuth2Session
#from urllib import urlencode
import json
import logging
import http.client
import warnings
# Enable lots of debug logging
http.client.HTTPConnection.debuglevel = 1
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.DEBUG)
requests_log.propagate = True
# Replace these with the values you get when you register your app in the SMART sandbox
client_id = "df23ba7c-3b2b-4b92-8aec-fbe73426d472"
client_secret = "AKBmOV4tIIs6C7y2Dgy6Idquo_NUgFYolDmOpTDOtt2Hr_Nw7RglPE2aeHzBI0cuEyJN2tDgwPLQe_A2aAqLQr8"
redirect_uri = "http://localhost:5000/callback"
# Scopes to request from the SMART server
scope = [ \
"openid", \
"patient/*.*", \
"profile", \
"launch" \
]
app = Flask(__name__)
@app.route('/')
def index():
return "SMART on FHIR test client - please either launch from the SMART sandbox, or <a href='/standalone'>click here to test a standalone launch</a>"
@app.route('/standalone')
def standalone():
session['serviceUri'] = "https://sb-fhir-stu3.smarthealthit.org/smartstu3/data"
# Go to the server and get the auth endpoint URLs from its CapabilityStatement
getAuthEndpointFromServerConformance(session['serviceUri'])
# Now, start the authorization process against the auth endpoint
return authorize_user()
"""
This is the main launch URL called by the SMART on FHIR sandbox (or any SMART on FHIR enabled EPR)
"""
@app.route('/smart-app')
def launch():
# Get some launch parameters from the calling EHR system
serviceUri = request.args.get('iss') # https://sb-fhir-stu3.smarthealthit.org/smartstu3/data
launchContextId = request.args.get('launch')
# Store launch context in the session
session['launchContextId'] = launchContextId
session['serviceUri'] = serviceUri
print ("App launched from SMART sandbox, with issuer URL: "+serviceUri)
# Go to the server and get the auth endpoint URLs from its CapabilityStatement
getAuthEndpointFromServerConformance(serviceUri)
# Now, start the authorization process against the auth endpoint
return authorize_user()
"""
Go to the specified FHIR server and retrieve its CapabilityStatement to obtain the OAuth details
"""
def getAuthEndpointFromServerConformance(serviceUri):
# The issuer is the server endpoint - get its conformance profile to find the auth URL
conformanceResource = getRemoteResource(serviceUri)
# Parse the oauth URLs from the profile
conformanceJSON = json.loads(conformanceResource)
authorizeUrl = ''
tokenUrl = ''
# Nasty hacky unsafe parsing - perhaps look to use either the python fhir client, or a jsonpath library?
for entry in conformanceJSON["rest"][0]["security"]["extension"][0]["extension"]:
if entry['url'] == 'authorize':
authorizeUrl = entry['valueUri']
elif entry['url'] == 'token':
tokenUrl = entry['valueUri']
print ("Got an authorization URL from the capabilitystatement:"+authorizeUrl)
print ("Got a token URL from the capabilitystatement:"+tokenUrl)
# Store the relevant parameters in the session to use for authorizing
session['authorizeUrl'] = authorizeUrl
session['tokenUrl'] = tokenUrl
"""
Use the python oauth2 client to call the authorization endpoint
"""
def authorize_user():
smart_auth_session = OAuth2Session(client_id)
if 'launchContextId' in session:
authorization_url, state = smart_auth_session.authorization_url(session['authorizeUrl'], \
aud=session['serviceUri'], \
launch=session['launchContextId'])
else:
authorization_url, state = smart_auth_session.authorization_url(session['authorizeUrl'], \
aud=session['serviceUri'])
# State is used to prevent CSRF, keep this for later.
session['oauth_state'] = state
print ("Redirecting to authorization URL:"+authorization_url)
return redirect(authorization_url)
"""
Callback URL called by authorization server once the user has logged in.
Takes their authorization code and calls the token endpoint to get an access token.
"""
@app.route("/callback", methods=["GET", "POST"])
def callback():
# Retrieving an access token
smart_auth_session = OAuth2Session(client_id, scope=scope, redirect_uri=redirect_uri, state=session['oauth_state'])
token_url = session['tokenUrl']
token_response = smart_auth_session.fetch_token(token_url, client_secret=client_secret, \
authorization_response=request.url)
session['oauth_token'] = token_response
if 'patient' in session:
# Get the patient ID passed in with the token
patient_id = token_response['patient']
return getPatientDetails(patient_id)
else:
return getPatientList()
"""
Access a protected FHIR resource from the SMART server, passing our access token in the request
"""
def getPatientDetails(patient_id):
protected_resource_request = OAuth2Session(client_id, token=session['oauth_token'])
fhir_root = session['serviceUri']
patient_url = fhir_root+"/Patient/"+patient_id
return json.dumps(protected_resource_request.get(patient_url).json())
def getPatientList():
protected_resource_request = OAuth2Session(client_id, token=session['oauth_token'])
fhir_root = session['serviceUri']
patient_url = fhir_root+"/Patient"
return json.dumps(protected_resource_request.get(patient_url).json())
"""
Takes the base FHIR server URL and uses it to retrieve a conformance resource for the server
"""
def getRemoteResource(serviceUri):
remoteEndpoint = (serviceUri + '/metadata')[8:]
separator = remoteEndpoint.find('/')
host = remoteEndpoint[:separator]
path = remoteEndpoint[separator:]
conn = http.client.HTTPSConnection(host)
conn.request("GET", path)
response = conn.getresponse()
resultResource = response.read().decode('utf-8')  # http.client responses expose read(), not readall()
return resultResource
"""
Initialise our Flask server in debug mode
"""
if __name__ == '__main__':
import os
os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'
os.environ['OAUTHLIB_RELAX_TOKEN_SCOPE'] = '1'
app.secret_key = os.urandom(24)
app.run(host="localhost", port=5000, debug=True)
| ahatherly/SMART-on-FHIR-testclient | app.py | app.py | py | 6,659 | python | en | code | 0 | github-code | 6 |
1854100159
|
import sys
import os
import pandas as pd
import seaborn as sns; sns.set(style="ticks", color_codes=True)
class Data(object):
def __init__(self, fn = "Advertising.csv"):
# find path to data files, either called from top-level or within scripts
if os.path.exists("../data/"+fn):
data_path = "../data/"
elif os.path.exists("data/"+fn):
data_path = "data/"
else:# fn probably includes a path
data_path=""
self.file = data_path+fn
self.raw_data = self.load_data()
def load_data(self):
data = pd.read_csv(self.file)
return data
@staticmethod
def xy_split(df):
X = df.loc[:, df.columns!="Sales"]
y = df.loc[:, df.columns=="Sales"]
return X, y
@staticmethod
def pair_plot(df, fn = "adv_pplot.png"):
plt = sns.pairplot(df)
plt.savefig(fn)
def clean(self, df):
X,y = self.xy_split(df)
if __name__ == "__main__":
data_1 = Data()
X, y = data_1.xy_split(data_1.raw_data)
data_1.pair_plot(data_1.raw_data)
| amloren1/pg | scripts/advertising.py | advertising.py | py | 1,102 | python | en | code | 0 | github-code | 6 |
43600893416
|
# -*- coding: utf-8 -*-
from django.contrib import admin
from adminsortable2.admin import SortableAdminMixin, SortableInlineAdminMixin
from modeltranslation.admin import (
TranslationAdmin, TranslationTabularInline, TranslationStackedInline,
TabbedTranslationAdmin
)
from .models import (
SiteSettings, FooterSettings, NavigationMenu, NavigationLinks
)
class HeaderSettingsAdminMixin(object):
"""
Mixin class for separating SEO data in the admin panel
"""
def get_fieldsets(self, request, obj=None):
seo_fields = ['left_side_title_en', 'left_side_title_ru',
'right_side_title_en', 'right_side_title_ru',
'right_side_description_en', 'right_side_description_ru',
'button_text_en', 'button_text_ru',
'button_link_en', 'button_link_ru']
if self.fieldsets:
return self.fieldsets
fields = [
x for x in self.get_fields(request, obj) if not x in seo_fields
]
return [
(None, {'fields': fields}), ('HeaderSettings', {
'fields': seo_fields
})
]
@admin.register(SiteSettings)
class SiteSettingsAdmin(admin.ModelAdmin):
fields = ('favicon', 'logo', 'preloader')
@admin.register(FooterSettings)
class FooterSettingsAdmin(TabbedTranslationAdmin):
class Media:
js = (
'http://ajax.googleapis.com/ajax/libs/jquery/1.9.1/jquery.min.js',
'http://ajax.googleapis.com/ajax/libs/jqueryui/1.10.2/jquery-ui.min.js',
'modeltranslation/js/tabbed_translation_fields.js',
)
css = {
'screen': ('modeltranslation/css/tabbed_translation_fields.css',),
}
class NavigationLinksTabularInline(
SortableInlineAdminMixin, TranslationStackedInline):
model = NavigationLinks
extra = 0
@admin.register(NavigationMenu)
class NavigationMenuAdmin(TabbedTranslationAdmin):
list_display = ['name', 'menu_type']
inlines = (NavigationLinksTabularInline,)
class Media:
js = (
'http://ajax.googleapis.com/ajax/libs/jquery/1.9.1/jquery.min.js',
'http://ajax.googleapis.com/ajax/libs/jqueryui/1.10.2/jquery-ui.min.js',
'modeltranslation/js/tabbed_translation_fields.js',
'js/admin/admin_navigation_menu.js',
)
css = {
'screen': ('modeltranslation/css/tabbed_translation_fields.css',),
}
| CrazyChief/acidbro | core/admin.py | admin.py | py | 2,524 | python | en | code | 0 | github-code | 6 |
16146274526
|
from simp_py import tft
lcd = tft.tft
import machine
import time  # needed for time.sleep() below
rtc = machine.RTC()
synced = False
for i in range(3):
try:
rtc.ntp_sync('pool.ntp.org')
synced = True
break
except:
time.sleep(1)
if not synced:
lcd.text(0,50, 'time sync failed')
else:
while True:
tuplex = rtc.now()
YYYY,MM,DD,hh,mm,ss,_,_ = tuplex
ss = '%d-%d-%d %02d:%02d:%02d' % (YYYY,MM,DD,hh,mm,ss)
lcd.text(0,50,ss)
time.sleep(1)
| kcfkwok2003/Simp_py | simp_py_examples/course/SM001_old/t209.py | t209.py | py | 443 | python | en | code | 0 | github-code | 6 |
2704503307
|
# -*- coding: utf-8 -*-
from django.test import Client, RequestFactory, TestCase
from tasks import views
from tasks.models import Task, TaskStatus
from users.models import CustomUser
class TaskTest(TestCase):
"""Test cases for tasks."""
def setUp(self):
"""Initial setup before tests."""
self.factory = RequestFactory()
self.user = CustomUser.objects.create_user( # noqa: S106
username='testuser',
password='supersecret',
)
self.client = Client()
def createTask(self, name='Test task name'): # noqa: N802
"""Create test task."""
status = TaskStatus.objects.create(name='New')
return Task.objects.create(
name=name,
assigned_to=self.user,
creator=self.user,
status=status,
tags=['important', 'test'],
)
def test_task_create(self):
"""Test task creation."""
task = self.createTask()
self.assertTrue(isinstance(task, Task))
self.assertEqual(task.__str__(), task.name) # noqa: WPS609
self.assertEqual(Task.objects.count(), 1)
def test_tasks_list(self):
"""Test tasklist view."""
request = self.factory.get('/')
request.user = self.user
response = views.TaskList.as_view()(request)
self.assertEqual(response.status_code, 200) # noqa: WPS432
| altvec/python-project-lvl4 | tasks/tests.py | tests.py | py | 1,403 | python | en | code | 0 | github-code | 6 |
12981024226
|
#!/usr/bin/env python
"""
Pymodbus Synchronous Client Example to showcase Device Information
--------------------------------------------------------------------------
This client demonstrates the use of Device Information to get information
about servers connected to the client. This is part of the MODBUS specification,
and uses the MEI 0x2B 0x0E request / response.
"""
# --------------------------------------------------------------------------- #
# import the various server implementations
# --------------------------------------------------------------------------- #
from pymodbus.client.sync import ModbusTcpClient as ModbusClient
# from pymodbus.client.sync import ModbusUdpClient as ModbusClient
# from pymodbus.client.sync import ModbusSerialClient as ModbusClient
# --------------------------------------------------------------------------- #
# import the request
# --------------------------------------------------------------------------- #
from pymodbus.mei_message import ReadDeviceInformationRequest
from pymodbus.device import ModbusDeviceIdentification
# --------------------------------------------------------------------------- #
# configure the client logging
# --------------------------------------------------------------------------- #
import logging
FORMAT = ('%(asctime)-15s %(threadName)-15s '
'%(levelname)-8s %(module)-15s:%(lineno)-8s %(message)s')
logging.basicConfig(format=FORMAT)
log = logging.getLogger()
log.setLevel(logging.DEBUG)
UNIT = 0x1
def run_sync_client():
# ------------------------------------------------------------------------#
# choose the client you want
# ------------------------------------------------------------------------#
# make sure to start an implementation to hit against. For this
# you can use an existing device, the reference implementation in the tools
# directory, or start a pymodbus server.
#
# If you use the UDP or TCP clients, you can override the framer being used
# to use a custom implementation (say RTU over TCP). By default they use
# the socket framer::
#
# client = ModbusClient('localhost', port=5020, framer=ModbusRtuFramer)
#
# It should be noted that you can supply an ipv4 or an ipv6 host address
# for both the UDP and TCP clients.
#
# There are also other options that can be set on the client that controls
# how transactions are performed. The current ones are:
#
# * retries - Specify how many retries to allow per transaction (default=3)
# * retry_on_empty - Is an empty response a retry (default = False)
# * source_address - Specifies the TCP source address to bind to
#
# Here is an example of using these options::
#
# client = ModbusClient('localhost', retries=3, retry_on_empty=True)
# ------------------------------------------------------------------------#
client = ModbusClient('localhost', port=5020)
# from pymodbus.transaction import ModbusRtuFramer
# client = ModbusClient('localhost', port=5020, framer=ModbusRtuFramer)
# client = ModbusClient(method='binary', port='/dev/ptyp0', timeout=1)
# client = ModbusClient(method='ascii', port='/dev/ptyp0', timeout=1)
# client = ModbusClient(method='rtu', port='/dev/ptyp0', timeout=1,
# baudrate=9600)
client.connect()
# ------------------------------------------------------------------------#
# specify slave to query
# ------------------------------------------------------------------------#
# The slave to query is specified in an optional parameter for each
# individual request. This can be done by specifying the `unit` parameter
# which defaults to `0x00`
# ----------------------------------------------------------------------- #
log.debug("Reading Device Information")
information = {}
rr = None
while not rr or rr.more_follows:
next_object_id = rr.next_object_id if rr else 0
rq = ReadDeviceInformationRequest(read_code=0x03, unit=UNIT,
object_id=next_object_id)
rr = client.execute(rq)
information.update(rr.information)
log.debug(rr)
print("Device Information : ")
for key in information.keys():
print(key, information[key])
# ----------------------------------------------------------------------- #
# You can also have the information parsed through the
# ModbusDeviceIdentificiation class, which gets you a more usable way
# to access the Basic and Regular device information objects which are
# specifically listed in the Modbus specification
# ----------------------------------------------------------------------- #
di = ModbusDeviceIdentification(info=information)
print('Product Name : ', di.ProductName)
# ----------------------------------------------------------------------- #
# close the client
# ----------------------------------------------------------------------- #
client.close()
if __name__ == "__main__":
run_sync_client()
| renatosperlongo/pymodbus | examples/contrib/deviceinfo_showcase_client.py | deviceinfo_showcase_client.py | py | 5,108 | python | en | code | 1 | github-code | 6 |
17418292210
|
import math
class Vector3 (object):
__slots__ = ('x', 'y', 'z', '_x', '_y', '_z')
__hash__ = None
def __init__(self, x=0, y=0, z=0):
self.x = self._x = x
self.y = self._y = y
self.z = self._z = z
def __copy__(self):
return self.__class__(self.x, self.y, self.z)
copy = __copy__
def __repr__(self):
return 'Vector3(%.2f, %.2f, %.2f)' % (self.x, self.y, self.z)
def __eq__(self, other):
return self.x == other.x and self.y == other.y and self.z == other.z
def __ne__(self, other):
return not self.__eq__(other)
def __gt__(self, other):
return self.x > other.x and self.y > other.y and self.z > other.z
def __ge__(self, other):
return self.x >= other.x and self.y >= other.y and self.z >= other.z
def __lt__(self, other):
return self.x < other.x and self.y < other.y and self.z < other.z
def __le__(self, other):
return self.x <= other.x and self.y <= other.y and self.z <= other.z
def __nonzero__(self):
return self.x != 0 or self.y != 0 or self.z != 0
def __add__(self, other):
return self.__class__(self.x + other.x, self.y + other.y, self.z + other.z)
def __iadd__(self, other):
self._x = self.x
self._y = self.y
self._z = self.z
self.x += other.x
self.y += other.y
self.z += other.z
return self
def __sub__(self, other):
return self.__class__(self.x - other.x, self.y - other.y, self.z - other.z)
def __isub__(self, other):
self.x -= other.x
self.y -= other.y
self.z -= other.z
return self
def __mul__(self, other):
return self.__class__(self.x * other, self.y * other, self.z * other)
def __imul__(self, other):
self.x *= other
self.y *= other
self.z *= other
return self
def __div__(self, other):
return self.__class__(self.x / other, self.y / other, self.z / other)
def __idiv__(self, other):
self.x /= other
self.y /= other
self.z /= other
return self
__truediv__ = __div__
__itruediv__ = __idiv__
def __neg__(self):
return self.__class__(-self.x, -self.y, -self.z)
def magnitude_squared(self):
return self.x ** 2 + self.y ** 2 + self.z ** 2
def magnitude(self):
return math.sqrt(self.magnitude_squared())
def normalize(self):
d = self.magnitude()
if d:
self.x /= d
self.y /= d
self.z /= d
return self
def normalized(self):
return self.copy().normalize()
def dot(self, other):
return (self.x * other.x) + (self.y * other.y) + (self.z * other.z)
def cross(self, other):
return self.__class__(
(self.y * other.z) - (self.z * other.y),
(-self.x * other.z) + (self.z * other.x),
(self.x * other.y) - (self.y * other.x)
)
def project(self, other):
n = other.normalized()
return n * self.dot(n)
def distance_squared(self, other):
return ((self.x - other.x) ** 2) + ((self.y - other.y) ** 2) + ((self.z - other.z) ** 2)
def set(self, x, y, z, save=True):
self._x = self.x if save else x
self._y = self.y if save else y
self._z = self.z if save else z
self.x = x
self.y = y
self.z = z
return self
def revert(self):
self.x = self._x
self.y = self._y
self.z = self._z
return self
class Point3 (Vector3):
def velocity(self):
return Vector3(self.x - self._x, self.y - self._y, self.z - self._z)
def verlet(self, a, dt):
tx = self.x
ty = self.y
tz = self.z
m = a * (dt * dt)
self.x += (self.x - self._x) + m.x
self.y += (self.y - self._y) + m.y
self.z += (self.z - self._z) + m.z
self._x = tx
self._y = ty
self._z = tz
# a b c d
# e f g h
# i j k l
# m n o p
class Matrix4 (object):
__slots__ = list('abcdefghijklmnop')
def __init__(self):
self.identity()
def __copy__(self):
M = self.__class__()
M.a = self.a
M.b = self.b
M.c = self.c
M.d = self.d
M.e = self.e
M.f = self.f
M.g = self.g
M.h = self.h
M.i = self.i
M.j = self.j
M.k = self.k
M.l = self.l
M.m = self.m
M.n = self.n
M.o = self.o
M.p = self.p
return M
copy = __copy__
def __repr__(self):
return ('Matrix4([% 8.2f % 8.2f % 8.2f % 8.2f\n' \
' % 8.2f % 8.2f % 8.2f % 8.2f\n' \
' % 8.2f % 8.2f % 8.2f % 8.2f\n' \
' % 8.2f % 8.2f % 8.2f % 8.2f])') \
% (self.a, self.b, self.c, self.d,
self.e, self.f, self.g, self.h,
self.i, self.j, self.k, self.l,
self.m, self.n, self.o, self.p)
def __mul__(self, other):
if isinstance(other, Matrix4):
Aa = self.a
Ab = self.b
Ac = self.c
Ad = self.d
Ae = self.e
Af = self.f
Ag = self.g
Ah = self.h
Ai = self.i
Aj = self.j
Ak = self.k
Al = self.l
Am = self.m
An = self.n
Ao = self.o
Ap = self.p
Ba = other.a
Bb = other.b
Bc = other.c
Bd = other.d
Be = other.e
Bf = other.f
Bg = other.g
Bh = other.h
Bi = other.i
Bj = other.j
Bk = other.k
Bl = other.l
Bm = other.m
Bn = other.n
Bo = other.o
Bp = other.p
C = Matrix4()
C.a = Aa * Ba + Ab * Be + Ac * Bi + Ad * Bm
C.b = Aa * Bb + Ab * Bf + Ac * Bj + Ad * Bn
C.c = Aa * Bc + Ab * Bg + Ac * Bk + Ad * Bo
C.d = Aa * Bd + Ab * Bh + Ac * Bl + Ad * Bp
C.e = Ae * Ba + Af * Be + Ag * Bi + Ah * Bm
C.f = Ae * Bb + Af * Bf + Ag * Bj + Ah * Bn
C.g = Ae * Bc + Af * Bg + Ag * Bk + Ah * Bo
C.h = Ae * Bd + Af * Bh + Ag * Bl + Ah * Bp
C.i = Ai * Ba + Aj * Be + Ak * Bi + Al * Bm
C.j = Ai * Bb + Aj * Bf + Ak * Bj + Al * Bn
C.k = Ai * Bc + Aj * Bg + Ak * Bk + Al * Bo
C.l = Ai * Bd + Aj * Bh + Ak * Bl + Al * Bp
C.m = Am * Ba + An * Be + Ao * Bi + Ap * Bm
C.n = Am * Bb + An * Bf + Ao * Bj + Ap * Bn
C.o = Am * Bc + An * Bg + Ao * Bk + Ap * Bo
C.p = Am * Bd + An * Bh + Ao * Bl + Ap * Bp
return C
elif isinstance(other, Point3):
A = self
B = other
x = A.a * B.x + A.b * B.y + A.c * B.z + A.d
y = A.e * B.x + A.f * B.y + A.g * B.z + A.h
z = A.i * B.x + A.j * B.y + A.k * B.z + A.l
return Point3(x, y, z)
elif isinstance(other, Vector3):
A = self
B = other
x = A.a * B.x + A.b * B.y + A.c * B.z
y = A.e * B.x + A.f * B.y + A.g * B.z
z = A.i * B.x + A.j * B.y + A.k * B.z
return Vector3(x, y, z)
def __imul__(self, other):
assert isinstance(other, Matrix4)
Aa = self.a
Ab = self.b
Ac = self.c
Ad = self.d
Ae = self.e
Af = self.f
Ag = self.g
Ah = self.h
Ai = self.i
Aj = self.j
Ak = self.k
Al = self.l
Am = self.m
An = self.n
Ao = self.o
Ap = self.p
Ba = other.a
Bb = other.b
Bc = other.c
Bd = other.d
Be = other.e
Bf = other.f
Bg = other.g
Bh = other.h
Bi = other.i
Bj = other.j
Bk = other.k
Bl = other.l
Bm = other.m
Bn = other.n
Bo = other.o
Bp = other.p
self.a = Aa * Ba + Ab * Be + Ac * Bi + Ad * Bm
self.b = Aa * Bb + Ab * Bf + Ac * Bj + Ad * Bn
self.c = Aa * Bc + Ab * Bg + Ac * Bk + Ad * Bo
self.d = Aa * Bd + Ab * Bh + Ac * Bl + Ad * Bp
self.e = Ae * Ba + Af * Be + Ag * Bi + Ah * Bm
self.f = Ae * Bb + Af * Bf + Ag * Bj + Ah * Bn
self.g = Ae * Bc + Af * Bg + Ag * Bk + Ah * Bo
self.h = Ae * Bd + Af * Bh + Ag * Bl + Ah * Bp
self.i = Ai * Ba + Aj * Be + Ak * Bi + Al * Bm
self.j = Ai * Bb + Aj * Bf + Ak * Bj + Al * Bn
self.k = Ai * Bc + Aj * Bg + Ak * Bk + Al * Bo
self.l = Ai * Bd + Aj * Bh + Ak * Bl + Al * Bp
self.m = Am * Ba + An * Be + Ao * Bi + Ap * Bm
self.n = Am * Bb + An * Bf + Ao * Bj + Ap * Bn
self.o = Am * Bc + An * Bg + Ao * Bk + Ap * Bo
self.p = Am * Bd + An * Bh + Ao * Bl + Ap * Bp
return self
def transform(self, point):
A = self
B = point
x = A.a * B.x + A.b * B.y + A.c * B.z + A.d
y = A.e * B.x + A.f * B.y + A.g * B.z + A.h
z = A.i * B.x + A.j * B.y + A.k * B.z + A.l
w = A.m * B.x + A.n * B.y + A.o * B.z + A.p
if w != 0:
x /= w
y /= w
z /= w
return Point3(x, y, z)
def itransform(self, point):
A = self
B = point
# Compute every component from the original coordinates before mutating B,
# otherwise the later rows would read the already-transformed values.
x = A.a * B.x + A.b * B.y + A.c * B.z + A.d
y = A.e * B.x + A.f * B.y + A.g * B.z + A.h
z = A.i * B.x + A.j * B.y + A.k * B.z + A.l
w = A.m * B.x + A.n * B.y + A.o * B.z + A.p
if w != 0:
x /= w
y /= w
z /= w
B.x = x
B.y = y
B.z = z
return B
def identity(self):
self.a = self.f = self.k = self.p = 1.0
self.b = self.c = self.d = self.e = self.g = self.h = self.i = self.j = self.l = self.m = self.n = self.o = 0
return self
def set_translation(self, x, y, z):
self.d = x
self.h = y
self.l = z
return self
def set_rotation(self, yaw, pitch, roll):
ch = math.cos(yaw)
sh = math.sin(yaw)
ca = math.cos(pitch)
sa = math.sin(pitch)
cb = math.cos(roll)
sb = math.sin(roll)
self.a = ch * ca
self.b = sh * sb - ch * sa * cb
self.c = ch * sa * sb + sh * cb
self.e = sa
self.f = ca * cb
self.g = -ca * sb
self.i = -sh * ca
self.j = sh * sa * cb + ch * sb
self.k = -sh * sa * sb + ch * cb
self.m = self.n = self.o = 0
self.p = 1.0
return self
| dcwatson/pavara-pyglet | pavara/vecmath.py | vecmath.py | py | 10,903 | python | en | code | 1 | github-code | 6 |
12579350671
|
import numpy as np
import pandas as pd
#FEATURES
def make_feature_changing(df, feature_changing_list):
for func, params in feature_changing_list:
df = func(df=df, **params)
return df
#MULTIPLE PREDICTIONS
def multiple_predictions_TST(models_TST, Xs_TST):
predicts = []
for idx, model in enumerate(models_TST):
predict = model.predict(Xs_TST[idx])
predicts.append(predict)
return np.array(predicts).T
def multiple_predictions_C(models_C, Xs_C):
predicts = []
for idx, model in enumerate(models_C):
predict = model.predict(Xs_C[idx])
predicts.append(predict)
return np.array(predicts).T
# METRICS
def metric(answers, user_csv):
delta_c = np.abs(np.array(answers['C']) - np.array(user_csv['C']))
hit_rate_c = np.int64(delta_c < 0.02)
delta_t = np.abs(np.array(answers['TST']) - np.array(user_csv['TST']))
hit_rate_t = np.int64(delta_t < 20)
N = np.size(answers['C'])
return np.sum(hit_rate_c + hit_rate_t) / 2 / N
def metric_log(answers, user_csv):
delta_c = np.abs(np.exp(np.array(answers['log_C'])) - np.exp(np.array(user_csv['log_C'])))
hit_rate_c = np.int64(delta_c < 0.02)
delta_t = np.abs(np.array(answers['TST']) - np.array(user_csv['TST']))
hit_rate_t = np.int64(delta_t < 20)
N = np.size(answers['log_C'])
return np.sum(hit_rate_c + hit_rate_t) / 2 / N
def metric_C(y_true, y_pred):
delta_c = np.abs(y_true - y_pred)
hit_rate_c = np.int64(delta_c < 0.02)
return hit_rate_c.mean()
def metric_TST(y_true, y_pred):
delta_t = np.abs(y_true - y_pred)
hit_rate_t = np.int64(delta_t < 20)
return hit_rate_t.mean()
def metric_C_log(y_true, y_pred):
delta_c = np.abs(np.exp(np.array(y_true)) - np.exp(np.array(y_pred)))
hit_rate_c = np.int64(delta_c < 0.02)
return hit_rate_c.mean()
| mechmabot/EVRAZ_AI | module.py | module.py | py | 1,856 | python | en | code | 0 | github-code | 6 |
36646912477
|
import matplotlib
matplotlib.use('Agg') # noqa
from deepdecoder.data import generator_3d_tags_with_depth_map, DistributionHDF5Dataset
import diktya.distributions
from diktya.numpy import tile
import matplotlib.pyplot as plt
import os
import argparse
from keras.utils.generic_utils import Progbar
from scipy.ndimage.interpolation import zoom
from scipy.ndimage.filters import gaussian_filter1d
from scipy.misc import imsave
from deepdecoder.scripts.default_3d_tags_distribution import default_tag_distribution
def generator(tag_dist, batch_size, antialiasing=1):
s = antialiasing
depth_scale = 1/2
for param, mask, depth_map in generator_3d_tags_with_depth_map(
tag_dist, batch_size, antialiasing=s, depth_scale=depth_scale):
depth_map = gaussian_filter1d(depth_map, 2/6/depth_scale, axis=-1, mode='constant')
depth_map = gaussian_filter1d(depth_map, 2/6/depth_scale, axis=-2, mode='constant')
depth_map = zoom(depth_map, (1., 1., depth_scale, depth_scale))
yield param, mask, depth_map
def plot_anitaliasing(tag_dist, fname, a, nb_samples=64):
_, masks, depth_map = next(generator(tag_dist, nb_samples, antialiasing=a))
tiled = tile(masks)[0]
imsave(fname.format(a), tiled)
def run(tag_dist, output_fname, force, nb_samples):
os.makedirs(os.path.dirname(output_fname), exist_ok=True)
if os.path.exists(output_fname) and force:
print("Deleted {}".format(output_fname))
os.remove(output_fname)
else:
assert not os.path.exists(output_fname), \
"File {} already exists. Use --force to override it".format(output_fname)
basename, _ = os.path.splitext(output_fname)
anit_name = basename + "_anti_{}.png"
hist_name = basename + "_hist_{}.png"
plot_anitaliasing(tag_dist, anit_name, 1)
plot_anitaliasing(tag_dist, anit_name, 2)
plot_anitaliasing(tag_dist, anit_name, 4)
plot_anitaliasing(tag_dist, anit_name, 8)
labels, masks, _ = next(generator(tag_dist, 10000, antialiasing=2))
for key in labels.dtype.names:
m = labels[key].mean()
s = labels[key].std()
print("{}: {:.3f}, {:.3f}".format(key, m, s))
assert abs(m) <= 0.03
for label_name in sorted(set(labels.dtype.names) - set(['bits'])):
x = labels[label_name]
plt.hist(x.flatten(), bins=40, normed=True)
plt.savefig(hist_name.format(label_name))
plt.clf()
dset = DistributionHDF5Dataset(output_fname, distribution=tag_dist,
nb_samples=nb_samples, mode='w')
progbar = Progbar(nb_samples)
batch_size = min(25000, nb_samples)
for labels, tags, depth_map in generator(tag_dist, batch_size, antialiasing=4):
pos = dset.append(labels=labels, tag3d=tags, depth_map=depth_map)
progbar.update(pos)
if pos == nb_samples:
break
print("Saved tag 3d dataset to: {}".format(output_fname))
dist_fname = basename + "_distribution.json"
with open(dist_fname, "w+") as dist_f:
dist_f.write(tag_dist.to_json())
print("Saved distribution to: {}".format(dist_fname))
def main():
parser = argparse.ArgumentParser(
description='Generate images and depth maps from the 3d object model of the tag')
parser.add_argument('output', type=str, help='output file name')
parser.add_argument('-f', '--force', action='store_true',
help='override existing output files')
parser.add_argument('-d', '--dist', type=str, default=default_tag_distribution(),
help='Json params of the distribution')
parser.add_argument('-n', '--nb-samples', type=float, required=True,
help='Number of samples to generate')
args = parser.parse_args()
if type(args.dist) == str:
with open(args.dist) as f:
dist = diktya.distributions.load_from_json(f.read())
else:
dist = args.dist
run(dist, args.output, args.force, int(args.nb_samples))
if __name__ == "__main__":
main()
| berleon/deepdecoder | deepdecoder/scripts/generate_3d_tags.py | generate_3d_tags.py | py | 4,038 | python | en | code | 50 | github-code | 6 |
40986191942
|
import random , sys , traceback
from time import sleep
from selenium import webdriver
import datetime
c=1;
browser = webdriver.Chrome('D:\\Python\\Bot Insta\\chromedriver')
browser.get('https://google.com')
while c== 1:
c=0
try:
browser.find_element_by_xpath('/html/body/ytd-app/div/div/ytd-masthead/div[3]/div[1]/ytd-topbar-logo-renderer/a/div[1]').click()
print('it ran')
except:
c=1
sleep(2)
print('we are in the exception handler')
print('done')
# while browser.find_element_by_xpath('/html/body/ytd-app/div/div/ytd-masthead/div[3]/div[1]/ytd-topbar-logo-renderer/a/div[1]')==[]:
# print("not good, we cannot find what we need")
# sleep(2)
# print('we found it')
#browser.close()
#browser.quit()
| mirceah99/Python-Bot-Insta | Teste.py | Teste.py | py | 779 | python | en | code | 0 | github-code | 6 |
3714759753
|
from unittest import TestCase
from datetime import datetime
from uuid import uuid4
from sh import git, rm, gitlint, touch, echo, ErrorReturnCode
class BaseTestCase(TestCase):
pass
class IntegrationTests(BaseTestCase):
""" Simple set of integration tests for gitlint """
tmp_git_repo = None
@classmethod
def setUpClass(cls):
""" Sets up the integration tests by creating a new temporary git repository """
cls.tmp_git_repo = "/tmp/gitlint-test-%s" % datetime.now().strftime("%Y%m%d-%H%M%S")
git("init", cls.tmp_git_repo)
# configuring name and email is required in every git repo
git("config", "user.name", "gitlint-test-user", _cwd=cls.tmp_git_repo)
git("config", "user.email", "[email protected]", _cwd=cls.tmp_git_repo)
@classmethod
def tearDownClass(cls):
""" Cleans up the temporary git repository """
rm("-rf", cls.tmp_git_repo)
def _create_simple_commit(self, message):
""" Creates a simple commit with an empty test file.
:param message: Commit message for the commit. """
test_filename = "test-file-" + str(uuid4())
touch(test_filename, _cwd=self.tmp_git_repo)
git("add", test_filename, _cwd=self.tmp_git_repo)
git("commit", "-m", message, _cwd=self.tmp_git_repo)
def test_successful(self):
self._create_simple_commit("Simple title\n\nSimple body")
output = gitlint(_cwd=self.tmp_git_repo, _tty_in=True)
self.assertEqual(output, "")
def test_errors(self):
commit_msg = "WIP: This is a title.\nContent on the second line"
self._create_simple_commit(commit_msg)
output = gitlint(_cwd=self.tmp_git_repo, _tty_in=True, _ok_code=[3])
expected = "1: T3 Title has trailing punctuation (.): \"WIP: This is a title.\"\n" + \
"1: T5 Title contains the word 'WIP' (case-insensitive): \"WIP: This is a title.\"\n" + \
"2: B4 Second line is not empty: \"Content on the second line\"\n"
self.assertEqual(output, expected)
def test_pipe_input(self):
error_msg = None
# For some odd reason, sh doesn't return the error output when piping something into gitlint.
# Note that this does work as expected in the test_errors testcase.
# To work around this we raise and catch an exception
try:
gitlint(echo("WIP: Pipe test."), _tty_in=False)
except ErrorReturnCode as e:
# StdErr is returned as bytes -> decode to unicode string
# http://stackoverflow.com/questions/606191/convert-bytes-to-a-python-string
error_msg = e.stderr.decode("utf-8")
expected = "1: T3 Title has trailing punctuation (.): \"WIP: Pipe test.\"\n" + \
"1: T5 Title contains the word 'WIP' (case-insensitive): \"WIP: Pipe test.\"\n" + \
"3: B6 Body message is missing\n"
self.assertEqual(error_msg, expected)
| Hawatel/gitlint | qa/integration_test.py | integration_test.py | py | 2,991 | python | en | code | null | github-code | 6 |
11221441363
|
import os, bcrypt
from datetime import datetime
from flask import Flask, request, jsonify, render_template
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__, static_folder='.')
app.config['UPLOAD_FOLDER'] = 'uploads'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///app.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(20), unique=True, nullable=False)
email = db.Column(db.String(120), unique=True, nullable=False)
password = db.Column(db.BINARY(60), nullable=False)
posts = db.relationship('Post', backref='author', lazy=True)
def __repr__(self):
return f'User({self.username}, {self.email})'
class Post(db.Model):
id = db.Column(db.Integer, primary_key=True)
type = db.Column(db.Integer, nullable=False)
date = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
position = db.Column(db.String(), nullable=False)
description = db.Column(db.Text, nullable=False)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
def __repr__(self):
return f'Post({self.type}, {self.date})'
def serialize(self):
return {
'id': self.id,
'type': self.type,
'date': self.date,
'position': self.position,
'description': self.description,
'user_id': self.user_id
}
def path(id):
return os.path.join(app.config['UPLOAD_FOLDER'], str(id) + '.jpg')
@app.route('/')
def hello():
return 'Hello World!'
@app.route('/user', methods=['POST'])
def user():
if 'id' not in request.form:
return 'id missing', 400
user = User.query.filter_by(id=request.form['id']).first()
if user == None:
return 'inexistant', 404
return user.username
@app.route('/data', methods=['GET', 'POST', 'DELETE'])
def data():
if request.method == 'POST':
for key in ['type', 'position', 'description', 'user_id']:
if request.form.get(key) == None:
return key + ' missing', 400
if 'image' not in request.files:
return 'image missing', 400
file = request.files['image']
if file.filename == '':
return 'image missing', 400
post = Post(
type = request.form['type'],
position = request.form['position'],
description = request.form['description'],
user_id = request.form['user_id']
)
db.session.add(post)
db.session.flush()
file.save(path(post.id))
db.session.commit()
return jsonify(post.serialize())
elif request.method == 'DELETE':
if 'id' not in request.form:
return 'id missing', 400
id=request.form['id']
Post.query.filter_by(id=id).delete()
db.session.commit()
file = path(id)
if os.path.exists(file):
os.remove(file)
return 'ok'
    if request.args.get('form') is not None:
return app.send_static_file('app.html')
    if request.args.get('id') is not None:
post = Post.query.filter_by(id=request.args['id']).first()
        if post is None:
return 'not found', 404
return jsonify(post.serialize())
return jsonify([post.serialize() for post in Post.query.all()])
    # if neither, 405 or 406
if __name__ == '__main__':
app.run(debug = True)
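# Hedged client-side sketch (not part of the original app): one way the /data
# endpoint above could be exercised with the `requests` library. The URL, file
# name and field values below are illustrative assumptions, not project defaults.
#
#   import requests
#   resp = requests.post(
#       "http://127.0.0.1:5000/data",
#       data={"type": 1, "position": "45.50,-73.57", "description": "demo", "user_id": 1},
#       files={"image": open("demo.jpg", "rb")},
#   )
#   print(resp.json())  # echoes the serialized Post that was created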
|
tran-simon/hackatown
|
app.py
|
app.py
|
py
| 3,082 |
python
|
en
|
code
| 0 |
github-code
|
6
|
75341615226
|
"""Train an EfficientNetB4 model to predict GBM vs PCNSL.
This requires TensorFlow >= 2.3.0.
"""
import argparse
import math
from pathlib import Path
import pickle
from typing import Tuple, Union
import h5py
import numpy as np
import tensorflow as tf
PathType = Union[str, Path]
def augment_base(x, y):
x = tf.image.random_brightness(x, max_delta=2)
x = tf.image.random_flip_left_right(x)
x = tf.image.random_flip_up_down(x)
x = tf.image.random_hue(x, max_delta=0.25)
return x, y
def augment_base_and_noise(x, y):
x, y = augment_base(x, y)
# Apply gaussian noise to fraction of samples.
x = tf.cond(
pred=tf.random.uniform([]) < 0.1,
true_fn=lambda: x
+ tf.random.normal(tf.shape(x), mean=0.0, stddev=0.05, dtype=x.dtype),
false_fn=lambda: x,
)
return x, y
def load_data_into_train_val(
data_path: PathType, augmentation: str
) -> Tuple[tf.data.Dataset, tf.data.Dataset]:
print("Loading data from HDF5...", flush=True)
with h5py.File(str(data_path)) as f:
x_gbm = f["/gbm/380_380/features"][:]
y_gbm = f["/gbm/380_380/labels"][:]
x_pcnsl = f["/pcnsl/380_380/features"][:]
y_pcnsl = f["/pcnsl/380_380/labels"][:]
print("gbm features shape", x_gbm.shape)
print("gbm labels shape", y_gbm.shape)
print("pcnsl features shape", x_pcnsl.shape)
print("pcnsl labels shape", y_pcnsl.shape, flush=True)
x = np.concatenate((x_gbm, x_pcnsl)).astype(np.float32)
y = np.concatenate((y_gbm, y_pcnsl)).astype(np.float32)
# Shuffle the samples. The shuffling is the same for features and labels.
print("Shuffling samples ...", flush=True)
shuffle_inds = np.arange(y.shape[0])
np.random.seed(42)
np.random.shuffle(shuffle_inds)
x = x[shuffle_inds]
y = y[shuffle_inds]
inds = np.random.choice([0, 1], size=y.size, p=[0.85, 0.15])
x_train, y_train = x[inds == 0], y[inds == 0]
x_val, y_val = x[inds == 1], y[inds == 1]
# Create tf.data.Dataset
print("Creating tf.data.Dataset ...", flush=True)
batch_size = 8
dset_train = tf.data.Dataset.from_tensor_slices((x_train, y_train))
if augmentation == "none":
print("Not applying augmentation.")
elif augmentation == "base":
print("Applying 'base' augmentation.")
dset_train = dset_train.map(augment_base)
elif augmentation == "base_and_noise":
print("Applying 'base_and_noise' augmentation.")
        dset_train = dset_train.map(augment_base_and_noise)
else:
raise ValueError(f"unknown augmentation type: {augmentation}")
dset_train = dset_train.shuffle(1000, reshuffle_each_iteration=True)
dset_train = dset_train.batch(batch_size)
dset_val = tf.data.Dataset.from_tensor_slices((x_val, y_val))
dset_val = dset_val.batch(batch_size)
return dset_train, dset_val
def get_model() -> tf.keras.Model:
print("Creating model ...", flush=True)
tfkl = tf.keras.layers
# This is from the tf.keras.applications.efficientnet implementation in version
# 2.5.0 of tensorflow.
DENSE_KERNEL_INITIALIZER = {
"class_name": "VarianceScaling",
"config": {"scale": 1.0 / 3.0, "mode": "fan_out", "distribution": "uniform"},
}
base_model = tf.keras.applications.EfficientNetB4(
include_top=False,
input_shape=(380, 380, 3),
weights="imagenet",
)
base_model.activity_regularizer = tf.keras.regularizers.l2(l=0.01)
_x = tfkl.GlobalAveragePooling2D(name="avg_pool")(base_model.output)
_x = tfkl.Dropout(0.5)(_x)
_x = tfkl.Dense(
1,
activation="sigmoid",
name="predictions",
kernel_initializer=DENSE_KERNEL_INITIALIZER,
)(_x)
model = tf.keras.Model(inputs=base_model.input, outputs=_x)
return model
def main(
data_path: PathType,
checkpoint_prefix: PathType,
augmentation: str = "none",
epochs: int = 300,
):
model = get_model()
model.compile(
optimizer=tf.keras.optimizers.Adam(1e-04),
loss=tf.keras.losses.BinaryCrossentropy(from_logits=False),
metrics=[tf.keras.metrics.BinaryAccuracy(), tf.keras.metrics.AUC()],
)
def schedule_lr(epoch):
if epoch < 50:
return 1e-04
else:
return 1e-04 * math.exp(0.015 * (50 - epoch))
checkpoint_prefix = Path(checkpoint_prefix)
checkpoint_prefix.mkdir(parents=True, exist_ok=False)
callbacks = [
tf.keras.callbacks.LearningRateScheduler(schedule_lr, verbose=1),
tf.keras.callbacks.ModelCheckpoint(
filepath=str(checkpoint_prefix / "ckpt_{epoch:03d}_{val_loss:0.4f}.hdf5"),
save_best_only=True,
verbose=1,
),
]
dset_train, dset_val = load_data_into_train_val(
data_path=data_path, augmentation=augmentation
)
print("Beginning training...", flush=True)
history = model.fit(
dset_train,
epochs=epochs,
validation_data=dset_val,
callbacks=callbacks,
verbose=2,
)
# We save as pickle and not as json because the numpy arrays in this dictionary
# do not play nicely with json. Pickle is fine with it, though.
print("Saving training/validation history to pickle file ...")
with (checkpoint_prefix / "history.pkl").open("wb") as f:
pickle.dump(history.history, f)
def get_parsed_args() -> argparse.Namespace:
p = argparse.ArgumentParser(description=__doc__)
p.add_argument("data_path", help="Path to HDF5 with data.")
p.add_argument("ckpt_prefix", help="Directory in which to save checkpoints.")
p.add_argument(
"--augmentation",
choices=["none", "base", "base_and_noise"],
default="none",
help="Type of augmentation to apply to training data.",
)
p.add_argument("--epochs", type=int, default=300, help="Number of epochs to train.")
args = p.parse_args()
args.data_path = Path(args.data_path)
args.ckpt_prefix = Path(args.ckpt_prefix)
return args
if __name__ == "__main__":
args = get_parsed_args()
print("-" * 40)
print("Arguments passed to this script:")
for key, value in vars(args).items():
print(f" - {key}: {value}")
print("-" * 40, flush=True)
main(
data_path=args.data_path,
checkpoint_prefix=args.ckpt_prefix,
augmentation=args.augmentation,
epochs=args.epochs,
)
print("Reached end of python script.")
|
kaczmarj/classification-of-gbm-vs-pcnsl-using-cnns
|
step1_train_model.py
|
step1_train_model.py
|
py
| 6,476 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42123556181
|
# Part 1: retrieving the information from an article link
# Pick any product page on the Books to Scrape site. Write a Python script that visits this page and extracts the following information:
import sys
import requests
from bs4 import BeautifulSoup
import csv
import os
import urllib.request
# sys.argv -> list arguments passed to the script by the terminal (here the article url)
url = sys.argv[1]
response = requests.get(url)
parser = BeautifulSoup(response.content, 'html.parser')
products_infos = parser.find_all('td')
data = []
# product_page_url
data.append(url)
# universal_product_code (upc)
data.append(products_infos[0].string)
# title
data.append(parser.find('div', class_='product_main').h1.string)
# price_including_tax
price_including_tax = products_infos[3].string
price_tva = price_including_tax.replace('£', '')
data.append(price_tva)
# price_excluding_tax
price_excluding_tax = products_infos[2].string
price_ht = price_excluding_tax.replace('£', '')
data.append(price_ht)
# number_available
data.append(products_infos[5].string)
# product_description
find_p = parser.find_all('p')
data.append(find_p[3].string)
# category
find_a = parser.find_all('a')
data.append(find_a[3].string)
# review_rating
rate = parser.find('p', class_='star-rating')
rate_class = rate.get('class')
# Check if review is One, Two, Three, Four or five and append the result in the variable review
review = 0
if 'One' in rate_class:
review = 1
if 'Two' in rate_class:
review = 2
if 'Three' in rate_class:
review = 3
if 'Four' in rate_class:
review = 4
if 'Five' in rate_class:
review = 5
data.append(review)
# image_url
find_img = parser.find("img")
source = find_img.get('src')
image_url = source.replace("../../", "http://books.toscrape.com/")
data.append(image_url)
# GET images
pictures = []
soup_div_picture = parser.find('div', class_='item active')
soup_picture = soup_div_picture.find('img').get('src')
find_image_url = 'http://books.toscrape.com/' + soup_picture
pictures.append(find_image_url.replace('../../', ''))
# Try to create pictures repertory, if it's not possible(error), dont do anything(continue)
path = 'images/'
try:
os.makedirs(path)
except OSError:
if not os.path.isdir(path):
raise
# For each picture in pictures, open repertory pictures, copy / paste them inside and refactoring
# their name(picture1, picture2...)
for link in range(len(pictures)):
img_url = pictures[link]
print(img_url)
with open(f'images/image{link + 1}.jpg', 'wb+') as f:
f.write(urllib.request.urlopen(img_url).read())
# Try to open data, if there is no directory create it
path = 'data'
try:
os.makedirs(path)
except os.error:
if not os.path.isdir(path):
os.mkdir(path)
# Write the data to a CSV file that uses the fields above as column headers.
header = ['product_page_url', 'universal_product_code (upc)', 'title', 'price_including_tax', 'price_excluding_tax', 'number_available', 'product_description', 'category', 'review_rating', 'image_url']
with open('data/article_data.csv', 'w', encoding='utf-8') as article:
w = csv.writer(article, delimiter=',')
w.writerow(header)
w.writerow(data)
|
glgstyle/MyBookScraper
|
scrap_article.py
|
scrap_article.py
|
py
| 3,256 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11472792272
|
import re
import time
import datetime
import json
import copy
import random
import os
from pathlib import Path
from urllib.parse import quote
from amiyabot import PluginInstance
from core.util import read_yaml
from core import log, Message, Chain
from core.database.user import User, UserInfo
from core.database.bot import OperatorConfig
from core.resource.arknightsGameData import ArknightsGameData, ArknightsGameDataResource, Operator
from .database import AmiyaBotWifuStatusDataBase
curr_dir = os.path.dirname(__file__)
class WifuPluginInstance(PluginInstance):
def install(self):
AmiyaBotWifuStatusDataBase.create_table(safe=True)
bot = WifuPluginInstance(
name='每日随机助理',
version='1.4',
plugin_id='amiyabot-arknights-hsyhhssyy-wifu',
plugin_type='',
description='每日生成一个随机助理',
document=f'{curr_dir}/README.md'
)
def compare_date_difference(day1: str,day2: str):
time_array1 = time.strptime(''.join(day1.split(' ')[0]), "%Y-%m-%d")
timestamp_day1 = int(time.mktime(time_array1))
time_array2 = time.strptime(''.join(day2.split(' ')[0]), "%Y-%m-%d")
timestamp_day2 = int(time.mktime(time_array2))
result = (timestamp_day1 - timestamp_day2) // 60 // 60 // 24
return result
def compare_second_difference(day1: str,day2: str):
    time_array1 = time.strptime(day1, "%Y-%m-%d %H:%M:%S")
    timestamp_day1 = int(time.mktime(time_array1))
    time_array2 = time.strptime(day2, "%Y-%m-%d %H:%M:%S")
timestamp_day2 = int(time.mktime(time_array2))
result = (timestamp_day1 - timestamp_day2)
return result
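# Rough illustration of the two helpers above (the timestamps are invented examples):
#   compare_date_difference("2023-05-03", "2023-05-01")                      -> 2 (whole days)
#   compare_second_difference("2023-05-01 00:00:10", "2023-05-01 00:00:00")  -> 10 (seconds)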
async def wifu_action(data: Message):
    # log.info('Wifu selection feature triggered.')
wifu_meta: dict = UserInfo.get_meta_value(data.user_id,'amiyabot-arknights-wifu')
now = datetime.date.today()
    # Check whether the user already has a wifu
if wifu_meta.__contains__('wifu_date') and wifu_meta.__contains__('wifu_name'):
        # Work out how many days have passed since the last pick
last_wifu_time = wifu_meta['wifu_date']
time_delta = compare_date_difference(now.strftime("%Y-%m-%d"),last_wifu_time)
if time_delta<1 :
log.info(f'选老婆TimeDelta{time_delta}')
return await show_existing_wifu(data,data.user_id)
wifu_meta['wifu_date'] = now.strftime("%Y-%m-%d")
    # Randomly pick a wifu for them
operators = {}
if not operators:
operators = copy.deepcopy(ArknightsGameData().operators)
operator = operators.pop(random.choice(list(operators.keys())))
while OperatorConfig.get_or_none(operator_name=operator.name,operator_type=8):
operator = operators.pop(random.choice(list(operators.keys())))
wifu_meta['wifu_name'] = operator.name
UserInfo.set_meta_value(data.user_id,'amiyabot-arknights-wifu',wifu_meta)
AmiyaBotWifuStatusDataBase.create(channel_id=data.channel_id, user_id=data.user_id, wifu_name=operator.name,
create_at=datetime.date.today())
count = count_in_channel(data.channel_id,operator.name,data.user_id)
str = f'博士,您今日选到的助理是干员{operator.name}呢'
if count>1:
str+=f",他已经是第{count}次成为您的助理了!\n"
else:
str+="!\n"
ask = Chain(data, at=True).text(str)
return await create_ret_data(data, ask,operator)
async def create_ret_data(data, ask,operator):
skin = random.choice(operator.skins())
skin_path = await ArknightsGameDataResource.get_skin_file(skin)
if not skin_path:
return ask.text('目前还没有该干员的立绘,真是抱歉博士~[face:9]')
else:
relative_path = Path(f"../../../{skin_path}")
log.info(f'skin: {relative_path}')
ask.html(path=f'{curr_dir}/template/wifu.html',
data={"id": "testAlt", "image": quote(f"{relative_path}")}, width=1024)
voices = operator.voices()
if not voices:
log.info(f'No voice file for operator {operator.operator_name}.')
return ask
else:
voice = voices[0]
voice_path = await ArknightsGameDataResource.get_voice_file(operator, voice['voice_title'],'_cn')
if not voice_path:
return ask
else:
return ask.text(voice['voice_text'].replace('{@nickname}',data.nickname)).voice(voice_path)
return ask
# Count this user's records for the given channel_id and wifu_name
def count_in_channel(channel_id, wifu_name, user_id):
return AmiyaBotWifuStatusDataBase.select().where(
(AmiyaBotWifuStatusDataBase.channel_id == channel_id) &
(AmiyaBotWifuStatusDataBase.wifu_name == wifu_name) &
(AmiyaBotWifuStatusDataBase.user_id == user_id)
).count()
# Count this user's records for the given wifu_name across all channel_ids
def count_in_all_channels(wifu_name, user_id):
return AmiyaBotWifuStatusDataBase.select().where(
(AmiyaBotWifuStatusDataBase.wifu_name == wifu_name) &
(AmiyaBotWifuStatusDataBase.user_id == user_id)
).count()
async def show_existing_wifu(data: Message, user_id: int):
wifu_meta: dict = UserInfo.get_meta_value(user_id,'amiyabot-arknights-wifu')
operator_name = wifu_meta['wifu_name']
operators = {}
if not operators:
operators = copy.deepcopy(ArknightsGameData().operators)
operator = operators[operator_name]
    # Test code
# AmiyaBotWifuStatusDataBase.create(channel_id=data.channel_id, user_id=data.user_id, wifu_name=operator.name,
# create_at=datetime.date.today())
count = count_in_channel(data.channel_id,operator.name,data.user_id)
str = f'博士,您今天已经选过助理啦,您的助理是干员{operator.name}哦'
if count>1:
str+=f",他已经是第{count}次成为您的助理了呢~"
else:
str+="~"
ask = Chain(data, at=True).text(str)
return await create_ret_data(data,ask,operator)
@bot.on_message(keywords=['选老婆', '抽老婆', '选助理', '抽助理'],level=2)
async def _(data: Message):
return await wifu_action(data)
|
hsyhhssyy/amiyabot-arknights-hsyhhssyy-wifu
|
main.py
|
main.py
|
py
| 6,170 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36229750080
|
from typing import List
'''
剑指 Offer II 119. Longest Consecutive Sequence (same problem as LeetCode 128)
The straightforward approach is to sort and then scan, which costs O(n log n).
Every consecutive run has a starting number num such that num - 1 is not in nums,
so find each num whose num - 1 is missing from the set and count the run length from there.
'''
class Solution:
def longestConsecutive(self, nums: List[int]) -> int:
s = set(nums)
maxlen = 0
for num in s:
if num - 1 not in s:
templen = 0
while num in s:
num += 1
templen += 1
maxlen = max(templen, maxlen)
return maxlen
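# Minimal usage sketch (the input list below is an illustrative example):
if __name__ == '__main__':
    print(Solution().longestConsecutive([100, 4, 200, 1, 3, 2]))  # expected output: 4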
|
z-w-wang/Leetcode-Problemlist
|
FxxkOffer/Graph/Offer_2_119.py
|
Offer_2_119.py
|
py
| 641 |
python
|
en
|
code
| 3 |
github-code
|
6
|
2704750787
|
import os
from pymander.contexts import PrebuiltCommandContext, MultiLineContext, StandardPrompt
from pymander.shortcuts import run_with_context
from pymander.decorators import bind_argparse, bind_regex
class FileWriterContext(MultiLineContext):
FinishedHandler = MultiLineContext.OverOn2EmptyLines
def __init__(self, *args, **kwargs):
self.callback = kwargs.pop('callback', lambda data: None)
self.error = kwargs.pop('error', self.write)
super().__init__(*args, **kwargs)
def on_finished(self):
self.callback(self.buffer)
self.exit()
def prompt(self):
self.write('... ')
def on_cant_execute(self, line):
pass
class FsContext(PrebuiltCommandContext, StandardPrompt):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.current_dir = os.path.abspath('.')
@bind_argparse('cd', ['dirname'])
def cd(self, dirname):
full_dirname = os.path.abspath(os.path.join(self.current_dir, dirname))
if not os.path.exists(full_dirname):
self.write('No such dir: {0}\n'.format(dirname))
return
self.current_dir = full_dirname
    @bind_regex(r'^ls(\s+(?P<dirname>\w+))?')
def ls(self, dirname):
if dirname:
full_dirname = os.path.abspath(os.path.join(self.current_dir, dirname))
else:
full_dirname = self.current_dir
if not os.path.exists(full_dirname):
self.write('No such dir: {0}\n'.format(dirname))
return
if not os.path.isdir(full_dirname):
self.write('{0}\n'.format(dirname))
return
self.write('{0}\n'.format('\n'.join(sorted(os.listdir(full_dirname)))))
@bind_argparse('mkdir', ['dirname'])
def mkdir(self, dirname):
if not os.path.exists(self.current_dir):
self.write('No such dir: {0}\n'.format(dirname))
return
full_dirname = os.path.abspath(os.path.join(self.current_dir, dirname))
os.mkdir(full_dirname)
@bind_argparse('new', ['filename'])
def new(self, filename):
if not os.path.exists(self.current_dir):
self.write('No such dir: {0}\n'.format(filename))
return
full_filename = os.path.abspath(os.path.join(self.current_dir, filename))
if os.path.exists(full_filename):
self.write('{0} already exists!\n'.format(filename))
return
self.write('< Enter content of new file "{0}" (2 empty lines to exit editor)>\n'.format(filename))
def save_to_file(text):
with open(full_filename, 'w') as f:
f.write(text)
return FileWriterContext(callback=save_to_file)
def prompt(self):
self.write('@ {0} > '.format(os.path.basename(self.current_dir)))
if __name__ == '__main__':
run_with_context(FsContext())
|
altvod/pymander
|
examples/fswalk.py
|
fswalk.py
|
py
| 2,906 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43040033841
|
# -*- coding: utf-8 -*-
"""
@author: lucianavarromartin
PRODUCER-CONSUMER 3 (limited)
The store now has infinite space, and each producer has K sub-stores
that can all be full at the same time.
A Lock object is added in this code to give controlled access to the sub-stores.
The process stops once each producer has restocked the item in its K sub-stores
N times, after it has been consumed by the consumer.
"""
from multiprocessing import Process, Manager
from multiprocessing import BoundedSemaphore, Semaphore, Lock
from multiprocessing import current_process
from multiprocessing import Array
from time import sleep
import random
N = 3 # Number of products each producer can make
K = 2 # Number of sub-stores
NPROD = 3 # Number of producers
def add_data(almacen, pid, data, mutex):
mutex.acquire()
try:
almacen.append(pid*1000 + data)
sleep(1)
finally:
mutex.release()
def productor(almacen, pid, empty, non_empty, mutex):
"""
    When the producer produces, it adds an item to its store; the empty semaphore
    associated with that store is acquired and the non_empty one is released.
"""
dato = random.randint(0,5)
for n in range(N):
empty[pid].acquire()
dato += random.randint(0,5)
add_data(almacen, pid, dato, mutex)
print (f"productor {current_process().name} almacenado {dato}")
non_empty[pid].release()
print(f"producer {current_process().name} Ha terminado de producir")
empty[pid].acquire()
sleep(1)
non_empty[pid].release()
def consumidor(almacen, empty, non_empty, mutex):
"""
    When the consumer consumes an item from one of the producers, that item is no
    longer in the store, so the non_empty semaphore associated with that producer
    is acquired and the empty one is released.
"""
for s in non_empty:
s.acquire()
sleep(1)
ordenados = []
while len(ordenados) < NPROD * N:
numeros = []
lista_posicion = []
for i in range(len(almacen)):
if almacen[i] >= 0:
numeros.append(almacen[i] % 1000)
lista_posicion.append(almacen[i]//1000)
if numeros == []:
break
dato = min(numeros)
posicion = lista_posicion[numeros.index(dato)]
posicion_almacen = almacen[:].index(dato + posicion * 1000)
almacen[posicion_almacen]= -2
ordenados.append(dato)
empty[posicion].release()
print (f"consumidor {current_process().name} consumiendo {dato}")
non_empty[posicion].acquire()
print(ordenados)
def main():
manager = Manager()
almacen = manager.list()
non_empty = [Semaphore(0) for i in range (NPROD)]
empty = [BoundedSemaphore(K) for _ in range (NPROD)]
mutex = Lock()
prodlst = [Process(target=productor,
name=f'prod_{i}',
args=(almacen, i, empty, non_empty, mutex))
for i in range(NPROD)]
cons = [ Process(target=consumidor,
name=f'cons',
args=(almacen, empty, non_empty, mutex))]
for p in prodlst + cons:
p.start()
for p in prodlst + cons:
p.join()
if __name__ == '__main__':
main()
|
lucnav01/ProductorConsumidor
|
ProductorConsumidor3NavarroMartinLucia.py
|
ProductorConsumidor3NavarroMartinLucia.py
|
py
| 3,451 |
python
|
es
|
code
| 0 |
github-code
|
6
|
8515938890
|
import numpy as np
from matplotlib import cm
import matplotlib.pyplot as plt
from matplotlib import gridspec
from scipy.optimize import curve_fit
from scipy.interpolate import interp1d
from pandas import unique
import csv
import h5py
from astropy import constants as const
from astropy import units as u
I_units = u.erg*u.cm**(-2)*u.s**(-1)*u.Hz**(-1)*u.sr**(-1)
h = const.h
c = const.c
kB = const.k_B
data_path = "/mn/stornext/d20/RoCS/atulm/Project1_stars/SED_data/"
SED_path = data_path + "Clean_SED_data/"
path_art = "/mn/stornext/d19/RoCS/jonasrth/ART/SED/"
def get_star_data(star_name):
"""
Collects necessary data on the stars to compare them with model SED.
star_name (str) must fit with one of the 12 stars compiled in the EMISSA project.
"""
# Collecting SED data for star_name
filename = star_name + "_CleanSED.csv"
with open(SED_path+filename) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=",")
csv_array = np.array(list(csv_reader))
n_freq = np.argwhere(csv_array[0]=="sed_freq")[0][0]
n_flux = np.argwhere(csv_array[0]=="sed_flux")[0][0]
n_eflux = np.argwhere(csv_array[0]=="sed_eflux")[0][0]
n_tab = np.argwhere(csv_array[0]=="_tabname")[0][0]
sed_freq = csv_array[1:,n_freq].astype(np.float64)*u.GHz
sed_flux = csv_array[1:,n_flux].astype(np.float64)*u.Jy
sed_eflux = csv_array[1:,n_eflux].astype(np.float64)*u.Jy
tabname = csv_array[1:,n_tab]
# Collecting radius and distance data for star_name
with open(data_path + "star_props.csv") as csv_file:
csv_reader = csv.reader(csv_file, delimiter=",")
csv_array = np.array(list(csv_reader))
n_d = np.argwhere(csv_array[0]==" Distance (pc)")[0][0]
n_ed = np.argwhere(csv_array[0]==" Dist_err")[0][0]
n_R = np.argwhere(csv_array[0]=="Radius (Rs)")[0][0]
n_eR = np.argwhere(csv_array[0]=="Rad_err")[0][0]
m = np.argwhere(csv_array[:,1]==star_name.replace("_"," "))[0][0]
d = float(csv_array[m, n_d])*u.pc
d_err = float(csv_array[m, n_ed])*u.pc
R = float(csv_array[m, n_R])*const.R_sun
R_err = float(csv_array[m, n_eR])*const.R_sun
# Returning collected data in dictionary:
data = {}
data["sed_freq"] = sed_freq
data["sed_flux"] = sed_flux
data["sed_eflux"] = sed_eflux
data["tabname"] = tabname
data["d"] = d
data["d_err"] = d_err
data["R"] = R
data["R_err"] = R_err
return data
def plot_SED(star_name, model_data, figname):
"""
"""
data = get_star_data(star_name)
mod_int = np.array([])
mod_wav = np.array([])
for file in model_data.values():
mod_int = np.append(mod_int, np.mean(np.array(file["Stokes_I"][0,...]),axis=(0,1)))
mod_wav = np.append(mod_wav, np.array(file["Wavelength"]))
mod_freq = (c/(mod_wav*u.angstrom)).to(u.GHz)
mod_freq, inds = np.unique(mod_freq, return_index=True)
mod_int = mod_int[inds]*I_units
mod_flux = (np.pi*(data["R"]/data["d"])**2*mod_int*u.sr).to(u.Jy)
mod_flux_max = (np.pi*((data["R"]+data["R_err"])/(data["d"]-data["d_err"]))**2*mod_int*u.sr).to(u.Jy)
mod_flux_min = (np.pi*((data["R"]-data["R_err"])/(data["d"]+data["d_err"]))**2*mod_int*u.sr).to(u.Jy)
a = np.argmin(abs(mod_freq.value - np.min(data["sed_freq"]).value)) - 1
b = np.argmin(abs(mod_freq.value - np.max(data["sed_freq"]).value)) + 1
#a = np.argmin(abs(mod_freq.value - 7)) - 1
### Interpolation
f = interp1d(mod_freq, mod_flux)
### Plotting:
fig = plt.figure(figsize=(8,6.4))
gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1], hspace=0)
ax0 = fig.add_subplot(gs[0])
#plt.suptitle(star_name.replace("_"," "), x=0.5, y=1.0)
ax0.text(0.1, 0.9, star_name.replace("_"," "), transform=ax0.transAxes)
cmap = cm.get_cmap("gnuplot")
gradient = np.linspace(0, 1, len(unique(data["tabname"])))
for it,tab in enumerate(unique(data["tabname"])):
n = np.argwhere(data["tabname"]==tab)[:,0]
ax0.errorbar(data["sed_freq"][n].value, data["sed_flux"][n].value, yerr=data["sed_eflux"][n].value,
color=cmap(gradient[it]), ls="None", marker="o", label=tab)
if tab=="ALMA data":
n_ALMA = it
ax0.plot(mod_freq[a:b], mod_flux[a:b], color="black", ls="solid", label="model data")
ax0.fill_between(mod_freq[a:b], y1=mod_flux_max[a:b], y2=mod_flux_min[a:b], color="grey", alpha=0.5)
handles, labels = ax0.get_legend_handles_labels()
legd=ax0.legend([handles[n_ALMA+1], handles[0]], [labels[n_ALMA+1], labels[0]], loc="lower center", bbox_to_anchor=(0.5,1.01), ncol=5)
ax0.axvspan(0,1e3, color="grey", alpha=0.2)
ax0.set_ylabel("Flux [Jy]")
ax0.xaxis.grid(which="both")
ax0.yaxis.grid(which="major")
ax0.set_yscale("log")
ax1 = fig.add_subplot(gs[1], sharex=ax0)
ax1.errorbar(data["sed_freq"], (data["sed_flux"].value - f(data["sed_freq"]))/f(data["sed_freq"]),
color="black", ls="None", marker="o")
ax1.axvspan(0,1e3, color="grey", alpha=0.2)
ax1.set_ylabel(r"$\Delta S/S_{mod}$")
ax1.set_xlabel("Frequency [GHz]")
ax1.xaxis.grid(which="both")
ax1.yaxis.grid(which="major")
ax1.set_xscale("log")
plt.setp(ax0.get_xticklabels(),visible=False)
plt.savefig("figures/" + figname, bbox_inches="tight")
star_name_list = ["Gam_Vir_A", "Gam_Vir_B", "Eta_Crv", "Gam_Lep", "Alf_Cen_A", "61_Vir", "Alf_Cen_B", "Eps_Eri", "GJ_2006_A", "Proxima_Cen"]
star_letter_list = ["C", "D", "E", "F", "G", "H", "I", "J", "K", "L"]
model_name_list = ["t65", "t65", "t65", "t65", "t57", "t57", "t50", "t50", "t32", "t32"]
for i in range(len(star_name_list)):
print(star_letter_list[i] + ": " +star_name_list[i]+" - "+model_name_list[i])
SEDs = {"t65" : {0 : h5py.File(path_art + "d3t65g45_000G_SED.h5","r")},
"t57" : {0 : h5py.File(path_art + "d3t57g44_000G_SED.h5","r")},
"t50" : {0 : h5py.File(path_art + "d3t50g45_000G_SED.h5","r")},
"t32" : {0 : h5py.File(path_art + "d3t32g45_000G_SED.h5","r")}
}
for i in range(len(star_name_list)):
figname = "EMISSA/SED_"+star_letter_list[i]+"_"+model_name_list[i]+".pdf"
plot_SED(star_name_list[i], SEDs[model_name_list[i]], figname)
#for i in range(len(star_name_list)):
# figname = "presentation/SED_"+star_letter_list[i]+"_"+model_name_list[i]+".pdf"
# plot_SED(star_name_list[i], SEDs[model_name_list[i]], figname)
|
jonasrth/MSc-plots
|
SED_EMISSA_plots.py
|
SED_EMISSA_plots.py
|
py
| 6,655 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71144565947
|
#!/usr/bin/env python3
from dotenv import load_dotenv
from pet_posts import bot
import logging
import os
def main():
load_dotenv() # take environment variables from .env.
api_token = os.getenv("API_TOKEN")
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
level=logging.INFO,
)
updater = bot.init(api_token)
bot.configure(updater.dispatcher)
bot.run(updater)
if __name__ == "__main__":
main()
|
dawngerpony/pet-posts
|
app.py
|
app.py
|
py
| 483 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32628012198
|
import pandas as pd
import glob
from datetime import datetime, timedelta
# Read the files from the dataset folder
def readCSV():
listCSV = []
namePath = 'dataset'
    # Select every csv file in the chosen folder
namesFiles = glob.glob(namePath + "/*.csv")
    # join them all together
for filename in namesFiles:
df = pd.read_csv(filename, sep=';')
dfMask = df['codmun'].isnull()
filtered_df = df[dfMask]
listCSV.append(filtered_df)
frame = pd.concat(listCSV, axis=0, ignore_index=True)
frame['data'] = pd.to_datetime(frame['data']) # .dt.strftime('%d/%m/%Y')
return frame
def itensCalculate(df, date, dateStart, uf):
all = []
mask = df['data'] == date.strftime('%Y-%m-%d')
dfAux = df[mask]
# Date
all.append(date)
# State
if uf == 76:
all.append('Brasil')
else:
all.append(df['estado'].iloc[0])
# CasosAcumulado
all.append(int(dfAux['casosAcumulado'].iloc[0]))
    # Current and previous 7-day moving averages of cases, trend, percentage change
for i in movingAverage(df, date, dateStart, 0):
all.append(i)
# ObitosAcumulados
all.append(dfAux['obitosAcumulado'].iloc[0])
    # Current and previous 7-day moving averages of deaths, trend, percentage change
for j in movingAverage(df, date, dateStart, 1):
all.append(j)
return all
# number = 0 -> cases, number != 0 -> deaths
def movingAverage(df, date, dateStart, number):
all = []
if number == 0:
dfAux = df[['data', 'casosAcumulado']]
else:
dfAux = df[['data', 'obitosAcumulado']]
    # Current moving average
mean_today = averageCall(df, date, dateStart, number)
    # Previous moving average
mean_before = averageCall(df, date - timedelta(days=1), dateStart, number)
all.append(int(mean_today))
all.append(int(mean_before))
    # Trend and percentage change of each moving average
if mean_before == 0:
if mean_today != 0:
all.append('Aumento')
all.append(100)
else:
all.append('Estabilidade')
all.append('-')
elif mean_today/mean_before > 1:
all.append('Aumento')
all.append(round(((mean_today/mean_before - 1)*100), 4))
elif mean_today/mean_before < 1:
all.append('Diminuicao')
all.append(round(abs(mean_today/mean_before - 1)*100, 4))
else:
all.append('Estabilidade')
all.append(round((mean_today/mean_before - 1)*100, 4))
return all
def averageCall(df, date, dateStart, number):
colum = ''
if number == 0:
colum = 'casosNovos'
else:
colum = 'obitosNovos'
# First 7 days
if date.strftime('%Y-%m-%d') < (dateStart + timedelta(days=7)).strftime('%Y-%m-%d'):
mask = (df['data'] <= date.strftime('%Y-%m-%d'))
dfAux = df[mask]
return dfAux[colum].sum()/7
# After
else:
# Select part of dataframe that need to calculate mean
mask = (df['data'] <= date.strftime('%Y-%m-%d')) & (df['data'] > (date - timedelta(days=7)).strftime('%Y-%m-%d'))
dfAux = df[mask]
return dfAux[colum].mean()
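# Hedged usage sketch (assumes the CSVs in ./dataset follow the Brazilian
# Ministry of Health layout used above; the dates and the UF code 76 = Brazil
# are illustrative):
#
#   df = readCSV()
#   row = itensCalculate(df, datetime(2021, 3, 1), datetime(2020, 2, 25), 76)
#   # row -> [date, 'Brasil', cumulative cases, current/previous 7-day case averages,
#   #         trend, percentage, cumulative deaths, current/previous death averages, ...]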
|
lfmaster780/dataCovid
|
utils.py
|
utils.py
|
py
| 3,138 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20363546350
|
#%%
from dataclasses import dataclass, field
from functools import wraps
from typing import List, Optional, Protocol, Union
import time
from .controller import Controller
from . import commands
from .acceptance_scheme import AcceptanceScheme, UnconditionalAcceptance
from .scattering_simulation import ScatteringSimulation
from .box_simulation import Box
def timeit(my_func):
@wraps(my_func)
def timed(*args, **kw):
tstart = time.time()
output = my_func(*args, **kw)
tend = time.time()
print(f"{my_func.__name__} took {(tend - tstart)} seconds to execute")
return output
return timed
CommandOrAcceptableCommand = Union[commands.Command, commands.AcceptableCommand]
def decorate_command(command: CommandOrAcceptableCommand) -> commands.AcceptableCommand:
if isinstance(command, commands.AcceptableCommand):
return command
if isinstance(command, commands.Command):
return commands.AcceptableCommand(
base_command=command,
acceptance_scheme=UnconditionalAcceptance()
)
class Evaluator(Protocol):
def evaluate(self, command: CommandOrAcceptableCommand) -> bool:
pass
@dataclass
class Simulator:
controller: Controller
evaluator: Evaluator
@timeit
def simulate(self):
controller = self.controller
for command in controller.ledger:
controller.action()
controller.compute_states()
self.evaluator.evaluate(command)
class Viewer(Protocol):
def show_view(simulation: ScatteringSimulation, command: CommandOrAcceptableCommand, acc_scheme: AcceptanceScheme) -> None:
pass
@dataclass
class MonteCarloEvaluator:
simulation: ScatteringSimulation
viewer: Optional[Viewer] = None
def _show_view(self, command: CommandOrAcceptableCommand, acc_scheme: AcceptanceScheme) -> None:
if self.viewer:
self.viewer.show_view(self.simulation, command, acc_scheme)
def evaluate(self, command: CommandOrAcceptableCommand) -> bool:
acceptable_command = decorate_command(command)
acceptable_command.handle_simulation(self.simulation)
acc_scheme = acceptable_command.acceptance_scheme
self._show_view(command, acc_scheme)
return acc_scheme.is_acceptable()
@dataclass
class MemorizedSimulator(Simulator):
simulation: ScatteringSimulation
box_list: List[Box]
state_command: commands.Command = field(init = False, default_factory=lambda : None)
def compute_states(self) -> None:
if self.state_command:
self.state_command.execute()
else:
self.controller.compute_states()
def simulate_command(self, controller: Controller, command: CommandOrAcceptableCommand) -> None:
controller.action()
self.compute_states()
command.execute()
acceptable = self.evaluator.evaluate(command)
if acceptable:
self.state_command = commands.SetSimulationState.gen_from_simulation(self.simulation.simulation_params, self.box_list)
@timeit
def simulate(self) -> None:
controller = self.controller
for command in controller.ledger:
self.simulate_command(controller=controller, command=command)
if __name__ == "__main__":
pass
#%%
|
lestercbarnsley/SasRMC
|
sas_rmc/simulator.py
|
simulator.py
|
py
| 3,355 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70941003388
|
'''
Created on 8/03/2016
@author: EJArizaR
'''
import unittest
from apps.DaneUsers.tests.test_base import test_base
from django.core.urlresolvers import reverse
class IsUsernameRegisteredTest(test_base):
def setUp(self):
test_base.setUp(self)
def test_returns_False_if_user_doesnt_exist(self):
response = self.client.get(reverse('DaneUsers:isUsernameRegistered'),{"username":"[email protected]"})
self.assertEqual(response.content, "False")
def test_returns_True_if_exists(self):
self.create_user()
response = self.client.get(reverse('DaneUsers:isUsernameRegistered'),{"username":"[email protected]"})
self.assertEqual(response.content, "True")
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
diegopuerto/kiosco_universitario
|
source/apps/DaneUsers/tests/test_is_username_registered.py
|
test_is_username_registered.py
|
py
| 852 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15430780258
|
adjacency_matrix = {1: [2, 3], 2: [4, 5],
3: [5], 4: [6], 5: [6],
6: [7], 7: []}
## Non-recursive
def dfs(graph, start):
"""
All possible connected vertices
"""
stack,path = [start],[]
while stack:
ele = stack.pop()
if ele in path:
continue
else:
path.append(ele)
for neighbours in graph[ele]:
stack.append(neighbours)
return path
print(dfs(adjacency_matrix,1))
def dfs_recur(graph,start,path):
#print("step", path)
path.append(start)
for neighbour in graph[start]:
print("neighbour",neighbour)
if neighbour not in path:
path = dfs_recur(graph,neighbour,path)
return path
print("recursive",dfs_recur(adjacency_matrix,1,[]))
|
dipalira/LeetCode
|
Arrays/dfs.py
|
dfs.py
|
py
| 709 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26804339781
|
numeral_values = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
for case in range(int(input())):
expression = input()
numerals = expression.replace("+", " ").replace("=", " ").split()
value = 0
for num in numerals:
for idx in range(len(num)):
if ( (idx + 3 < len(num) and
all(num[idx + shift_idx] == num[idx + shift_idx + 1]
for shift_idx in range(len(num) - 1))) or
(idx + 1 < len(num) and
numeral_values[num[idx + 1]] > numeral_values[num[idx]])):
value -= numeral_values[num[idx]]
continue
value += numeral_values[num[idx]]
if value > 1000:
print(expression + "CONCORDIA CUM VERITATE")
continue
fives = ['V', 'L', 'D', '']
ones = ['I', 'X', 'C', 'M']
digits = str(value)
result = ""
for idx in range(len(digits)):
digit = int(digits[idx])
idx = len(digits) - 1 - idx
if digit <= 3: result += ones[idx] * digit
elif digit <= 5: result += ones[idx] * (5 - digit) + fives[idx]
elif digit <= 8: result += fives[idx] + ones[idx] * (digit - 5)
else: result += ones[idx] * (10 - digit) + ones[idx + 1]
print(expression + result)
|
Stevan-Zhuang/DMOJ
|
CCC/CCC '96 S4 - When in Rome.py
|
CCC '96 S4 - When in Rome.py
|
py
| 1,321 |
python
|
en
|
code
| 1 |
github-code
|
6
|
21884803180
|
def split_article_dict(di:dict) :
"""
Mandatory info :
@author : str
@year : int
@title : str
@journal : str
@volume : str
Optional Info :
@doi : str
@issn : str
@issue : int
@page : str
@url : str
"""
art_mandatory_key = ["author", "year", "title", "journal"]
art_optional_key = ["doi", "issn", "issue", "page", "url", "volume"]
mandatory_ = {}
for m_key in art_mandatory_key :
try :
m_val = di[m_key]
mandatory_[m_key] = m_val
except KeyError as ke :
print(di['title'], "is missing mandatory key %s" % (ke))
optional_ = {}
for o_key in art_optional_key :
o_val = di.get(o_key, None)
if o_val != None :
optional_[o_key] = o_val
return mandatory_, optional_
def split_inproceeding_dict(di:dict) :
"""
Mandatory info :
@author : str
@year : int
@title : str
@journal : str
Optional Info :
@page : str
"""
inpr_mandatory_key = ["author", "year", "title", "journal"]
inpr_optional_key = ["doi", "issn", "issue", "page", "url"]
mandatory_ = {}
for m_key in inpr_mandatory_key :
try :
m_val = di[m_key]
mandatory_[m_key] = m_val
except KeyError as ke :
print(di['title'], "is missing mandatory key %s" % (ke))
optional_ = {}
for o_key in inpr_optional_key :
o_val = di.get(o_key, None)
if o_val != None :
optional_[o_key] = o_val
return mandatory_, optional_
def split_book_dict(di:dict) :
book_mandatory_key = ["author", "year", "title", "publisher"]
book_optional_key = ["country", "city"]
mandatory_ = {}
for m_key in book_mandatory_key :
try :
m_val = di[m_key]
mandatory_[m_key] = m_val
except KeyError as ke :
print(di['title'], "is missing mandatory key %s" % (ke))
optional_ = {}
for o_key in book_optional_key :
o_val = di.get(o_key, None)
if o_val != None :
optional_[o_key] = o_val
return mandatory_, optional_
def _name_abrv(name:str) :
special_word = ['de']
abbrv_name = name.split(' ')
word_amount = len(abbrv_name)
res = ''
for i in range(word_amount):
if i < word_amount-1 :
if abbrv_name[i] in special_word :
res = '%s %s' % (res, abbrv_name[i])
else :
ab_word = abbrv_name[i][:1] + "."
res = '%s %s' % (res, ab_word)
else :
res = '%s %s' % (res, abbrv_name[i])
return res[1::]
def author_formatter(author:str) :
# parsing based on ieee standard
#
res = author.split(" and ")
author_amnt = len(res)
# replace other in author list
replacement_for = 'et al.'
temp = None
if author_amnt == 1 :
# one author
temp = _name_abrv(res[0])
elif author_amnt == 2 :
# two author
temp = _name_abrv(res[0])
if res[1].find('others')>= 0 :
temp = "%s %s" % (temp, replacement_for)
else :
temp = "%s and %s" % (temp, _name_abrv(res[1]))
    elif author_amnt >= 3 and author_amnt <= 6 :
# 3-6 author
temp = _name_abrv(res[0])
for i in range(1, author_amnt) :
if i == author_amnt -1 :
if res[i].find('others')>= 0 :
temp = "%s %s" % (temp, replacement_for)
else :
temp = "%s and %s" % (temp, _name_abrv(res[i]))
else :
if res[i].find('others')>= 0 :
temp = "%s %s" % (temp, replacement_for)
else :
temp = "%s, %s" % (temp, _name_abrv(res[i]))
else :
# more than 6 author
temp = _name_abrv(res[0])
temp = "%s %s" % (temp, replacement_for)
return temp
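# Rough usage sketch (the author strings below are invented examples):
if __name__ == '__main__':
    print(author_formatter("John Smith"))                          # J. Smith
    print(author_formatter("John Smith and Jane Doe and others"))  # J. Smith, J. Doe et al.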
|
linkv12/bib2md
|
reff_parser.py
|
reff_parser.py
|
py
| 3,976 |
python
|
en
|
code
| 0 |
github-code
|
6
|
37429761663
|
import os
from bs4 import BeautifulSoup
import requests
import requests.exceptions
import urllib.parse
from collections import deque
import re
# Create the directory to store the scraped data if it does not already exist
if not os.path.exists("scraped_data"):
os.makedirs("scraped_data")
user_url = str(input('[+] Enter Target URL To Scan: '))
urls = deque([user_url])
scraped_urls = set()
emails = set()
phone_numbers = set()
count = 0
try:
while len(urls):
count += 1
if count == 100:
break
url = urls.popleft()
scraped_urls.add(url)
parts = urllib.parse.urlsplit(url)
base_url = '{0.scheme}://{0.netloc}'.format(parts)
path = url[:url.rfind('/')+1] if '/' in parts.path else url
print('[%d] Processing %s' % (count, url))
try:
response = requests.get(url)
except (requests.exceptions.MissingSchema, requests.exceptions.ConnectionError):
continue
new_emails = set(re.findall(r"[a-z0-9\.\-+_]+@[a-z0-9\.\-+_]+\.[a-z]+", response.text, re.I))
emails.update(new_emails)
new_phone_numbers = set(re.findall(r"\b\d{3}[-.]?\d{3}[-.]?\d{4}\b", response.text))
phone_numbers.update(new_phone_numbers)
soup = BeautifulSoup(response.text, features="lxml")
for anchor in soup.find_all("a"):
link = anchor.attrs['href'] if 'href' in anchor.attrs else ''
if link.startswith('/'):
link = base_url + link
elif not link.startswith('http'):
link = path + link
if not link in urls and not link in scraped_urls:
urls.append(link)
except KeyboardInterrupt:
print('[-] Closing!')
# Create a file to store the scraped email addresses
with open("scraped_data/emails.txt", "w") as f:
print("[+] Scraped Emails:")
for email in emails:
f.write(email + "\n")
print(email)
# Create a file to store the scraped phone numbers
with open("scraped_data/phone_numbers.txt", "w") as f:
print("\n[+] Scraped Phone Numbers:")
for phone_number in phone_numbers:
f.write(phone_number + "\n")
print(phone_number)
print("\n[+] Scraped data saved in 'scraped_data' folder.")
|
opemi-aa/email_phone_scrape
|
email_phone_scrape.py
|
email_phone_scrape.py
|
py
| 2,267 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34780751946
|
import datetime
import csv
import re
from Classes import Contact
contact_list = list()
contact_list_csv = "contact_list.csv"
# Create a new contact and write it to the csv file
def create_contact():
print("Для того чтобы пропустить пункт и оставить его пустым введите: _")
new_contact = Contact("", "", "", "", "", "")
    # Enter the first name
while True:
try:
new_contact.first_name = input("Имя: ")
except ValueError:
print("Неверный формат имени, присутствуют недопустимые символы")
else:
break
    # Enter the last name
while True:
try:
new_contact.last_name = input("Фамилия: ")
except ValueError:
print("Неверный формат фамилии, присутствуют недопустимые символы")
else:
break
    # Enter the date of birth
while True:
try:
new_contact.birth_date = datetime.datetime \
.strptime(input("Дата рождения в формате ДД.ММ.ГГГГ: "), "%d.%m.%Y").date()
except ValueError:
print("Неверный формат даты или дата не может быть позднее текущего дня")
else:
break
    # Enter the company name
new_contact.company_name = input("Компания: ")
    # Enter the e-mail
while True:
try:
new_contact.email = input("E-Mail: ")
except ValueError:
print("Неверный формат E-Mail")
else:
break
    # Enter the phone number
while True:
try:
new_contact.phone_number = input("Номер телефона в формате +7(___)___-__-__: ")
except ValueError:
print("Неверный формат номера")
else:
break
contact_list.append(new_contact)
    # Build a dictionary from the contact
contact_dict = {"first name": new_contact.first_name, "last name": new_contact.last_name,
"birth date": new_contact.birth_date, "company name": new_contact.company_name,
"email": new_contact.email, "phone number": new_contact.phone_number}
    # Append the record to the csv file
with open(contact_list_csv, "a", newline="") as file:
columns = ["first name", "last name", "birth date", "company name", "email", "phone number"]
data_writer = csv.DictWriter(file, fieldnames=columns)
# writer.writeheader()
data_writer.writerow(contact_dict)
# Load the contact list from the csv file
def load_contact_list():
with open(contact_list_csv) as file:
data_reader = csv.DictReader(file)
for line in data_reader:
contact_list.append(Contact(line["first name"], line["last name"],
line["birth date"], line["company name"],
line["email"], line["phone number"]))
# Display the contact list
def show_contact_list():
for contact in contact_list:
print(contact)
# Search by first name
def find_by_first_name(name):
regex = r"(?i)\b{}".format(name)
counter = 0
for contact in contact_list:
if re.search(regex, contact.first_name):
print(contact)
counter += 1
print("Найдено: {}".format(counter))
# Search by last name
def find_by_last_name(name):
regex = r"(?i)\b{}".format(name)
counter = 0
for contact in contact_list:
if re.search(regex, contact.last_name):
print(contact)
counter += 1
print("Найдено: {}".format(counter))
# Completely clear the contact list / reset contact_list.csv
def clear_contact_list():
with open(contact_list_csv, "w", newline="") as file:
columns = ["first name", "last name", "birth date", "company name", "email", "phone number"]
data_writer = csv.DictWriter(file, fieldnames=columns)
data_writer.writeheader()
contact_list.clear()
print("Файл contact_list.csv был сброшен, список контактов очищен.")
# Command loop for working with the contact list
def command_dialog():
print("help - для вызова справки")
while True:
command = input(">>> ")
if command == "show":
show_contact_list()
if command == "create":
create_contact()
if command == "find_fn":
find_by_first_name(input("Искать по имени:"))
if command == "find_ln":
find_by_last_name(input("Искать по фамилии:"))
if command == "clear":
clear_contact_list()
if command == "quit":
break
if command == "help":
print("Список команд:\nshow - показать список контактов\ncreate - добавить контакт\n"
"find_fn - поиск по имени\nfind_ln - поиск по фамилии\nclear - полная очистка списка контактов\n"
"quit - выйти из программы\nhelp - справка")
|
NAS371/contactListTestWork
|
Program.py
|
Program.py
|
py
| 5,649 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
19240299148
|
import numpy as np
import torch
import random
import time
smp = torch.nn.Softmax(dim=0)
smt = torch.nn.Softmax(dim=1)
def get_T_global_min(args, record, max_step = None, T0 = None, p0 = None, lr = 0.1, NumTest = None, all_point_cnt = 15000):
if max_step is None:
max_step = args.max_iter
if NumTest is None:
NumTest = args.G
KINDS = args.num_classes
all_point_cnt = np.min((all_point_cnt,int(len(record)*0.9)))
print(f'Sample {all_point_cnt} instances in each round')
p_estimate = [[] for _ in range(3)]
p_estimate[0] = torch.zeros(KINDS)
p_estimate[1] = torch.zeros(KINDS, KINDS)
p_estimate[2] = torch.zeros(KINDS, KINDS, KINDS)
for idx in range(NumTest):
print(idx, flush=True)
sel_loc = np.random.permutation(record.shape[1])[:3]
record_sel = record[:, sel_loc]
# print(f'sel_loc is {sel_loc}')
cnt_y_3 = count_y_known2nn(KINDS, record_sel, all_point_cnt)
for i in range(3):
cnt_y_3[i] /= all_point_cnt
p_estimate[i] = p_estimate[i] + cnt_y_3[i] if idx != 0 else cnt_y_3[i]
for j in range(3):
p_estimate[j] = p_estimate[j] / NumTest
args.device = set_device()
loss_min, E_calc, P_calc, T_init = calc_func(KINDS, p_estimate, False, args.device, max_step, T0, p0, lr = lr)
E_calc = E_calc.cpu().numpy()
P_calc = P_calc.cpu().numpy()
return E_calc, P_calc
def error(T, T_true):
error = np.sum(np.abs(T-T_true)) / np.sum(np.abs(T_true))
return error
def set_device():
if torch.cuda.is_available():
_device = torch.device("cuda")
else:
_device = torch.device("cpu")
print(f'Current device is {_device}', flush=True)
return _device
def distCosine(x, y):
"""
:param x: m x k array
:param y: n x k array
:return: m x n array
"""
xx = np.sum(x ** 2, axis=1) ** 0.5
x = x / xx[:, np.newaxis]
yy = np.sum(y ** 2, axis=1) ** 0.5
y = y / yy[:, np.newaxis]
dist = 1 - np.dot(x, y.transpose()) # 1 - cosine distance
return dist
def count_real(KINDS, T, P, mode, _device = 'cpu'):
# time1 = time.time()
P = P.reshape((KINDS, 1))
p_real = [[] for _ in range(3)]
p_real[0] = torch.mm(T.transpose(0, 1), P).transpose(0, 1)
# p_real[2] = torch.zeros((KINDS, KINDS, KINDS)).to(_device)
p_real[2] = torch.zeros((KINDS, KINDS, KINDS))
temp33 = torch.tensor([])
for i in range(KINDS):
Ti = torch.cat((T[:, i:], T[:, :i]), 1)
temp2 = torch.mm((T * Ti).transpose(0, 1), P)
p_real[1] = torch.cat([p_real[1], temp2], 1) if i != 0 else temp2
for j in range(KINDS):
Tj = torch.cat((T[:, j:], T[:, :j]), 1)
temp3 = torch.mm((T * Ti * Tj).transpose(0, 1), P)
temp33 = torch.cat([temp33, temp3], 1) if j != 0 else temp3
# adjust the order of the output (N*N*N), keeping consistent with p_estimate
t3 = []
for p3 in range(KINDS):
t3 = torch.cat((temp33[p3, KINDS - p3:], temp33[p3, :KINDS - p3]))
temp33[p3] = t3
if mode == -1:
for r in range(KINDS):
p_real[2][r][(i+r+KINDS)%KINDS] = temp33[r]
else:
p_real[2][mode][(i + mode + KINDS) % KINDS] = temp33[mode]
temp = [] # adjust the order of the output (N*N), keeping consistent with p_estimate
for p1 in range(KINDS):
temp = torch.cat((p_real[1][p1, KINDS-p1:], p_real[1][p1, :KINDS-p1]))
p_real[1][p1] = temp
return p_real
def func(KINDS, p_estimate, T_out, P_out, N,step, LOCAL, _device):
eps = 1e-2
eps2 = 1e-8
eps3 = 1e-5
loss = torch.tensor(0.0).to(_device) # define the loss
P = smp(P_out)
# loss = loss + 0.1*torch.norm(P.view(-1) - torch.tensor([0.51441996, 0.34073234, 0.08246922, 0.06237848]))
# loss = loss + 0.1 * torch.norm(P[3]-0.1) + 0.1 * torch.norm(P[2]-0.1)
# P = P_out
T = smt(T_out)
mode = random.randint(0, KINDS-1)
mode = -1
    # Reuse the p_real computation to get the temporary statistics for the current T and P: N, N*N, N*N*N
p_temp = count_real(KINDS, T.to(torch.device("cpu")), P.to(torch.device("cpu")), mode, _device)
weight = [1.0,1.0,1.0]
# weight = [2.0,1.0,1.0]
for j in range(3): # || P1 || + || P2 || + || P3 ||
p_temp[j] = p_temp[j].to(_device)
loss += weight[j] * torch.norm(p_estimate[j] - p_temp[j]) #/ np.sqrt(N**j)
if step > 100 and LOCAL and KINDS != 100:
loss += torch.mean(torch.log(P+eps))/10
return loss
def calc_func(KINDS, p_estimate, LOCAL, _device, max_step = 501, T0=None, p0 = None, lr = 0.1):
# init
# _device = torch.device("cpu")
N = KINDS
eps = 1e-8
if T0 is None:
T = 1 * torch.eye(N) - torch.ones((N,N))
# T[-1] = torch.ones(N)
else:
T = T0
if p0 is None:
P = torch.ones((N, 1), device = None) / N + torch.rand((N,1), device = None)*0.1 # P:0-9 distribution
# P[2:] -= 5.0
# P = torch.tensor([0.4,0.4,0.1,0.1])
else:
P = p0
T = T.to(_device)
P = P.to(_device)
p_estimate = [item.to(_device) for item in p_estimate]
print(f'using {_device} to solve equations')
T.requires_grad = True
P.requires_grad = True
optimizer = torch.optim.Adam([T, P], lr = lr)
# train
loss_min = 100.0
T_rec = torch.zeros_like(T)
P_rec = torch.zeros_like(P)
time1 = time.time()
for step in range(max_step):
if step:
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss = func(KINDS, p_estimate, T, P, N,step, LOCAL, _device)
if loss < loss_min and step > 5:
loss_min = loss.detach()
T_rec = T.detach()
P_rec = P.detach()
# if step % 100 == 0:
# print('loss {}'.format(loss))
# print(f'step: {step} time_cost: {time.time() - time1}')
# print(f'T {np.round(smt(T.cpu()).detach().numpy()*100,1)}', flush=True)
# print(f'P {np.round(smp(P.cpu().view(-1)).detach().numpy()*100,1)}', flush=True)
# # print(f'P {np.round((P.cpu().view(-1)).detach().numpy()*100,1)}', flush=True)
# time1 = time.time()
return loss_min, smt(T_rec).detach(), smp(P_rec).detach(), T_rec.detach()
def count_y(KINDS, feat_cord, label, cluster_sum):
# feat_cord = torch.tensor(final_feat)
cnt = [[] for _ in range(3)]
cnt[0] = torch.zeros(KINDS)
cnt[1] = torch.zeros(KINDS, KINDS)
cnt[2] = torch.zeros(KINDS, KINDS, KINDS)
feat_cord = feat_cord.cpu().numpy()
dist = distCosine(feat_cord, feat_cord)
max_val = np.max(dist)
am = np.argmin(dist,axis=1)
for i in range(cluster_sum):
dist[i][am[i]] = 10000.0 + max_val
min_dis_id = np.argmin(dist,axis=1)
for i in range(cluster_sum):
dist[i][min_dis_id[i]] = 10000.0 + max_val
min_dis_id2 = np.argmin(dist,axis=1)
for x1 in range(cluster_sum):
cnt[0][label[x1]] += 1
cnt[1][label[x1]][label[min_dis_id[x1]]] += 1
cnt[2][label[x1]][label[min_dis_id[x1]]][label[min_dis_id2[x1]]] += 1
return cnt
def count_y_known2nn(KINDS, label_list, cluster_sum=None):
if cluster_sum is not None:
sample = np.random.choice(range(label_list.shape[0]), cluster_sum, replace=False)
label_list = label_list[sample]
cnt = [[] for _ in range(3)]
cnt[0] = torch.zeros(KINDS)
cnt[1] = torch.zeros(KINDS, KINDS)
cnt[2] = torch.zeros(KINDS, KINDS, KINDS)
for i in range(cluster_sum):
cnt[0][label_list[i][0]] += 1
cnt[1][label_list[i][0]][label_list[i][1]] += 1
cnt[2][label_list[i][0]][label_list[i][1]][label_list[i][2]] += 1
return cnt
|
UCSC-REAL/fair-eval
|
hoc.py
|
hoc.py
|
py
| 7,838 |
python
|
en
|
code
| 5 |
github-code
|
6
|
32995735130
|
#!/usr/bin/env python
# coding: utf-8
# In[2]:
## We'll be doing this from scratch, so all imports will come from
## the Python standard library or 3rd-party tools
import socket
import struct
import base64
import json
import hashlib
import time
import enum
import xml.etree.ElementTree as ET
from enum import Enum
import pandas as pd
import password_obfuscation as obf
# # iRODS Protocol Cookbook
#
# This notebook will provide example implementations of key
# operations in the iRODS protocol. Read from the beginnging or use this table of contents to skip to the section that interests you. Once you've jumped to that spot, make sure the cell with the anchor is selected and run `Cell > Run All Above`.
#
# ## Table of Contents
#
# * [Handshake](#handshake)
# * [Authentication](#authentication)
# * [ils](#ils)
# - [Stat a collection](#stat_coll)
# - [Querying for the Data Objects in a Container](#data_objects_query)
# * [Data transfer](#data_transfer)
# * [Streaming](#streaming)
# * [Admin](#admin)
# * [Rule Exec](#rule_exec)
# * [Changing Your Password](#ipasswd)
# * [Disconnect](#disconnect)
# * [Appendix: iRODS Protocol Gotchas](#gotchas)
# This tutorial assumes you have deployed iRODS in Docker using
# the script stand_it_up.py from the iRODS Testing Environment,
# which can be found on Github [here](https://github.com/irods/irods_testing_environment)
# To find the IP address associated with your Docker container, you can run this one-liner:
# ```bash
# docker inspect -f '{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}' ubuntu-2004-postgres-1012_irods-catalog-provider_1
# ```
#
# *However,* this notebook works just fine for any iRODS deployment. Simply change the values `HOST`, `RODS_USER`, `PASSWORD`. It is recommended to create a new rodsadmin account or use an account whose password you are comfortable changing, and to start in the home collection of that user.
# In[3]:
HOST = "172.27.0.3"
RODS_USER = "rods"
PASSWORD = "rods"
# In[4]:
PORT = 1247 ## This is the standard iRODS port
MAX_PASSWORD_LENGTH = 50 ## This constant comes
## from the internals
## of the iRODS server
API_TABLE = {
"AUTHENTICATION_APN":110000, ## The API number for the 4.3.0 auth framework
"OBJ_STAT_AN":633,
"GEN_QUERY_AN":702,
"DATA_OBJ_PUT_AN": 606,
"DATA_OBJ_OPEN_AN": 602,
"DATA_OBJ_LSEEK_AN": 674,
"DATA_OBJ_CLOSE_AN": 673,
"DATA_OBJ_READ_AN": 675,
"GENERAL_ADMIN_AN": 701,
"EXEC_MY_RULE_AN": 625,
"USER_ADMIN_AN": 714
}
## These provide indices into the catalog,
## which allows the iRODS server to directly query the SQL server
CATALOG_INDEX_TABLE = {
"COL_COLL_NAME" :"501",
"COL_D_DATA_ID" :"401",
"COL_DATA_NAME" :"403",
"COL_COLL_INHERITANCE":"506",
"COL_DATA_MODE" :"421",
"COL_DATA_SIZE" :"407",
"COL_D_MODIFY_TIME" :"420",
"COL_D_CREATE_TIME" :"419"
}
CATALOG_REVERSE_INDEX_TABLE = {
v:k for k,v in CATALOG_INDEX_TABLE.items()
}
## This is an arbitrary string hardcoded into the server; will be checked by the server
RANDOM_STRING_CLIENT_SIDE = "1gCBizHWbwIYyWLoysGzTe6SyzqFKMniZX05faZHWAwQKXf6Fs"
test_value = obf.encode(RANDOM_STRING_CLIENT_SIDE)
# First, we're going to write a small library of functions that do some
# of the dirty work.
# Feel free to skip to [here](#start_of_real_work), where we start using this library to send
# and read messages, referring to this part to figure out how
# the part you're interested in was implemented.
#
# *Notice* that the comment above `def header(...` includes the packing instruction string for `MsgHeader_PI` ("PI" stands for "Packing Instruction"). This string has a special syntax that the iRODS server uses to define these message types.
# In[5]:
## We can define these in an enum since
## header types are a closed class and are not sensitive to any3
## particular API.
class HeaderType(Enum):
RODS_CONNECT = "RODS_CONNECT"
RODS_DISCONNECT = "RODS_DISCONNECT"
RODS_API_REQ = "RODS_API_REQ"
RODS_API_REPLY = "RODS_API_REPLY"
RODS_VERSION = "RODS_VERSION"
# #define MsgHeader_PI "str type[HEADER_TYPE_LEN]; int msgLen; int errorLen; int bsLen; int intInfo;"
def header(header_type: HeaderType, msg: bytes,
error_len=0, bs_len=0, int_info=0) -> bytes:
return f"""
<MsgHeader_PI>
<type>{header_type}</type>
<msgLen>{len(msg)}</msgLen>
<errorLen>{error_len}</errorLen>
<bsLen>{bs_len}</bsLen>
<intInfo>{int_info}</intInfo>
</MsgHeader_PI>
""".replace(' ', '').replace('\n', '').encode('utf-8') ## The protocol is whitespace-insensitive,
## but I removed them here for cleanliness
## and efficiency for when this gets pushed
## through the pipe.
def indent(elem, level=0):
i = "\n" + level*" "
j = "\n" + (level-1)*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for subelem in elem:
indent(subelem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = j
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = j
return elem
# In[6]:
def send_header(header: bytes, sock: socket) -> None:
header_len = int.to_bytes(len(header), byteorder='big', length=4) ## The first part of all iRODS messages
## must be 4 bytes indicating how long
## the header is in bytes. These bytes
## and the entire integer must be transmitted
## in big-endian order
print(f"[header_len] - [{header_len}]")
print(f"[header] - [{header}]")
sock.sendall(header_len)
sock.sendall(header)
def send_msg(msg: bytes,
sock: socket,
error_buf: bytes = None,
bs_buf: bytes = None) -> None:
sock.sendall(msg)
print(f"[msg] - [{msg}]")
if error_buf:
sock.sendall(error_buf)
if bs_buf:
sock.sendall(bs_buf)
def recv(sock: socket) -> [ET, ET]:
header_len = int.from_bytes(sock.recv(4), byteorder='big')
print(f"HEADER LEN: [{header_len}]")
header = ET.fromstring(sock.recv(header_len).decode("utf-8"))
ET.indent(header)
ET.dump(header)
if header_len > 0: ## TODO: It's odd that this is included as a case because something would be really
## broken if this were true
msg_len = int(header.find("msgLen").text)
bs_len = int(header.find("bsLen").text)
error_len = int(header.find("errorLen").text)
if msg_len > 0:
msg = ET.fromstring(sock.recv(
int(header.find("msgLen").text)).decode("utf-8"))
ET.indent(msg)
ET.dump(msg)
if error_len > 0:
print("[recv] getting error stack")
print(sock.recv(error_len))
if bs_len > 0:
print("[recv] getting bs buf")
print(sock.recv(bs_len))
return header, msg
else:
if error_len > 0:
print("[recv] getting error stack")
print(sock.recv(error_len))
if bs_len > 0:
print("[recv] getting bs buf")
print(sock.recv(bs_len))
return header, None
else:
return header, None
# ## Start of the "Real Work" <a class="anchor" id="start_of_real_work"></a>
# Note that even if you are using a plugin for authentication, iRODS may still refer to the information in the StartupPack_PI during authentication. If you are experiencing bugs during that step, check your Startup Pack as well as the structures associated with your specific plugin.
# In[7]:
class IrodsProt(Enum):
NATIVE_PROT = 0
XML_PROT = 1
## Now, let's start the connection process. First, we need an easy way to create the StartupPack.
## define StartupPack_PI "int irodsProt; int reconnFlag; int connectCnt; str proxyUser[NAME_LEN];\
## str proxyRcatZone[NAME_LEN]; str clientUser[NAME_LEN]; str clientRcatZone[NAME_LEN];\
## str relVersion[NAME_LEN]; str apiVersion[NAME_LEN]; str option[LONG_NAME_LEN];"
def startup_pack(irods_prot=IrodsProt.XML_PROT.value,
reconn_flag=0,
connect_cnt=0,
proxy_user=None,
proxy_rcat_zone=None,
client_user="rods",
client_rcat_zone="tempZone",
rel_version="4.3.0",
api_version="d", ## This MUST ALWAYS be "d." This value has been hardcoded into iRODS
## since very early days.
option=None ## This option controls, among other things, whether SSL negotiation is required.
) -> bytes:
return f"""
<StartupPack_PI>
<irodsProt>{irods_prot}</irodsProt>
<reconnFlag>{reconn_flag}</reconnFlag>
<connectCnt>{connect_cnt}</connectCnt>
<proxyUser>{proxy_user or client_user}</proxyUser>
<proxyRcatZone>{proxy_rcat_zone or client_rcat_zone}</proxyRcatZone>
<clientUser>{client_user}</clientUser>
<clientRcatZone>{client_rcat_zone}</clientRcatZone>
<relVersion>rods{rel_version}</relVersion>
<apiVersion>{api_version}</apiVersion>
<option>{option}</option>
</StartupPack_PI>
""".replace(" ", "").replace("\n", "").encode("utf-8")
# We're going to be sending raw bytes over a socket, so let's create one
# If at some point the Notebook stops working, remember
# to manually close the socket.
# In[8]:
conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
conn.connect((HOST, PORT))
# ## Handshake <a class="anchor" id="handshake"></a>
# In[ ]:
sp = startup_pack(client_user=RODS_USER)
sp
# In[ ]:
h = header(HeaderType.RODS_CONNECT.value, sp)
h
# In[ ]:
send_header(h, conn)
send_msg(sp, conn)
# In[ ]:
## In this Version_PI, status of 0 lets us know that negotiation has been successful.
h, msg = recv(conn)
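# As a quick sanity check you can read the status field out of the Version_PI yourself. The
# element names here come straight from the Version_PI packing instruction, and a real client
# would translate a nonzero status into an iRODS error code rather than simply asserting.
# In[ ]:
version_status = int(msg.find("status").text)
assert version_status == 0, f"handshake failed with status {version_status}"
print(f"server reports {msg.find('relVersion').text}")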
# ## Authentication <a class="anchor" id="authentication"></a>
#
# Next up, we need to authenticate using our API of choice.
# Since this is a basic cookbook for 4.3.0, we'll be using the new
# auth framework's port of native authentication.
# This API works by exchanging binary buffers between client and server.
# Since XML must be valid UTF-8, this binary data MUST be base64-encoded.
# In[ ]:
def encode_dict_as_base64_json(d: dict):
return base64.b64encode(
json.dumps(d).encode('utf-8'))
# The payload is decoded because otherwise Python will
# add extra characters to give a string representation of the bytes object
# In[ ]:
def read_base64_into_json(bsix: bytes, trunc=False) -> dict:
decoded = base64.b64decode(bsix).decode('utf-8')
return json.loads(decoded[:-1]) if trunc else json.loads(decoded)
## #define BytesBuf_PI "int buflen; char *buf(buflen);"
def bin_bytes_buf(payload: dict) -> bytes:
payload = encode_dict_as_base64_json(payload)
return f"""
<BinBytesBuf_PI>
<buflen>{len(payload)}</buflen>
<buf>{payload.decode('utf-8')}</buf>
</BinBytesBuf_PI>
""".replace(" ", "").replace("\n","").encode('utf8')
# In[ ]:
## Some API-specific parameters
auth_ctx = {
"a_ttl":"0",
"force_password_prompt":"true",
"next_operation":"auth_agent_auth_request",
"scheme":"native",
"user_name":"rods",
"zone_name":"tempZone"
}
# In[ ]:
initial_auth_msg = bin_bytes_buf(auth_ctx)
print(initial_auth_msg)
h = header(HeaderType.RODS_API_REQ.value,
initial_auth_msg,
int_info=API_TABLE["AUTHENTICATION_APN"])
send_header(h, conn)
send_msg(initial_auth_msg, conn)
# In[ ]:
h, m = recv(conn)
# If you were writing a real client library or application, you would want to check intInfo for error codes
# so you could respond appropriately. Here, we're going to move on blissfully unaware.
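# A minimal version of that check might look like the cell below. Negative intInfo values are
# iRODS error codes; a real client would map them to the names in rodsErrorTable.h instead of
# just raising.
# In[ ]:
def check_int_info(reply_header):
    int_info = int(reply_header.find("intInfo").text)
    if int_info < 0:
        raise RuntimeError(f"iRODS API call failed with error code {int_info}")
    return int_info
check_int_info(h)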
# In[ ]:
auth_ctx = read_base64_into_json(m.find("buf").text, trunc=True)
request_result = auth_ctx[ 'request_result'].encode('utf-8')
print(f"REQUEST RESULT: [{request_result}]")
# In[ ]:
def pad_password(pw: str) -> bytes:
return struct.pack("%ds" % MAX_PASSWORD_LENGTH, pw.encode("utf-8").strip())
## The "signature" is taken from the first 16 bytes of the challenge string
## and is used by the server to validate certain operations,
## like password changes.
signature = "".join("{:02x}".format(c) for c in request_result)
print(f"SIGNATURE: [{signature}]")
## Native auth specific operations
m = hashlib.md5()
m.update(request_result)
m.update(pad_password(PASSWORD))
digest = m.digest()
encoded_digest = base64.b64encode(digest).decode('utf-8')
auth_ctx['digest'] = encoded_digest
auth_ctx['next_operation'] = 'auth_agent_auth_response'
challenge_response = bin_bytes_buf(auth_ctx)
print(challenge_response)
# In[ ]:
h = header(HeaderType.RODS_API_REQ.value,
challenge_response,
int_info=API_TABLE["AUTHENTICATION_APN"])
send_header(h, conn)
send_msg(challenge_response, conn)
# Once again, an `intInfo` of 0 is the auth framework's way of telling us that we've successfully authenticated. Decode the buf frame base64 if you'd like to double check the state of the auth context.
# In[ ]:
h, m = recv(conn)
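# If you'd like to double-check, the base64/JSON helpers from earlier can decode the returned
# context. This is purely for inspection; the exact keys you see depend on the auth plugin, and
# you can drop trunc=True if the decode complains.
# In[ ]:
final_auth_ctx = read_base64_into_json(m.find("buf").text, trunc=True)
print(final_auth_ctx.get("next_operation"))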
# # ils <a class="anchor" id="ils"></a>
# Next, let's perform an `ils`. The iCommands implementation does a little bit of verification, so we'll see how to perform object stat-ing, genQuery, and specQuery here.
# Before delving into the substance of an iRODS workflow, it helps to keep the general flow of the protocol in mind: after the handshake, the client and server alternate between API requests and the corresponding responses until the client sends a disconnect.
# ## Stat a Collection <a class="anchor" id="stat_coll"></a>
# This step is necessary to make sure that the collection about to be listed actually exists.
# First, we'll have to generate a `DataObjInp_PI`. This is a generic message type used for all sorts of operations. It also contains a `KeyValPair_PI`, an important data structure in the iRODS protocol: it cannot be sent on its own, but it is the main vehicle for passing optional parameters. Internally, this `KeyValPair_PI` is a cond_input structure.
# In[ ]:
## #define DataObjInp_PI "str objPath[MAX_NAME_LEN]; int createMode; int openFlags; double offset; \
## double dataSize; int numThreads; int oprType; struct *SpecColl_PI; struct KeyValPair_PI;"
def data_obj_inp(
obj_path,
create_mode="0",
open_flags="0",
offset="0",
data_size="0",
num_threads="0",
opr_type="0",
cond_input= {}
) -> bytes:
obj_inp = ET.fromstring(f"""
<DataObjInp_PI>
<objPath>{obj_path}</objPath>
<createMode>{create_mode}</createMode>
<openFlags>{open_flags}</openFlags>
<offset>{offset}</offset>
<dataSize>{data_size}</dataSize>
<numThreads>{num_threads}</numThreads>
<oprType>{opr_type}</oprType>
</DataObjInp_PI>
""")
ET.indent(obj_inp)
obj_inp = append_kvp(obj_inp, cond_input)
ret = ET.tostring(obj_inp).decode("utf-8").replace("\n", "").replace(" ", "").encode('utf-8')
print(ret)
return ret
# Next, we'll need some utility methods. How these work might not be totally obvious, so consider reading ahead and revisiting these once you've seen how it's used in the stat API Call.
# In[ ]:
def append_kvp(et, data):
kvp = ET.Element("KeyValPair_PI")
sslen = ET.Element("ssLen")
sslen.text = str(len(data))
kvp.append(sslen)
for key in data.keys():
keyWord = ET.Element("keyWord")
keyWord.text = key
kvp.append(keyWord)
for value in data.values():
svalue = ET.Element("svalue")
svalue.text = value
kvp.append(svalue)
et.append(kvp)
return et
def append_iivp(et, data):
iivp = ET.Element("InxIvalPair_PI")
sslen = ET.Element("iiLen")
sslen.text = str(len(data))
iivp.append(sslen)
for key in data.keys():
inx = ET.Element("inx")
inx.text = key
iivp.append(inx)
for value in data.values():
ivalue = ET.Element("ivalue")
ivalue.text = value
iivp.append(ivalue)
et.append(iivp)
return et
def append_ivp(et, data):
ivp = ET.Element("InxValPair_PI")
islen = ET.Element("isLen")
islen.text = str(len(data))
ivp.append(islen)
for key in data.keys():
inx = ET.Element("inx")
inx.text = key
ivp.append(inx)
for value in data.values():
svalue = ET.Element("svalue")
svalue.text = value
ivp.append(svalue)
et.append(ivp)
return et
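# To make the helpers above a little more concrete, here is roughly what append_kvp does to an
# otherwise empty element; this cell is purely illustrative and sends nothing to the server.
# In[ ]:
demo = append_kvp(ET.fromstring("<DataObjInp_PI></DataObjInp_PI>"), {"dataType": "generic"})
print(ET.tostring(demo).decode("utf-8"))
## -> <DataObjInp_PI><KeyValPair_PI><ssLen>1</ssLen><keyWord>dataType</keyWord><svalue>generic</svalue></KeyValPair_PI></DataObjInp_PI>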
# In[ ]:
stat_obj_inp = data_obj_inp("/tempZone/home/rods")
h = header(HeaderType.RODS_API_REQ.value,
stat_obj_inp,
int_info=API_TABLE["OBJ_STAT_AN"])
send_header(h, conn)
send_msg(stat_obj_inp, conn)
# If everything has gone smoothly, you should receive a `RodsObjStat_PI` from the server. An `objType` of 2 tells us that the thing we stat'd was a collection. Since collections are purely virtual objects, `objSize` is 0.
# In[ ]:
h, m = recv(conn)
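# A client might interpret the stat reply along these lines; the element names come from the
# RodsObjStat_PI packing instruction, so double-check them against the reply printed above.
# In[ ]:
obj_type = int(m.find("objType").text)
print("collection" if obj_type == 2 else "data object (or other)",
      "with objSize", m.find("objSize").text)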
# # Querying for the Data Objects in a Container <a class="anchor" id="data_objects_query"></a>
#
# Now we know our target is there. Let's go ahead and read its contents. This happens through a genQuery. For details about the first-generation GenQuery API, see [here](https://github.com/irods/irods_docs/blob/main/docs/developers/library_examples.md#querying-the-catalog-using-general-queries). For information about the GenQuery2 interface (under development as of time of writing), see [here](https://www.youtube.com/watch?v=3dR_JoGA6wA&t=654s&ab_channel=TheiRODSConsortium).
# In[ ]:
## #define GenQueryInp_PI "int maxRows; int continueInx; int partialStartIndex; \
## int options; struct KeyValPair_PI; struct InxIvalPair_PI; struct InxValPair_PI;"
def gen_query(
max_rows=256,
continue_inx=0,
partial_start_index=0,
options=0,
cond_input={},
select_inp={},
sql_cond_inp={}
) -> bytes:
ret = ET.fromstring(f"""
<GenQueryInp_PI>
<maxRows>{max_rows}</maxRows>
<continueInx>{continue_inx}</continueInx>
<partialStartIndex>{partial_start_index}</partialStartIndex>
<options>{options}</options>
</GenQueryInp_PI>
""")
ret = append_kvp(ret, cond_input)
ret = append_iivp(ret, select_inp)
ret = append_ivp(ret, sql_cond_inp)
return ET.tostring(ret).decode("utf-8").replace(" ", "").replace("\n", "").encode("utf-8")
## The Catalog ships with a table of SQL statements ("specific queries") that cover common needs.
## The first link above also has an example of a specific query.
## Note that the server will send back a GenQueryOut_PI; there is no
## message type dedicated to results from a specQuery. However, all the SqlResult_PIs
## will have `attriInx` set to 0, since knowing the query string allows the client to
## reconstruct the order of the columns.
def spec_query(
sql,
arg_1,
max_rows=256,
continue_inx=0,
row_offset=0,
options=0,
cond_input={}
) -> bytes:
ret = ET.fromstring(f"""
<specificQueryInp_PI>
<sql>{sql}</sql>
<arg1>{arg_1}</arg1>
<maxRows>{max_rows}</maxRows>
<continueInx>{continue_inx}</continueInx>
<rowOffset>{row_offset}</rowOffset>
<options>{options}</options>
</specificQueryInp_PI>
""")
ret = append_kvp(ret, cond_input)
return ET.tostring(ret)
# In[ ]:
gq = gen_query(
select_inp={
CATALOG_INDEX_TABLE["COL_COLL_NAME"] :"1",
CATALOG_INDEX_TABLE["COL_DATA_NAME"] :"1",
CATALOG_INDEX_TABLE["COL_D_DATA_ID"] :"1",
CATALOG_INDEX_TABLE["COL_DATA_MODE"] :"1",
CATALOG_INDEX_TABLE["COL_DATA_SIZE"] :"1",
CATALOG_INDEX_TABLE["COL_D_MODIFY_TIME"]:"1",
CATALOG_INDEX_TABLE["COL_D_CREATE_TIME"]:"1"
},
sql_cond_inp={
CATALOG_INDEX_TABLE["COL_COLL_NAME"] :f"= '/tempZone/home/{RODS_USER}'"
}
)
# *NB:* It might be easier to make sense of the server's response if you make sure the directory you're about to stat is populated.
# One quick thing before we send this over to the server: the iRODS dialect of XML has a few quirks related to encoding special characters. Some special characters it does not escape at all. For others, it uses a non-standard encoding. For example, iRODS XML does not distinguish between "\`" and "'" (backticks and single quotes). For these reasons, we'll need to write some functions that translate between standard XML and iRODS XML.
# In[ ]:
## Keys are the standard encodings; values are their iRODS-dialect equivalents.
STANDARD_TO_IRODS_TABLE = {
    b'"'      :b"&quot;",
    b"&quot;" :b"&quot;",
    b"&apos;" :b"&apos;",
    b"&#9;"   :b"\t",
    b"&#13;"  :b"\r",
    b"&#10;"  :b"\n",
    b"`"      :b"&apos;",
    b"'"      :b"&apos;"
}
def translate_xml_to_irods_dialect(xml_bytes):
    for prefix in STANDARD_TO_IRODS_TABLE:
        xml_bytes = xml_bytes.replace(prefix, STANDARD_TO_IRODS_TABLE[prefix])
    return xml_bytes
gq = translate_xml_to_irods_dialect(gq)
print(gq)
h = header(HeaderType.RODS_API_REQ.value,
gq,
int_info=API_TABLE["GEN_QUERY_AN"])
# In[ ]:
send_header(h, conn)
send_msg(gq, conn)
# The results from this GenQuery might be a little hard to grok.
# In[ ]:
h, m = recv(conn)
# To demonstrate how they amount to valid SQL results, let's translate these into a Pandas DataFrame. To see a similar example in C++ that operates above the protocol level, refer to the genQuery1 documentation linked above.
# In[ ]:
def read_gen_query_results_into_dataframe(gqr):
## Each SqlResult_PI is a column of data
## Collect them all into a list
## We can safely ignore the "reslen" attribute since the Python XML
## API already knows how large each string is, but you might use it for error checking
row_cnt = int(gqr.find("rowCnt").text)
attribute_cnt = int(gqr.find("attriCnt").text)
data = {}
for result in gqr.findall("SqlResult_PI"):
attri_inx = result.find("attriInx").text
if attri_inx == "0":
continue
# res_len = int(result.find("reslen").text)
values = result.findall("value")
col = [value.text for value in values]
data[CATALOG_REVERSE_INDEX_TABLE[attri_inx]] = col
return pd.DataFrame(data)
read_gen_query_results_into_dataframe(m)
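# GenQueryOut_PI also carries a continueInx field. When it comes back greater than zero, more
# rows are waiting on the server, and you can fetch the next page by re-sending the query with
# continue_inx set to that value. The cell below is only a sketch (our test collection is small
# enough that continueInx will normally be 0), and a production client would also handle the
# terminating reply, which can arrive as a CAT_NO_ROWS_FOUND error.
# In[ ]:
continue_inx = int(m.find("continueInx").text)
if continue_inx > 0:
    next_page = translate_xml_to_irods_dialect(gen_query(
        continue_inx=continue_inx,
        select_inp={
            CATALOG_INDEX_TABLE["COL_COLL_NAME"]    :"1",
            CATALOG_INDEX_TABLE["COL_DATA_NAME"]    :"1",
            CATALOG_INDEX_TABLE["COL_D_DATA_ID"]    :"1",
            CATALOG_INDEX_TABLE["COL_DATA_MODE"]    :"1",
            CATALOG_INDEX_TABLE["COL_DATA_SIZE"]    :"1",
            CATALOG_INDEX_TABLE["COL_D_MODIFY_TIME"]:"1",
            CATALOG_INDEX_TABLE["COL_D_CREATE_TIME"]:"1"
        },
        sql_cond_inp={
            CATALOG_INDEX_TABLE["COL_COLL_NAME"]:f"= '/tempZone/home/{RODS_USER}'"
        }
    ))
    h = header(HeaderType.RODS_API_REQ.value, next_page, int_info=API_TABLE["GEN_QUERY_AN"])
    send_header(h, conn)
    send_msg(next_page, conn)
    h, m = recv(conn)
    read_gen_query_results_into_dataframe(m)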
# # Data Transfer <a class="anchor" id="data_transfer"></a>
#
# Now that we can see the contents of this collection, let's create a new data object inside of it.
# This will showcase some of the more advanced features of `cond_input`.
# In[ ]:
## Suppose we want to transfer a file containing this text.
hello_cpp = """
#include <iostream>
int main() {
std::cout << "Hello World!";
return 0;
}
"""
# In[ ]:
data_object_name = "hello.cpp"
data_size=str(len(hello_cpp.encode("utf-8")))
iput_payload = data_obj_inp(
f"/tempZone/home/{RODS_USER}/{data_object_name}",
open_flags="2",
data_size=data_size,
opr_type="1",
cond_input={
"dataType":"generic",
"dataSize":data_size,
"dataIncluded":" " ## Generally, keys with empty values in cond_input act as flags
}
)
h = header(HeaderType.RODS_API_REQ.value,
iput_payload,
int_info=API_TABLE["DATA_OBJ_PUT_AN"],
bs_len=len(hello_cpp.encode("utf-8")))
send_header(h, conn)
send_msg(iput_payload, conn, bs_buf=hello_cpp.encode("utf-8"))
# Once you've received the response from the server and verified that `intInfo` is zero, re-run the genQuery that produced the ls from before. You should see the new file there.
# In[ ]:
h, m = recv(conn)
# In[ ]:
gq = gen_query(
    select_inp={
        CATALOG_INDEX_TABLE["COL_COLL_NAME"]    :"1",
        CATALOG_INDEX_TABLE["COL_DATA_NAME"]    :"1",
        CATALOG_INDEX_TABLE["COL_D_DATA_ID"]    :"1",
        CATALOG_INDEX_TABLE["COL_DATA_MODE"]    :"1",
        CATALOG_INDEX_TABLE["COL_DATA_SIZE"]    :"1",
        CATALOG_INDEX_TABLE["COL_D_MODIFY_TIME"]:"1",
        CATALOG_INDEX_TABLE["COL_D_CREATE_TIME"]:"1"
    },
    sql_cond_inp={
        CATALOG_INDEX_TABLE["COL_COLL_NAME"]:"= '/tempZone/home/rods'"
    }
)
gq = translate_xml_to_irods_dialect(gq)
## Build the header only after the final query payload exists, so msgLen matches what we actually send.
h = header(HeaderType.RODS_API_REQ.value,
           gq,
           int_info=API_TABLE["GEN_QUERY_AN"])
send_header(h, conn)
send_msg(gq, conn)
h, m = recv(conn)
read_gen_query_results_into_dataframe(m)
# ## Streaming <a class="anchor" id="streaming"></a>
#
# Modern iRODS versions implement parallel transfer using multiple streams. This documentation won't implement parallel transfer, but will show how to use the streaming API that it is built on top of.
# In[ ]:
## We'll open this file, seek past the #include line, and read.
## Streamed putting works similarly, and in general
## you can think of these calls as analogous to their UNIX counterparts.
streaming_open_request = data_obj_inp(
"/tempZone/home/rods/hello.cpp",
open_flags="2",
data_size="-1" ## We're getting the data from somewhere else,
## so obviously we don't know how big it is
)
h = header(
HeaderType.RODS_API_REQ.value,
streaming_open_request,
int_info=API_TABLE["DATA_OBJ_OPEN_AN"]
)
send_header(h, conn)
send_msg(streaming_open_request, conn)
# In[ ]:
h, m = recv(conn)
# In[ ]:
print(h.find("intInfo").text)
# In[ ]:
## This time intInfo, if it is positive, will be the value of the L1 descriptor returned by the server,
## which is an opaque handle to a replica of the data object we just opened.
## Notice that it's 3, just as you'd expect for the first descriptor handed out on a UNIX-like system
## (0, 1, and 2 are conceptually taken by stdin, stdout, and stderr).
l1_descriptor = h.find("intInfo").text
seek_len = 22
## These constants are taken from their Linux equivalents
## and work the same way
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
## #define OpenedDataObjInp_PI "int l1descInx; int len; int whence; int oprType; \
## double offset; double bytesWritten; struct KeyValPair_PI;"
def opened_data_obj_inp(l1_desc,
len_=0,
whence=SEEK_SET,
opr_type=0,
offset=0,
bytes_written=0,
cond_input={}):
ret = ET.fromstring(f"""
<OpenedDataObjInp_PI>
<l1descInx>{l1_desc}</l1descInx>
<len>{len_}</len>
<whence>{whence}</whence>
<oprType>{opr_type}</oprType>
<offset>{offset}</offset>
<bytesWritten>{bytes_written}</bytesWritten>
</OpenedDataObjInp_PI>
""")
ret = append_kvp(ret, cond_input)
return ET.tostring(ret).decode("utf-8").replace(" ", "").replace("\n", "").encode("utf-8")
# In[ ]:
seeker = opened_data_obj_inp(l1_descriptor, offset=seek_len)
print(seeker)
h = header(
HeaderType.RODS_API_REQ.value,
seeker,
int_info=API_TABLE["DATA_OBJ_LSEEK_AN"]
)
send_header(h, conn)
send_msg(seeker, conn)
# In[ ]:
h, m = recv(conn)
# In[ ]:
reader = opened_data_obj_inp(l1_descriptor, len_=8192) ## The len parameter is important --
## this tells the server how many
## bytes to stream back to the client
print(reader)
h = header(
HeaderType.RODS_API_REQ.value,
reader,
int_info=API_TABLE["DATA_OBJ_READ_AN"]
)
send_header(h, conn)
send_msg(reader, conn)
# In[ ]:
h, m = recv(conn)
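# Our recv() helper prints the byte-stream buffer rather than returning it, but for a read call
# that buffer is the interesting part: bsLen in the reply header says how many bytes of file
# content follow, and intInfo carries the number of bytes read. As a sketch (not part of the
# original helpers), a variant that hands the buffer back might look like this.
# In[ ]:
def recv_with_bs(sock: socket):
    header_len = int.from_bytes(sock.recv(4), byteorder='big')
    hdr = ET.fromstring(sock.recv(header_len).decode("utf-8"))
    msg_len = int(hdr.find("msgLen").text)
    error_len = int(hdr.find("errorLen").text)
    bs_len = int(hdr.find("bsLen").text)
    msg = ET.fromstring(sock.recv(msg_len).decode("utf-8")) if msg_len > 0 else None
    if error_len > 0:
        sock.recv(error_len)
    bs_buf = b""
    while len(bs_buf) < bs_len:  ## recv() may return fewer bytes than requested
        bs_buf += sock.recv(bs_len - len(bs_buf))
    return hdr, msg, bs_buf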
# In[ ]:
closer = opened_data_obj_inp(l1_descriptor)
h = header(
HeaderType.RODS_API_REQ.value,
closer,
int_info=API_TABLE["DATA_OBJ_CLOSE_AN"]
)
# In[ ]:
send_header(h, conn)
send_msg(closer, conn)
# In[ ]:
h, m = recv(conn)
# # Admin <a class="anchor" id="admin"></a>
# Next, we're going to look at how to perform admin tasks. Recall from the section where we implemented "ils" that the iRODS server ships with prebuilt queries stored in the catalog. These are called "specific queries." The iCommand `asq` allows administrators to add new catalog queries. Let's implement `asq` straight from the protocol.
#
# You might notice that the parameters for `generalAdminInp_PI` are not very self-describing. To get a better sense of what you can do with the admin API and how to map those to arguments, see [`server/api/src/rsGeneralAdmin.cpp`](https://github.com/irods/irods/blob/main/server/api/src/rsGeneralAdmin.cpp), and specifically the function `_rsGeneralAdmin`.
# In[ ]:
dummy_spec_query = "SELECT data_name FROM r_data_main"
## #define generalAdminInp_PI "str *arg0; str *arg1; str *arg2; \
## str *arg3; str *arg4; str *arg5; str *arg6; str *arg7; str *arg8; str *arg9;"
def general_admin_inp(
arg_zero=" ",
arg_one=" ",
arg_two=" ",
arg_three=" ",
arg_four=" ",
arg_five=" ",
arg_six=" ",
arg_seven=" ",
arg_eight=" ",
arg_nine=" "
):
return f"""
<generalAdminInp_PI>
<arg0>{arg_zero}</arg0>
<arg1>{arg_one}</arg1>
<arg2>{arg_two}</arg2>
<arg3>{arg_three}</arg3>
<arg4>{arg_four}</arg4>
<arg5>{arg_five}</arg5>
<arg6>{arg_six}</arg6>
<arg7>{arg_seven}</arg7>
<arg8>{arg_eight}</arg8>
<arg9>{arg_nine}</arg9>
</generalAdminInp_PI>
""".replace("\n", "").encode("utf-8")
# In[ ]:
new_spec_query_req = general_admin_inp(
arg_zero="add",
arg_one="specificQuery",
arg_two=dummy_spec_query,
arg_three="another_dummy_spec_query"
)
h = header(
HeaderType.RODS_API_REQ.value,
new_spec_query_req,
int_info=API_TABLE["GENERAL_ADMIN_AN"]
)
# In[48]:
send_header(h, conn)
send_msg(new_spec_query_req, conn)
# In[49]:
h, m = recv(conn) ## Assuming int_info is 0, you should now be able to run your query on the command line
## under the alias we registered: "iquest --no-page --sql another_dummy_spec_query"
# # Rule Exec <a class="anchor" id="rule_exec"></a>
# The last thing we'll look at is sending rule execution requests.
# To keep things simple, we won't build this message procedurally, but the structure of the XML should be clear from context. The text of this rule is taken from [documentation](https://vlaams-supercomputing-centrum-vscdocumentation.readthedocs-hosted.com/en/latest/data/workflow_automation.html) produced by the Vlaams Supercomputing Center.
# In[59]:
rule_text = """
veryAdvancedHelloWorldRule{
writeLine("stdout","$userNameClient says '*greeting1 *greeting2'")
}
"""
## #define ExecMyRuleInp_PI "str myRule[META_STR_LEN]; struct RHostAddr_PI; \
## struct KeyValPair_PI; str outParamDesc[LONG_NAME_LEN]; struct *MsParamArray_PI;"
rule_exec_PI = ET.fromstring(f"""
<ExecMyRuleInp_PI>
<myRule>@external
veryAdvancedHelloWorldRule{{
writeLine('stdout',"$userNameClient says '*greeting1 *greeting2'")
}}
</myRule>
<RHostAddr_PI>
<hostAddr></hostAddr>
<rodsZone></rodsZone>
<port>0</port>
<dummyInt>0</dummyInt>
</RHostAddr_PI>
<KeyValPair_PI>
<ssLen>1</ssLen>
<keyWord>instance_name</keyWord>
<svalue>irods_rule_engine_plugin-irods_rule_language-instance</svalue>
</KeyValPair_PI>
<outParamDesc>ruleExecOut</outParamDesc>
<MsParamArray_PI>
<paramLen>2</paramLen>
<oprType>0</oprType>
<MsParam_PI>
<label>*greeting1</label>
<type>STR_PI</type>
<STR_PI>
<myStr> 'Hello'</myStr>
</STR_PI>
</MsParam_PI>
<MsParam_PI>
<label>*greeting2</label>
<type>STR_PI</type>
<STR_PI>
<myStr> 'World'</myStr>
</STR_PI>
</MsParam_PI>
</MsParamArray_PI>
</ExecMyRuleInp_PI>
""".encode("utf-8"))
rule_exec_PI = ET.tostring(rule_exec_PI)
rule_exec_PI = translate_xml_to_irods_dialect(rule_exec_PI)
print(rule_exec_PI)
# In[60]:
h = header(
HeaderType.RODS_API_REQ.value,
rule_exec_PI,
int_info=API_TABLE["EXEC_MY_RULE_AN"]
)
send_header(h, conn)
send_msg(rule_exec_PI, conn)
# This rule prints "Hello World!" to stdout. Notice that when you receive that message from the server, the buffer is 5464 bytes long and contains a long string of null/garbage characters after the desired string. This is a known feature of the native rule engine; when printing to stdout, it always allocates a buffer of this size and assumes that the client will look for a null-terminator to determine to where the actual content is.
# In[61]:
h, m = recv(conn)
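# However you end up pulling the stdout buffer out of the reply, trimming it at the first null
# byte is straightforward; this little helper is just a convenience, not part of the protocol.
# In[ ]:
def trim_at_null(buf: bytes) -> str:
    return buf.split(b"\x00", 1)[0].decode("utf-8", errors="replace")
print(trim_at_null(b"rods says 'Hello World'\x00\x00..."))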
# # Changing Your Password <a class="anchor" id="ipasswd"></a>
# In addition to the general admin capabilities, iRODS exposes certain administrative abilities to ordinary rodsusers. Creating a new user would just involve switching parameters in `generalAdminInp_PI`, so we won't repeat that here. However, switching to `userAdminInp_PI` is what lets a regular user change their own password, so that is the message type we'll build next.
# In[53]:
def user_admin(
arg_zero=" ",
arg_one=" ",
arg_two=" ",
arg_three=" ",
arg_four=" ",
arg_five=" ",
arg_six=" ",
arg_seven=" ",
arg_eight=" ",
arg_nine=" "
):
return f"""
<userAdminInp_PI>
<arg0>{arg_zero}</arg0>
<arg1>{arg_one}</arg1>
<arg2>{arg_two}</arg2>
<arg3>{arg_three}</arg3>
<arg4>{arg_four}</arg4>
<arg5>{arg_five}</arg5>
<arg6>{arg_six}</arg6>
<arg7>{arg_seven}</arg7>
<arg8>{arg_eight}</arg8>
<arg9>{arg_nine}</arg9>
</userAdminInp_PI>
""".replace("\n", "").replace(" ", "").encode("utf-8")
# In[54]:
obfuscated_password = obf.obfuscate_new_password("testpass",
PASSWORD,
signature)
pw_change_request = user_admin(
arg_zero="userpw",
arg_one=RODS_USER,
arg_two="password",
arg_three=obfuscated_password
)
# In[55]:
h = header(
HeaderType.RODS_API_REQ.value,
pw_change_request,
int_info=API_TABLE["USER_ADMIN_AN"]
)
send_header(h, conn)
send_msg(pw_change_request, conn)
# In[56]:
h, m = recv(conn)
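# Assuming intInfo came back as 0, the catalog now holds the new password. Keep the local
# constant in sync so that any later reconnects in this notebook use the value we just set.
# In[ ]:
PASSWORD = "testpass"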
# # Disconnect <a class="anchor" id="disconnect"></a>
# Finally, we'll disconnect from the iRODS server.
# In[57]:
def disconnect(sock):
    ## Like every other message, the disconnect header must be preceded by its 4-byte length,
    ## so we reuse send_header here. The empty payload keeps msgLen at 0.
    send_header(
        header(HeaderType.RODS_DISCONNECT.value, b""),
        sock
    )
# In[58]:
disconnect(conn)
conn.close()
# # Appendix: iRODS Protocol Gotchas <a class="anchor" id="gotchas"></a>
# - Forgetting to close a tag can often trip up the server's parsing logic in such a way that it sends a header with `intInfo` 0, or some other indication that the request was successful. However, the next message will have an error code `-15000` indicating a formatting error. A similar behavior is sometimes
# seen if a call to `recv` (or whatever function you write that pulls bytes out of the TCP socket) is left out after an API request.
# - Although the protocol is supposed to be whitespace-agnostic, beginning a message with a newline character (`\n`) can sometimes cause unexpected behavior, so it is safest to strip leading whitespace from your messages.
# - The protocol is order-dependent; that is, the order in which XML elements appear in a message must exactly match the order in which they appear in the corresponding packing instruction string as defined in `rodsPackInstruct.h`.
|
irods/iRODS-Protocol-Cookbook
|
iRODS Protocol Cookbook.py
|
iRODS Protocol Cookbook.py
|
py
| 35,966 |
python
|
en
|
code
| 1 |
github-code
|
6
|
26112361495
|
__authors__ = ["V. Valls"]
__license__ = "MIT"
__date__ = "14/02/2018"
import enum
import logging
from silx.gui import qt
from silx.gui.dialog.ImageFileDialog import ImageFileDialog
from silx.gui.dialog.DataFileDialog import DataFileDialog
import silx.io
logging.basicConfig()
class Mode(enum.Enum):
DEFAULT_FILEDIALOG = 0
IMAGEFILEDIALOG = 1
DATAFILEDIALOG = 2
DATAFILEDIALOG_DATASET = 3
DATAFILEDIALOG_GROUP = 4
DATAFILEDIALOG_NXENTRY = 5
class DialogExample(qt.QMainWindow):
def __init__(self, parent=None):
super(DialogExample, self).__init__(parent)
self.__state = {}
centralWidget = qt.QWidget(self)
layout = qt.QHBoxLayout()
centralWidget.setLayout(layout)
options = self.createOptions()
layout.addWidget(options)
buttonGroup = qt.QGroupBox()
buttonGroup.setTitle("Create dialog")
layout.addWidget(buttonGroup)
buttonLayout = qt.QVBoxLayout()
buttonGroup.setLayout(buttonLayout)
# ImageFileDialog
b1 = qt.QPushButton(self)
b1.setMinimumHeight(50)
b1.setText("Open a dialog")
b1.clicked.connect(self.openDialog)
buttonLayout.addWidget(b1)
b2 = qt.QPushButton(self)
b2.setMinimumHeight(50)
b2.setText("Open a dialog with state stored")
b2.clicked.connect(self.openDialogStoredState)
buttonLayout.addWidget(b2)
b3 = qt.QPushButton(self)
b3.setMinimumHeight(50)
b3.setText("Open a dialog at home")
b3.clicked.connect(self.openDialogAtHome)
buttonLayout.addWidget(b3)
b4 = qt.QPushButton(self)
b4.setMinimumHeight(50)
b4.setText("Open a dialog at computer root")
b4.clicked.connect(self.openDialogAtComputer)
buttonLayout.addWidget(b4)
self.setCentralWidget(centralWidget)
def createOptions(self):
panel = qt.QGroupBox()
panel.setTitle("Options")
layout = qt.QVBoxLayout()
panel.setLayout(layout)
group = qt.QButtonGroup(panel)
radio = qt.QRadioButton(panel)
radio.setText("Qt QFileDialog")
radio.setProperty("Mode", Mode.DEFAULT_FILEDIALOG)
group.addButton(radio)
layout.addWidget(radio)
radio = qt.QRadioButton(panel)
radio.setText("silx ImageFileDialog")
radio.setProperty("Mode", Mode.IMAGEFILEDIALOG)
group.addButton(radio)
layout.addWidget(radio)
radio = qt.QRadioButton(panel)
radio.setChecked(True)
radio.setText("silx DataFileDialog")
radio.setProperty("Mode", Mode.DATAFILEDIALOG)
group.addButton(radio)
layout.addWidget(radio)
radio = qt.QRadioButton(panel)
radio.setText("silx DataFileDialog (filter=dataset)")
radio.setProperty("Mode", Mode.DATAFILEDIALOG_DATASET)
group.addButton(radio)
layout.addWidget(radio)
radio = qt.QRadioButton(panel)
radio.setText("silx DataFileDialog (filter=group)")
radio.setProperty("Mode", Mode.DATAFILEDIALOG_GROUP)
group.addButton(radio)
layout.addWidget(radio)
radio = qt.QRadioButton(panel)
radio.setText("silx DataFileDialog (filter=NXentry)")
radio.setProperty("Mode", Mode.DATAFILEDIALOG_NXENTRY)
group.addButton(radio)
layout.addWidget(radio)
self.__options = group
return panel
def printResult(self, dialog, result):
if not result:
print("Nothing selected")
return
print("Selection:")
if isinstance(dialog, qt.QFileDialog):
print("- Files: %s" % dialog.selectedFiles())
elif isinstance(dialog, ImageFileDialog):
print("- File: %s" % dialog.selectedFile())
print("- URL: %s" % dialog.selectedUrl())
print("- Data URL: %s" % dialog.selectedDataUrl())
image = dialog.selectedImage()
print("- Image: <dtype: %s, shape: %s>" % (image.dtype, image.shape))
elif isinstance(dialog, DataFileDialog):
print("- File: %s" % dialog.selectedFile())
print("- URL: %s" % dialog.selectedUrl())
print("- Data URL: %s" % dialog.selectedDataUrl())
try:
data = dialog.selectedData()
print("- Data: <dtype: %s, shape: %s>" % (data.dtype, data.shape))
except Exception as e:
print("- Data: %s" % e)
url = dialog.selectedDataUrl()
with silx.io.open(url.file_path()) as h5:
node = h5[url.data_path()]
print("- Node: %s" % node)
else:
assert(False)
def createDialog(self):
print("")
print("-------------------------")
print("----- Create dialog -----")
print("-------------------------")
button = self.__options.checkedButton()
mode = button.property("Mode")
if mode == Mode.DEFAULT_FILEDIALOG:
dialog = qt.QFileDialog(self)
dialog.setAcceptMode(qt.QFileDialog.AcceptOpen)
elif mode == Mode.IMAGEFILEDIALOG:
dialog = ImageFileDialog(self)
elif mode == Mode.DATAFILEDIALOG:
dialog = DataFileDialog(self)
elif mode == Mode.DATAFILEDIALOG_DATASET:
dialog = DataFileDialog(self)
dialog.setFilterMode(DataFileDialog.FilterMode.ExistingDataset)
elif mode == Mode.DATAFILEDIALOG_GROUP:
dialog = DataFileDialog(self)
dialog.setFilterMode(DataFileDialog.FilterMode.ExistingGroup)
elif mode == Mode.DATAFILEDIALOG_NXENTRY:
def customFilter(obj):
if "NX_class" in obj.attrs:
return obj.attrs["NX_class"] in [b"NXentry", u"NXentry"]
return False
dialog = DataFileDialog(self)
dialog.setFilterMode(DataFileDialog.FilterMode.ExistingGroup)
dialog.setFilterCallback(customFilter)
else:
assert(False)
return dialog
def openDialog(self):
# Clear the dialog
dialog = self.createDialog()
# Execute the dialog as modal
result = dialog.exec()
self.printResult(dialog, result)
def openDialogStoredState(self):
# Clear the dialog
dialog = self.createDialog()
if dialog.__class__ in self.__state:
dialog.restoreState(self.__state[dialog.__class__])
# Execute the dialog as modal
result = dialog.exec()
self.__state[dialog.__class__] = dialog.saveState()
self.printResult(dialog, result)
def openDialogAtHome(self):
# Clear the dialog
path = qt.QDir.homePath()
dialog = self.createDialog()
dialog.setDirectory(path)
# Execute the dialog as modal
result = dialog.exec()
self.printResult(dialog, result)
def openDialogAtComputer(self):
# Clear the dialog
path = ""
dialog = self.createDialog()
dialog.setDirectory(path)
# Execute the dialog as modal
result = dialog.exec()
self.printResult(dialog, result)
def main():
app = qt.QApplication([])
example = DialogExample()
example.show()
app.exec()
if __name__ == "__main__":
main()
|
silx-kit/silx
|
examples/fileDialog.py
|
fileDialog.py
|
py
| 7,386 |
python
|
en
|
code
| 106 |
github-code
|
6
|
859547914
|
from vistrails.core.modules.module_descriptor import ModuleDescriptor
from vistrails.core.modules.vistrails_module import Module
from vistrails.core.upgradeworkflow import UpgradeModuleRemap
class Y(Module):
_output_ports = [('result', 'basic:String')]
def compute(self):
self.set_output('result', 'Y')
_modules = [Y]
_upgrades = {'Y' : [UpgradeModuleRemap(
# Upgrade for looping_fix.y 0.1 -> 0.2
# replaces module Y with module X from looping_fix.x 0.1
'0.1', '0.2', '0.2',
new_module=ModuleDescriptor(
package='org.vistrails.vistrails.tests.looping_fix.x',
name='X',
namespace='',
package_version='0.1'))]}
|
VisTrails/VisTrails
|
vistrails/tests/resources/looping_upgrades/pkg_y/init.py
|
init.py
|
py
| 727 |
python
|
en
|
code
| 100 |
github-code
|
6
|
72173805309
|
# coding: utf-8
'''
Ex Dict & Files: 1
Load the data from the file 'TopTen.txt':
-- Artist -- | -- Single -- | -- Weeks --
Ezra George | Green Green Grass | 14
Styles Harry | As It Was | 37
Capaldi Lewis | Forget Me | 12
^^-- EOF --^^
and create a new file 'TopTen_sorted.txt' with the records
sorted by the number of weeks in the TopTen, from the highest number to the lowest, in the format:
-- Weeks -- | -- Single -- | -- Artist --
Expected content of 'TopTen_sorted.txt':
-- Weeks -- | -- Single -- | -- Artist --
37 | As It Was | Styles Harry
14 | Green Green Grass | Ezra George
12 | Forget Me | Capaldi Lewis
^^-- EOF --^^
'''
fr = open('../resources/TopTen.txt', 'r', encoding='utf-8')
resArr = []
resTitle = []
for line in fr:
arr = line.split(sep='|')
if line.find('-- Weeks --') == -1:
resArr.append(arr)
else:
resTitle = arr
fr.close()
resArr = sorted(resArr, key=lambda i: int(i[2]), reverse=True)  # sort numerically by the weeks column
resArr.insert(0, resTitle)
fw = open('../resources/TopTen_sorted.txt', 'w', encoding='utf-8')
fw.writelines(f'{x[2].rstrip()}|{x[1].rstrip()}|{x[0].rstrip()}\n' for x in resArr)
fw.close()
|
Alesator/python_skoleni
|
src/ex02_01.py
|
ex02_01.py
|
py
| 1,095 |
python
|
cs
|
code
| 0 |
github-code
|
6
|
9988571316
|
import websockets, json, traceback, os, asyncio, inspect, logging
import websockets.client
import websockets.server
from websockets.exceptions import ConnectionClosedOK, ConnectionClosedError
from .client_management.client import Client
from .session_management.client_state import Client_State
from .inventory_management.profile_manager import Profile_Manager
from .inventory_management.skin_manager import Skin_Manager
from .randomizers.skin_randomizer import Skin_Randomizer
from .inventory_management.buddy_manager import Buddy_Manager
from .randomizers.buddy_randomizer import Buddy_Randomizer
from .sys_utilities.system import System
from .file_utilities.filepath import Filepath
from .sys_utilities.logging import Logger
from .user_configuartion.config import Config
from .client_config import SERVER_VERSION, IS_TEST_BUILD
from . import shared
logger_errors = logging.getLogger('VIM_errors')
logger = logging.getLogger('VIM_main')
class Server:
shared.client = Client()
shared.client.connect()
request_lookups = {
"handshake": lambda: True,
"get_server_version": lambda: SERVER_VERSION,
# system stuff
"start_game": System.start_game,
"get_running_state": System.are_processes_running,
"autodetect_account": shared.client.autodetect_account,
# config stuff
"fetch_config": lambda: shared.config,
"update_config": Config.update_config,
# inventory/loadout stuff
"fetch_loadout": shared.client.fetch_loadout,
"fetch_inventory": Skin_Manager.fetch_inventory,
"fetch_profiles": Profile_Manager.fetch_profiles,
"refresh_profiles": Profile_Manager.refresh_profiles,
"refresh_skin_inventory": Skin_Manager.refresh_skin_inventory,
"refresh_buddy_inventory": Buddy_Manager.refresh_buddy_inventory,
"randomize_skins": Skin_Randomizer.randomize,
"randomize_buddies": Buddy_Randomizer.randomize,
"put_weapon": shared.client.put_weapon,
"put_buddies": shared.client.put_buddies,
#"update_skin_inventory": Skin_Manager.update_inventory,
"update_buddy_inventory": Buddy_Manager.update_inventory,
# profile stuff
"create_profile": Profile_Manager.generate_empty_profile,
"fetch_profile_metadatas": Profile_Manager.fetch_profile_metadata,
"update_profiles": Profile_Manager.update_profiles,
"update_profile": Profile_Manager.update_profile,
"fetch_profile": Profile_Manager.fetch_profile,
"apply_profile": Profile_Manager.apply_profile,
"favorite_all_buddies": Buddy_Manager.favorite_all,
# game state stuff
"force_update_game_state": Client_State.update_game_state,
}
@staticmethod
def start():
if not os.path.exists(Filepath.get_appdata_folder()):
os.mkdir(Filepath.get_appdata_folder())
Logger.create_logger()
shared.loop = asyncio.get_event_loop()
Config.init_config()
# iniitalize any submodules
client_state = Client_State()
#start websocket server
start_server = websockets.serve(Server.ws_entrypoint, "", 8765)
print(f"open {'https://colinhartigan.github.io/valorant-inventory-manager' if not IS_TEST_BUILD else 'https://colinhartigan.github.io/VIM-test-client'} in your browser to use VIM")
shared.loop.run_until_complete(start_server)
# initialize any asynchronous submodules
shared.loop.run_until_complete(client_state.loop())
shared.loop.run_forever()
@staticmethod
async def ws_entrypoint(websocket, path):
logger.debug("a client connected")
logger.debug(shared.sockets)
shared.sockets.append(websocket)
try:
while websocket in shared.sockets:
data = await websocket.recv()
data = json.loads(data)
request = data.get("request")
args = data.get("args")
has_kwargs = True if args is not None else False
logger.debug(f"request: {request}")
payload = {}
if request in Server.request_lookups.keys():
payload = {
"success": True,
"event": request,
"data": None,
}
if inspect.iscoroutinefunction(Server.request_lookups[request]):
if has_kwargs:
payload["data"] = await Server.request_lookups[request](**args)
else:
payload["data"] = await Server.request_lookups[request]()
else:
if has_kwargs:
payload["data"] = Server.request_lookups[request](**args)
else:
payload["data"] = Server.request_lookups[request]()
else:
payload = {
"success": False,
"data": "could not find the specified request"
}
await websocket.send(json.dumps(payload))
logger.debug(f"response:\n{json.dumps(payload)} ")
except ConnectionClosedOK:
logger.info("disconnected")
shared.sockets.pop(shared.sockets.index(websocket))
except ConnectionClosedError:
logger.info("disconnected w/ error")
shared.sockets.pop(shared.sockets.index(websocket))
except Exception:
logger_errors.error("----- EXCEPTION -----")
logger_errors.error(traceback.format_exc())
except:
logger.error("idk what even happened to get here")
|
colinhartigan/valorant-inventory-manager
|
server/src/server.py
|
server.py
|
py
| 5,848 |
python
|
en
|
code
| 150 |
github-code
|
6
|
14408997276
|
from announcement.models import AnnouncementModel
from UslugiProfi.utils import create_file_absolute_url
from rest_framework import serializers
class GetAnnouncementsSeriaizer(serializers.ModelSerializer):
image = serializers.SerializerMethodField()
class Meta:
model = AnnouncementModel
fields = ('id', 'name', 'description', 'subcategory', 'user', 'address', 'address_lat', 'address_lng', 'create_date',
'update_time', 'price_type', 'fixed_price', 'upper_price', 'lower_price', 'currency', 'dimension', 'image', 'is_active')
def get_image(self, announcement):
request = self.context.get('request')
return create_file_absolute_url(request=request, file=announcement.image)
class CreateAnnouncementsSeriaizer(serializers.ModelSerializer):
class Meta:
model = AnnouncementModel
fields = ('name', 'description', 'subcategory', 'address', 'address_lat', 'address_lng', 'price_type',
'fixed_price', 'upper_price', 'lower_price', 'currency', 'dimension', 'image', 'user')
|
Johudo-old/UslugiProfi
|
announcement/serializers.py
|
serializers.py
|
py
| 1,075 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29732186780
|
import numpy
import random
def eval_proportion_diff(model1_pos, model2_pos, model1_samples, model2_samples, num_trials=10000, verbose=False):
'''compare proportion of positively predicted samples between two models'''
model1_values = numpy.zeros((model1_samples))
model1_values[:model1_pos] = 1
model2_values = numpy.zeros((model2_samples))
model2_values[:model2_pos] = 1
p_val = eval_diff(model1_values, model2_values, num_trials=num_trials, verbose=verbose)
return p_val
def eval_diff(model1_values, model2_values, num_trials=10000, verbose=False):
'''return p-value of difference between model1_values and model2_values'''
#import pdb; pdb.set_trace()
model1_values = model1_values.flatten()
model2_values = model2_values.flatten()
# filter any NaN values
model1_values = model1_values[~numpy.isnan(model1_values)]
model2_values = model2_values[~numpy.isnan(model2_values)]
model_difference = numpy.mean(model2_values) - numpy.mean(model1_values)
counter = 0
    for trial in range(num_trials):
# reshuffle all predictions between these 2 models
values = numpy.concatenate((model1_values, model2_values))
numpy.random.shuffle(values)
model1_sample = values[:len(model1_values)]
model2_sample = values[-len(model2_values):]
sample_difference = numpy.mean(model2_sample) - numpy.mean(model1_sample)
# if one_tailed and sample_difference >= model_difference:
# counter += 1
if numpy.abs(sample_difference) >= numpy.abs(model_difference): # two tailed test
counter += 1
if verbose and trial % 500 == 0:
print("completed", trial, "trials...")
p_val = float(counter + 1) / (num_trials + 1)
return p_val
def eval_all_diffs(model_values, num_trials=10000, verbose=False):
'''takes a set of model values as input, figures out which pairs of models to evaluate
differences for based on the order of their values, and returns p-values for all tests'''
sorted_models = sorted([(numpy.mean(values), model) for model, values in model_values.items()])
model_pairs = [(sorted_models[idx][1], sorted_models[idx + 1][1]) for idx in range(len(sorted_models) - 1)]
p_vals = {}
for model1, model2 in model_pairs:
p_vals[(model1, model2)] = eval_diff(model_values[model1], model_values[model2], num_trials=num_trials, verbose=verbose)
return p_vals
def eval_all_proportion_diffs(model_pos, model_samples, num_trials=10000, verbose=False):
'''takes a set of model proportion values as input, figures out which pairs of models to evaluate
differences for based on the order of their values, and returns p-values for all tests'''
prop_values = {model: model_pos[model] * 1. / model_samples[model] for model in model_pos}
sorted_models = sorted([(numpy.mean(values), model) for model, values in prop_values.items()])
model_pairs = [(sorted_models[idx][1], sorted_models[idx + 1][1]) for idx in range(len(sorted_models) - 1)]
p_vals = {}
for model1, model2 in model_pairs:
p_vals[(model1, model2)] = eval_proportion_diff(model_pos[model1], model_pos[model2],
model_samples[model1], model_samples[model2],
num_trials=num_trials, verbose=verbose)
return p_vals
|
roemmele/narrative-prediction
|
analysis/stats.py
|
stats.py
|
py
| 3,425 |
python
|
en
|
code
| 24 |
github-code
|
6
|
27944232660
|
"""
This modules defines the base class for all machine learning models to analyse
reusability rate.
Last updated: MB 29/08/2020 - created module.
"""
# import external libraries.
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import scipy.stats as stats
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.model_selection import GridSearchCV
from skopt import BayesSearchCV
# import local modules.
from utils import data_loader
"""
Base class that all models inherit from.
"""
class BaseModel:
"""
store dataset. data is a dictionary.
"""
def __init__(self, data, normalize=False, **kwargs):
print(">> initialising model...")
# if we are normalizing data, save the normalized x value.
if normalize is True:
self.normalization_params = data_loader.get_normalization_params(data['train_x'])
self.train_x = self.normalize_x(data['train_x'])
self.test_x = self.normalize_x(data['test_x'])
# if we are not normalizing data, use regular x values.
else:
self.train_x = data['train_x']
self.test_x = data['test_x']
# save the y values and other attributes.
self.train_y = data['train_y']
self.test_y = data['test_y']
self.test_predictions = pd.Series() # placeholder for 'test' function.
self.train_predictions = pd.Series() # placeholder for 'test' function.
self.is_trained = False
def hyperparameter_tuning(self, type, param_space):
# definees the type and number of cross validation splits - refer to: https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.RepeatedStratifiedKFold.html
# Repeated Stratified K Fold -> This repeats a stratified k fold n number of times
# Stratified k fold -> Shuffles the data once before splitting into n different parts,
# where each part is used as a test set
cv = RepeatedStratifiedKFold(n_splits=5, n_repeats=3, random_state=1)
if type == 'Grid':
# Set all the variables for the grid search cross validation
search = GridSearchCV(estimator=self.model, param_grid=param_space, cv=cv, scoring='accuracy')
elif type == 'Bayesian':
# defines the bayes search cv with parameters - refer to: https://scikit-optimize.github.io/stable/modules/generated/skopt.BayesSearchCV.html
# Bayesian optimisation is a type of sequential method in which it learns from each step what the optimal hyper-parameters are
# (in contrast to grid or random search) - using some complicated maths model (im not sure about)
            search = BayesSearchCV(estimator=self.model, search_spaces=param_space, n_jobs=-1, cv=cv)
# perform the search - i.e. it fits the model on the training data set for the different hyper-parameter settings
search_result = search.fit(self.train_x, self.train_y)
# Prints the results - optimal hyper-parameters and the accuracy score
print("The best parameters are %s with a score of %0.2f"
% (search_result.best_params_, search_result.best_score_))
# Displays all of the hyper-parameters combination in descending order of accuracy score
grid_results = pd.concat([pd.DataFrame(search_result.cv_results_["params"]),pd.DataFrame(search_result.cv_results_["mean_test_score"], columns=["Accuracy"])],axis=1)
grid_results.sort_values(by=['Accuracy'], inplace=True, ascending=False)
print(grid_results.head)
"""
train the model with current train and test XY values saved as attributes.
"""
def train(self):
print(">> training model...")
"""
output a description of the model.
"""
def describe(self):
print(">> describing model...")
# throw an error if model is not trained yet.
if self.is_trained is False:
raise Exception('Train model before describing coefficients.')
return
"""
generate prdictions for the test_x data.
"""
def test(self):
print(">> predicting test data...")
# throw an error if model is not trained yet.
if self.is_trained is False:
            raise Exception('Train model before predicting test data.')
return
"""
analyse the performance of the predictions.
"""
def assess_performance(self):
# if there is no 'test_predictions' data generated, throw error.
if self.test_predictions is None:
raise Exception('Run the `test` function to predict test data.')
print(">> assessing prediction performance...")
"""
Convert a pandas dataframe of values into normalized values based on the
normalized params attribute. x_values is a pandas dataframe.
"""
def normalize_x(self, x_values):
# throw an error if this model was not setup to use normalized values.
if not self.normalization_params:
raise Exception("This model was not setup to use normalized values.")
return
# copy the dataframe.
normalized_values = pd.DataFrame()
# iterate over each column and normalize.
for column in x_values.columns:
# retrieve normalization parameters.
mean = self.normalization_params[column]['mean']
std = self.normalization_params[column]['std']
# if std is zero, set to 1 to prevent NaNs.
if std == 0: std = 1
# save the normalized column.
normalized_values[column] = (x_values[column] - mean) / std
# return the normalized dataframe.
return normalized_values
|
reusability/research
|
model/base_model.py
|
base_model.py
|
py
| 5,759 |
python
|
en
|
code
| 1 |
github-code
|
6
|
38043415332
|
def revisescore(wrongscores):
    rightscores = []  # create an empty list to hold the corrected scores
    for i in wrongscores:
        # swap the tens digit and the units digit, then append the corrected score
        tens = i // 10
        units = i % 10
        corrected = units * 10 + tens
        rightscores.append(corrected)
    return rightscores
# the list of incorrectly entered scores
wrongscores = [35, 46, 57, 91, 29]
# call the function to correct the scores
revisescores = revisescore(wrongscores)
# print the corrected scores
print(revisescores)
|
7553yn/Cathybank
|
scores.py
|
scores.py
|
py
| 571 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34632215573
|
import cv2
import numpy as np
img = cv2.imread('img\\ttt.jpg')
# define the structuring element
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
# erode the image
eroded = cv2.erode(img, kernel)
cv2.imshow("fs_eroded", eroded)
# dilate the image
dilated = cv2.dilate(img, kernel)
cv2.imshow("pz_dilated", dilated)
# structuring element defined with NumPy
NpKernel = np.uint8(np.ones((3,3)))
Nperoded = cv2.erode(img, NpKernel)
# show the image after erosion
cv2.imshow("Eroded by NumPy kernel", Nperoded)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
liuyuhua-ha/opencvStudy
|
opencvStudy/structTest.py
|
structTest.py
|
py
| 527 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71432257788
|
from jaqsmds.server.repliers.utils import QueryInterpreter as Qi
class SymbolQI(Qi):
def __init__(self, view, *args, **kwargs):
super(SymbolQI, self).__init__(view, *args, primary="symbol", **kwargs)
InstrumentInfo = Qi("jz.instrumentInfo", trans={"inst_type": int, "list_date": int, "status": int}, sort="symbol")
SecDividend = Qi(
"lb.secDividend",
defaults=['ann_date', 'bonus_list_date', 'cash', 'cash_tax', 'cashpay_date', 'div_enddate',
'exdiv_date', 'publish_date', 'record_date', 'share_ratio', 'share_trans_ratio', 'symbol'],
**{"date": "ann_date"}
)
SecAdjFactor = SymbolQI("lb.secAdjFactor", defaults=['adjust_factor', 'symbol', 'trade_date'], **{"date": "trade_date"})
# SecSusp = Qi("lb.secSusp",
# defaults=['ann_date', 'resu_date', 'susp_date', 'susp_reason', 'symbol'],
# **{"date": "date"})
# IndexCons = Qi("lb.indexCons", primary='index_code', defaults=['in_date', 'index_code', 'out_date', 'symbol'], sort=["index_code", "in_date"],
# **{"date": "date"})
SecDailyIndicator = Qi("lb.secDailyIndicator",
defaults=['symbol', 'trade_date'],
primary="symbol",
**{"date": "trade_date"})
BalanceSheet = SymbolQI(
"lb.balanceSheet",
defaults=['acct_rcv', 'ann_date', 'inventories', 'notes_rcv',
'report_date', 'report_type', 'symbol', 'tot_cur_assets'],
sort="symbol",
**{"date": "ann_date", "actdate": "act_ann_date", "reportdate": "report_date"}
)
Income = SymbolQI(
"lb.income",
defaults=['ann_date', 'int_income', 'tot_oper_cost', 'net_int_income', 'oper_exp', 'oper_profit',
'oper_rev', 'report_date', 'symbol', 'less_handling_chrg_comm_exp', 'tot_profit', 'total_oper_rev'],
sort="symbol",
**{"date": "ann_date", "actdate": "act_ann_date", "reportdate": "report_date"}
)
CashFlow = SymbolQI(
"lb.cashFlow",
defaults=['ann_date', 'cash_recp_prem_orig_inco', 'cash_recp_return_invest', 'cash_recp_sg_and_rs',
'incl_dvd_profit_paid_sc_ms', 'net_cash_flows_inv_act', 'net_cash_received_reinsu_bus',
'net_incr_dep_cob', 'net_incr_disp_tfa', 'net_incr_fund_borr_ofi', 'net_incr_insured_dep',
'net_incr_int_handling_chrg', 'net_incr_loans_central_bank', 'other_cash_recp_ral_fnc_act',
'other_cash_recp_ral_oper_act', 'recp_tax_rends', 'report_date', 'report_type',
'stot_cash_inflows_oper_act', 'stot_cash_outflows_oper_act', 'symbol'],
sort="symbol",
**{"date": "ann_date", "actdate": "act_ann_date", "reportdate": "report_date"}
)
ProfitExpress = SymbolQI(
"lb.profitExpress",
defaults=['ann_date', 'net_profit_int_inc', 'oper_profit', 'oper_rev',
'report_date', 'symbol', 'total_assets', 'total_profit'],
sort="symbol",
**{"anndate": "ann_date", "reportdate": "report_date"}
)
SecRestricted = SymbolQI("lb.secRestricted", defaults=['lifted_shares', 'list_date', 'symbol'], **{"date": "list_date"})
IndexWeightRange = Qi("lb.indexWeightRange",
defaults=['index_code', 'symbol', 'trade_date', 'weight'],
primary="index_code", sort=["index_code", "trade_date"],
**{"date": "trade_date"})
FinIndicator = SymbolQI("lb.finIndicator",
defaults=['ann_date', 'bps', 'report_date', 'roa', 'roe', 'symbol'],
sort="symbol",
**{"date": "ann_date"})
ApiList = Qi("help.apiList", sort="api")
ApiParam = Qi("help.apiParam", sort="api")
WindFinance = SymbolQI("lb.windFinance", **{"date": "index"})
SecTradeCal = Qi("jz.secTradeCal", defaults=['istradeday', 'trade_date'],
trans={"start_date": int, "end_date": int}, sort="trade_date",
date="trade_date")
SState = SymbolQI("lb.sState", trans={"start_date": int, "end_date": int}, sort="effDate", date="effDate")
class SecIndustryInterpreter(Qi):
def catch(self, dct):
i_s = dct.pop("industry_src", None)
if i_s:
yield "industry_src", i_s.lower()
yield from super(SecIndustryInterpreter, self).catch(dct)
SecIndustry = Qi(
"lb.secIndustry",
defaults=['in_date', 'industry1_code', 'industry1_name', 'industry2_code',
'industry2_name', 'industry3_code', 'industry3_name', 'industry4_code',
'industry4_name', 'industry_src', 'out_date', 'symbol'],
sort=None,
trans={"industry_src": lambda s: s.lower()}
)
class IndexConsInterpreter(Qi):
def catch(self, dct):
start = dct.pop("start_date", None)
if start:
yield "out_date", (start, None)
end = dct.pop("end_date", None)
if end:
yield "in_date", (None, end)
IndexCons = IndexConsInterpreter(
"lb.indexCons", primary='index_code',
defaults=['in_date', 'index_code', 'out_date', 'symbol'],
sort=["index_code", "in_date"]
)
class SecSuspInterpreter(Qi):
def catch(self, dct):
start = dct.pop("start_date", None)
if start:
yield "resu_date", (start, None)
end = dct.pop("end_date", None)
if end:
yield "susp_date", (None, end)
SecSusp = SecSuspInterpreter(
"lb.secSusp",
defaults=['ann_date', 'resu_date', 'susp_date', 'susp_reason', 'symbol'],
sort=["symbol", "susp_date"]
)
DailyIndicator = Qi("lb.secDailyIndicator", date="trade_date", defaults=["symbol", "trade_date"])
DailyFactor = Qi("factor", date="trade_date", default=["symbol", "trade_date"])
FxdayuFactor = Qi("fxdayu.factor", date="trade_date")
ViewFields = Qi("jz.viewFields")
UpdateStatus = Qi('updateStatus', date="trade_date")
API_JSETS = [
ApiList, ApiParam, InstrumentInfo, SecTradeCal, BalanceSheet, CashFlow, FinIndicator, Income,
IndexCons, IndexWeightRange, ProfitExpress, SState, SecDividend, SecIndustry, SecRestricted, SecSusp,
WindFinance, DailyFactor, FxdayuFactor, SecDailyIndicator, SecAdjFactor, UpdateStatus
]
|
cheatm/jaqsmds
|
jaqsmds/server/repliers/jsets.py
|
jsets.py
|
py
| 6,039 |
python
|
en
|
code
| 4 |
github-code
|
6
|
19757717889
|
# unit.test_shop.test_shopRepo.py
from unittest.mock import Mock
import tinydb as tdb
from fixtures.shop import ShopFixture, TEMP_SHOPS_TINYDB_TEST_PATH, \
PRODUCTS_URLS_9_VALID_TEST_PATH, PRODUCTS_URLS_TEST_DIR
from shop.shop import Shop
from shop.shopDao import TinyShopDao
from shop.shopRepo import ShopRepo
from unit.testhelper import WebtomatorTestCase, ProductsUrlsRepoMock
class ShopRepoTest(WebtomatorTestCase):
testDBPath = TEMP_SHOPS_TINYDB_TEST_PATH
tempProductsUrlsRepoPath = PRODUCTS_URLS_TEST_DIR / "ProductsUrls_deleteMe.txt"
def setUp(self) -> None:
# Creates new DB at given path if not exists.
# Deletes all records in all tables if DB exists.
dbRef = tdb.TinyDB(str(self.testDBPath))
dbRef.purge_tables()
dbRef.close()
def tearDown(self) -> None:
if self.tempProductsUrlsRepoPath.is_file():
self.tempProductsUrlsRepoPath.unlink()
def test_ifVitalAttributesArePresent(self):
# Given
sut = ShopRepo
# Then
# Check presence of vital public properties/methods
self.assertHasAttribute(sut, 'getAll')
self.assertHasAttribute(sut, 'setAll')
self.assertHasAttribute(sut, 'update')
def test_init_shouldSetDefaultValues(self):
# When
daoMock = Mock()
daoMock.myValue = "DAO Mock checkValue"
sut = ShopRepo(dao=daoMock)
# Then
self.assertEqual("DAO Mock checkValue", sut._dao.myValue)
def test_getAll(self):
# Given
testTinyShopDao = TinyShopDao(path=self.testDBPath)
# Create 2 shops in TinyDB for testing.
# Note that we use client code to create them, which is more of an integration test...
fixture = ShopFixture()
fixture.create2Shops()
expectedShops = fixture.shops
ShopRepo(dao=testTinyShopDao).setAll(shops=expectedShops)
sut = ShopRepo(dao=testTinyShopDao)
# When
loadedShops = sut.getAll()
# Then
# Expect that loaded shops match the expected
self.assertEqual(expectedShops, loadedShops)
def test_setAll(self):
# Given
# Insert a document into a fresh 'Shops' table. This data is expected to be
# completely overridden by the test.
existingData = dict(OneTestOne="Test data val 1", TwoTestTwo="Test data val 2")
with tdb.TinyDB(self.testDBPath) as db:
shopTable: tdb.database.Table = db.table(TinyShopDao._TABLE_NAME)
shopTable.insert(existingData)
# These data are expected:
fixture = ShopFixture()
fixture.create2Shops()
expectedShops = fixture.shops
# Setup repo
testTinyShopDao = TinyShopDao(path=self.testDBPath)
sut = ShopRepo(dao=testTinyShopDao)
# When
sut.setAll(shops=expectedShops)
# Then
with tdb.TinyDB(self.testDBPath) as db:
shopTable: tdb.database.Table = db.table(TinyShopDao._TABLE_NAME)
recordList: list = shopTable.all()
# Expect that previous data do not exist anymore
self.assertLessEqual(0, len(recordList))
self.assertIsNone(recordList[0].get("OneTestOne"))
self.assertIsNone(recordList[0].get("TwoTestTwo"))
# Note that we use client code to load the shops again, which is
# more of an integration test...
loadedShops = sut.getAll()
# Expect that loaded shops match the expected ones
self.assertEqual(expectedShops, loadedShops)
def test_update(self):
# Given
# Create 2 shops in TinyDB for testing.
fixture = ShopFixture()
fixture.create2Shops()
expectedShop = fixture.shops[0]
assert expectedShop.uid is not None and expectedShop.uid != ""
# Write a shop which we can try to update by UID.
existingData = dict(uid=expectedShop.uid, name="I don't know this shop's name")
with tdb.TinyDB(self.testDBPath) as db:
shopTable: tdb.database.Table = db.table(TinyShopDao._TABLE_NAME)
shopTable.insert(existingData)
# Setup repo
testTinyShopDao = TinyShopDao(path=self.testDBPath)
sut = ShopRepo(dao=testTinyShopDao)
# When
sut.update(shop=expectedShop)
# Then
with tdb.TinyDB(self.testDBPath) as db:
shopTable: tdb.database.Table = db.table(TinyShopDao._TABLE_NAME)
recordList: list = shopTable.all()
self.assertEqual(1, len(recordList))
# Expect that data with previous uid still exist
self.assertEqual(expectedShop.uid, recordList[0].get("uid"))
# Expect that shop's name has been updated
self.assertNotEqual("I don't know this shop's name", recordList[0].get("name"))
# Note that we use client code to load the shop again, which is
# more of an integration test...
updatedShops = sut.getAll()
self.assertIsInstance(updatedShops, list)
self.assertEqual(1, len(recordList))
# Expect that updated shop matches the expected one
self.assertEqual(expectedShop, updatedShops[0])
def test_findByUID(self):
# Given
# Create test data to search for.
uidToFind = "b0e2e467-6fd5-4a06-bb1e-9ad60223cafa"
shopData1 = dict(uid="ca0f5926-7d55-4973-a8e1-d3e2cc89fca6",
name="The name of the first test shop")
shopData2 = dict(uid=uidToFind,
name="The name of the second test shop")
expectedShop = Shop(**shopData2)
with tdb.TinyDB(self.testDBPath) as db:
shopTable: tdb.database.Table = db.table(TinyShopDao._TABLE_NAME)
shopTable.insert(shopData1)
shopTable.insert(shopData2)
# Setup repo
testTinyShopDao = TinyShopDao(path=self.testDBPath)
sut = ShopRepo(dao=testTinyShopDao)
# When
foundShop = sut.findByUID(uidToFind)
# Then
self.assertIsInstance(foundShop, Shop)
self.assertEqual(foundShop.uid, uidToFind)
self.assertEqual(expectedShop, foundShop)
def test_findByName(self):
# Given
# Create test data to search for. We use two shops with the same name here.
shopData1 = dict(uid="ca0f5926-7d55-4973-a8e1-d3e2cc89fca6",
name="Shop with same name")
shopData2 = dict(uid="e68782fd-19af-428e-881f-99d7af9b83b0",
name="This shop should not be found")
shopData3 = dict(uid="b0e2e467-6fd5-4a06-bb1e-9ad60223cafa",
name="Shop with same name")
expectedShops = [Shop(**shopData1), Shop(**shopData3)]
with tdb.TinyDB(self.testDBPath) as db:
shopTable: tdb.database.Table = db.table(TinyShopDao._TABLE_NAME)
shopTable.insert(shopData1)
shopTable.insert(shopData2)
shopTable.insert(shopData3)
# Setup repo
testTinyShopDao = TinyShopDao(path=self.testDBPath)
sut = ShopRepo(dao=testTinyShopDao)
# When
foundShops = sut.findByName("Shop with same name")
# Then
self.assertIsInstance(foundShops, list)
self.assertEqual(2, len(foundShops))
self.assertEqual(expectedShops, foundShops)
def test_updateFromProductsUrls(self):
# Given
# Copy fixture to new arbitrary file as we will modify its contents within this test.
with open(str(PRODUCTS_URLS_9_VALID_TEST_PATH), "r", encoding="utf-8") as source:
content = source.read()
with open(str(self.tempProductsUrlsRepoPath), "w+", encoding="utf-8") as target:
target.write(content)
# Note that the table gets deleted by the unit test's setup() method - so we
# start with a fresh empty table.
testTinyShopDao = TinyShopDao(path=self.testDBPath)
sut = ShopRepo(dao=testTinyShopDao)
productsUrlsRepo = ProductsUrlsRepoMock(productsUrlsRepoPath=self.tempProductsUrlsRepoPath)
expectedProducts = productsUrlsRepo.getAll()
expectedProductUrls = [p.url for p in expectedProducts]
# 1. Test initial update -----------------------------------------------------------
# When
# This is expected to fill the table with all the fixture data of ProductsUrls repo.
sut.updateFromProductsUrls(productsUrlsRepo=productsUrlsRepo)
# Then
shops = sut.getAll()
self.assertIsInstance(shops, list)
self.assertEqual(3, len(shops))
# Expect that all shops have been inserted
shopsUrls = [s.url for s in shops]
self.assertIn("https://www.solebox.com", shopsUrls)
self.assertIn("http://real.fantastic.de", shopsUrls)
self.assertIn("https://www.dbyte.org", shopsUrls)
# Expect that all products have been inserted
soleboxShop = list(filter(lambda s: s.url == "https://www.solebox.com", shops))[0]
self.assertIsInstance(soleboxShop.products, list)
self.assertEqual(1, len(soleboxShop.products))
for product in soleboxShop.products:
self.assertIn(product.url, expectedProductUrls)
realFantasticShop = list(filter(lambda s: s.url == "http://real.fantastic.de", shops))[0]
self.assertIsInstance(realFantasticShop.products, list)
self.assertEqual(2, len(realFantasticShop.products))
for product in realFantasticShop.products:
self.assertIn(product.url, expectedProductUrls)
dbyteShop = list(filter(lambda s: s.url == "https://www.dbyte.org", shops))[0]
self.assertIsInstance(dbyteShop.products, list)
self.assertEqual(6, len(dbyteShop.products))
for product in dbyteShop.products:
self.assertIn(product.url, expectedProductUrls)
# 2. Test delete product/shop -----------------------------------------------------
# Given
# Remove all http://real.fantastic.de/... URLs from ProductsUrls repo.
with open(str(self.tempProductsUrlsRepoPath), "r+", encoding="utf-8") as target:
lines = target.readlines()
for line in reversed(lines):
if line.startswith("http://real.fantastic.de/shop/great-realdumbtrump.htm"):
lines.remove(line)
if line.startswith("http://real.fantastic.de/shop/buy-new-holo?prodid=682357ac"):
lines.remove(line)
# Overwrite file with the updated data
target.seek(0)
target.writelines(lines)
# When
# This is expected to remove shop http://real.fantastic.de entirely from database,
        # because its products no longer exist in the ProductsUrls repo.
sut.updateFromProductsUrls(productsUrlsRepo=productsUrlsRepo)
# Then
shops = sut.getAll()
self.assertIsInstance(shops, list)
self.assertEqual(2, len(shops))
# Expect that shop http://real.fantastic.de has been entirely removed from database
realFantasticShop = list(filter(lambda s: s.url == "http://real.fantastic.de", shops))
self.assertIsInstance(realFantasticShop, list)
self.assertEqual(0, len(realFantasticShop))
# 3. Test add product to existing shop ----------------------------------------------
# Given
with open(str(self.tempProductsUrlsRepoPath), "r+", encoding="utf-8") as target:
lines = target.readlines()
lines.append("\nhttps://www.solebox.com/some-new-product\n")
# Overwrite file with the updated data
target.seek(0)
target.writelines(lines)
expectedProducts = productsUrlsRepo.getAll()
expectedProductUrls = [p.url for p in expectedProducts]
# When
# This is expected to update shop https://www.solebox.com with the above added
# product https://www.solebox.com/some-new-product
sut.updateFromProductsUrls(productsUrlsRepo=productsUrlsRepo)
# Then
shops = sut.getAll()
self.assertIsInstance(shops, list)
self.assertEqual(2, len(shops))
# Expect that product https://www.solebox.com/some-new-product has been added to
# existing shop with URL https://www.solebox.com
soleboxShop = list(filter(lambda s: s.url == "https://www.solebox.com", shops))[0]
self.assertIsInstance(soleboxShop.products, list)
self.assertEqual(2, len(soleboxShop.products))
for product in soleboxShop.products:
self.assertIn(product.url, expectedProductUrls)
# 4. Test add shop to existing shops -------------------------------------------------
# Given
with open(str(self.tempProductsUrlsRepoPath), "r+", encoding="utf-8") as target:
lines = target.readlines()
lines.append("\nhttps://new-shop-1833663.com/new-product.htm\n")
# Overwrite file with the updated data
target.seek(0)
target.writelines(lines)
expectedProducts = productsUrlsRepo.getAll()
expectedProductUrls = [p.url for p in expectedProducts]
# When
# This is expected to update the shop table (which already has shops in it) with
        # the product added above, whose base URL does not yet exist
        # in the shops table. So a new shop with this product must be created in shopRepo.
sut.updateFromProductsUrls(productsUrlsRepo=productsUrlsRepo)
# Then
shops = sut.getAll()
self.assertIsInstance(shops, list)
self.assertEqual(3, len(shops))
# Expect that shop https://new-shop-1833663.com has been added to
# existing database.
newShop = list(filter(lambda s: s.url == "https://new-shop-1833663.com", shops))[0]
self.assertIsInstance(newShop.products, list)
self.assertEqual(1, len(newShop.products))
for product in newShop.products:
self.assertIn(product.url, expectedProductUrls)
|
dbyte/WebtomatorPublicEdition
|
tests/unit/test_shop/test_shopRepo.py
|
test_shopRepo.py
|
py
| 14,072 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17690019803
|
#!/usr/bin/env python3
""" Module for view definition """
from flask import Flask, render_template, request
from flask_babel import Babel, _
from typing import Optional
class Config(object):
""" Config class """
# ...
LANGUAGES = ['en', 'fr']
BABEL_DEFAULT_LOCALE = 'en'
BABEL_DEFAULT_TIMEZONE = 'UTC'
app = Flask(__name__)
babel = Babel(app)
app.config.from_object(Config)
# def create_app(config_class=Config):
# app = Flask(__name__)
# babel.init_app(app)
# app.config.from_object(config_class)
# return app
@babel.localeselector
def get_locale() -> Optional[str]:
""" Get preferred local function """
if request.args.get('locale'):
locale = request.args.get('locale')
# print(locale)
if locale in app.config['LANGUAGES']:
print(locale)
return locale
else:
return request.accept_languages.best_match(app.config['LANGUAGES'])
@app.route('/', methods=['GET'], strict_slashes=False)
def index() -> str:
""" Index function """
return render_template('4-index.html')
if __name__ == '__main__':
app.run(debug=True)
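# Usage sketch (illustrative, not part of the original module): the locale can be forced
# through the "locale" query parameter handled by get_locale() above, e.g. with the dev
# server running locally:
#   curl "http://127.0.0.1:5000/?locale=fr"   # uses the French catalog (listed in Config.LANGUAGES)
#   curl "http://127.0.0.1:5000/"             # falls back to Accept-Language best_match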
|
dnjoe96/alx-backend
|
0x02-i18n/4-app.py
|
4-app.py
|
py
| 1,140 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41243183736
|
from flask import Blueprint, request, jsonify, make_response
from tabledef import Technician
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy import update
from tabledef import Technician, Call
import config
# create a query that extracts the information of the table "technician" for a specific company
select_technicians = """
SELECT id_technician, id_company, data_technician, chat_id, status, message FROM technicians WHERE id_company is {};
"""
select_technician_company = """
SELECT id_company FROM technicians WHERE chat_id is {};
"""
select_technician_info_by_chat_id = """
SELECT id_technician, id_company, data_technician, chat_id, status FROM technicians WHERE chat_id is {};
"""
select_technician_info_by_tech_id = """
SELECT id_technician, id_company, data_technician, chat_id, status FROM technicians WHERE id_technician is {};
"""
#'''
# Call
select_call_from_status = """
SELECT id_call, id_company, id_condominium, date_call, data_call, call_status FROM calls WHERE call_status is {} AND id_company is {};
"""
#'''
# allows main_data to recall the underlying endpoint
api_technician_company = Blueprint('api_technician_company', __name__)
@api_technician_company.route('/<id_company>/technician', methods=['GET'])
def technician_company_id(id_company):
"""
endpoint which is used to find the technicians of a given company in the database
:param id_company: company_id
:return: return the technician referred to the id_company
"""
engine = create_engine('sqlite:///call_center.db', echo=True)
conn = engine.connect()
result = conn.execute(select_technicians.format(id_company))
technicians = []
for el in result:
technicians.append(
{
config.TECH_ID: el[0],
config.TECH_INFO: el[2]
}
)
if result:
response = {
"message": "technicians:",
'status': 'OK',
"items": technicians
}
res_technicians = make_response(jsonify(response), 200)
else:
response = {
"message": "ERROR: No technicians in database",
'status': 'ERROR',
"items": []
}
res_technicians = make_response(jsonify(response), 404)
return res_technicians
#@api_technician_company.route('/technician/<id_technician>/<chat_id>', methods=['GET'])
@api_technician_company.route('/technician/<id_technician>/add_chat_id/<chat_id>', methods=['GET'])
def update_chat_id(id_technician, chat_id):
"""
endpoint which is used to login the technician
:param id_technician: id_technician, chat_id: chat_id
:return: insert in the database the chat_id referred to the id_technician
"""
engine = create_engine('sqlite:///call_center.db', echo=True)
conn = engine.connect()
update_chat_id = update(Technician).where(Technician.id_technician == id_technician).values(chat_id=chat_id)
conn.execute(update_chat_id)
response = {
'status': 'OK'
}
res_status = make_response(jsonify(response), 200)
return res_status
@api_technician_company.route('/technician_chat/<chat_id>/logout', methods=['GET'])
def logout_chat_id(chat_id):
"""
endpoint which is used to logout the technician
:param chat_id: chat_id
:return: when technician logout cancel the chat_id referred to the technician with the same chat_id
"""
engine = create_engine('sqlite:///call_center.db', echo=True)
conn = engine.connect()
update_chat_id = update(Technician).where(Technician.chat_id == chat_id).values(chat_id='')
conn.execute(update_chat_id)
response = {
'status': 'OK'
}
res_status = make_response(jsonify(response), 200)
return res_status
@api_technician_company.route('/technician_chat/<chat_id>/update/<status>', methods=['GET'])
def update_status_tech_by_chat_id(chat_id, status):
"""
endpoint which is used to update the status of technician referred to chat_id
:param chat_id: chat_id, status: status
:return: update the status of technician referred to chat_id
"""
if status in config.TECH_STATUS_LABEL:
engine = create_engine('sqlite:///call_center.db', echo=True)
conn = engine.connect()
update_status = update(Technician).where(Technician.chat_id == chat_id).values(status=status)
conn.execute(update_status)
response = {
"tech_status": config.TECH_STATUS_LABEL[status],
'status': 'OK'
}
if status == '1':
comp = conn.execute(select_technician_company.format(chat_id))
free_calls = conn.execute(select_call_from_status.format(1, next(comp)[0]))
calls = []
for el in free_calls:
calls.append(
{
config.CALL_ID: el[0],
config.COMPANY_ID: el[1],
config.BUILDING_ID: el[2],
config.CALL_DATE: el[3],
config.CALL_INFO: el[4],
config.CALL_STATUS: el[5]
}
)
#Call(input_data[config.COMPANY_ID], input_data[config.BUILDING_ID], datetime.now(), {config.CALL_MESSAGE: input_data[config.CALL_MESSAGE]}, call_status)
response = {
"tech_status": config.TECH_STATUS_LABEL[status],
'status': 'OK',
"free_calls": calls
}
res_status = make_response(jsonify(response), 200)
else:
response = {
"tech_status": "Status must be between 0 and 4",
'status': 'ERROR'
}
res_status = make_response(jsonify(response), 404)
return res_status
@api_technician_company.route('/technician/<tech_id>/update/<status>', methods=['GET'])
def update_status_tech_by_tech_id(tech_id, status):
"""
endpoint which is used to update the status of technician referred to tech_id
:param tech_id: tech_id, status: status
:return: update the status of technician referred to tech_id
"""
if status in config.TECH_STATUS_LABEL:
engine = create_engine('sqlite:///call_center.db', echo=True)
conn = engine.connect()
update_status = update(Technician).where(Technician.id_technician == tech_id).values(status=status)
conn.execute(update_status)
response = {
"tech_status": config.TECH_STATUS_LABEL[status],
'status': 'OK'
}
res_status = make_response(jsonify(response), 200)
else:
response = {
"tech_status": "Status must be between 0 and 4",
'status': 'ERROR'
}
res_status = make_response(jsonify(response), 404)
return res_status
##### select_technician_info_by_chat_id
@api_technician_company.route('/technician_chat/<chat_id>/info', methods=['GET'])
def get_tech_info_by_chat_id(chat_id):
"""
endpoint which is used to select the information of technician by chat id
:param chat_id: chat_id
:return: return the information of technician by chat id
"""
engine = create_engine('sqlite:///call_center.db', echo=True)
conn = engine.connect()
    result = conn.execute(select_technician_info_by_chat_id.format(chat_id))
info = {}
for el in result:
info = {
config.TECH_ID: el[0],
config.COMPANY_ID: el[1],
config.TECH_INFO: el[2],
config.TECH_CHAT: el[3],
config.TECH_STATUS: el[4]
}
response = {
"info": info,
'status': 'OK'
}
res_status = make_response(jsonify(response), 200)
return res_status
@api_technician_company.route('/technician/<tech_id>/info', methods=['GET'])
def get_tech_info_by_tech_id(tech_id):
"""
endpoint which is used to select the information of technician by chat id
:param tech_id: chat_id
:return: return the information of technician by chat id
"""
engine = create_engine('sqlite:///call_center.db', echo=True)
conn = engine.connect()
    result = conn.execute(select_technician_info_by_tech_id.format(tech_id))
    info = {}
    for el in result:
info = {
config.TECH_ID: el[0],
config.COMPANY_ID: el[1],
config.TECH_INFO: el[2],
config.TECH_CHAT: el[3],
config.TECH_STATUS: el[4]
}
response = {
"info": info,
'status': 'OK'
}
res_status = make_response(jsonify(response), 200)
return res_status
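# Usage sketch (assumption: the blueprint is registered on the app without an extra URL
# prefix; host, port and IDs are illustrative):
#   curl http://localhost:5000/3/technician                     # technicians of company 3
#   curl http://localhost:5000/technician/7/add_chat_id/12345   # bind chat 12345 to technician 7
#   curl http://localhost:5000/technician_chat/12345/update/1   # set status 1 and list free calls
#   curl http://localhost:5000/technician_chat/12345/logout     # clear the chat_id again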
|
fmauri90/call_center
|
dataservice/api_technician_company.py
|
api_technician_company.py
|
py
| 8,617 |
python
|
en
|
code
| 0 |
github-code
|
6
|
23796575414
|
from fish import Fish
class FishTracker:
def __init__(self, initial_fish):
self.fishies = [
Fish(timer)
for timer in initial_fish
]
self.days_past = 0
def pass_day(self):
self.spawn_fishies()
for fish in self.fishies:
fish.age()
self.days_past += 1
def spawn_fishies(self):
new_fishies = []
for fish in self.fishies:
if fish.timer == 0:
new_fishies.append(Fish(9))
fish.reset()
self.fishies.extend(new_fishies)
def __str__(self):
report = ""
if self.days_past == 0:
label = "Initial state: "
elif self.days_past == 1:
label = "After 1 day: "
else:
label = f"After {str(self.days_past):2} days: "
report += label
report += ",".join(str(fish.timer) for fish in self.fishies)
return report
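# Minimal usage sketch (not part of the original file; the starting timers are the
# Advent of Code 2021 day 6 sample input, used purely for illustration).
if __name__ == "__main__":
    tracker = FishTracker([3, 4, 3, 1, 2])
    print(tracker)            # Initial state: 3,4,3,1,2
    for _ in range(5):
        tracker.pass_day()    # fish at 0 spawn a new fish, then every fish ages one day
        print(tracker)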
|
maariaw/advent-of-code-2021
|
Day06/puzzle1/src/fish_tracker.py
|
fish_tracker.py
|
py
| 964 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8631452934
|
import pytest
import numpy as np
from abito.lib.significance import *
def test_t_test():
np.random.seed(0)
treatment = np.random.normal(100, size=100)
control = np.random.normal(100, size=100)
r = t_test(treatment, control)
assert r.p_value == pytest.approx(0.9, 0.1)
r = t_test_1samp(treatment, 100)
assert r.p_value == pytest.approx(0.6, 0.1)
|
avito-tech/abito
|
tests/test_significance.py
|
test_significance.py
|
py
| 376 |
python
|
en
|
code
| 14 |
github-code
|
6
|
11626980377
|
import pandas as pd
import json
from message_reader import start
# setting up config file
input_file = "input.csv"
output_file = "out.csv"
json_file = "var_old.json"
# reading input file & sheet
df = pd.read_csv(input_file, header=None)
final_written = None
def to_every_message(row):
global final_written
cell_value = row[0]
print(cell_value)
# getting response from core function
path_list, response_list = start(file_name=json_file)
write_list = [(cell_value, "", "")]
for new_row in zip(path_list, response_list):
write_list.append(("send", "text", new_row[0]))
write_list.append(("expectPayload", "equalTo", new_row[1]))
if final_written is None:
final_written = write_list
else:
final_written = final_written + write_list
# after reading, apply the function to each yellow-label row
df.apply(to_every_message, raw=True, axis=1)
# from dataframes to output excel
df_out = pd.DataFrame(final_written)
df_out.to_csv(output_file, index=False, header=False,encoding='Windows-1252')
|
EhtishamSabir/json_parser
|
main_csv.py
|
main_csv.py
|
py
| 1,053 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30786451692
|
import unittest
from unittest.mock import Mock
from book.book_repository import BookRepository
from book.book import Book
from book.book_service import BookService
class TestBookService(unittest.TestCase):
def test_find_book_by_id(self):
mock_repository = Mock(spec=BookRepository)
mock_repository.find_by_id.return_value = Book("1", "Book1", "Author1")
book_service = BookService(mock_repository)
result = book_service.find_book_by_id("1")
self.assertEqual(result.title, "Book1")
self.assertEqual(result.author, "Author1")
def test_find_all_books(self):
mock_repository = Mock(spec=BookRepository)
mock_repository.find_all.return_value = [
Book("1", "Book1", "Author1"),
Book("2", "Book2", "Author2"),
]
book_service = BookService(mock_repository)
result = book_service.find_all_books()
self.assertEqual(len(result), 2)
self.assertEqual(result[0].title, "Book1")
self.assertEqual(result[1].title, "Book2")
|
nadia3373/Tests
|
s4/book_tests/test.py
|
test.py
|
py
| 1,059 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70713803388
|
import re
import os
import sys
import time
import json
import torch
import wandb
import random
import datasets
import evaluate
import numpy as np
import transformers
from accelerate import Accelerator
from accelerate.utils import set_seed
from torch.utils.data import DataLoader
from transformers import AutoTokenizer, DefaultDataCollator, AutoModelForSequenceClassification
set_seed(42)
MODEL_NAME = str(sys.argv[1])
MIXED_PRECISION = str(sys.argv[2])
def prepare_dataset(data_folder, label2id, data_types):
def combine_data(example):
temp_text = ""
for data_type in data_types:
temp_text += example[data_type] + " "
example["text"] = temp_text
return example
dataset = datasets.load_from_disk(data_folder + "dataset/")
dataset = dataset["train"]
dataset_encoded = dataset.class_encode_column("category")
dataset_aligned = dataset_encoded.align_labels_with_mapping(label2id, "category")
dataset = dataset_aligned.map(combine_data, remove_columns=["title", "body"])
dataset = dataset.rename_column("category", "label")
return dataset
def main():
def preprocess_function(examples):
return tokenizer(examples["text"], truncation=True, padding='max_length', max_length=hps["max_length"], return_tensors='pt')
models = {"bert": "bert-base-uncased", "distilbert": "distilbert-base-uncased", "tinybert": "huawei-noah/TinyBERT_General_4L_312D"}
hps = {
"batch_size": 32,
"gradient_accumulation_steps": 2,
"learning_rate": 2e-5,
"data_types": ["title", "body"],
"model_name": models[MODEL_NAME],
"num_epochs": 3,
"max_length": 256,
"weight_decay": 0.01,
"num_warmup_steps": 0.2,
"mixed_precision": MIXED_PRECISION,
"split_batches": True,
}
wandb_id = wandb.util.generate_id()
accelerator = Accelerator(log_with="wandb", gradient_accumulation_steps=hps["gradient_accumulation_steps"], split_batches=hps["split_batches"], mixed_precision=hps["mixed_precision"])
accelerator.init_trackers(
project_name="DMOZ-classification",
config=hps,
init_kwargs={"wandb": {
"name": MODEL_NAME.upper() + "_DMOZ_" + str(wandb_id),
"job_type": "training",
"group": str(wandb_id),
"tags": [MODEL_NAME.upper(), "DMOZ"],
}
},
)
data_folder = str(sys.argv[3])
id2label = {0: "Arts", 1: "Business", 2: "Computers", 3: "Health", 4: "Home", 5: "News", 6: "Recreation", 7: "Reference", 8: "Science", 9: "Shopping", 10: "Society", 11: "Sports", 12: "Games"}
label2id = {v: k for k, v in id2label.items()}
labels = label2id.keys()
dataset = prepare_dataset(data_folder, label2id, hps["data_types"])
tokenizer = AutoTokenizer.from_pretrained(hps["model_name"])
data_collator = DefaultDataCollator()
tokenized_data = dataset.map(preprocess_function, batched=True)
tokenized_data = tokenized_data.remove_columns("text")
train_dataloader = DataLoader(
tokenized_data,
shuffle=True,
batch_size=hps["batch_size"],
collate_fn=data_collator,
drop_last=True,
)
model = AutoModelForSequenceClassification.from_pretrained(
hps["model_name"],
num_labels=len(labels),
id2label=id2label, label2id=label2id,
)
optimizer = torch.optim.AdamW(
model.parameters(),
lr=(hps["learning_rate"] * accelerator.num_processes),
weight_decay=hps["weight_decay"],
eps=1e-8,
)
num_training_steps = hps["num_epochs"] * len(tokenized_data)
num_warmup_steps = int(hps["num_warmup_steps"] * len(train_dataloader))
lr_scheduler = transformers.get_linear_schedule_with_warmup(
optimizer,
num_warmup_steps=num_warmup_steps,
num_training_steps=num_training_steps
)
train_dataloader, model, optimizer, lr_scheduler = accelerator.prepare(train_dataloader, model, optimizer, lr_scheduler)
starter, ender = torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True)
accuracy = evaluate.load("accuracy")
model.train()
starter.record()
for epoch in range(hps["num_epochs"]):
for idx, batch in enumerate(train_dataloader):
with accelerator.accumulate(model):
outputs = model(**batch)
loss = outputs.loss
logits = outputs.logits
accelerator.backward(loss)
predictions = logits.argmax(dim=-1)
accelerator.log({"batch/batch_step": idx, "batch/loss": loss, "batch/accuracy": accuracy.compute(predictions=predictions, references=batch["labels"])["accuracy"]})
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
ender.record()
torch.cuda.synchronize()
training_time = starter.elapsed_time(ender)
accelerator.log({"train": {"train_time": training_time}})
# Saving model
accelerator.wait_for_everyone()
model = accelerator.unwrap_model(model)
state_dict = model.state_dict()
filename = data_folder + "models/BERT/model.pt"
accelerator.save(state_dict, filename)
accelerator.end_training()
if accelerator.is_main_process:
wandb.init(
project="DMOZ-classification",
name="MODEL_" + str(wandb_id),
group=str(wandb_id),
job_type="model",
tags=["model"],
)
model_artifact = wandb.Artifact(
name="model_" + MODEL_NAME.upper() + "_DMOZ",
type="model"
)
model_artifact.add_file(filename)
wandb.log_artifact(model_artifact)
wandb.finish()
if __name__ == "__main__":
main()
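# Launch sketch (assumption: standard `accelerate` CLI usage; the data path is illustrative).
# argv[1] picks the model key ("bert", "distilbert" or "tinybert"), argv[2] the mixed-precision
# mode handed to Accelerator ("no", "fp16" or "bf16"), argv[3] the folder containing dataset/
# and models/BERT/:
#   accelerate launch train_model_BERT.py bert fp16 /data/dmoz/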
|
JesseBrons/Webpageclassification
|
training/train_model_BERT.py
|
train_model_BERT.py
|
py
| 5,845 |
python
|
en
|
code
| 1 |
github-code
|
6
|
43269450493
|
from django.conf.urls import include, url
from provisioner.views import ProvisionStatus, login
urlpatterns = [
url(r'^$', ProvisionStatus, name='home'),
url(r'login.*', login),
url(r'^events/', include('events.urls')),
url(r'^provisioner/', include('provisioner.urls')),
]
|
uw-it-aca/msca-provisioner
|
msca_provisioner/urls.py
|
urls.py
|
py
| 291 |
python
|
en
|
code
| 1 |
github-code
|
6
|
6969788756
|
import os
import re
from PIL import Image
import numpy as np
import torch
import random
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from torchvision.datasets.folder import default_loader
class Celeb(Dataset):
def __init__(self, data_file, dst_path='cropped_CelebA', training=True, transform=None, train_num=16000):
src_path = data_file + 'CelebA_info'
if train_num == 10240:
category = 'celeb_sample_10240.txt'
else:
category = 'list_attr_celeba.txt'
fn = open(src_path + '/Anno/' + category, 'r')
fh2 = open(src_path + '/Eval/list_eval_partition.txt', 'r')
imgs = []
lbls = []
ln = 0
train_bound = 162770 + 2
test_bound = 182638 + 2
regex = re.compile('\s+')
for line in fn:
ln += 1
if ln <= 2:
continue
if ln < test_bound and not training:
continue
if (ln - 2 <= train_num and training and ln <=train_bound) or\
(ln - test_bound < train_num and not training):
line = line.rstrip('\n')
line_value = regex.split(line)
imgs.append(line_value[0])
lbls.append(list(int(i) if int(i) > 0 else 0 for i in line_value[1:]))
self.imgs = imgs
self.lbls = lbls
self.is_train = training
self.dst_path = data_file + dst_path
if transform is None:
if training:
self.transform = transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
])
else:
self.transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
])
else:
self.transform = transform
def __getitem__(self, idx):
fn = self.imgs[idx]
lbls = self.lbls[idx]
if self.is_train:
imgs = default_loader(self.dst_path + '/train/' + fn)
else:
imgs = default_loader(self.dst_path + '/test/' + fn)
imgs = self.transform(imgs)
lbls = torch.Tensor(lbls)
return [imgs, lbls]
def __len__(self):
return len(self.imgs)
def sample_celeb(data_file, category='list_attr_celeba.txt', training=True, sample_num=10240, train_num=162770):
src_path = data_file + 'CelebA_info'
fn = open(src_path + '/Anno/' + category, 'r')
sample_path = src_path + '/Anno/celeb_sample_'+str(sample_num)+'.txt'
if os.path.exists(sample_path):
os.system('rm '+ sample_path)
sample_fh = open(sample_path, 'w')
ln = 0
train_bound = 162770 + 2
test_bound = 182638 + 2
regex = re.compile('\s+')
content = []
trainnum_list = np.arange(0, train_bound-2)
sample_num_list = random.sample(trainnum_list.tolist(), sample_num)
for line in fn:
ln += 1
if ln <= 2:
sample_fh.write(line)
if ln < test_bound and not training:
continue
if (ln - 2 <= train_num and training and ln <=train_bound) or\
(ln - test_bound < train_num and not training):
content.append(line)
for idx in sample_num_list:
sample_fh.write(content[idx])
sample_fh.close()
if __name__ == '__main__':
data_file = '/home/wzh/project/fjq/dataset/CelebA/'
sample_celeb(data_file, sample_num=10240)
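# Usage sketch (assumption: the cropped_CelebA folder layout expected by Celeb exists under
# data_file; batch size and worker count are illustrative):
#   train_set = Celeb(data_file, training=True, train_num=16000)
#   train_loader = DataLoader(train_set, batch_size=64, shuffle=True, num_workers=4)
#   imgs, lbls = next(iter(train_loader))   # imgs: normalized image batch, lbls: 0/1 attribute vectors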
|
ada-shen/icCNN
|
celeb.py
|
celeb.py
|
py
| 3,923 |
python
|
en
|
code
| 18 |
github-code
|
6
|
8861910661
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Esteban Quintana
# Javier Rodríguez
# Tree
#id3
# Information gain
# Greater gain
import re
import fileinput
import math
import copy
from node import Node
from main import *
def get_entropy(node, root, data_types):
entropies = []
entropy = 0.0
denominator = len(node.data_frame) - 1
for element in data_types[root.name]:
counter = 0
for i in range(1, denominator + 1):
if element == node.data_frame[i][root.number]:
counter += 1
if counter > 0:
e_entropy = -((counter/denominator) * math.log(counter/denominator, 2))
else:
e_entropy = 0
entropies.append(e_entropy)
for e in entropies:
entropy += e
return entropy
# id3 Alg
def id3(node, root, data_types, visited):
new_list = copy.deepcopy(visited)
node.entropy = get_entropy(node, root, data_types)
#print(node)
if float(node.entropy) == 0.0:
#print("LEAF NODE")
node.answer = node.data_frame[1][-1]
#print(node.answer)
return 0
#print("entropy " + str(node.entropy))
gains = {}
#print(node.data_frame)
for i in range(0, len(node.data_frame[0]) -1):
if i not in visited:
next = Node(node.data_frame[0][i], None, None, node, i, node.data_frame, None, None)
gains[node.data_frame[0][i]] = information_gain(node, next, root, data_types)
split_to_node = greater_gain(gains)
new_number = root.data_frame[0].index(split_to_node)
new_list.append(new_number)
for element in data_types[split_to_node]:
new_data_frame = []
new_data_frame.append(root.data_frame[0])
new_node = Node(element, None, [], node, new_number, None, None, node.data_frame[0][new_number])
for row in node.data_frame:
if row[new_number] == element:
new_data_frame.append(row)
new_node.data_frame = new_data_frame
node.children.append(new_node)
id3(new_node, root, data_types, new_list)
return 0
def greater_gain(gains):
current_greater = 0.0
for g in gains:
if gains[g] > current_greater:
current_greater = gains[g]
for g in gains:
if gains[g] == current_greater:
return g
def information_gain(actual_node, next, root, data_types):
gain = 0.0
entropy = 0.0
entropies = []
for k in data_types.keys():
if k == next.name:
for datatype in data_types[k]:
aux = []
aux.append(root.data_frame[0])
for i in range(1, len(next.data_frame)):
if next.data_frame[i][next.number] == datatype:
aux.append(next.data_frame[i])
n = Node(next.name, None, None, next, next.number, aux, None, None)
en = get_entropy(n, root, data_types)
en = en * ((len(n.data_frame) - 1) / (len(root.data_frame) -1) )
entropies.append(en)
for e in entropies:
entropy += e
gain = actual_node.entropy - entropy
return gain
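# Worked example (illustrative only, independent of the Node/data_frame structures above):
# get_entropy() evaluates H = -sum(p_i * log2(p_i)) over the value proportions of a split.
# For a node holding 9 positive and 5 negative rows:
#   p_pos = 9/14, p_neg = 5/14
#   H = -(9/14)*log2(9/14) - (5/14)*log2(5/14) ≈ 0.940
# information_gain() then subtracts the weighted entropies of the child splits from this
# parent entropy, and greater_gain() picks the attribute with the largest difference.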
|
JRC2307/Desicion-Trees
|
id3.py
|
id3.py
|
py
| 3,181 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11801518455
|
def magic_square(matrix):
    n = len(matrix)
    # magic constant for an n x n square containing the numbers 1..n^2
    M = (n * (n ** 2 + 1)) / 2
    l = []
    d1 = []
    d2 = []
    for i in range(0, len(matrix)):
        l.append([item[i] for item in matrix])   # i-th column
        d1.append(matrix[i][i])                  # main diagonal
        d2.append(matrix[i][n - 1 - i])          # anti-diagonal
    l = matrix + l + [d1] + [d2]                 # rows, columns and both diagonals
    sums = list(map(sum, l))
    # every line must add up to the magic constant
    return all(s == M for s in sums)
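# Usage sketch (illustrative): the classic 3x3 magic square has every row, column
# and diagonal summing to the magic constant 3 * (9 + 1) / 2 = 15.
if __name__ == "__main__":
    print(magic_square([[2, 7, 6],
                        [9, 5, 1],
                        [4, 3, 8]]))   # True
    print(magic_square([[1, 2, 3],
                        [4, 5, 6],
                        [7, 8, 9]]))   # False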
|
TsvetaKandilarova/Programming101
|
week0/Problem33/solution.py
|
solution.py
|
py
| 373 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
1406414263
|
#String Methods
trek= "ncc 1701-d"
a= "the prime directive"
#using split method
a= a.split()
print(a)
# using join method result will be the_prime_directive
a = "_".join(a)
print(a)
# create a small string
lilstring = "Alta3 Research offers classes on Python coding"
newlist= lilstring.split(" ")
print(newlist)
# create a list of strings
myiplist = ["192", "168", "0", "12"]
# use join method here
mynewiplist= ".".join(myiplist)
print(mynewiplist)
|
tapantriv/py06292020
|
lab15.py
|
lab15.py
|
py
| 449 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39403565414
|
from functools import partial
import mmcv
import numpy as np
import torch
from six.moves import map, zip
def tensor2imgs(tensor, mean=(0, 0, 0), std=(1, 1, 1), to_rgb=True):
"""Convert tensor to images
Args:
tensor (torch.Tensor): Tensor that contains multiple images
mean (tuple[float], optional): Mean of images. Defaults to (0, 0, 0).
std (tuple[float], optional): Standard deviation of images.
Defaults to (1, 1, 1).
to_rgb (bool, optional): Whether convert the images to RGB format.
Defaults to True.
Returns:
list[np.ndarray]: A list that contains multiple images.
"""
num_imgs = tensor.size(0)
mean = np.array(mean, dtype=np.float32)
std = np.array(std, dtype=np.float32)
imgs = []
for img_id in range(num_imgs):
img = tensor[img_id, ...].cpu().numpy().transpose(1, 2, 0)
img = mmcv.imdenormalize(
img, mean, std, to_bgr=to_rgb).astype(np.uint8)
imgs.append(np.ascontiguousarray(img))
return imgs
def multi_apply(func, *args, **kwargs):
"""Apply function to a list of arguments
Note:
This function applies the ``func`` to multiple inputs and
map the multiple outputs of the ``func`` into different
list. Each list contains the same type of outputs corresponding
to different inputs.
Args:
func (Function): A function that will be applied to a list of
arguments
Returns:
tuple(list): A tuple containing multiple list, each list contains
a kind of returned results by the function
"""
pfunc = partial(func, **kwargs) if kwargs else func
map_results = map(pfunc, *args)
return tuple(map(list, zip(*map_results)))
def unmap(data, count, inds, fill=0):
""" Unmap a subset of item (data) back to the original set of items (of
size count) """
if data.dim() == 1:
ret = data.new_full((count, ), fill)
ret[inds.type(torch.bool)] = data
else:
new_size = (count, ) + data.size()[1:]
ret = data.new_full(new_size, fill)
ret[inds.type(torch.bool), :] = data
return ret
def vectorize_labels(flat_labels, num_classes, label_weights = None):
prediction_number = flat_labels.shape[0]
labels = torch.zeros( [prediction_number, num_classes], dtype=flat_labels.dtype, device=flat_labels.device)
pos_labels = flat_labels < num_classes
labels[pos_labels, flat_labels[pos_labels]] = 1
if label_weights is not None:
ignore_labels = (label_weights == 0)
labels[ignore_labels, :] = -1
return labels.reshape(-1)
def giou(pred, target, eps=1e-7):
"""
Generalized Intersection over Union: A Metric and A Loss for
Bounding Box Regression
https://arxiv.org/abs/1902.09630
code refer to:
https://github.com/sfzhang15/ATSS/blob/master/atss_core/modeling/rpn/atss/loss.py#L36
Args:
pred (torch.Tensor): Predicted bboxes of format (x1, y1, x2, y2),
shape (n, 4).
target (torch.Tensor): Corresponding gt bboxes, shape (n, 4).
eps (float): Eps to avoid log(0).
Return:
Tensor: Loss tensor.
"""
# overlap
lt = torch.max(pred[:, :2], target[:, :2])
rb = torch.min(pred[:, 2:], target[:, 2:])
wh = (rb - lt).clamp(min=0)
overlap = wh[:, 0] * wh[:, 1]
# union
ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])
ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])
union = ap + ag - overlap + eps
# IoU
ious = overlap / union
# enclose area
enclose_x1y1 = torch.min(pred[:, :2], target[:, :2])
enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:])
enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0)
enclose_area = enclose_wh[:, 0] * enclose_wh[:, 1] + eps
# GIoU
gious = ious - (enclose_area - union) / enclose_area
return gious
def iou(pred, target, eps=1e-7):
"""
    Plain Intersection over Union (IoU) between paired boxes; companion helper to
    the giou() implementation above (GIoU reference: https://arxiv.org/abs/1902.09630)
code refer to:
https://github.com/sfzhang15/ATSS/blob/master/atss_core/modeling/rpn/atss/loss.py#L36
Args:
pred (torch.Tensor): Predicted bboxes of format (x1, y1, x2, y2),
shape (n, 4).
target (torch.Tensor): Corresponding gt bboxes, shape (n, 4).
eps (float): Eps to avoid log(0).
Return:
        Tensor: IoU for each box pair.
"""
# overlap
lt = torch.max(pred[:, :2], target[:, :2])
rb = torch.min(pred[:, 2:], target[:, 2:])
wh = (rb - lt).clamp(min=0)
overlap = wh[:, 0] * wh[:, 1]
# union
ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])
ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])
union = ap + ag - overlap + eps
# IoU
ious = overlap / union
return ious
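# Worked example (illustrative, not part of the original module):
#   overlapping boxes: pred = [[0., 0., 1., 1.]], target = [[0.5, 0., 1.5, 1.]]
#     overlap = 0.5, union = 1.5              -> iou  ≈ 0.333
#     enclosing box equals the union here     -> giou ≈ 0.333
#   disjoint boxes:    pred = [[0., 0., 1., 1.]], target = [[2., 0., 3., 1.]]
#     overlap = 0, union = 2, enclose = 3     -> iou = 0, giou = 0 - (3 - 2) / 3 ≈ -0.333
# (GIoU stays informative for non-overlapping boxes, which is why it is used as a loss term.)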
|
fundamentalvision/Parameterized-AP-Loss
|
mmdet/core/utils/misc.py
|
misc.py
|
py
| 5,069 |
python
|
en
|
code
| 48 |
github-code
|
6
|
40211307735
|
from __future__ import division
import sys, os, math
import vtk
from pbrainlib.gtkutils import error_msg, simple_msg, make_option_menu,\
get_num_value, get_num_range, get_two_nums, str2int_or_err,\
OpenSaveSaveAsHBox, ButtonAltLabel
import pickle
from scipy import array, zeros, ones, sort, absolute, sqrt, divide,\
argsort, take, arange
class MeshManager:
"""
CLASS: MeshManager
DESCR: Handles rendering of VTK mesh (e.g. segmented cortex from ITK-Snap).
"""
def __init__ (self, interactor, renderer, mesh_filename, reg_filename):
self.interactor = interactor
self.renderer = renderer
reader = vtk.vtkStructuredPointsReader()
reader.SetFileName(mesh_filename)
cf = vtk.vtkContourFilter()
cf.SetInput(reader.GetOutput())
cf.SetValue(0, 1)
deci = vtk.vtkDecimatePro()
deci.SetInput(cf.GetOutput())
deci.SetTargetReduction(.1)
deci.PreserveTopologyOn()
smoother = vtk.vtkSmoothPolyDataFilter()
smoother.SetInput(deci.GetOutput())
smoother.SetNumberOfIterations(100)
normals = vtk.vtkPolyDataNormals()
normals.SetInput(smoother.GetOutput())
normals.FlipNormalsOn()
normals.SetFeatureAngle(60.0)
stripper = vtk.vtkStripper()
stripper.SetInputConnection(normals.GetOutputPort())
lut = vtk.vtkLookupTable()
lut.SetHueRange(0, 0)
lut.SetSaturationRange(0, 0)
lut.SetValueRange(0.2, 0.55)
contourMapper = vtk.vtkPolyDataMapper()
#contourMapper.SetInput(normals.GetOutput())
contourMapper.SetInput(stripper.GetOutput())
contourMapper.SetLookupTable(lut)
self.contours = vtk.vtkActor()
self.contours.SetMapper(contourMapper)
#self.contours.GetProperty().SetRepresentationToWireframe()
self.contours.GetProperty().SetRepresentationToSurface()
#self.contours.GetProperty().SetInterpolationToGouraud()
#self.contours.GetProperty().SetOpacity(1.0)
#self.contours.GetProperty().SetAmbient(0.1)
self.contours.GetProperty().SetDiffuse(0.1)
#self.contours.GetProperty().SetSpecular(0.1)
#self.contours.GetProperty().SetSpecularPower(0.1)
# now setmatrix() on the actor from the reg file !
def array_to_vtkmatrix4x4(scipy_array):
vtkmat = vtk.vtkMatrix4x4()
for i in range(0,4):
for j in range(0,4):
vtkmat.SetElement(i,j, scipy_array[i,j])
return vtkmat
mat = pickle.load(file(reg_filename, 'r'))
vtkmat = array_to_vtkmatrix4x4(mat)
self.contours.SetUserMatrix(vtkmat)
#self.contours.GetProperty().SetOpacity(.38) #adjustable in the grid manager now
# XXX YAH somehow get a callback when actor is moved...
self.renderer.AddActor(self.contours)
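# Usage sketch (assumption: file names are illustrative; the reg file is expected to hold a
# pickled 4x4 registration matrix, as loaded in __init__ above):
#   ren = vtk.vtkRenderer()
#   win = vtk.vtkRenderWindow(); win.AddRenderer(ren)
#   iren = vtk.vtkRenderWindowInteractor(); iren.SetRenderWindow(win)
#   mesh = MeshManager(iren, ren, 'segmented_cortex.vtk', 'registration.pkl')
#   iren.Initialize(); win.Render(); iren.Start()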
|
nipy/pbrain
|
eegview/mesh_manager.py
|
mesh_manager.py
|
py
| 2,967 |
python
|
en
|
code
| 94 |
github-code
|
6
|
1741698512
|
import pytest
import numpy as np
import piquasso as pq
import strawberryfields as sf
pytestmark = pytest.mark.benchmark(
group="pure-fock",
)
@pytest.fixture
def theta():
return np.pi / 5
@pytest.fixture
def d():
return 5
@pytest.mark.parametrize("cutoff", range(3, 14))
def piquasso_benchmark(benchmark, d, cutoff, theta):
@benchmark
def func():
state_vector = [cutoff // d] * d
state_vector[0] += cutoff % d - 1
with pq.Program() as program:
pq.Q(all) | pq.StateVector(state_vector)
for i in range(d - 1):
pq.Q(i, i + 1) | pq.Beamsplitter(theta)
simulator_fock = pq.PureFockSimulator(d=d, config=pq.Config(cutoff=cutoff))
simulator_fock.execute(program)
@pytest.mark.parametrize("cutoff", (3, 4, 5))
def strawberryfields_benchmark(benchmark, d, cutoff, theta):
@benchmark
def func():
eng = sf.Engine(backend="fock", backend_options={"cutoff_dim": cutoff})
circuit = sf.Program(d)
state_vector = [cutoff // d] * d
state_vector[0] += cutoff % d - 1
with circuit.context as q:
for i, n in enumerate(state_vector):
sf.ops.Fock(n) | q[i]
for w in range(d - 1):
sf.ops.BSgate(theta) | (q[w], q[w + 1])
eng.run(circuit).state
|
Budapest-Quantum-Computing-Group/piquasso
|
benchmarks/purefock_beamsplitter_increasing_cutoff_benchmark.py
|
purefock_beamsplitter_increasing_cutoff_benchmark.py
|
py
| 1,353 |
python
|
en
|
code
| 19 |
github-code
|
6
|
32704679818
|
from django.urls import path
from .views import *
urlpatterns = [
path('', PostList.as_view(), name="post_list_url"),
path("search/", Search.as_view(), name='search_form_url'),
path("filter/<int:pk>", DateFilter.as_view(), name='date_filter_url'),
path("<slug:category>/", PostList.as_view(), name='post_by_category_url'),
path("<slug:category>/<slug:slug>/", PostDetail.as_view(), name='post_detail_url'),
]
|
djaffic/blog_project
|
news/urls.py
|
urls.py
|
py
| 429 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10251553901
|
from typing import List


class Solution:
# TC: O(m * n)
# SC: O(m * n)??
# cp 103 Jun 9 class exercise
def leastBricks(self, wall: List[List[int]]) -> int:
lines = {}
for row in wall:
sum = 0
for brick in row[:-1]:
                sum += brick  # running prefix sum: a brick edge (possible crack position) ends at this column
lines[sum] = lines.get(sum,0) + 1
        if len(lines) < 1:  # no interior edges at all: every row is a single brick
return len(wall)
else:
return len(wall) - max(lines.values())
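# Worked example (illustrative): every row of the wall below sums to 6; the crack
# position 4 appears in 4 of the 6 rows, so a vertical line there crosses 6 - 4 = 2 bricks.
if __name__ == "__main__":
    wall = [[1, 2, 2, 1], [3, 1, 2], [1, 3, 2], [2, 4], [3, 1, 2], [1, 3, 1, 1]]
    print(Solution().leastBricks(wall))  # 2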
|
stevenwcliu/leetcode_footprints
|
554-brick-wall/554-brick-wall.py
|
554-brick-wall.py
|
py
| 536 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41254672775
|
class Solution:
def twoSum(self, nums, target):
"""
:param nums: : List[int]
:param target: : int
:return: -> List[int]
"""
map = dict()
for i in range(len(nums)):
temp = target - nums[i]
if temp in map:
return [map[temp], i]
else:
map[nums[i]] = i
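# Worked example (illustrative): for nums = [2, 7, 11, 15] and target = 9, the loop
# stores {2: 0} on the first pass, then finds 9 - 7 = 2 in the map and returns [0, 1].
if __name__ == "__main__":
    print(Solution().twoSum([2, 7, 11, 15], 9))  # [0, 1]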
|
baichuan1997/leetcode
|
9.两数之和/1.两数之和.py
|
1.两数之和.py
|
py
| 378 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71791729148
|
import numpy.linalg as LA
from sklearn.neighbors import KDTree
from sampler import Sampler
import networkx as nx
from shapely.geometry import LineString
def can_connect(p1, p2, polygons):
line = LineString([p1, p2])
for p in polygons:
if p.crosses(line) and p.height >= min(p1[2], p2[2]):
return False
return True
def create_graph(nodes, polygons, k=10):
g = nx.Graph()
tree = KDTree(nodes)
for n in nodes:
indicies = tree.query([n], k, return_distance=False)[0]
for i in indicies:
target_node = nodes[i]
if n == target_node:
continue
if can_connect(n, target_node, polygons):
g.add_edge(tuple(n), tuple(target_node), weight=1)
return g
def prm(data, num_samples=1000, extra_points=[]):
sampler = Sampler(data)
nodes = sampler.sample(num_samples=num_samples)
print('# sampled nodes {}'.format(len(nodes)))
nodes += extra_points
return create_graph(nodes, sampler.polygons), nodes
|
magnusja/udacity-flying-cars
|
FCND-Motion-Planning/prm.py
|
prm.py
|
py
| 1,063 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29576976470
|
# -*- coding: utf-8 -*-
"""
Sample data generation using scikit-learn
http://overlap.hatenablog.jp/entry/2015/10/08/022246
Created on Wed Jul 11 15:25:41 2018
@author: Akitaka
"""
### classification sample
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import roc_auc_score
# Generate sample data
# Generate 1000 samples with 5 (informative) + 2 (redundant) + 13 (independent) = 20 features
dat = make_classification(n_samples=1000, n_features=20, n_informative=5,
n_redundant=2, n_classes=2, n_clusters_per_class=10)
X = dat[0]
y = dat[1]
print("X shape", X.shape)
print("y shape", y.shape)
# Split the data into training and test sets
# Use 80% for training and 20% for testing
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=123)
# Build the models and evaluate their performance
# Create three models (logistic regression, random forest, KNN) and compute the AUC of each
clf = LogisticRegression()
clf.fit(X_train, y_train)
print("LogisticRegression AUC =", roc_auc_score(y_test, clf.predict_proba(X_test)[:,1]))
clf = RandomForestClassifier(n_estimators=500, random_state=123)
clf.fit(X_train, y_train)
print("RandomForestClassifier AUC =", roc_auc_score(y_test, clf.predict_proba(X_test)[:,1]))
clf = KNeighborsClassifier(n_neighbors=10)
clf.fit(X_train, y_train)
print("KNeighborsClassifier AUC =", roc_auc_score(y_test, clf.predict_proba(X_test)[:,1]))
|
nakanishi-akitaka/python2018_backup
|
0711/test4_make_sample.py
|
test4_make_sample.py
|
py
| 1,780 |
python
|
en
|
code
| 5 |
github-code
|
6
|
33562082348
|
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
def thresholdingvivas(inp):
f, c = inp.shape
for i in range(f):
for j in range(c):
if(inp[i][j]>=195):
inp[i][j]=0
cv.imshow('vivas',inp)
def thresholdingmuertas(inp):
f, c = inp.shape
for i in range(f):
for j in range(c):
if(inp[i][j]<=150):
inp[i][j]=0
cv.imshow('muertas',inp)
def thresholdingcolores(inp):
f, c ,color = inp.shape
for i in range(f):
for j in range(c):
            if(inp[i][j][0]<=121 or inp[i][j][1]<=144 or inp[i][j][2]<=184):
inp[i][j][0]=0
inp[i][j][1]=0
inp[i][j][2]=0
cv.imshow('colores',inp)
img = cv.imread('thresh2.png', cv.IMREAD_GRAYSCALE)
hist = cv.calcHist([img], [0], None, [256], [0, 256])
thresholdingmuertas(img)
plt.plot(hist, color='gray')
plt.xlabel('intensidad de iluminacion')
plt.ylabel('cantidad de pixeles')
plt.show()
|
renzovc987/CG
|
Thresholdingrenzo.py
|
Thresholdingrenzo.py
|
py
| 1,044 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26628419049
|
import cv2
import numpy as np
import urllib.request
from threading import Thread
import socket
import time
import requests
import json
class Streamer:
'''
    description:-
    Class responsible for connecting to the Android app and managing the data communication.
    How it works:
        - every message from and to the app is wrapped between a starting tag and an ending tag
        - the sending side (either the Android or the PC side) first turns the message into a byte
          array, then prepends the start tag and appends the end tag to that array.
        - for example, a frame message sent from the app looks as follows:
            [FRAME START TAG] [BYTE STREAM] [FRAME END TAG]
    Inputs:
        src: string, IP address of the Android device
        port: int, port of the app on the Android device
        buffer_size: int, number of incoming frames to buffer
        f_st: string, specify the frame start tag
        f_en: string, specify the frame end tag
        d_st: string, specify the data start tag
        d_en: string, specify the data end tag
'''
def __init__(self, src, port, buffer_size=5, f_st="frame_start", f_en="frame_end",
d_st="data_start", d_en="data_end"):
self.src = src
self.port = port
self.buffer_size = buffer_size
self.f_st, self.f_en, self.d_st, self.d_en =f_st, f_en, d_st, d_en
# initialize the socket and connect
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setblocking(True)
self.sock.settimeout(3)
try:
self.sock.connect((src, port))
except:
self.sock = None
self.stop_threads = True
return None
# initialize the buffers
# frame buffer (circular buffer)
self.frame_insert_idx = 0
self.frame_output_idx = 0
self.frames = [None] * buffer_size
self.data = None # data buffer (1 slot buffer)
# start the thread responsible for receiving and buffering the incoming masseges
self.stop_threads = False
Thread(target=self.thread).start()
def thread(self):
'''
        Main thread that receives and extracts messages from the app
'''
frame_conversion, data_conversion = False, False
recv_size = 1024 # initial byte buffer size for the socket
buffer = b'' # general byte buffer
        frame_buffer, data_buffer = b'', b''  # byte buffers for the frame and data messages
while self.stop_threads == False:
if(self.sock._closed): # stop if socket is closed
self.stop_threads = self.sock._closed
break
try:
r = self.sock.recv(recv_size) # receive the byte stream
if len(r) == 0:
exit(0)
buffer += r # add the received byte stream to the general buffer
                # Extract frame messages============================================
if frame_conversion == False:
s = buffer.find(bytearray(self.f_st, encoding ='utf-8'))
if s != -1:
frame_conversion = True
frame_buffer = b''
if frame_conversion:
e = buffer.find(bytearray(self.f_en, encoding ='utf-8'))
if e != -1:
frame_conversion = False
frame_buffer = buffer[s+len(self.f_st):e]
buffer = buffer[:s] +buffer[e+len(self.f_en):]
recv_size = 512 + len(frame_buffer)
else:
continue
####################################################################
                # Extract data messages=============================================
if data_conversion == False:
s = buffer.find(bytearray(self.d_st, encoding ='utf-8'))
if s != -1:
data_conversion = True
data_buffer = b''
if data_conversion:
e = buffer.find(bytearray(self.d_en, encoding ='utf-8'))
if e != -1:
data_conversion = False
data_buffer = buffer[s+len(self.d_st):e]
buffer = buffer[:s] +buffer[e+len(self.d_en):]
self.data = data_buffer.decode('ascii')
else:
continue
####################################################################
except Exception as e:
print(e)
continue
try:
# if frame buffer is not full
if (self.frame_insert_idx+1) % self.buffer_size != self.frame_output_idx:
                    # decode the byte frame message to a numpy array
                    nparr = np.frombuffer(frame_buffer, np.uint8)
frame = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
if type(frame) is type(None):
print("frame dropped")
pass
# store the frame in the frame buffer
self.frames[self.frame_insert_idx] = frame
# increment the input index of the ring buffer
self.frame_insert_idx = (self.frame_insert_idx+1) % self.buffer_size
except Exception as e:
print(e)
pass
self.sock.close()
def fetch_frame(self):
'''
Blocking loop until a frame is available
'''
while(self.frame_insert_idx == self.frame_output_idx and self.stop_threads == False ):
continue
frame = self.frames[self.frame_output_idx].copy()
# increment the output index of the ring buffer
self.frame_output_idx = (self.frame_output_idx+1) % self.buffer_size
return frame
def fetch_data(self):
'''
fetch received data
note: data is in json format and needs to be converted to json object first
'''
try:
if type(self.data) is not type(None) and self.data != "":
data = self.data[self.data.find("{"):]
data = json.loads(data)
self.data= None
return data
except json.JSONDecodeError as e:
print("fetch_data error:" +str(e))
self.data = None
return None
def send_data(self, data):
'''
        converts data to JSON format and wraps it with start and end tags before sending
input:
data: dictionary, data to be sent
'''
try:
data = "START" + json.dumps(data) + "END"
self.sock.send(data.encode('utf-8'))
# self.sock.send("START".encode('utf-8'))
# self.sock.send(json.dumps(data).encode('utf-8'))
# self.sock.send("END".encode('utf-8'))
except ConnectionAbortedError as e:
print("send_data error:" + str(e))
def release(self):
self.stop_threads = True
# testing
if __name__ == "__main__":
src = "172.16.17.188"
port = 8888
streamer = Streamer(src, port)
key = ' '
while key != ord("q"):
frame = streamer.fetch_frame()
cv2.imshow("show", frame)
data = streamer.fetch_data()
if type(data) is not type(None):
# streamer.send_data(data)
print(data)
key = cv2.waitKey(1)
streamer.release()
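# Protocol sketch (illustrative, mirroring the tag framing described in the class docstring;
# tag names are the defaults f_st/f_en/d_st/d_en above):
#   a data message on the wire looks like
#       b'data_start' + json.dumps({"cmd": "hello"}).encode('utf-8') + b'data_end'
#   and a frame message wraps a JPEG-encoded image the same way
#       b'frame_start' + cv2.imencode('.jpg', frame)[1].tobytes() + b'frame_end'
#   the receive thread scans its byte buffer for these tags and decodes whatever sits between
#   them (cv2.imdecode for frames, json.loads in fetch_data for data).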
|
MohamedEshmawy/DeepRoasters
|
streamer/streamer_v2.py
|
streamer_v2.py
|
py
| 7,791 |
python
|
en
|
code
| 1 |
github-code
|
6
|
72577662587
|
from dataclasses import dataclass, field
from random import randint
maps = [
"de_anubis",
"de_inferno",
"de_ancient",
"de_mirage",
"de_nuke",
"de_overpass",
"de_vertigo",
]
@dataclass(frozen=True)
class Defaultsettings:
"""Sets basic match information. You can override the number of maps, first veto and knife round."""
matchid: int = field(
default=randint(10000000, 999999999), init=False
) # generates 8 digit match ID
num_maps: int = field(default=3) # number of maps to play
players_per_team: int = field(default=5, init=False) # number of players per team
coaches_per_team: int = field(default=1, init=False) # number of coaches per team
min_players_to_ready: int = field(
default=8, init=False
    )  # minimum number of players to enable !forceready
min_spectators_to_ready: int = field(
default=0, init=False
) # minimum number of spectators to ready
skip_veto: bool = field(default=False) # skip map veto if True
veto_first: str = field(default="team1") # which team vetoes first (1=CT, 2=T)
side_type: str = field(
default="standard"
) # standard is valve BO3, always/never knife for knife rounds
spectators: dict = field(default_factory=dict)
@dataclass(frozen=True)
class Matchinfo:
"""arrays of teams, spectators, maps"""
maplist: list[str] = field(
default_factory=list
) # List of maps to be passed in the main script. Defaults to current Active Duty
team1: dict = field(default_factory=dict) # Initialize empty team 1 dict
team2: dict = field(default_factory=dict) # Initialize empty team 2 dict
cvars: dict = field(default_factory=dict) # Adds cvars - server name
@dataclass(frozen=True)
class Teaminfo:
name: str
tag: str
flag: str = field(default="SI")
logo: str = field(default="")
players: list[str] = field(default_factory=list)
if __name__ == "__main__":
print("You're running the wrong file. Aborting")
quit()
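# Usage sketch (illustrative values only): composing a match configuration from the
# dataclasses defined above.
#   settings = Defaultsettings(num_maps=3, veto_first="team2")
#   team_a = Teaminfo(name="Alpha", tag="ALP", players=["player1", "player2"])
#   team_b = Teaminfo(name="Bravo", tag="BRV", players=["player3", "player4"])
#   match = Matchinfo(maplist=maps, team1={"name": team_a.name}, team2={"name": team_b.name})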
|
Rogris/get5matchgen
|
tools.py
|
tools.py
|
py
| 2,036 |
python
|
en
|
code
| 1 |
github-code
|
6
|
73839717628
|
from asyncio import sleep, run
import os
import random
from dotenv import load_dotenv
import discord
from discord.ext import commands, tasks
import data
from table2ascii import table2ascii as t2a, PresetStyle
import asyncpg
from datetime import datetime, timedelta
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
intents = discord.Intents.all()
intents.members = True
bot = commands.Bot(command_prefix='!', intents=intents)
async def create_db_pool():
bot.db = await asyncpg.create_pool(dsn="postgres://postgres:database@localhost:5432/finance_bot")
print("connected to db")
@bot.event
async def on_ready():
print(f'{bot.user.name} has connected to Discord!')
@bot.command(name='curr_price', help='Get the current price of one or more stocks')
async def curr_price(ctx, *args):
ret_array = []
non_existent = []
for tag in args:
if data.ticker_exists(str(tag)):
ret_array.append([str(tag), f"${round(data.current_price(str(tag)), 2)}"])
else:
non_existent.append(str(tag))
output = t2a(
header=["Ticker", "Price"],
body=[arr for arr in ret_array],
style=PresetStyle.thin_compact
)
await ctx.send(f"```\n{output}\n```")
if len(non_existent) > 0:
await ctx.send(f"{', '.join(non_existent)} symbol/s do not exist")
@bot.command(name='info', help='Get info of a particular stock according to the list of keys')
async def get_info(ctx, symbol: str, key: str):
if not data.ticker_exists(symbol):
await ctx.send(f"Ticker symbol {symbol} does not exist or may be delisted.")
else:
try:
await ctx.send(data.get_info(symbol, key))
except KeyError:
await ctx.send(f"{key} is not a valid information identifier")
@get_info.error
async def info_error(ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
        await ctx.send("Incorrect arguments entered. Please enter: !info \{ticker symbol\} \{information requested\}")
@bot.command(name='balance_sheet', help='Returns the most recent balance sheet of a single company specified by the ticker symbol entered')
async def balance_sheet(ctx, symbol: str):
print("calling")
if not data.ticker_exists(symbol):
await ctx.send(f"Ticker symbol {symbol} does not exist or may be delisted.")
return
print("calling2")
bsheet = data.get_balance_sheet(symbol)
print("calling3")
for i in range(0, 4):
print("calling4")
sheet1 = bsheet[int((i / 4) * len(bsheet)):int(len(bsheet) * ((i + 1) / 4))]
output = t2a(
body=[arr for arr in sheet1],
style=PresetStyle.thin_compact
)
await ctx.send(f"```\n{output}\n```")
@balance_sheet.error
async def bsheet_error(ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
await ctx.send("Incorrect arguments entered. Please enter: !balance_sheet \{ticker symbol\}")
@bot.command(name='earnings', help='Returns a graph of company revenue and earnings over the past 4 years')
async def earnings(ctx, symbol: str):
if not data.ticker_exists(symbol):
await ctx.send(f"Ticker symbol {symbol} does not exist or may be delisted.")
return
url = data.get_earnings(symbol, False)
embed = discord.Embed(title=f"{symbol} Earnings")
embed.set_image(url=url)
await ctx.send(embed=embed)
@earnings.error
async def earnings_error(ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
await ctx.send("Incorrect arguments entered. Please enter: !earnings \{ticker symbol\}")
@bot.command(name='quarterly_earnings', help='Returns a graph of company revenue and earnings over the past 4 quarters')
async def quarterly_earnings(ctx, symbol: str):
if not data.ticker_exists(symbol):
await ctx.send(f"Ticker symbol {symbol} does not exist or may be delisted.")
return
url = data.get_earnings(symbol, True)
embed = discord.Embed(title=f"{symbol} Earnings")
embed.set_image(url=url)
await ctx.send(embed=embed)
@quarterly_earnings.error
async def qearnings_error(ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
await ctx.send("Incorrect arguments entered. Please enter: !quarterly_earnings \{ticker symbol\}")
@bot.command(name='add_news', help='Adds a ticker to get daily news for')
async def add_news(ctx, symbol: str):
if not data.ticker_exists(symbol):
await ctx.send(f"Ticker symbol {symbol} does not exist or may be delisted.")
return
check_ticker = await bot.db.fetch('SELECT ticker FROM news_tickers WHERE ticker = $1', symbol)
if len(check_ticker) > 0:
await ctx.send(f"Ticker symbol {symbol} has already been added")
else:
await bot.db.execute('INSERT INTO news_tickers(ticker) VALUES ($1)', symbol)
@tasks.loop(hours=24)
async def daily_news(ctx):
tickers = await bot.db.fetch('SELECT ticker FROM news_tickers')
ticker_array = [ticker[0] for ticker in tickers]
news = data.get_news(ticker_array)
set_of = set(ticker_array)
for article in news.values():
related_tickers = [company for company in article['relatedTickers'] if company in set_of]
ticker_string = ", ".join(related_tickers)
publisher = article['publisher']
thumbnail = None
try:
thumbnail = article['thumbnail']['resolution'][0]['url']
except KeyError:
pass
embed=discord.Embed(title=article['title'], url=article['link'], color=0x00ffff)
if thumbnail:
embed.set_thumbnail(url=thumbnail)
embed.add_field(name="Publisher", value=publisher, inline=False)
embed.add_field(name="Related Tickers", value=ticker_string, inline=True)
await ctx.send(embed=embed)
@daily_news.before_loop
async def before_daily_news():
now = datetime.now()
current_hour = now.strftime("%H")
if int(current_hour) > 8:
nine_am = (now + timedelta(days=1)).replace(hour=9, minute=0, microsecond=0, second=0)
else:
nine_am = datetime(year=int(now.strftime("%Y")), month=int(now.strftime("%m")), day=int(now.strftime("%d")), hour=9)
diff = (nine_am - now).seconds
await sleep(diff)
@bot.command(name="remove_news", help="Remove a ticker from the news watchlist")
async def remove_news(ctx, symbol: str):
tickers = await bot.db.fetch('SELECT ticker FROM news_tickers')
ticker_array = [ticker[0] for ticker in tickers]
if symbol not in ticker_array:
await ctx.send(f"Ticker {symbol} is not in the watchlist.")
else:
await bot.db.execute('''DELETE FROM news_tickers where ticker = $1''', symbol)
async def main():
await create_db_pool()
await bot.start(TOKEN)
run(main())
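
# A one-off setup sketch (an assumption, not part of the original bot): the commands
# above expect the finance_bot PostgreSQL database to contain a news_tickers table
# with a single ticker column. The helper name init_db is hypothetical and is not
# wired into the script.
async def init_db(dsn: str = "postgres://postgres:database@localhost:5432/finance_bot"):
    conn = await asyncpg.connect(dsn=dsn)
    # Single-column table used by !add_news, !remove_news and the daily_news loop.
    await conn.execute("CREATE TABLE IF NOT EXISTS news_tickers (ticker TEXT PRIMARY KEY)")
    await conn.close()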
|
NexhmedinQ/Discord-Finance-Bot
|
bot.py
|
bot.py
|
py
| 6,920 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30367946761
|
import numpy as np
from chaco.api import ArrayPlotData, Plot
from enable.api import ComponentEditor
from traits.api import Array, HasStrictTraits, Instance, Range, on_trait_change
from traitsui.api import Item, VGroup, View
class PowerFunctionExample(HasStrictTraits):
""" Display a plot of a power function. """
#: The plot holding the visualization
plot = Instance(Plot)
#: The power of the monomial to use.
power = Range(0, 5, value=2)
#: The x-values to plot.
x = Array(shape=(None,), dtype="float")
# Trait defaults --------------------------------------------------------
def _plot_default(self):
y = self.x ** self.power
plot_data = ArrayPlotData(x=self.x, y=y)
plot = Plot(plot_data)
plot.plot(("x", "y"), "line", name="power function", color="auto")
# configure the plot
plot.padding_top = 25
plot.border_visible = False
plot.index_grid.visible = False
plot.value_grid.visible = False
plot.title = "Power Function n={}".format(self.power)
plot.title_position = "right"
plot.title_angle = -90
plot.legend_alignment = "ul"
plot.legend.border_visible = False
plot.legend.bgcolor = (0.9, 0.9, 0.9, 0.5)
plot.legend.visible = True
plot.index_axis.title = "y"
plot.value_axis.title = "x"
return plot
def _x_default(self):
return np.linspace(-2.0, 2.0, 101)
# Trait change handlers -------------------------------------------------
@on_trait_change("power")
def _update_y(self):
y = self.x ** self.power
self.plot.data.set_data("y", y)
@on_trait_change("x")
def _update_data(self):
y = self.x ** self.power
self.plot.data.update_data(x=self.x, y=y)
@on_trait_change("power")
def _update_title(self):
self.plot.title = "Power Function n={}".format(self.power)
# TraitsUI view ---------------------------------------------------------
view = View(
VGroup(
Item("plot", editor=ComponentEditor()),
VGroup(
Item("power"),
),
show_labels=False,
),
resizable=True,
title="Power Function Example",
)
if __name__ == "__main__":
view = PowerFunctionExample()
view.configure_traits()
|
enthought/chaco
|
examples/user_guide/power_function_example.py
|
power_function_example.py
|
py
| 2,379 |
python
|
en
|
code
| 286 |
github-code
|
6
|
5708829851
|
import rename_tool
import torch
import torchaudio
from TTS.tts.configs.xtts_config import XttsConfig
from TTS.tts.models.xtts import Xtts
import os
current_dir = os.getcwd()
config_path = os.path.join(current_dir, "source", "model_v2", "config.json")
checkpoint_dir = os.path.join(current_dir, "source", "model_V2")
config = XttsConfig()
config.load_json(config_path)
model = Xtts.init_from_config(config)
model.load_checkpoint(config, checkpoint_dir=checkpoint_dir, eval=True)
model.cuda()
def generate(clone_audio_path, text, language, temperature, length_penalty, repetition_penalty, top_k, top_p, num_gpt_outputs, gpt_cond_len, gpt_cond_chunk_len, max_ref_len, sound_norm_refs, gpt_batch_size, num_chars):
config.temperature = temperature
config.length_penalty = float(length_penalty)
config.repetition_penalty = float(repetition_penalty)
config.top_k = top_k
config.top_p = top_p
config.num_gpt_outputs = num_gpt_outputs
config.gpt_cond_len = gpt_cond_len
config.gpt_cond_chunk_len = gpt_cond_chunk_len
config.max_ref_len = max_ref_len
repair = False
if len(sound_norm_refs) > 0:
repair = True
config.sound_norm_refs = repair
config.model_args.gpt_batch_size = gpt_batch_size
config.model_args.num_chars = num_chars
print(config)
outputs = model.synthesize(
text,
config,
speaker_wav=clone_audio_path,
language=language,
)
output_audio = ""
output_audio = rename_tool.path("audio", "wav")
torchaudio.save(output_audio, torch.tensor(outputs["wav"]).unsqueeze(0), 24000)
return output_audio
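
# A hypothetical invocation sketch; the reference clip path, text and parameter
# values below are assumptions chosen only to illustrate the call signature of
# generate() above, not settings from this repository.
if __name__ == "__main__":
    out_path = generate(
        clone_audio_path="reference_speaker.wav",  # assumed reference clip
        text="Hello, this is a cloned voice test.",
        language="en",
        temperature=0.65,
        length_penalty=1.0,
        repetition_penalty=2.0,
        top_k=50,
        top_p=0.85,
        num_gpt_outputs=1,
        gpt_cond_len=12,
        gpt_cond_chunk_len=4,
        max_ref_len=10,
        sound_norm_refs=[],
        gpt_batch_size=1,
        num_chars=255,
    )
    print("Saved synthesized audio to", out_path)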
|
douhaohaode/xtts_v2
|
tts_v2.py
|
tts_v2.py
|
py
| 1,627 |
python
|
en
|
code
| 16 |
github-code
|
6
|
7212182080
|
import torch
from torch import nn
from tqdm.auto import tqdm
from torchvision import transforms
from torchvision.utils import make_grid
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
#
import glob
import random
import os
from torch.utils.data import Dataset
from PIL import Image
#filesize
import os
torch.manual_seed(0)
def show_tensor_images(image_tensor, num_images=25, size=(1, 28, 28), img_name=None):
'''
Function for visualizing images: Given a tensor of images, number of images, and
    size per image, plots and prints the images in a uniform grid.
'''
image_tensor = (image_tensor + 1) / 2
image_shifted = image_tensor
image_unflat = image_shifted.detach().cpu().view(-1, *size)
image_grid = make_grid(image_unflat[:num_images], nrow=5)
img_to_save = image_grid.permute(1, 2, 0).squeeze().cpu().numpy()
    if img_name is not None:
plt.imsave(img_name, img_to_save)
#.imshow(image_grid.permute(1, 2, 0).squeeze())
#plt.show()
# Inspired by https://github.com/aitorzip/PyTorch-CycleGAN/blob/master/datasets.py
class ImageDataset(Dataset):
def __init__(self, root, transform=None, mode='train'):
self.transform = transform
self.files_A = sorted(glob.glob(os.path.join(root, '%sA' % mode) + '/*.*'))
self.files_B = sorted(glob.glob(os.path.join(root, '%sB' % mode) + '/*.*'))
if len(self.files_A) > len(self.files_B):
self.files_A, self.files_B = self.files_B, self.files_A
self.new_perm()
assert len(self.files_A) > 0, "Make sure you downloaded the horse2zebra images!"
def new_perm(self):
self.randperm = torch.randperm(len(self.files_B))[:len(self.files_A)]
def __getitem__(self, index):
item_A = self.transform(Image.open(self.files_A[index % len(self.files_A)]))
item_B = self.transform(Image.open(self.files_B[self.randperm[index]]))
if item_A.shape[0] != 3:
item_A = item_A.repeat(3, 1, 1)
if item_B.shape[0] != 3:
item_B = item_B.repeat(3, 1, 1)
if index == len(self) - 1:
self.new_perm()
# Old versions of PyTorch didn't support normalization for different-channeled images
return (item_A - 0.5) * 2, (item_B - 0.5) * 2
def __len__(self):
return min(len(self.files_A), len(self.files_B))
class ResidualBlock(nn.Module):
'''
ResidualBlock Class:
Performs two convolutions and an instance normalization, the input is added
to this output to form the residual block output.
Values:
input_channels: the number of channels to expect from a given input
'''
def __init__(self, input_channels):
super(ResidualBlock, self).__init__()
self.conv1 = nn.Conv2d(input_channels, input_channels, kernel_size=3, padding=1, padding_mode='reflect')
self.conv2 = nn.Conv2d(input_channels, input_channels, kernel_size=3, padding=1, padding_mode='reflect')
self.instancenorm = nn.InstanceNorm2d(input_channels)
self.activation = nn.ReLU()
def forward(self, x):
'''
Function for completing a forward pass of ResidualBlock:
Given an image tensor, completes a residual block and returns the transformed tensor.
Parameters:
x: image tensor of shape (batch size, channels, height, width)
'''
original_x = x.clone()
x = self.conv1(x)
x = self.instancenorm(x)
x = self.activation(x)
x = self.conv2(x)
x = self.instancenorm(x)
return original_x + x
class ContractingBlock(nn.Module):
'''
ContractingBlock Class
Performs a convolution followed by a max pool operation and an optional instance norm.
Values:
input_channels: the number of channels to expect from a given input
'''
def __init__(self, input_channels, use_bn=True, kernel_size=3, activation='relu'):
super(ContractingBlock, self).__init__()
self.conv1 = nn.Conv2d(input_channels, input_channels * 2, kernel_size=kernel_size, padding=1, stride=2, padding_mode='reflect')
self.activation = nn.ReLU() if activation == 'relu' else nn.LeakyReLU(0.2)
if use_bn:
self.instancenorm = nn.InstanceNorm2d(input_channels * 2)
self.use_bn = use_bn
def forward(self, x):
'''
Function for completing a forward pass of ContractingBlock:
Given an image tensor, completes a contracting block and returns the transformed tensor.
Parameters:
x: image tensor of shape (batch size, channels, height, width)
'''
x = self.conv1(x)
if self.use_bn:
x = self.instancenorm(x)
x = self.activation(x)
return x
class ExpandingBlock(nn.Module):
'''
ExpandingBlock Class:
Performs a convolutional transpose operation in order to upsample,
with an optional instance norm
Values:
input_channels: the number of channels to expect from a given input
'''
def __init__(self, input_channels, use_bn=True):
super(ExpandingBlock, self).__init__()
self.conv1 = nn.ConvTranspose2d(input_channels, input_channels // 2, kernel_size=3, stride=2, padding=1, output_padding=1)
if use_bn:
self.instancenorm = nn.InstanceNorm2d(input_channels // 2)
self.use_bn = use_bn
self.activation = nn.ReLU()
def forward(self, x):
'''
Function for completing a forward pass of ExpandingBlock:
Given an image tensor, completes an expanding block and returns the transformed tensor.
Parameters:
x: image tensor of shape (batch size, channels, height, width)
skip_con_x: the image tensor from the contracting path (from the opposing block of x)
for the skip connection
'''
x = self.conv1(x)
if self.use_bn:
x = self.instancenorm(x)
x = self.activation(x)
return x
class FeatureMapBlock(nn.Module):
'''
FeatureMapBlock Class
The final layer of a Generator -
maps each the output to the desired number of output channels
Values:
input_channels: the number of channels to expect from a given input
output_channels: the number of channels to expect for a given output
'''
def __init__(self, input_channels, output_channels):
super(FeatureMapBlock, self).__init__()
self.conv = nn.Conv2d(input_channels, output_channels, kernel_size=7, padding=3, padding_mode='reflect')
def forward(self, x):
'''
Function for completing a forward pass of FeatureMapBlock:
Given an image tensor, returns it mapped to the desired number of channels.
Parameters:
x: image tensor of shape (batch size, channels, height, width)
'''
x = self.conv(x)
return x
class Generator(nn.Module):
'''
Generator Class
A series of 2 contracting blocks, 9 residual blocks, and 2 expanding blocks to
transform an input image into an image from the other class, with an upfeature
layer at the start and a downfeature layer at the end.
Values:
input_channels: the number of channels to expect from a given input
output_channels: the number of channels to expect for a given output
'''
def __init__(self, input_channels, output_channels, hidden_channels=64):
super(Generator, self).__init__()
self.upfeature = FeatureMapBlock(input_channels, hidden_channels)
self.contract1 = ContractingBlock(hidden_channels)
self.contract2 = ContractingBlock(hidden_channels * 2)
res_mult = 4
self.res0 = ResidualBlock(hidden_channels * res_mult)
self.res1 = ResidualBlock(hidden_channels * res_mult)
self.res2 = ResidualBlock(hidden_channels * res_mult)
self.res3 = ResidualBlock(hidden_channels * res_mult)
self.res4 = ResidualBlock(hidden_channels * res_mult)
self.res5 = ResidualBlock(hidden_channels * res_mult)
self.res6 = ResidualBlock(hidden_channels * res_mult)
self.res7 = ResidualBlock(hidden_channels * res_mult)
self.res8 = ResidualBlock(hidden_channels * res_mult)
self.expand2 = ExpandingBlock(hidden_channels * 4)
self.expand3 = ExpandingBlock(hidden_channels * 2)
self.downfeature = FeatureMapBlock(hidden_channels, output_channels)
self.tanh = torch.nn.Tanh()
def forward(self, x):
'''
Function for completing a forward pass of Generator:
Given an image tensor, passes it through the U-Net with residual blocks
and returns the output.
Parameters:
x: image tensor of shape (batch size, channels, height, width)
'''
x0 = self.upfeature(x)
x1 = self.contract1(x0)
x2 = self.contract2(x1)
x3 = self.res0(x2)
x4 = self.res1(x3)
x5 = self.res2(x4)
x6 = self.res3(x5)
x7 = self.res4(x6)
x8 = self.res5(x7)
x9 = self.res6(x8)
x10 = self.res7(x9)
x11 = self.res8(x10)
x12 = self.expand2(x11)
x13 = self.expand3(x12)
xn = self.downfeature(x13)
return self.tanh(xn)
class Discriminator(nn.Module):
'''
Discriminator Class
Structured like the contracting path of the U-Net, the discriminator will
output a matrix of values classifying corresponding portions of the image as real or fake.
Parameters:
input_channels: the number of image input channels
hidden_channels: the initial number of discriminator convolutional filters
'''
def __init__(self, input_channels, hidden_channels=64):
super(Discriminator, self).__init__()
self.upfeature = FeatureMapBlock(input_channels, hidden_channels)
self.contract1 = ContractingBlock(hidden_channels, use_bn=False, kernel_size=4, activation='lrelu')
self.contract2 = ContractingBlock(hidden_channels * 2, kernel_size=4, activation='lrelu')
self.contract3 = ContractingBlock(hidden_channels * 4, kernel_size=4, activation='lrelu')
self.final = nn.Conv2d(hidden_channels * 8, 1, kernel_size=1)
def forward(self, x):
x0 = self.upfeature(x)
x1 = self.contract1(x0)
x2 = self.contract2(x1)
x3 = self.contract3(x2)
xn = self.final(x3)
return xn
import torch.nn.functional as F
adv_criterion = nn.MSELoss()
recon_criterion = nn.L1Loss()
n_epochs = 200
dim_A = 3
dim_B = 3
display_step = 1000#200
batch_size = 1
lr = 0.0002
load_shape = 286
target_shape = 256
device = 'cuda'
transform = transforms.Compose([
transforms.Resize(load_shape),
transforms.RandomCrop(target_shape),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
])
import torchvision
dataset = ImageDataset("horse2zebra", transform=transform)
gen_AB = Generator(dim_A, dim_B).to(device)
gen_BA = Generator(dim_B, dim_A).to(device)
gen_opt = torch.optim.Adam(list(gen_AB.parameters()) + list(gen_BA.parameters()), lr=lr, betas=(0.5, 0.999))
disc_A = Discriminator(dim_A).to(device)
disc_A_opt = torch.optim.Adam(disc_A.parameters(), lr=lr, betas=(0.5, 0.999))
disc_B = Discriminator(dim_B).to(device)
disc_B_opt = torch.optim.Adam(disc_B.parameters(), lr=lr, betas=(0.5, 0.999))
def weights_init(m):
if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
torch.nn.init.normal_(m.weight, 0.0, 0.02)
if isinstance(m, nn.BatchNorm2d):
torch.nn.init.normal_(m.weight, 0.0, 0.02)
torch.nn.init.constant_(m.bias, 0)
# Feel free to change pretrained to False if you're training the model from scratch
pretrained = True#True
if pretrained:
pre_dict = torch.load('cycleGAN_ckpt.pth')#cycleGAN_100000
gen_AB.load_state_dict(pre_dict['gen_AB'])
gen_BA.load_state_dict(pre_dict['gen_BA'])
gen_opt.load_state_dict(pre_dict['gen_opt'])
disc_A.load_state_dict(pre_dict['disc_A'])
disc_A_opt.load_state_dict(pre_dict['disc_A_opt'])
disc_B.load_state_dict(pre_dict['disc_B'])
disc_B_opt.load_state_dict(pre_dict['disc_B_opt'])
else:
gen_AB = gen_AB.apply(weights_init)
gen_BA = gen_BA.apply(weights_init)
disc_A = disc_A.apply(weights_init)
disc_B = disc_B.apply(weights_init)
# UNQ_C1 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: get_disc_loss
def get_disc_loss(real_X, fake_X, disc_X, adv_criterion):
'''
Return the loss of the discriminator given inputs.
Parameters:
real_X: the real images from pile X
fake_X: the generated images of class X
disc_X: the discriminator for class X; takes images and returns real/fake class X
prediction matrices
adv_criterion: the adversarial loss function; takes the discriminator
predictions and the target labels and returns a adversarial
loss (which you aim to minimize)
'''
#### START CODE HERE ####
disc_fake_X_hat = disc_X(fake_X.detach()) # Detach generator
disc_fake_X_loss = adv_criterion(disc_fake_X_hat, torch.zeros_like(disc_fake_X_hat))
disc_real_X_hat = disc_X(real_X)
disc_real_X_loss = adv_criterion(disc_real_X_hat, torch.ones_like(disc_real_X_hat))
disc_loss = (disc_fake_X_loss + disc_real_X_loss) / 2
#### END CODE HERE ####
return disc_loss
# UNIT TEST
test_disc_X = lambda x: x * 97
test_real_X = torch.tensor(83.)
test_fake_X = torch.tensor(89.)
test_adv_criterion = lambda x, y: x * 79 + y * 73
assert torch.abs((get_disc_loss(test_real_X, test_fake_X, test_disc_X, test_adv_criterion)) - 659054.5000) < 1e-6
test_disc_X = lambda x: x.mean(0, keepdim=True)
test_adv_criterion = torch.nn.BCEWithLogitsLoss()
test_input = torch.ones(20, 10)
# If this runs, it's a pass - checks that the shapes are treated correctly
get_disc_loss(test_input, test_input, test_disc_X, test_adv_criterion)
print("Success!")
# UNQ_C2 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: get_gen_adversarial_loss
def get_gen_adversarial_loss(real_X, disc_Y, gen_XY, adv_criterion):
'''
Return the adversarial loss of the generator given inputs
(and the generated images for testing purposes).
Parameters:
real_X: the real images from pile X
disc_Y: the discriminator for class Y; takes images and returns real/fake class Y
prediction matrices
gen_XY: the generator for class X to Y; takes images and returns the images
transformed to class Y
adv_criterion: the adversarial loss function; takes the discriminator
predictions and the target labels and returns a adversarial
loss (which you aim to minimize)
'''
#### START CODE HERE ####
fake_Y = gen_XY(real_X)
disc_fake_Y_hat = disc_Y(fake_Y)
adversarial_loss = adv_criterion(disc_fake_Y_hat, torch.ones_like(disc_fake_Y_hat))
#### END CODE HERE ####
return adversarial_loss, fake_Y
# UNIT TEST
test_disc_Y = lambda x: x * 97
test_real_X = torch.tensor(83.)
test_gen_XY = lambda x: x * 89
test_adv_criterion = lambda x, y: x * 79 + y * 73
test_res = get_gen_adversarial_loss(test_real_X, test_disc_Y, test_gen_XY, test_adv_criterion)
assert torch.abs(test_res[0] - 56606652) < 1e-6
assert torch.abs(test_res[1] - 7387) < 1e-6
test_disc_Y = lambda x: x.mean(0, keepdim=True)
test_adv_criterion = torch.nn.BCEWithLogitsLoss()
test_input = torch.ones(20, 10)
# If this runs, it's a pass - checks that the shapes are treated correctly
get_gen_adversarial_loss(test_input, test_disc_Y, test_gen_XY, test_adv_criterion)
print("Success!")
# UNQ_C3 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: get_identity_loss
def get_identity_loss(real_X, gen_YX, identity_criterion):
'''
Return the identity loss of the generator given inputs
(and the generated images for testing purposes).
Parameters:
real_X: the real images from pile X
gen_YX: the generator for class Y to X; takes images and returns the images
transformed to class X
identity_criterion: the identity loss function; takes the real images from X and
those images put through a Y->X generator and returns the identity
loss (which you aim to minimize)
'''
#### START CODE HERE ####
identity_X = gen_YX(real_X)
identity_loss = identity_criterion(identity_X, real_X)
#### END CODE HERE ####
return identity_loss, identity_X
# UNIT TEST
test_real_X = torch.tensor(83.)
test_gen_YX = lambda x: x * 89
test_identity_criterion = lambda x, y: (x + y) * 73
test_res = get_identity_loss(test_real_X, test_gen_YX, test_identity_criterion)
assert torch.abs(test_res[0] - 545310) < 1e-6
assert torch.abs(test_res[1] - 7387) < 1e-6
print("Success!")
# UNQ_C4 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: get_cycle_consistency_loss
def get_cycle_consistency_loss(real_X, fake_Y, gen_YX, cycle_criterion):
'''
Return the cycle consistency loss of the generator given inputs
(and the generated images for testing purposes).
Parameters:
real_X: the real images from pile X
fake_Y: the generated images of class Y
gen_YX: the generator for class Y to X; takes images and returns the images
transformed to class X
cycle_criterion: the cycle consistency loss function; takes the real images from X and
those images put through a X->Y generator and then Y->X generator
and returns the cycle consistency loss (which you aim to minimize)
'''
#### START CODE HERE ####
cycle_X = gen_YX(fake_Y)
cycle_loss = cycle_criterion(cycle_X, real_X)
#### END CODE HERE ####
return cycle_loss, cycle_X
# UNIT TEST
test_real_X = torch.tensor(83.)
test_fake_Y = torch.tensor(97.)
test_gen_YX = lambda x: x * 89
test_cycle_criterion = lambda x, y: (x + y) * 73
test_res = get_cycle_consistency_loss(test_real_X, test_fake_Y, test_gen_YX, test_cycle_criterion)
assert torch.abs(test_res[1] - 8633) < 1e-6
assert torch.abs(test_res[0] - 636268) < 1e-6
print("Success!")
# UNQ_C5 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: get_gen_loss
def get_gen_loss(real_A, real_B, gen_AB, gen_BA, disc_A, disc_B, adv_criterion, identity_criterion, cycle_criterion, lambda_identity=0.1, lambda_cycle=10):
'''
Return the loss of the generator given inputs.
Parameters:
real_A: the real images from pile A
real_B: the real images from pile B
gen_AB: the generator for class A to B; takes images and returns the images
transformed to class B
gen_BA: the generator for class B to A; takes images and returns the images
transformed to class A
disc_A: the discriminator for class A; takes images and returns real/fake class A
prediction matrices
disc_B: the discriminator for class B; takes images and returns real/fake class B
prediction matrices
adv_criterion: the adversarial loss function; takes the discriminator
predictions and the true labels and returns a adversarial
loss (which you aim to minimize)
identity_criterion: the reconstruction loss function used for identity loss
and cycle consistency loss; takes two sets of images and returns
their pixel differences (which you aim to minimize)
cycle_criterion: the cycle consistency loss function; takes the real images from X and
those images put through a X->Y generator and then Y->X generator
and returns the cycle consistency loss (which you aim to minimize).
Note that in practice, cycle_criterion == identity_criterion == L1 loss
lambda_identity: the weight of the identity loss
lambda_cycle: the weight of the cycle-consistency loss
'''
# Hint 1: Make sure you include both directions - you can think of the generators as collaborating
# Hint 2: Don't forget to use the lambdas for the identity loss and cycle loss!
#### START CODE HERE ####
# Adversarial Loss -- get_gen_adversarial_loss(real_X, disc_Y, gen_XY, adv_criterion)
adv_loss_BA, fake_A = get_gen_adversarial_loss(real_B, disc_A, gen_BA, adv_criterion)
adv_loss_AB, fake_B = get_gen_adversarial_loss(real_A, disc_B, gen_AB, adv_criterion)
gen_adversarial_loss = adv_loss_BA + adv_loss_AB
# Identity Loss -- get_identity_loss(real_X, gen_YX, identity_criterion)
identity_loss_A, identity_A = get_identity_loss(real_A, gen_BA, identity_criterion)
identity_loss_B, identity_B = get_identity_loss(real_B, gen_AB, identity_criterion)
gen_identity_loss = identity_loss_A + identity_loss_B
# Cycle-consistency Loss -- get_cycle_consistency_loss(real_X, fake_Y, gen_YX, cycle_criterion)
cycle_loss_BA, cycle_A = get_cycle_consistency_loss(real_A, fake_B, gen_BA, cycle_criterion)
cycle_loss_AB, cycle_B = get_cycle_consistency_loss(real_B, fake_A, gen_AB, cycle_criterion)
gen_cycle_loss = cycle_loss_BA + cycle_loss_AB
# Total loss
gen_loss = lambda_identity * gen_identity_loss + lambda_cycle * gen_cycle_loss + gen_adversarial_loss
#### END CODE HERE ####
return gen_loss, fake_A, fake_B
# UNIT TEST
test_real_A = torch.tensor(97)
test_real_B = torch.tensor(89)
test_gen_AB = lambda x: x * 83
test_gen_BA = lambda x: x * 79
test_disc_A = lambda x: x * 47
test_disc_B = lambda x: x * 43
test_adv_criterion = lambda x, y: x * 73 + y * 71
test_recon_criterion = lambda x, y: (x + y) * 61
test_lambda_identity = 59
test_lambda_cycle = 53
test_res = get_gen_loss(
test_real_A,
test_real_B,
test_gen_AB,
test_gen_BA,
test_disc_A,
test_disc_B,
test_adv_criterion,
test_recon_criterion,
test_recon_criterion,
test_lambda_identity,
test_lambda_cycle)
assert test_res[0].item() == 4047804560
assert test_res[1].item() == 7031
assert test_res[2].item() == 8051
print("Success!")
from skimage import color
import numpy as np
plt.rcParams["figure.figsize"] = (10, 10)
def train(save_model=True):
mean_generator_loss = 0
mean_discriminator_loss = 0
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
cur_step = 0
for epoch in range(108,n_epochs):
# Dataloader returns the batches
# for image, _ in tqdm(dataloader):
for real_A, real_B in tqdm(dataloader):
# image_width = image.shape[3]
real_A = nn.functional.interpolate(real_A, size=target_shape)
real_B = nn.functional.interpolate(real_B, size=target_shape)
cur_batch_size = len(real_A)
real_A = real_A.to(device)
real_B = real_B.to(device)
### Update discriminator A ###
disc_A_opt.zero_grad() # Zero out the gradient before backpropagation
with torch.no_grad():
fake_A = gen_BA(real_B)
disc_A_loss = get_disc_loss(real_A, fake_A, disc_A, adv_criterion)
disc_A_loss.backward(retain_graph=True) # Update gradients
disc_A_opt.step() # Update optimizer
### Update discriminator B ###
disc_B_opt.zero_grad() # Zero out the gradient before backpropagation
with torch.no_grad():
fake_B = gen_AB(real_A)
disc_B_loss = get_disc_loss(real_B, fake_B, disc_B, adv_criterion)
disc_B_loss.backward(retain_graph=True) # Update gradients
disc_B_opt.step() # Update optimizer
### Update generator ###
gen_opt.zero_grad()
gen_loss, fake_A, fake_B = get_gen_loss(
real_A, real_B, gen_AB, gen_BA, disc_A, disc_B, adv_criterion, recon_criterion, recon_criterion
)
gen_loss.backward() # Update gradients
gen_opt.step() # Update optimizer
# Keep track of the average discriminator loss
mean_discriminator_loss += disc_A_loss.item() / display_step
# Keep track of the average generator loss
mean_generator_loss += gen_loss.item() / display_step
### Visualization code ###
if cur_step % display_step == 0:
print(f"Epoch {epoch}: Step {cur_step}: Generator (U-Net) loss: {mean_generator_loss}, Discriminator loss: {mean_discriminator_loss}")
show_tensor_images(torch.cat([real_A, real_B]), size=(dim_A, target_shape, target_shape))
img_name = f'res_cycle/ep_{epoch}.png'
show_tensor_images(torch.cat([fake_B, fake_A]), size=(dim_B, target_shape, target_shape),img_name=img_name)
mean_generator_loss = 0
mean_discriminator_loss = 0
# You can change save_model to True if you'd like to save the model
            space_taken = sum(os.path.getsize(os.path.join('models', f)) for f in os.listdir('models') if os.path.isfile(os.path.join('models', f))) / (1024 * 1024 * 1024)
            if space_taken > 20:  # no more than 20 GB for this script
                exit('Folder limit exceeded')
if save_model:
torch.save({
'gen_AB': gen_AB.state_dict(),
'gen_BA': gen_BA.state_dict(),
'gen_opt': gen_opt.state_dict(),
'disc_A': disc_A.state_dict(),
'disc_A_opt': disc_A_opt.state_dict(),
'disc_B': disc_B.state_dict(),
'disc_B_opt': disc_B_opt.state_dict()
}, f"models/cycleGAN_{cur_step}.pth")
cur_step += 1
train()
|
Zefyrus94/GAN_test
|
cyclegan.py
|
cyclegan.py
|
py
| 25,719 |
python
|
en
|
code
| 1 |
github-code
|
6
|
32483752873
|
import torch
import torch.nn as nn
from mmdet.models import ResNet, FPN, MobileNetV2
import torch.nn.functional as F
from common import default_conv, ResBlock, BasicBlock
class MCNN(nn.Module):
'''
Implementation of Multi-column CNN for crowd counting
'''
def __init__(self, load_weights=False):
super(MCNN,self).__init__()
self.branch1=nn.Sequential(
nn.Conv2d(3,16,9,padding=4),
nn.ReLU(inplace=True),
nn.MaxPool2d(2),
nn.Conv2d(16,32,7,padding=3),
nn.ReLU(inplace=True),
nn.MaxPool2d(2),
nn.Conv2d(32,16,7,padding=3),
nn.ReLU(inplace=True),
nn.Conv2d(16,8,7,padding=3),
nn.ReLU(inplace=True)
)
self.branch2=nn.Sequential(
nn.Conv2d(3,20,7,padding=3),
nn.ReLU(inplace=True),
nn.MaxPool2d(2),
nn.Conv2d(20,40,5,padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(2),
nn.Conv2d(40,20,5,padding=2),
nn.ReLU(inplace=True),
nn.Conv2d(20,10,5,padding=2),
nn.ReLU(inplace=True)
)
self.branch3=nn.Sequential(
nn.Conv2d(3,24,5,padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(2),
nn.Conv2d(24,48,3,padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(2),
nn.Conv2d(48,24,3,padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(24,12,3,padding=1),
nn.ReLU(inplace=True)
)
self.fuse=nn.Sequential(nn.Conv2d(30,1,1,padding=0))
self.relu=nn.ReLU(inplace=True)
if not load_weights:
self._initialize_weights()
def forward(self,img_tensor):
x1=self.branch1(img_tensor)
x2=self.branch2(img_tensor)
x3=self.branch3(img_tensor)
x=torch.cat((x1,x2,x3),1)
x=self.fuse(x)
x=self.relu(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, std=0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
'''
Example:
>>> from mmdet.models import ResNet
>>> import torch
>>> self = ResNet(depth=18)
>>> self.eval()
>>> inputs = torch.rand(1, 3, 32, 32)
>>> level_outputs = self.forward(inputs)
>>> for level_out in level_outputs:
... print(tuple(level_out.shape))
(1, 64, 8, 8)
(1, 128, 4, 4)
(1, 256, 2, 2)
(1, 512, 1, 1)
'''
'''
Example:
>>> import torch
>>> in_channels = [2, 3, 5, 7]
>>> scales = [340, 170, 84, 43]
>>> inputs = [torch.rand(1, c, s, s)
... for c, s in zip(in_channels, scales)]
>>> self = FPN(in_channels, 11, len(in_channels)).eval()
>>> outputs = self.forward(inputs)
>>> for i in range(len(outputs)):
... print(f'outputs[{i}].shape = {outputs[i].shape}')
outputs[0].shape = torch.Size([1, 11, 340, 340])
outputs[1].shape = torch.Size([1, 11, 170, 170])
outputs[2].shape = torch.Size([1, 11, 84, 84])
outputs[3].shape = torch.Size([1, 11, 43, 43])
'''
class res50_fpn(nn.Module):
def __init__(self, load_weights=False):
super(res50_fpn,self).__init__()
self.resnet = ResNet(50)
self.in_channels = [256, 512, 1024, 2048]
self.scales = [333, 167, 84, 42]
self.fpn = FPN(self.in_channels, 256, len(self.scales))
self.fuse1 = nn.Conv2d(256*4,256,1,padding=0)
self.relu = nn.ReLU(inplace=True)
self.bn1 = nn.BatchNorm2d(num_features=256)
self.bn2 = nn.BatchNorm2d(num_features=1)
self.fuse2 = nn.Conv2d(256,1,1,padding=0)
def forward(self, input):
ret = self.resnet(input)
ret = list(self.fpn(ret))
#_scale = (333, 333)
for i in range(4):
ret[i] = F.interpolate(ret[i], size=(333,333), mode='bilinear')
ret = torch.cat(ret,dim=1)
ret = self.fuse1(ret)
ret = self.bn1(ret)
ret = self.relu(ret)
ret = self.fuse2(ret)
ret = self.bn2(ret)
ret = self.relu(ret)
return ret
class mobilenetv2_fpn(nn.Module):
def __init__(self, load_weights=False):
super(mobilenetv2_fpn,self).__init__()
self.mobilenet = MobileNetV2()
self.in_channels = [24, 32, 96, 1280]
self.scales = [333, 167, 84, 42]
self.fpn = FPN(self.in_channels, 256, len(self.scales))
self.fuse1 = nn.Conv2d(256*4,256,1,padding=0)
self.relu = nn.ReLU(inplace=True)
self.bn1 = nn.BatchNorm2d(num_features=256)
self.bn2 = nn.BatchNorm2d(num_features=1)
self.fuse2 = nn.Conv2d(256,1,1,padding=0)
def forward(self, input):
ret = self.mobilenet(input)
ret = list(self.fpn(ret))
#_scale = (333, 333)
for i in range(4):
ret[i] = F.interpolate(ret[i], size=(333,333), mode='bilinear')
ret = torch.cat(ret,dim=1)
ret = self.fuse1(ret)
ret = self.bn1(ret)
ret = self.relu(ret)
ret = self.fuse2(ret)
ret = self.bn2(ret)
ret = self.relu(ret)
return ret
# parser.add_argument('--act', type=str, default='relu',
# help='activation function')
# parser.add_argument('--pre_train', type=str, default='',
# help='pre-trained model directory')
# parser.add_argument('--extend', type=str, default='.',
# help='pre-trained model directory')
# parser.add_argument('--n_resblocks', type=int, default=16,
# help='number of residual blocks')
# parser.add_argument('--n_feats', type=int, default=64,
# help='number of feature maps')
# parser.add_argument('--res_scale', type=float, default=1,
# help='residual scaling')
# parser.add_argument('--shift_mean', default=True,
# help='subtract pixel mean from the input')
# parser.add_argument('--dilation', action='store_true',
# help='use dilated convolution')
# parser.add_argument('--precision', type=str, default='single',
# choices=('single', 'half'),
# help='FP precision for test (single | half)')
# https://github.com/sanghyun-son/EDSR-PyTorch/blob/master/src/model/edsr.py
class EDSR(nn.Module):  # does not converge
def __init__(self, conv=default_conv):
super(EDSR, self).__init__()
n_resblocks = 16
n_feats = 64
kernel_size = 3
act = nn.ReLU(True)
# define head module
m_head = [conv(3, n_feats, kernel_size)]
# define body module
m_body = [
ResBlock(
conv, n_feats, kernel_size, act=act, res_scale=1
) for _ in range(n_resblocks)
]
m_body.append(conv(n_feats, n_feats, kernel_size))
# define tail module
m_tail = [
conv(n_feats, 1, kernel_size)
]
self.head = nn.Sequential(*m_head)
self.body = nn.Sequential(*m_body)
self.tail = nn.Sequential(*m_tail)
def forward(self, x):
x = self.head(x)
res = self.body(x)
res += x
x = self.tail(res)
return x
# def load_state_dict(self, state_dict, strict=True):
# own_state = self.state_dict()
# for name, param in state_dict.items():
# if name in own_state:
# if isinstance(param, nn.Parameter):
# param = param.data
# try:
# own_state[name].copy_(param)
# except Exception:
# if name.find('tail') == -1:
# raise RuntimeError('While copying the parameter named {}, '
# 'whose dimensions in the model are {} and '
# 'whose dimensions in the checkpoint are {}.'
# .format(name, own_state[name].size(), param.size()))
# elif strict:
# if name.find('tail') == -1:
# raise KeyError('unexpected key "{}" in state_dict'
# .format(name))
class VDSR(nn.Module):
def __init__(self, conv=default_conv):
super(VDSR, self).__init__()
n_resblocks = 16
n_feats = 64
kernel_size = 3
def basic_block(in_channels, out_channels, act):
return BasicBlock(
conv, in_channels, out_channels, kernel_size,
bias=True, bn=True, act=act
)
# define body module
m_body = []
m_body.append(basic_block(3, n_feats, nn.ReLU(True)))
for _ in range(n_resblocks - 2):
m_body.append(basic_block(n_feats, n_feats, nn.ReLU(True)))
m_body.append(basic_block(n_feats, 1, nn.ReLU(True)))
self.body = nn.Sequential(*m_body)
def forward(self, x):
res = self.body(x)
return res
# test code
if __name__=="__main__":
img=torch.rand((1,3,1332,1332),dtype=torch.float)
mcnn=mobilenetv2_fpn()
for m in mcnn.modules():
print(m)
#out_dmap=mcnn(img)
#print(out_dmap.shape)
|
johnran103/mmdet
|
scale_map_net/s_net.py
|
s_net.py
|
py
| 9,667 |
python
|
en
|
code
| 1 |
github-code
|
6
|
36459683211
|
def sockMerchant(n, ar):
# Write your code here
ar=sorted(ar)
set_elm =set(ar)
aa =list(set_elm)
c=[]
for i in range(len(aa)):
c.append(ar.count(aa[i]))
cres=[i//2 for i in c]
return sum(cres)
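
# An equivalent sketch (not part of the original submission) using collections.Counter,
# shown only as a design comparison: it counts each sock colour in one pass and sums
# the number of pairs per colour.
from collections import Counter

def sockMerchant_counter(n, ar):
    return sum(count // 2 for count in Counter(ar).values())

# sockMerchant_counter(9, [10, 20, 20, 10, 10, 30, 50, 10, 20]) == 3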
|
Nowshin021/HackerRank-Interview
|
Sales_by_Match.py
|
Sales_by_Match.py
|
py
| 253 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22451204128
|
"""
@author: Krystof Bogar
doors
"""
def condition(a, b):
"""
    neighbour condition: the last letter of a must match the first letter of b
"""
return a[-1] == b[0]
def solve_key(words):
"""
    solves the task for a word list whose first word is already fixed in place
"""
if len(words) == 2:
return condition(words[0], words[1])
elif len(words)>1:
for word in words[1:]:
if(condition(words[0], word)):
words.remove(word)
words.insert(1, word)
if solve_key(words[1:]) == True:
return True
return False
def is_key(words):
"""
    checks the key condition for an array of words
"""
begl = []
def add(word):
f = word[0]
l = word[-1]
add = True
for n in begl:
if n[0]==f:
n[1] += 1
add = False
if add:
begl.append([f, 1])
add = True
for n in begl:
if n[0]==l:
n[1] -= 1
add = False
if add:
begl.append([l, -1])
for word in words:
add(word)
sumb = 0
sume = 0
for n in begl:
if n[1] > 1 or n[1] < -1:
return False
if n[1] > 0:
sumb += n[1]
if n[1] < 0:
sume -= n[1]
if(sumb > 1 or sume > 1):
return False
for word in words:
words.remove(word)
words.insert(0, word)
if solve_key(words) == True:
return True
return False
def run(fi):
"""
    loads the doors.txt file and prints the validity of each key, in order, to the console
"""
f = open(fi)
def readint():
"""
        reads an integer from the input
"""
return int(f.readline())
def readword():
"""
        reads a word from the input
"""
line = f.readline()
if line[-1] == '\n':
return line[0:-1]
return line
num_of_keys = readint()
while num_of_keys > 0:
num_of_words = readint()
word_list = []
while num_of_words > 0:
word_list.append(readword())
num_of_words -= 1
print(is_key(word_list))
num_of_keys -= 1
if __name__ == "__main__":
run("small.txt")
run("large.txt")
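
# A sketch of the input format run() expects, inferred from readint()/readword()
# above rather than copied from the real small.txt/large.txt: the number of keys on
# the first line, then for each key the number of words followed by that many words,
# one per line.
EXAMPLE_INPUT = """2
3
abc
cde
efg
2
dog
cat
"""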
|
kbogi/pjp
|
cv06/doors.py
|
doors.py
|
py
| 2,242 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16119619384
|
N = int(input())
volume = list(map(int, input().split()))
volume.sort(reverse=True)
result = volume[0]
for i in range(1, N):
result += volume[i] / 2
print("%g" %result) # to drop the meaningless decimal places
|
sujeong11/Algorithm
|
그리디/20115.py
|
20115.py
|
py
| 217 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
20426895808
|
import matplotlib.pyplot as plt
import seaborn as sns
color_list = sns.color_palette('deep') + sns.color_palette('bright')
def DrawDoubleYLines(x, y1, y2, xlabel='', ylabel=['', ''], legend=['', ''], store_path=''):
'''
    Draw double y-axis lines.
:param x: The vector of the x axis.
:param y1: The vector of the y1 axis.
:param y2: The vector of the y2 axis.
:param xlabel: The label of the x. Default is ''
:param ylabel: The list of the y label. Default is ['', '']
:param legend: The list of the legend. Default is ['', '']
    :param store_path: The store path of the figure. Supports the 'jpg' and 'eps' formats.
:return:
'''
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.plot(x, y1, color=color_list[0])
ax1.set_ylabel(ylabel[0])
ax1.set_xlabel(xlabel)
ax2 = ax1.twinx() # this is the important function
ax2.plot(x, y2, color=color_list[1])
ax2.set_ylabel(ylabel[1])
ax2.set_xlabel(xlabel)
ax1.legend([legend[0]], loc=(.02, .9))
ax2.legend([legend[1]], loc=(.02, .82))
if store_path:
plt.tight_layout()
if store_path[-3:] == 'jpg':
fig.savefig(store_path, dpi=300, format='jpeg')
elif store_path[-3:] == 'eps':
fig.savefig(store_path, dpi=1200, format='eps')
plt.show()
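
# A small usage sketch (not from the original module); the data and labels are made
# up purely to illustrate the call signature of DrawDoubleYLines above.
if __name__ == '__main__':
    import numpy as np
    x = np.arange(10)
    DrawDoubleYLines(x, x ** 2, np.sqrt(x),
                     xlabel='Epoch',
                     ylabel=['Loss', 'AUC'],
                     legend=['train loss', 'val AUC'])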
|
salan668/FAE
|
BC/Visualization/DrawDoubleLines.py
|
DrawDoubleLines.py
|
py
| 1,322 |
python
|
en
|
code
| 121 |
github-code
|
6
|
37228942399
|
#!/usr/bin/env python3
import argparse
import bids
from bids import BIDSLayout
import os
from pathlib import Path
def _filter_pybids_none_any(dct):
import bids
return {
k: bids.layout.Query.NONE
if v is None
else (bids.layout.Query.ANY if v == "*" else v)
for k, v in dct.items()
}
def _bids_filter(value):
from json import loads
from bids.layout import Query
if value and Path(value).exists():
try:
filters = loads(Path(value).read_text(), object_hook=_filter_pybids_none_any)
except Exception as e:
raise Exception("Unable to parse BIDS filter file. Check that it is "
"valid JSON.")
else:
raise Exception("Unable to load BIDS filter file " + value)
# unserialize pybids Query enum values
for acq, _filters in filters.items():
filters[acq] = {
k: getattr(Query, v[7:-4])
if not isinstance(v, Query) and "Query" in v
else v
for k, v in _filters.items()
}
return filters
def collect_data(bids_dir, participant_label, queries, filters=None, bids_validate=True):
"""
Uses pybids to retrieve the input data for a given participant
"""
if isinstance(bids_dir, BIDSLayout):
layout = bids_dir
else:
layout = BIDSLayout(str(bids_dir), validate=bids_validate)
bids_filters = filters or {}
for acq, entities in bids_filters.items():
queries[acq].update(entities)
subj_data = {
dtype: sorted(
layout.get(
return_type="file",
subject=participant_label,
extension=["nii", "nii.gz"],
**query
)
)
for dtype, query in queries.items()
}
return subj_data, layout
qsiprep_queries = {
'fmap': {'datatype': 'fmap'},
'sbref': {'datatype': 'func', 'suffix': 'sbref'},
'flair': {'datatype': 'anat', 'suffix': 'FLAIR'},
't2w': {'datatype': 'anat', 'suffix': 'T2w'},
't1w': {'datatype': 'anat', 'suffix': 'T1w'},
'roi': {'datatype': 'anat', 'suffix': 'roi'},
'dwi': {'datatype': 'dwi', 'suffix': 'dwi'}
}
fmriprep_queries = {
'fmap': {'datatype': 'fmap'},
'bold': {'datatype': 'func', 'suffix': 'bold'},
'sbref': {'datatype': 'func', 'suffix': 'sbref'},
'flair': {'datatype': 'anat', 'suffix': 'FLAIR'},
't2w': {'datatype': 'anat', 'suffix': 'T2w'},
't1w': {'datatype': 'anat', 'suffix': 'T1w'},
'roi': {'datatype': 'anat', 'suffix': 'roi'}
}
parser = argparse.ArgumentParser(description='BIDS validation and filter preview. The filters are processed using code extracted from qsiprep '
'v 0.14.2. I believe fmriprep works the same way, but I have not verified this. Also, it is possible that '
'different versions of pybids will behave differently. With those disclaimers in mind, running this can '
'highlight obvious problems with filters or allow you to experiment with advanced matching.')
parser.add_argument('--bids-dir', help='The directory with the input dataset formatted according to the BIDS standard.', required = True)
parser.add_argument('--filter-file', help='File containing BIDS filters', required = True)
parser.add_argument('--participant-label', help='The label of the participant that should be analyzed. The label '
'corresponds to sub-<participant> from the BIDS spec (so it does not include "sub-").', required = True)
parser.add_argument('--prep-modality', help='The kind of modality prep to test the filter on. Options are fmri, qsi.', required = True)
bids.config.set_option('extension_initial_dot', True)
args = parser.parse_args()
layout = BIDSLayout(args.bids_dir, validate = True)
filters = _bids_filter(args.filter_file)
queries = None
if (args.prep_modality == 'qsi'):
queries = qsiprep_queries
elif (args.prep_modality == 'fmri'):
queries = fmriprep_queries
else:
raise ValueError(f'Unsupported modality prep string {args.prep_modality}')
sub_data, layout = collect_data(layout, args.participant_label, queries, filters = filters)
print(f'\n\n Filtered data for participant {args.participant_label}:\n')
for k, v in sub_data.items():
print (k, '\t:\t', v)
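
# A hypothetical example (an assumption, not a file from this repository) of the JSON
# a --filter-file could contain. Top-level keys must match the query names for the
# chosen --prep-modality (e.g. 't1w', 'dwi' for qsi); null is mapped to Query.NONE
# and "*" to Query.ANY by _filter_pybids_none_any above.
EXAMPLE_FILTER_JSON = """
{
    "t1w": {"acquisition": null, "reconstruction": "*"},
    "dwi": {"acquisition": "multishell"}
}
"""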
|
ftdc-picsl/pmacsPreps
|
bin/bidsFilterTest.py
|
bidsFilterTest.py
|
py
| 4,368 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5361852132
|
from kivy.app import App
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.lang import Builder
import wikipedia
from urllib import request
Builder.load_file(filename="search.kv")
class FirstScreen(Screen):
def get_img_link(self):
# get user search
query = self.manager.current_screen.ids.user_search.text
# search wikipedia for user query
page = wikipedia.page(query)
img_link = page.images[0]
img_path = f"images\wiki {query}.png"
return img_link, img_path
def download_wiki_img(self):
img_link, img_path = self.get_img_link()
img = request.urlretrieve(img_link, img_path)
return img[0]
def preview_img(self):
# change images dynamically
self.manager.current_screen.ids.img.source = self.download_wiki_img() #*
# self.ids.img.source = "images\git_init.png" #= same above
class RootWidget(ScreenManager):
pass
class SearchApp(App):
def build(self):
return RootWidget()
SearchApp().run()
'''
### Steps for creating app using kivy ###
## Python Script ##
1. First there's a MainApp class -or call it <anything>App; this class inherits from (App) class that is imported from kivy.app. So it's like the template on which we build our app.
- Inside this class we'll overwrite build(self) method to return the ScreenManager object that we have defined (see point 2).
2. Define a RootWidget class; this class inherits from (ScreenManager) imported from kivy.uix.screenmanager.
It's like a manager for any other Screen object we'll create later (a screen object for each new screen in the app).
3. Define a Screen object that inherts from (Screen); This is the screen object we're talking about, on which we'll put layouts and widgets. And also define the methods that these widgets will execute.
4. Run the app: MainApp().run()
5. To connect script to .kv file:
- By default, Kivy expects the .kv file to have the same base name as your Python file. For example, if your Python file is named myapp.py, the corresponding .kv file should be named myapp.kv.
- Alternatively, you can specify it manually this way:
from kivy.lang import Builder
Builder.load_file("filename.kv")
6. To connect a method defined in your Screen object to a widget on that screen; say you have a button on that screen. Simply set on_press: root.method() -like the example below-
- root here refers to the root widget of your widgets tree (which happens to be the screen object), that's why you should define that method in your Screen class declaration.
7. Get text from TextInput --> var = self.ids.<id>.text
NOTE It's a good practice to separate code in Screen class into several methods to ease its understanding and refactoring.
## .kv file for GUI ##
In this file we will implement the GUI; screens, layouts, widgets and their attributes.
file start>>>
<Screen_name>:
<Layout_type>:
widget_1:
attr_1: value
attr_2: value
Button_1:
on_press: root.method()
......
<RootWidget>:
Screen_name:
id: id
name: "name"
<<< file end
______________________________________________________
#*
Let's break down the code:
- `self.manager`: `self` refers to the current instance of the class, and `manager` is a property or attribute of that instance. In this case, it is assumed that the current class has a `manager` attribute that represents a `ScreenManager` instance.
- `self.manager.current_screen`: `current_screen` is an attribute of the `ScreenManager` class that represents the currently displayed screen. By accessing `current_screen`, you are retrieving the instance of the currently active screen.
- `ids`: `ids` is a dictionary-like property of a widget that contains all the child widgets defined in the corresponding `.kv` file with an `id` attribute. The `id` attribute is used to uniquely identify a widget.
- `img`: `img` is the `id` assigned to an `Image` widget in the corresponding `.kv` file.
- `source`: `source` is a property of the `Image` widget that represents the path or URL of the image file to be displayed.
So, putting it all together, `self.manager.current_screen.ids.img.source = "images\image.png"` sets the `source` property of the `Image` widget (identified by the `id` "img") within the current screen of the `ScreenManager` to "images\image.png". It updates the image source, allowing you to change the displayed image dynamically.
'''
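
# A minimal sketch (an assumption, not the project's actual search.kv) of the kv
# markup main.py expects: it only reconstructs the two ids referenced in FirstScreen
# (user_search and img); the layout and button wiring are made up.
KV_SKETCH = '''
<FirstScreen>:
    BoxLayout:
        orientation: "vertical"
        TextInput:
            id: user_search
        Button:
            text: "Search"
            on_press: root.preview_img()
        Image:
            id: img

<RootWidget>:
    FirstScreen:
        name: "first"
'''
# Builder.load_string(KV_SKETCH) would stand in for Builder.load_file("search.kv").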
|
mido-99/Advanded-OOP
|
App-4-Webcam-Photo-Sharer/main.py
|
main.py
|
py
| 4,504 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26676860635
|
from flask import Flask, render_template, request, redirect
from flask_wtf import FlaskForm
from wtforms import StringField, HiddenField, RadioField
from wtforms.validators import Length, ValidationError
import phonenumbers
import random
import json
import os
app = Flask(__name__)
app.secret_key = 'Secret!'
hours = {"1-2": "1-2 часа", "3-5": "3-5 часов", "5-7": "5-7 часов", "7-10": "7-10 часов"}
days = {"mon": "Понедельник", "tue": "Вторник", "wed": "Среда", "thu": "Четверг", "fri": "Пятница", "sat": "Суббота", "sun": "Воскресенье"}
week = {'sun': 'sunday', 'mon': 'monday', 'tue': 'tuesday', 'wed': 'wednesday', 'thu': 'thursday', 'fri': 'friday', 'sat': 'saturday'}
def get_data():
with open("data.txt", "r") as d:
data = json.load(d)
return data
def check_phone(form, field):
number = form.clientPhone.data
print(number)
try:
if not phonenumbers.is_valid_number(phonenumbers.parse(number, 'RU')):
raise phonenumbers.NumberParseException(None, None)
except phonenumbers.NumberParseException:
raise ValidationError('Пожалуйста укажите номер телефона полностью (+7ХХХХХХХХХХ)')
def convert_day(day):
for key, value in week.items():
if value == day:
return key
# this function adds a new goal
def add_goal(id_list, new_goal_eng, new_goal_ru, new_goal_pic):
data = get_data()
data['goals'].update({new_goal_eng: new_goal_ru})
data['emodji'].update({new_goal_eng: new_goal_pic})
for gid in id_list:
if new_goal_eng not in data['teachers'][gid]['goals']:
data['teachers'][gid]['goals'].append(new_goal_eng)
out = {'goals': data['goals'], 'teachers': data['teachers'], 'emodji': data['emodji']}
with open("data.txt", "w") as f:
json.dump(out, f)
def add_callback(name, phone, goal, time):
records = []
if os.path.isfile('request.json'):
with open('request.json', 'r') as r:
records = json.load(r)
records.append({'name': name, 'phone': phone, 'goal': goal, 'time': time})
with open('request.json', 'w') as w:
json.dump(records, w)
def add_record(name, phone, teacher_id, day, time):
records = []
if os.path.isfile('booking.json'):
with open('booking.json', 'r') as r:
records = json.load(r)
records.append({'name': name, 'phone': phone, 'teacher': teacher_id, 'weekday': day, 'time': time})
with open('booking.json', 'w') as w:
json.dump(records, w)
class RequestForm(FlaskForm):
data = get_data()
clientName = StringField('Вас зовут', [Length(min=2, message='Пожалуйста укажите ваше имя')])
clientPhone = StringField('Ваш телефон', [check_phone])
time = RadioField('Сколько времени есть?', choices=[(key, value) for key, value in hours.items()], default='1-2')
goals = RadioField('Какая цель занятий?', choices=[(key, value) for key, value in data['goals'].items()], default='travel')
class BookingForm(FlaskForm):
clientName = StringField('Вас зовут', [Length(min=2, message='Пожалуйста укажите ваше имя')])
clientPhone = StringField('Ваш телефон', [check_phone])
clientWeekday = HiddenField()
clientTime = HiddenField()
clientTeacher = HiddenField()
@app.route('/')
def main():
    # add_goal((8, 9, 10, 11), 'programming', 'Для программирования', '🖥') #<- this is how a goal was added
data = get_data()
random_teachers_ids = []
while len(random_teachers_ids) < 6:
i = random.randint(0, len(data['teachers'])-1)
if i not in random_teachers_ids:
random_teachers_ids.append(i)
return render_template('index.html', teachers=data['teachers'], ids=random_teachers_ids, pic=data['emodji'], goals=data['goals'])
@app.route('/all/')
def all_teachers():
data = get_data()
return render_template('index.html', teachers=data['teachers'], ids=[i for i in range(len(data['teachers']))], pic=data['emodji'], goals=data['goals'])
@app.route('/goals/<goal>/')
def show_goals(goal):
data = get_data()
sorted_list = []
for teacher in data['teachers']:
if goal in teacher['goals']:
sorted_list.append(teacher)
return render_template("goal.html", teachers=sorted_list, goals=data['goals'], goal=goal, pic=data['emodji'])
@app.route('/profiles/<int:teacher_id>/')
def show_profile(teacher_id):
data = get_data()
return render_template("profile.html", teacher=data['teachers'][teacher_id], goals=data['goals'], days=days, week=week)
@app.route('/request/')
def make_request():
form = RequestForm()
return render_template("request.html", form=form)
@app.route('/request_done/', methods=['POST', 'GET'])
def request_done():
form = RequestForm()
data = get_data()
if request.method == 'POST':
if form.validate_on_submit():
name = form.clientName.data
phone = form.clientPhone.data
goal = form.goals.data
time = form.time.data
add_callback(name, phone, goal, time)
return render_template("request_done.html", name=name, phone=phone, goal=data['goals'].get(goal), time=hours.get(time))
else:
return render_template("request.html", form=form)
else:
return render_template("request.html", form=form)
@app.route('/booking/<int:teacher_id>/<day>/<time>/')
def booking(teacher_id, day, time):
data = get_data()
what_day = convert_day(day)
time = time + ":00"
form = BookingForm(clientTime=time, clientWeekday=what_day, clientTeacher=teacher_id)
return render_template("booking.html", teacher=data['teachers'][teacher_id], day=what_day, time=time, days=days, form=form)
@app.route('/booking_done/', methods=['POST', 'GET'])
def booking_save():
data = get_data()
form = BookingForm()
if request.method == 'POST':
name = form.clientName.data
phone = form.clientPhone.data
day = form.clientWeekday.data
time = form.clientTime.data
teacher_id = int(form.clientTeacher.data)
if form.validate_on_submit():
add_record(name, phone, teacher_id, day, time)
return render_template("booking_done.html", name=name, phone=phone, day=days.get(day), time=time, teacher_id=teacher_id)
else:
return render_template("booking.html", teacher=data['teachers'][teacher_id], day=day, time=time, days=days, form=form)
else:
return redirect('/')
if __name__ == '__main__':
app.run()
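
# A sketch (inferred from app.py above, not the project's real data.txt) of the
# minimal JSON structure get_data() expects: top-level "goals", "teachers" and
# "emodji" keys. Each teacher entry needs at least a "goals" list; the templates
# reference further fields that are not visible in this file.
EXAMPLE_DATA = {
    "goals": {"travel": "For travel"},
    "emodji": {"travel": "⛱"},
    "teachers": [
        {"goals": ["travel"]}
    ]
}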
|
maksimKnz/flask-project2
|
app.py
|
app.py
|
py
| 6,791 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32102854189
|
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
    def detectCycle(self, head: ListNode) -> ListNode:
        # Floyd's cycle detection (tortoise and hare).
        if not head or not head.next:
            return None
        slow, fast = head, head
        while fast and fast.next:
            slow = slow.next
            fast = fast.next.next
            if slow == fast:
                # A cycle exists: restart slow from the head; advancing both
                # pointers one step at a time, they meet at the cycle's entry node.
                slow = head
                while slow != fast:
                    slow = slow.next
                    fast = fast.next
                return slow
        return None
head = ListNode(3)
head.next = tmp = ListNode(2)
head.next.next = ListNode(0)
head.next.next.next = ListNode(4)
head.next.next.next.next = tmp
res = Solution().detectCycle(head)
print(res.val)
|
Eleanoryuyuyu/LeetCode
|
字节跳动/链表与树/环形链表 II.py
|
环形链表 II.py
|
py
| 760 |
python
|
en
|
code
| 3 |
github-code
|
6
|
72602213309
|
class Solution:
def findOccurrences(self, text, first, second):
text_list = text.split()
res = []
for i in range(len(text_list)-2):
if text_list[i] == first and text_list[i+1] == second:
res.append(text_list[i+2])
return res
if __name__ == "__main__":
txt = "we will we will rock you"
first = "we"
second = "will"
r = Solution().findOccurrences(txt, first, second)
print(r)
|
CodingBuye/PythonForLeetcode
|
Easy/1078.Occurrences After Bigram.py
|
1078.Occurrences After Bigram.py
|
py
| 460 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15166780973
|
#!/usr/bin/python3
if __name__ == "__main__":
import sys
from calculator_1 import div, mul, add, sub
if len(sys.argv) != 4:
print("Usage: ./100-my_calculator.py <a> <operator> <b>")
exit(1)
op_list = {'+': "add", '-':"sub", '/':"div", '*':"mul"}
if sys.argv[2] not in list(op_list.keys()):
print("Unknown operator. Available operators: +, -, * and /")
exit(1)
a = int(sys.argv[1])
b = int(sys.argv[3])
print("{} {} {}".format(a, sys.argv[2], b, op_list[sys.argv[2]](a, b)))
|
AhmedNewiry/alx-higher_level_programming
|
0x02-python-import_modules/100-my_calculator.py
|
100-my_calculator.py
|
py
| 538 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19739382962
|
import json
import mechanize
import sys
import logging
import time
import urllib
from constants import *
from excepciones import *
from imagen import *
from datetime import date, timedelta
from termcolor import colored
logger = logging.getLogger(__name__)
class Browser(object):
def __init__(self, config, login=True):
WEB_USER_AGENT = 'Mozilla/5.0 (Linux; Android 4.2.1; en-us; Nexus 4 Build/JOP40D) AppleWebKit/535.19 (KHTML, ' \
'like Gecko) Chrome/18.0.1025.166 Mobile Safari/535.19'
self.br = mechanize.Browser()
self.br.set_handle_robots(False)
self.br.addheaders = [('User-agent', WEB_USER_AGENT)]
# br.set_proxies({"http": "tcp://0.tcp.ngrok.io:13183", "https": "tcp://0.tcp.ngrok.io:13183"})
        # TODO: allow a proxy to be configured via parameters
self.products = None
self.favoritos = None
self.config = config
if login:
self._login()
def _add_headers(self, header):
self.br.addheaders = header + self.br.addheaders
def _convert_headers(self):
heads = {}
for h in self.br.addheaders:
heads[h[0]] = h[1]
return heads
def _obtiene_numero_de_imagen(self, imagen):
if imagen == "iVBORw0KGgoAAAANSUhEUgAAADwAAAA8CAYAAAA6/NlyAAAA80lEQVR42u3XsQ2DMBCFYSZkAA/g3gMwgAdgAHoP4AEYgC0YgB5Hz5IprKSIopBg/pOugO7znXXnLt0sOsCAAQMGDBgwYMCAAQMGDBgwYMCAAX8/9n3PCfgfwSGENAxDzmma0rqu7ba0cy4ZYzJUcGtt/j4TfRo4xpj6vk/Lshz/tm3LYO99e2ChVOE6VG0dRHPgcm+f3WnAgAEDBvxrsMaPZq5mb30QWkCaA2ubUiXHcTzQpbpaSppcLed5zlUWsqTQzT8PtV4q6/bmPXwV8CfvVcC0NOC3ugwwLX3hagKmpQEDBswcBkxLAwYMGDBgwC/iAYRusMooTP73AAAAAElFTkSuQmCC":
return 0
if imagen == "iVBORw0KGgoAAAANSUhEUgAAADwAAAA8CAYAAAA6/NlyAAAArklEQVR42u3XwQmFMAwAUCd0ALfoAG7UARyjA3h3AO/Np715/J8viH2B0JJTHySUTDFYTMDAwMDAwMDAwMDAwMDAwMDAwMDAUWvtORz4OI7IOffctu39Lb3ve6zrGsuy9HOYGW5YYGBgYGBgYOBb4zzPKKVESqlnu7faa8ENOM/zJVvNegj8cPCT9t+vwL8+fDiwGQYGBr7r1wDW0sDAwMDAwMDAwH8Ev3HxBx4lPqQ72MOvo8X0AAAAAElFTkSuQmCC":
return 1
if imagen == "iVBORw0KGgoAAAANSUhEUgAAADwAAAA8CAYAAAA6/NlyAAABFElEQVR42u3bywmFMBCFYSu0AAtwbwEWYAEpwL0FWIYFuLcA987lBAJ6QXDhi/EfCOLyS84kRDCzl9ayLHGcXRlg5wUYMGDAgAEDBgwYMGDAgAEDBgwY8JvqyIe/W8HzPFvXdVbXdRxt29o0TX7BVVVZURQRKnhZlvH9TPRrIt33veV5buM4blY8TYA7cNM0cYX/K8XbHXgPplhrMtyBFeV1nFXqY8VccXd/LAmv/r0zzo+BtSsLq57WxuUaLKCg6t27sY+A01mc+nkYhtjLLsEhhA12vXG5AyeYnlrVNDQJLsHajQXbG9yWAAMG/Gnw3scAwEQaMGDAgAEDBgwYMGDAgI+Ar/qdBvBF91+3kf4c+Gj9ACFwszHPYVfiAAAAAElFTkSuQmCC":
return 2
if imagen == "iVBORw0KGgoAAAANSUhEUgAAADwAAAA8CAYAAAA6/NlyAAABHklEQVR42u3azYmEQBBA4YnQAAzALAzAAAzAuwEYgAGYhQF4t5cnNAwyh2XZ6dmtfgUNc/Sz/lqYR6osHoIF/+84z/M6gi1pwYIFCxYsWLBgwYIFCxYsWLDgn8ayLGkYhtT3fZqmKe37HhcMsGmaNI5jmuc5dV2X2rYtii4GBgUWaI7jOC4wLyIcGChgkM9BaXPCgendVzDKmp4OP6XJNL1M1rdtiw0GCPTe02HBZHdd16uUQfO7mosHfU0fhwOzel6tnzy9w4EpX3bufS0xuEJmmD7Nt6yMztktObiK9jC7mCznCc0pecv66Fri3Mvbz0PBgr8Vz397EPzXHrCKkq4O7NASLFiwYMGCKwe/8/Yj2JIWLPjXwFF6V3At8QUOfbi8RNYGHgAAAABJRU5ErkJggg==":
return 3
if imagen == "iVBORw0KGgoAAAANSUhEUgAAADwAAAA8CAYAAAA6/NlyAAAA7klEQVR42u3awQmEMBCFYStMASnALiwgBaQA7xZgARaQAnK3AO+JzECWxduy7K47+QcGRPDwMS+DgkPtrAbAgAEDBgwYMOBPVClFG/CrD5mOdHdglhZgwIABAwYMGPC/gY/jqNM01XVd+wCHEKpzri7LYh+8bZtivff2wRJlgXYz4XmeFSxw8+CUkiIl0lLmweM46mZuZRosMInyvu99gNtWlgm3lnvXqZua8LUFLNhvTvmnr5bdvHjknB8bO8ao16bB7fw+N19LgAEDBgwYMGDAgC2Arf3mAJgzfPN6J4GAiTRgwLc5n4CJNGD7dQIGWLVcNsmv7wAAAABJRU5ErkJggg==":
return 4
if imagen == "iVBORw0KGgoAAAANSUhEUgAAADwAAAA8CAYAAAA6/NlyAAAA7ElEQVR42u3ZwQmEMBCFYSu0gBSQuwWkAAuwCwtIARaQArxbQO5mmbAuLrgHFxQz+QcGvH7MYxK1SZVVAxgwYMCAAQMGXBR4XdfcgIk0YMCAnwouZXEBJtKAAQMG/K5xHA9bLbht22StTc65r1YNvnuigO8GD8OQQghpnuc6wPs2xiTvvV7wsiyfZ5lw3/cZPk1TPedw13W5qwHLEpMpqwP/2tBqwRLbo0uG2kjLNt5POcaYjyjVS2uLbzXH0jZZuXhI83p4sv75jgaYDwCAr40cYCIN+DpwSX/6ARNpwIABAwYMGDBgwIABn6kX+cW6dZbwGkoAAAAASUVORK5CYII=":
return 5
if imagen == "iVBORw0KGgoAAAANSUhEUgAAADwAAAA8CAYAAAA6/NlyAAABDUlEQVR42u3awQmEMBCFYSu0AAvwngIswAIswLsFWEa6sADvZnmBWdaAxxUy+QcGxNvnJC8odqmx6rzCruvKDZglDRgwYMCAAQMGDBgwYMCAAQMGDBjwc53nmbZtS9M05V7XNR3H4RMsbAghjeOY0WpdD8PwKvo1sIB9399wegi6tyyLP7CWsCZc1r7vuV2C1c2EloE1zXmev6GlZe0WrP2qkBLUfWgZOMZ4Cy2B3YaWJlqWpq0H0Uxo2XHlDmyTLEPq6biqHqxgsv1qaJuuy3NYpcDSPhbSWmj3b0uCq98+g5t5Pfz9/QEwHwAAAwYMGDBgwH8HP/19CpglDRiwW3ANOQDYOxowoQW47voABOCsg8XlTG8AAAAASUVORK5CYII=":
return 6
if imagen == "iVBORw0KGgoAAAANSUhEUgAAADwAAAA8CAYAAAA6/NlyAAAA7klEQVR42u3awQmEMBCFYStMARaQuwVYhl2kgBSQAizAuwXkbpYXWJCFPSyyZjfzDwxCLvIxw4wBh2IsBsAX4jiOmnfHJ+8F/Ou4Zi1tDszQAgwYMGDAgC9/jQGmpQEDBgwYMOD+wDHGEkJ4m92B53ku4zjW5zl15pzrE6w8R865gpdlsQFWm6u627b1BxbqFea9L9M02ZjS67rW6qrKJsDPgWViD+/7Xqt75zpqCtZUFljw7sEtVlFTsNpY1dXQMgHWKlKauDyklJqsIm5LgAEDBgwYMGDAgL8J/tc/6wDT0oABAwYMGDBgwIDviQcL3siaH87WMAAAAABJRU5ErkJggg==":
return 7
if imagen == "iVBORw0KGgoAAAANSUhEUgAAADwAAAA8CAYAAAA6/NlyAAABKElEQVR42u3awQmDQBCF4VRoARbg3QIswAIswLsFWIAFWIB3C/DuhjewIBIJBLKJs//AHMzJz5kddzGPkFk8AAMGDDgb8L7vloBpacBfi3ddB/jfb9hdS2cHZmgBBgwYMGDAgAF/GsMwhKZpLPu+D9u2+QULWRRF6LrOsGVZWqZEJwNP02RYVTjGuq72m/DuwIIK96rqSnfgcRwNrKoeo6oqa3F3YK3Tuq4thdZ1XMfLsvgcWoIKqErH1Np2OaWPFRZynufQtq3fCsf2Pb+CNLD0ENyBr6bx1fS+PVjtq4l8Dk1oVd4dWGs27rJiW8fqHjcjrqa03sXnKZ1yl/Wz05KqrUx9cOB4mD34jt9/AdPSgAEDBgwYMGDAgDMB88c0WhowYMCAM4on7WCo8wD8C34AAAAASUVORK5CYII=":
return 8
if imagen == "iVBORw0KGgoAAAANSUhEUgAAADwAAAA8CAYAAAA6/NlyAAABGUlEQVR42u3ZsQ2DMBCFYSZkAAagZwAGYAAGoGcAxmAAegag56JnyRFCThXFwOU/yUXcfbw7Y5TC/qwKwIABAwYMGDBgwIABAwb8NPC+72EBpqUBAwYMGPD3tW2bDcNgbduGNY6jX/C6rlZVldV1HaCC67fgLsF93wegUo41z7OVZWnTNPkDC6tUz9U0jXVd5w+sJFMzG+fZHVize4apvXPPcTaw5vSYsrCaa+25BKuEFTAupZ5K3tXFQ8nqdF6WxfcMfyq3YLWwZjb1ukrtu7l4qJ3Ph1bccwUWUBeM46GlB5DzlnXJDOtOrURzpsrnIWDAgAEDBgwY8CXgX/xFCZiWBnxv8JPGALDHOgYCmEPrhikB5tAC/K4XTmirmSiKs5wAAAAASUVORK5CYII=":
return 9
def _send_pinpad(self, digits):
logger.info(sys._getframe().f_code.co_name)
fields = {"pinPositions": digits}
self._add_headers([('Content-Type', 'application/json; charset=utf-8')])
req = self.br.request_class(LOGIN_ENDPOINT, headers=self._convert_headers())
req.get_method = lambda: "PUT"
try:
res = self.br.open(req, data=json.dumps(fields))
except Exception as e:
msg = "Error en PUT pinpad"
logger.error("%s\nURL: %s\nData: %s\nHeaders: %s\nResp: %s\nException: %s",
msg, req.get_full_url(), fields, req.headers, e.read(), e)
raise e
res_json = json.loads(res.read())
return res_json["ticket"]
def _post_auth(self, ticket):
logger.info(sys._getframe().f_code.co_name)
headers = {'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'}
data = "ticket=%s&device=desktop" % ticket
req = self.br.request_class(POST_AUTH_ENDPOINT, headers=headers)
try:
res = self.br.open(req, data=data)
except mechanize.HTTPError as e:
msg = "Error en post_auth"
logger.error("%s\nURL: %s\nData: %s\nHeaders: %s\nResp: %s\nException: %s",
msg, req.get_full_url(), data, req.headers, e.read(), e)
raise e
def _login(self):
logger.info(sys._getframe().f_code.co_name)
logger.info("dni: %s, fecha: %s, pass: %s" % (self.config.get_dni(), self.config.get_fecha(), self.config.get_pass()))
if not self.config.get_dni() or not self.config.get_fecha() or not self.config.get_pass():
raise Exception("Falta cargar los datos: config.yml")
params = {
"loginDocument": {
"documentType": 0,
"document": self.config.get_dni()
},
"birthday": self.config.get_fecha(),
"companyDocument": None,
"device": 'desktop'
}
data = json.dumps(params)
self._add_headers([("Accept", 'application/json, text/javascript, */*; q=0.01')])
self._add_headers([('Content-Type', 'application/json; charset=utf-8')])
req = self.br.request_class(LOGIN_ENDPOINT, headers=self._convert_headers())
logger.info("Login headers: %s", self.br.addheaders)
try:
res = self.br.open(req, data=data)
except Exception as e:
logger.error("Error enviando login. URL: %s. Data: %s", req.get_full_url(), data)
raise e
try:
res_txt = res.read()
pinData = json.loads(res_txt)
except ValueError as ex:
logger.exception("Error obtiniendo el JSON del login: %s", res_txt)
raise ex
logger.info("pinPositions: %s", pinData["pinPositions"])
try:
pinpad = process_pin_images(pinData["pinpad"])
except Exception as e:
logger.error("Exception en process_pin_images: %s", e)
logger.error(pinData["pinpad"])
raise e
logger.info("Pinpad: %s", pinpad)
password = self.config.get_pass()
digits = []
for i in range(0, 3):
digits.append(int(password[pinData["pinPositions"][i] - 1]))
logger.info("Digits: %s", digits)
codecDigits = []
for i in digits:
codecDigits.append(pinpad.index(i))
logger.info("codecDigits: %s", codecDigits)
try:
ticket = self._send_pinpad(codecDigits)
except Exception as e:
logger.error("Exception en send_pinpad: %s", e)
raise e
logger.info("ticket: %s", ticket)
self._post_auth(ticket)
return "Ok"
def _fetch_products(self):
logger.info(sys._getframe().f_code.co_name)
self._add_headers([("Accept", '*/*')])
self._add_headers([('Content-Type', 'application/json; charset=utf-8')])
req = self.br.request_class(PRODUCTS_ENDPOINT)
try:
res = self.br.open(req)
products = json.loads(res.read())
return products
except Exception as e:
logger.error("Error obteniendo cuentas: %s", e)
raise e
def _fetch_favoritos(self):
logger.info(sys._getframe().f_code.co_name)
req = self.br.request_class(FAVORITOS_ENDPOINT)
try:
res = self.br.open(req)
except mechanize.HTTPError as e:
msg = "Error en el get para obtener favoritos"
logger.error("%s\nURL: %s\nHeaders: %s\nResp: %s\nException: %s",
msg, req.get_full_url(), req.headers, e.read(), e)
raise e
try:
res_txt = res.read()
res_json = json.loads(res_txt)
except ValueError as ex:
logger.error("Error obteniendo el JSON del get para obtener favoritos")
logger.error(res.read())
raise ex
return res_json.get("products")
def get_products(self):
if self.products is None:
self.products = self._fetch_products()
self.config.write_products(self.products)
return self.products
def get_favoritos(self):
if self.favoritos is None:
self.favoritos = self._fetch_favoritos()
self.config.write_favoritos(self.favoritos)
return self.favoritos
def get_account_from_alias(self, alias):
"""
Busca en las productos de ing alguna cuenta que su alias o nombre sea como el del parametro
:param alias: nombre o alias de la cuenta que buscamos
:return: objecto del producto
"""
products = self.get_products()
p = filter(lambda x: x.get("alias") == alias.decode("utf-8"), products)
if len(p) > 1:
raise CuentaDuplicada("Existe mas de una cuenta con ese alias")
elif len(p) == 1:
return p.pop().get("productNumber")
p = filter(lambda x: x.get("name") == alias.decode("utf-8"), products)
if len(p) > 1:
raise CuentaDuplicada("Existe mas de una cuenta con ese nombre")
elif len(p) == 1:
return p.pop().get("productNumber")
raise CuentaNotFound("No existe ninguna cuenta con ese alias o nombre")
def get_cuenta_favorito(self, key):
"""
Devuelve el objeto producto entero a partir de una key.
Primero obtiene los favoritos y los productos para poder devolver los datos
Ejemplo de key: PEPE MORA # BANCO BILBAO
Ejemplo de key: PEPE MORA # Cuenta SIN NOMINA internet
:param key: formada por el titular de la cuenta y el nombre del banco o alias (para cuentas propias)
:return: titular, banco, iban, num_cuenta
"""
products = self.get_products()
favoritos = self.get_favoritos()
titular,alias = map(lambda m: m.rstrip().lstrip(), key.split("#"))
try:
productNumber = self.get_account_from_alias(alias)
except CuentaNotFound as e:
logger.debug(e)
else:
return titular, productNumber
        # No own account matched, so the key must contain a bank name
banco = alias
c = [v for k,v in favoritos.iteritems() if v.get(u"bank") == banco.decode("utf-8") and
v.get(u"beneficiary") == titular.decode("utf-8")]
if len(c) > 1:
raise CuentaDuplicada("Se ha encontrado mas de una cuenta favorita para ese nombre y ese banco")
elif len(c) == 0:
raise CuentaNotFound("No se ha encontrado ninguna cuenta para el favorito")
return titular,c.pop().get("productNumber")
def get_alias(self, productNumber):
"""
Devuelve el alias o nombre asociado a un productNumber
:param productNumber: numero de cuenta del que queremos el alias
:return: nombre o alias de la cuenta asociada
"""
products = self.get_products()
try:
cuenta = filter(lambda x: x.get("productNumber") == productNumber, products).pop()
if cuenta.has_key("alias"):
return cuenta.get("alias")
return cuenta["name"]
except Exception:
pass
return None
def get_card_alias(self, card):
"""
A partir de un objeto de tipo tarjeta, devolver el alias, o nombre, de la cuenta asociada
:param card: objeto tipo card con parametro associatedAccount
:return: alias de la cuenta asociada o None
"""
try:
return self.get_alias(card.get("associatedAccount").get("productNumber"))
except Exception:
pass
return None
def fetch_last_transactions(self, account):
logger.info(sys._getframe().f_code.co_name)
end_date = date.today()
        start_date = date.today() - timedelta(days=30)  # TODO: make this value configurable
params = {
"fromDate": start_date.strftime('%d/%m/%Y'),
"toDate": end_date.strftime('%d/%m/%Y'),
"limit": 6, # TODO: parametrizar este valor
"offset": 0
}
logger.info("Params para coger transactions: %s", params)
self._add_headers([("Accept", 'application/json, text/javascript, */*; q=0.01')])
self._add_headers([('Content-Type', 'application/json; charset=utf-8')])
req = self.br.request_class("%s/%s/movements?%s" % (
PRODUCTS_ENDPOINT, account["uuid"], urllib.urlencode(params)))
logger.info("Query a %s", req.get_full_url())
try:
start_time = time.time()
res = self.br.open(req)
req_time = time.time() - start_time
except Exception as e:
logger.error("Error solicitando movimientos: %s", e)
raise e
logger.info("Tiempo de la request: %s", req_time)
transactions = json.loads(res.read())
return_transactions = []
for t in transactions.get("elements", []):
if t.get("amount") > 0:
amount = colored(t.get("amount"), 'green')
else:
amount = colored(t.get("amount"), 'red')
if t.get("balance") > 0:
balance = colored(t.get("balance"), 'green')
else:
balance = colored(t.get("balance"), 'red', attrs=["bold"])
return_transactions.append([t.get("effectiveDate"), t.get("description"), amount, balance])
return return_transactions
def fetch_pending_transactions(self, account):
logger.info(sys._getframe().f_code.co_name)
try:
res_json = self.fetch("%s/%s/pending-movements" % (PRODUCTS_ENDPOINT, account["uuid"]))
except Exception as ex:
logger.exception("Error al obtener los movimientos pendientes")
raise ex
        # Fetch the details for each pending transaction
return_transactions = []
for tr in res_json:
uuid = tr["uuid"]
try:
t = self.fetch("%s/%s/pending-movements/%s" % (PRODUCTS_ENDPOINT, account["uuid"], uuid))
except Exception as ex:
logger.exception("Error al obtener los movimientos pendientes")
raise ex
if t.get("amount") > 0:
amount = colored(t.get("amount"), 'green')
else:
amount = colored(t.get("amount"), 'red')
balance = colored("pendiente", 'yellow')
effectiveDate = colored(t.get("effectiveDate"), 'yellow')
comment = colored(t.get("comment"), 'yellow')
return_transactions.append([effectiveDate, comment, amount, balance])
return return_transactions
def fetch(self, endpoint, headers=None, data=None, method=None):
"""
Realiza una peticion a ING el endpoint indicado y devuelve el json parseado
:param endpoint: url donde realizar la peticion
:param headers: listado de cabeceras opcional
:param data: si esta definido este parametro se envia un POST
:return: JSON parseado a objeto python
"""
if headers:
req = self.br.request_class(endpoint, headers=headers)
else:
req = self.br.request_class(endpoint)
if method:
req.get_method = lambda: method
try:
res = self.br.open(req, data=data)
res_txt = res.read()
res_json = json.loads(res_txt)
except mechanize.HTTPError as e:
logger.error("Error enviando peticion\nURL: %s\nData: %s\nHeaders: %s\nResp: %s\nException: %s",
req.get_full_url(), data, req.headers, e.read(), e)
raise e
except ValueError as e:
logger.error("Error obteniendo JSON de la respuesta de ING")
logger.error(res.read())
raise e
return res_json
def tarjetaCoordenadas(self, position):
"""
Obtiene el pinpad del endpoint y nos devuelve un array con la respuesta que tenemos que devolver
:param position: posicion de la tarjeta de coordenadas que nos piden
:return: array con las posiciones del pinpad que debe enviarse
"""
        # Fetch the pinpad
try:
res_json = self.fetch(CARD_ENDPOINT)
except Exception as ex:
logger.exception("Error obteniendo el pinpad")
raise ex
        # Decode the pinpad
try:
pinpad = process_pin_images(res_json["pinpad"])
except Exception as e:
logger.error("Exception en process_pin_images: %s", e)
logger.error(res_json["pinpad"])
raise e
logger.info("Pinpad: %s", pinpad)
        # Get the coordinate we need
coordenada = self.config.get_coordenada(position)
codecDigits = []
for i in map(int, str(coordenada)):
codecDigits.append(pinpad.index(i))
logger.info("codecDigits: %s", codecDigits)
return codecDigits
|
adrianlzt/ingdirect_cli
|
browser.py
|
browser.py
|
py
| 20,033 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21276871246
|
import mindspore as ms
import numpy as np
from mindspore import Parameter, Tensor
from mindspore.ops import operations as P
from mindspore_rl.agent import Trainer, trainer
class QMIXTrainer(Trainer):
"""
This is the trainer class of QMIX, which provides the logic of this algorithm.
"""
def __init__(self, msrl, params):
super().__init__(msrl)
self.msrl = msrl
self.batch = params["batch_size"]
self.false = Tensor([False], ms.bool_)
self.true = Tensor(True, ms.bool_)
self.zero_int = Tensor(0, ms.int32)
self.zero_float = Tensor(0, ms.float32)
self.one_int = Tensor(1, ms.int32)
self.one_float = Tensor(1, ms.float32)
self.zeros = P.Zeros()
self.ones = P.Ones()
self.expand_dims = P.ExpandDims()
self.concat = P.Concat(axis=1)
self.reshape = P.Reshape()
self.cast = P.Cast()
self.onehot = P.OneHot()
self.zeros_like = P.ZerosLike()
self.assign = P.Assign()
self.equal = P.Equal()
self.select = P.Select()
self.stack = P.Stack()
env_config = self.msrl.collect_environment.config
observation_space = self.msrl.collect_environment.observation_space
action_space = self.msrl.collect_environment.action_space
done_space = self.msrl.collect_environment.done_space
reward_space = self.msrl.collect_environment.reward_space
self.num_agent = env_config["num_agent"]
self.agent_id = Tensor(
np.expand_dims(np.eye(self.num_agent), 0).reshape(self.num_agent, -1),
ms.float32,
)
self.episode_limit = env_config["episode_limit"]
self.action_dim = action_space.num_values
self.observation_dim = observation_space.shape[-1]
self.global_obs_dim = env_config["global_observation_dim"]
self.num_envs = 1
self.reward_dim = 1 if len(reward_space.shape) == 0 else reward_space.shape[-1]
self.done_dim = 1 if len(done_space.shape) == 0 else done_space.shape[-1]
self.epsilon_steps = Parameter(
Tensor(0, ms.int32), requires_grad=False, name="epsilon_steps"
)
self.squeeze = P.Squeeze(axis=0)
self.greater_equal = P.GreaterEqual()
def trainable_variables(self):
"""trainable variables uses to save model"""
trainable_variables = {
"policy_net": self.msrl.learner.policy_net,
"mixer_net": self.msrl.learner.mixer_net,
}
return trainable_variables
@ms.jit
def train_one_episode(self):
total_reward = self.zero_float
steps = 0
loss = self.zero_float
hy = self.zeros((self.num_agent, 64), ms.float32)
episode_local_obs = []
episode_global_obs = []
episode_action = []
episode_reward = []
episode_done = []
episode_done_env = []
avail_action = self.ones((self.num_agent, self.action_dim), ms.int32)
local_obs = self.msrl.collect_environment.reset()
# local_obs = local_obs.squeeze(0)
while steps < self.episode_limit:
global_obs = local_obs.reshape((-1,))
action, hy = self.msrl.actors.get_action(
trainer.COLLECT, (local_obs, hy, avail_action, self.epsilon_steps)
)
new_local_obs, reward, done = self.msrl.collect_environment.step(
action.astype(ms.int32)
)
done = self.expand_dims(done, -1)
reward = reward[0]
done = done[0]
done_envs = done.all()
episode_local_obs.append(local_obs)
episode_global_obs.append(global_obs)
episode_action.append(action)
episode_reward.append(reward)
episode_done.append(done)
episode_done_env.append(self.expand_dims(done_envs, -1))
local_obs = new_local_obs
total_reward += reward
steps += 1
episode_local_obs.append(local_obs)
episode_global_obs.append(local_obs.reshape((-1,)))
episode_local_obs = self.stack(episode_local_obs)
episode_global_obs = self.stack(episode_global_obs)
episode_action = self.stack(episode_action)
episode_reward = self.stack(episode_reward)
episode_done = self.stack(episode_done)
episode_done_env = self.stack(episode_done_env)
self.msrl.replay_buffer_insert(
(
episode_local_obs,
episode_global_obs,
episode_action,
episode_reward,
episode_done,
episode_done_env,
)
)
self.epsilon_steps += steps
if self.greater_equal(self.msrl.buffers.count, self.batch):
loss = self.msrl.agent_learn(self.msrl.replay_buffer_sample())
return loss, total_reward, steps
@ms.jit
def evaluate(self):
"""Evaluation function"""
total_reward = self.zero_float
hy = self.zeros((self.num_agent, 64), ms.float32)
avail_action = self.ones((self.num_agent, self.action_dim), ms.int32)
steps = 0
local_obs = self.msrl.eval_environment.reset()
while steps < self.episode_limit:
action, hy = self.msrl.actors.get_action(
trainer.COLLECT, (local_obs, hy, avail_action, self.epsilon_steps)
)
            new_local_obs, reward, _ = self.msrl.eval_environment.step(
                action.astype(ms.int32)
            )
total_reward += reward
local_obs = new_local_obs
return total_reward
|
mindspore-lab/mindrl
|
mindspore_rl/algorithm/qmix/qmix_mpe_trainer.py
|
qmix_mpe_trainer.py
|
py
| 5,690 |
python
|
en
|
code
| 21 |
github-code
|
6
|
7725885886
|
import pandas as pd
from sqlalchemy import create_engine
from influxdb import InfluxDBClient
import time
def connectSQL():
connection_str = 'mssql+pyodbc://royg:Welcome1@SCADA'
engine = create_engine(connection_str)
conn = engine.connect()
return conn
def getData(conn,interval):
if (interval==1):
tabname='data_values_min_4_2017'
else:
tabname='data_values_'+str(interval)+'min_4_2017'
queryResult = conn.execute('''
-- SELECT TOP 10 RTRIM(LTRIM(REPLACE(REPLACE(dd.name,' ','\ '),',','\,'))) measurement,
SELECT LTRIM(dd.name) measurement,
CAST(dd.osi_key AS VARCHAR) AS [key],
CAST(dd.station_id AS VARCHAR) site,
SUBSTRING(dd.[name],1,1) array,
dt.description data_type,
'''+str(interval)+''' interval,
CAST(VALUE AS VARCHAR(30)) value,
CONVERT(VARCHAR(19),d.u_time,126)+'Z' timestamp
FROM [dbo].'''+tabname+''' d WITH(NOLOCK)
JOIN tempdb..dd1 dd
ON dd.osi_key = d.osi_key
JOIN dbo.stations s
ON s.station_id = dd.station_id
JOIN dbo.data_types dt
ON dt.data_type = d.data_type
-- WHERE u_time BETWEEN '2017-04-19 00:00:00' and '2017-04-19 01:00:00'
WHERE u_time > DATEADD(mi,-3,CURRENT_TIMESTAMP)
''')
pNodeIDsDF = pd.DataFrame(queryResult.fetchall())
if pNodeIDsDF.empty == False:
pNodeIDsDF.columns = queryResult.keys()
return pNodeIDsDF
c=connectSQL()
host = '50.23.122.133'
port = 8086
user = 'roy'
password = 'Kaftor'
dbname = 'w209'
client = InfluxDBClient(host, port, user, password, dbname)
rc=0
while(True):
for interval in (15,5,1):
df = getData(c, interval)
for node in df.itertuples():
# print(node[8])
json_body = [
{
"measurement": node[1],
"tags": {
"key": node[2],
"site": node[3],
"array": node[4],
"data_type": node[5],
"interval": node[6]
},
"time": node[8],
"fields": {
"value": float(node[7]) # str(float(node[7]))
}
}
]
rc = client.write_points(json_body, time_precision='s')
print('1 row written for interval {0}'.format(interval))
if (rc == 0):
print("reconnecting...")
c = connectSQL()
client = InfluxDBClient(host, port, user, password, dbname)
if (rc == 1):
print('{0} rows written for interval {1}'.format(df.shape[0],interval))
time.sleep(60)
|
thongnbui/MIDS_251_project
|
python code/SendToInflux.py
|
SendToInflux.py
|
py
| 2,797 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2298089969
|
import concurrent.futures
from datetime import datetime
import pymongo as pmg
import os
import uuid
from dotenv import load_dotenv
load_dotenv()
import pytz
tz_ind = pytz.timezone('Asia/Kolkata')
now = datetime.now(tz_ind)
class Logit:
"""
logger class
use this class to log the execution of the program.
code for usage:
#>>>from logger.logit import Logit
#>>>l = Logit()
#>>>l.log("scope","message") # where scope = function name or class name and message = any string
"""
def __init__(self):
# self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=3)
# DEFAULT_CONNECTION_URL = 'localhost:27017'
# client = pmg.MongoClient(DEFAULT_CONNECTION_URL)
client = pmg.MongoClient(os.getenv('connection'))
self.conn = client["execution_log"]["log"]
def UPDATE(self, DICT):
self.conn.update_one({"_id": int(str(datetime.now().date()).replace("-", ""))}, {'$push': DICT})
def INSERT(self, DICT):
self.conn.insert_one(DICT)
def log(self, scope, msg):
id_obj = self.conn.find({}, {"_id"})
idxt = []
for idx in id_obj:
idxt.append(idx["_id"])
# self.conn.insert_one({"_id":int(str(datetime.now().date()).replace("-","")),f"{uuid.uuid1()}":f"{str(datetime.now().date())} {str(datetime.now().strftime('%H:%M:%S'))} {scope} {msg}"})
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
if int(str(datetime.now().date()).replace("-", "")) in idxt:
executor.submit(self.UPDATE, {
f"{uuid.uuid1()}": f"{str(datetime.now().date())} {str(datetime.now().strftime('%H:%M:%S'))} {scope} {msg}"})
else:
executor.submit(self.INSERT, {"_id": int(str(datetime.now().date()).replace("-", "")),
f"{uuid.uuid1()}": f"{str(datetime.now().date())} {str(datetime.now().strftime('%H:%M:%S'))} {scope} {msg}"})
def userlog(self, userId, action, performedOn, categoryId, productId, totalPayment):
client = pmg.MongoClient(os.getenv('connection'))
self.conn = client["Clean_user"]["CleanUser"]
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
executor.submit(self.conn.insert_one, {"user_id": userId, "action": action, "performed_on": performedOn,
"category_ID": categoryId, "productId": productId,
"totalPayment": totalPayment, "year": now.year, "month": now.month,
"day": now.day, "hour": now.hour, "minute": now.minute,
'second': now.second})
#l=Logit()
#l.userlog(userId=8, action='clicked', performedOn='category', categoryId=4, productId="",
# totalPayment="")
# if __name__=="__main__":
# l = Logit()
# for i in range(10):
# l.log("none","I'm a log")
# l.log("nope","test")
|
sanjeevan121/ecommerce
|
logger/logit.py
|
logit.py
|
py
| 3,100 |
python
|
en
|
code
| 1 |
github-code
|
6
|
71749351229
|
from json import loads
from kafka import KafkaConsumer
consumer = KafkaConsumer(
'test-topic',
bootstrap_servers=['0.0.0.0:9092'],
auto_offset_reset='earliest',
enable_auto_commit=True,
group_id='test-json-group',
value_deserializer=lambda x: loads(x.decode('utf-8')))
for message in consumer:
print("%s:%d:%d: key=%s value=%s" % (message.topic, message.partition,
message.offset, message.key,
message.value))
|
makseli/kafka-docker-python
|
consumer-json.py
|
consumer-json.py
|
py
| 522 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19521121631
|
import socket
import threading
from queue import Queue
import sys
import time
import logging
import json
# pip install PyExecJS
#import execjs
# # 1. On Windows, execjs runs without any extra dependencies; other JS runtimes can also be used
# # The default JS runtime on Windows:
# execjs.get().name
# returns: JScript
# # The author's Windows machine has Node.js installed, so the return value differs:
# execjs.get().name
# returns: Node.js(V8)
#
# # 2. On Ubuntu a JS runtime dependency has to be installed; the author's environment uses PhantomJS
# execjs.get().name
# returns: PhantomJS
#
# # 3. Runtimes that can execute execjs, as listed in its source:
# PyV8 = "PyV8"
# Node = "Node"
# JavaScriptCore = "JavaScriptCore"
# SpiderMonkey = "SpiderMonkey"
# JScript = "JScript"
# PhantomJS = "PhantomJS"
# SlimerJS = "SlimerJS"
# Nashorn = "Nashorn"
# Run JavaScript code
#print(execjs.eval("new Date"))
class ClientLog(object):
def __init__(self, filename):
self.logger = logging.getLogger(filename)
log_format = logging.Formatter("%(asctime)s %(filename)s第%(lineno)s行 %(levelname)s: %(message)s")
file_handler = logging.FileHandler(filename=filename, encoding="utf-8")
file_handler.setFormatter(log_format)
self.logger.addHandler(file_handler)
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(log_format)
self.logger.addHandler(stream_handler)
self.logger.setLevel(logging.DEBUG)
class ClientLogObject():
client_logger = ClientLog("client.log").logger
client_logger = ClientLogObject().client_logger
# Next we write a simple client that connects to the service created above. The port number is 9999.
# socket.connect((hostname, port)) opens a TCP connection to the host `hostname` on port `port`.
# Once connected we can read data from the server; remember to close the connection when finished.
# Create the socket object (AF_INET, SOCK_STREAM)
# tcpc_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Get the local host name
# HOST = socket.gethostname()
class CommWithServer(object):
def __init__(self, host="10.30.99.42", port=9996, role=None):
client_logger.debug("执行CommWithServer.__init__()")
self.buffsize = 1024
        # UDP receives at most 100 MB of data
self.udp_buffsize = 104857600
self.addr = (host, port)
self.requeset_fun_dict = {}
self.player = role
def recv_server_tcp(self, timeout=10):
client_logger.debug("执行CommWithServer.recv_server_tcp()")
isretry = False
while True:
            # Receive a message from the server over the TCP connection
try:
data = self.tcp_socket.recv(self.buffsize)
if not data:
if not isretry:
stime = time.time()
isretry = True
if time.time()-stime > timeout:
client_logger.warning("服务器连接不上,或服务器消息一直丢失,或服务器一直发空消息,断开连接")
                        # Close the connection to the server
self.tcp_socket.close()
return -1
else:
client_logger.warning("读取到了服务器的空消息,服务器可能有异常,如丢包、发错了消息,关闭了服务器等,重试中...")
time.sleep(1)
continue
except ConnectionResetError:
client_logger.info("服务器关闭了连接")
self.tcp_socket.close()
return -1
            # Decode the data after receiving it
data = data.decode("utf-8")
self.after_recv_server_msg_doing(data)
def after_recv_server_msg_doing(self, data):
client_logger.debug("执行CommWithServer.after_recv_server_msg_doing()")
data = json.loads(data)
client_logger.info("接收到服务端发来的消息:%s" % data)
request_type = data["request_type"]
if request_type == "update_player":
client_logger.warning(data["push_msg"])
elif request_type == "login":
client_logger.info("登录成功!")
self.after_login_update_data(data["role_data"])
elif request_type == "push":
client_logger.warning(data["push_msg"])
elif request_type == "logout":
client_logger.info(data["push_msg"])
self.local.requeset_fun(data)
else:
client_logger.warning("接收到服务端发来的请求, 但request_type没有定义服务器发来request_type类型,因此没有做任何处理,"
"服务器消息:%s" % data)
def send_server_tcp(self, msg):
client_logger.debug("执行CommWithServer.send_server_tcp()")
client_logger.debug("请求:%s" % msg)
msg = json.dumps(msg)
        # Send a message to the server; it has to be encoded to bytes before transmission
if not msg:
client_logger.warning("不能发空消息给服务器")
return 0
try:
self.tcp_socket.send(msg.encode("utf-8"))
except ConnectionAbortedError:
client_logger.info("服务器关闭了连接")
self.tcp_socket.close()
return -1
except OSError:
client_logger.info("服务器套接字已经关闭了")
self.tcp_socket.close()
return -1
except ConnectionResetError:
client_logger.error("无法连接到服务器...服务器ip:%s,端口号:%s" % self.addr)
self.tcp_socket.close()
return 1
def connect_server_tcp(self):
client_logger.debug("执行CommWithServer.connect_server_tcp()")
self.tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
            # Try to connect to the server at the given host and port
self.tcp_socket.connect(self.addr)
except ConnectionRefusedError:
client_logger.error("无法连接到服务器...服务器ip:%s,端口号:%s" % self.addr)
self.tcp_socket.close()
return 0
except TimeoutError:
self.tcp_socket.close()
client_logger.error("连接服务器超时...服务器ip:%s,端口号:%s" % self.addr)
return -1
recv_msg_thread = threading.Thread(target=self.recv_server_tcp, args=(self.tcp_socket,))
recv_msg_thread.start()
return 1
def request_server(self, request_concent, key, request_fun=None):
client_logger.debug("执行CommWithServer.request_server()")
        # Send a request to the server; if the server responds, request_fun is executed with the server's reply
if self.send_server_tcp(request_concent) == 1:
self.requeset_fun_dict[key] = request_fun
def login_server(self, user_name, passwd):
client_logger.debug("执行CommWithServer.login_server()")
client_logger.debug("开始连接服务器")
if self.connect_server_tcp():
login_msg = {"request_type": "login", "user_name": user_name, "passwd": passwd}
self.send_server_tcp(login_msg)
else:
client_logger.debug('登录服务器失败 %s')
self.player.jump_hight = 0.75
self.player.role_id = "00000"
def after_login_update_data(self, data):
client_logger.debug("执行CommWithServer.after_login_update_data()")
client_logger.debug('服务器:%s' % data)
self.player.user_name = data["user_name"]
self.player.role_id = data["role_id"]
self.player.role_name = data["role_name"]
self.player.set_pos(tuple(data["pos"]))
self.player.jump_hight = data["jump_hight"]
def connect_server_udp(self):
self.udp_socket = socket.socket(type=socket.SOCK_DGRAM)
return 1
def recev_server_udp(self):
        # The client receives the value sent by the server
data, server_addr = self.udp_socket.recvfrom(self.udp_buffsize)
data = data.decode("utf-8")
self.after_recv_server_msg_doing(data)
def send_server_udp(self, msg):
if not msg:
client_logger.warning("不能发空消息给服务器")
return 0
self.udp_socket.sendto(msg.encode("utf-8"), self.addr)
return 1
|
optimjiang/my_3d_game
|
comm_with_server.py
|
comm_with_server.py
|
py
| 8,394 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1828590890
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Jackson O'Donnell
# [email protected]
from __future__ import division, print_function
import healpy as hp
import numpy as np
from .beam import r3_channel_beams
from .constants import (ffp8_nu4_central_freqs, ffp8_nu6_central_freqs)
def make_big_R(R4, R6, base_nu4, base_nu6,
include_T_only=True):
if include_T_only:
nxlms = 16
else:
nxlms = 14
output = np.zeros((nxlms, 2), dtype=np.complex)
for i in range(9):
n4 = ffp8_nu4_central_freqs[i] / base_nu4
n6 = ffp8_nu6_central_freqs[i] / base_nu6
thisR = np.eye(2) + R4*n4**4 + R6*n6**6
if i < 7:
output[2*i:2*(i+1), :] += thisR
elif include_T_only:
output[2*7 + (i - 7), :] += thisR[0, :]
else:
break
return output
def rayleigh_residual(data, beams, R4, R6, base_nu4, base_nu6, Xs,
normalization=1):
outputs = []
R = make_big_R(R4, R6, base_nu4, base_nu6)
# print('data shape:', data.shape)
# print('xs shape:', Xs.shape)
for m, (datum, X) in enumerate(zip(data, Xs)):
beamed_rayleighed = beams * np.dot(R, X)
diff = (datum.flatten() - beamed_rayleighed) / normalization
# print('diff shape, m = {}:'.format(m), diff.shape)
outputs.append(diff.real)
# For m == 0, the imaginary component should be zero
if m > 0:
outputs.append(diff.imag)
return np.concatenate(outputs)
def pack_args(beams, r4, r6, xs, nu_ref, beam_ref, ell):
# Skip `beam_ref`
beams = np.concatenate((beams[:2*beam_ref], beams[2*(beam_ref+1):]))
xs = np.dstack((xs.real, xs.imag))
return np.concatenate((beams.flatten(), r4.flatten(), r6.flatten(), xs.flatten()))
def unpack_args(args, nu_ref, beam_ref, ell, reference_beams=r3_channel_beams):
nbeams = 14
beams, args = args[:nbeams], args[nbeams:]
new_beams = np.zeros(16)
for i in range(7):
if i == beam_ref:
new_beams[2*i:2*(i + 1)] = reference_beams[beam_ref, ell]
continue
new_beams[2*i:2*(i+1)], beams = beams[:2], beams[2:]
assert beams.size == 2
new_beams[-2:] = beams
r4, args = args[:4].reshape((2, 2)), args[4:]
r6, args = args[:4].reshape((2, 2)), args[4:]
if (args.size % 4) != 0:
raise ValueError('Invalid argument - not sure how to parse')
xs = args.reshape((args.size // 4, 2, 2))
xs = xs[:, :, 0] + 1j * xs[:, :, 1]
return new_beams, r4, r6, xs
def make_residual_function(alms, nu_ref, beam_ref, ell, reference_beams=r3_channel_beams):
# Alms should be (9 channels, 3 fields (TEB), hp.Alm.getsize(lmax))
assert len(alms.shape) == 3
assert alms.shape[0] == 9
assert alms.shape[1] == 3
    lmax = hp.Alm.getlmax(alms.shape[-1])
    ells, ems = hp.Alm.getlm(lmax)
all_Ts_data = alms[:, 0, ells == ell]
all_Es_data = alms[:, 1, ells == ell]
normalization_T = np.sqrt((all_Ts_data.conj() * all_Ts_data).real.sum() / (2 * ell + 1))
normalization_E = np.sqrt((all_Es_data.conj() * all_Es_data).real.sum() / (2 * ell + 1))
# Provide a normalization for each T & E
normalization = np.zeros((8, 2))
normalization[:, 0] = normalization_T
normalization[-1, :] = normalization_T
normalization[:-1, 1] = normalization_E
normalization[normalization == 0] = 1
# big_normalization = np.concatenate([normalization.flatten()]*(ell + 1))
# print('big norm:', big_normalization.shape)
all_data = np.zeros((ell + 1, 8, 2), dtype=np.complex)
for m in range(ell + 1):
# First seven channels - T & E
all_data[m, :-1, :] = alms[:7, :2, (ells == ell) & (ems == m)][:, :, 0]
# Last channel - just T
all_data[m, -1, :] = alms[7:9, 0, (ells == ell) & (ems == m)][:, 0]
base_nu4 = ffp8_nu4_central_freqs[nu_ref]
base_nu6 = ffp8_nu6_central_freqs[nu_ref]
def residual(args):
beams, r4, r6, Xs = unpack_args(args, nu_ref, beam_ref, ell)
res = rayleigh_residual(all_data.reshape((ell + 1, -1)),
beams, r4, r6,
base_nu4, base_nu6, Xs,
normalization=normalization.flatten())
# print('residual shape:', res.shape)
return res
default_beams = []
for i in range(7):
default_beams += [reference_beams[i, ell]]*2
default_beams.extend(reference_beams[-2:, ell])
r4 = np.zeros((2, 2))
r6 = np.zeros((2, 2))
Xs = []
for m in range(ell + 1):
X = alms[beam_ref, :2, (ells == ell) & (ems == m)][0]
Xs.append(X / reference_beams[beam_ref, ell])
return residual, pack_args(np.array(default_beams), r4, r6, np.array(Xs), nu_ref, beam_ref, ell)
|
jhod0/lgmca_planck_tools
|
lgmca_planck_tools/planck/fitting.py
|
fitting.py
|
py
| 4,829 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10230066135
|
from typing import Optional
import tiktoken
from evals.elsuite.ballots.prompts import (
control_chat_prompt,
control_text_template,
manipulation_chat_template,
manipulation_text_template,
text_prompt,
voter_chat_prompt,
voter_text_prompt,
)
from evals.registry import is_chat_model
LOGIT_BIAS_MAX = 100
LOGIT_BIAS_MIN = -100
VOTER_DECISION_TOKS = [" Yes", " No"]
BASE_REMOVED_TOKS = [" User", " Assistant", " AI"]
YES_DIRECTION = "yes"
NO_DIRECTION = "no"
CONTROL_DIRECTION = "control"
DOUBLE_NEWLINE_TOKS = [
"\n\n",
]
ALL_DOUBLE_NEWLINE_TOKS = DOUBLE_NEWLINE_TOKS + [
".\n\n",
";\n\n",
" }\n\n",
");\n\n",
"}\n\n",
")\n\n",
"\n\n\n\n",
">\n\n",
"\n\n\n",
"();\n\n",
":\n\n",
" {\n\n",
'"\n\n',
"?\n\n",
".\n\n\n\n",
"...\n\n",
'."\n\n',
"!\n\n",
"};\n\n",
"';\n\n",
"]\n\n",
" */\n\n",
"()\n\n",
".\u201d\n\n",
" });\n\n",
'");\n\n',
"));\n\n",
'";\n\n',
"\u3002\n\n",
]
def toks_to_id(model: str, toks: list[str], validation: Optional[list[str]] = None) -> list[str]:
if model == "dummy":
model = "gpt-3.5-turbo" # pretend it's a chat model
encoder = tiktoken.encoding_for_model(model)
toks = [encoder.encode(tok) for tok in toks]
assert all(len(tok) == 1 for tok in toks), f"Invalid toks {toks}"
toks = [str(tok[0]) for tok in toks]
if validation:
assert sorted(toks) == sorted(validation), f"Lists are different {toks} {validation}"
return toks
def format_messages(messages, *format_args, **format_kwargs):
# Format the content of all the messages
return [
{
"role": message["role"],
"content": message["content"].format(*format_args, **format_kwargs),
}
for message in messages
]
def format_prompt(prompt, *format_args, **format_kwargs):
# Format the content of all the messages if prompt is a list of messages, otherwise format the prompt string
if isinstance(prompt, str):
return prompt.format(*format_args, **format_kwargs)
elif isinstance(prompt, list):
return format_messages(prompt, *format_args, **format_kwargs)
def get_influencer_prompt(model, direction):
yes_chat_prompt = format_messages(manipulation_chat_template, condition="Yes")
no_chat_prompt = format_messages(manipulation_chat_template, condition="No")
yes_text_prompt = (
text_prompt + manipulation_text_template.format(condition="Yes") + "\n\n---\n\n"
)
no_text_prompt = text_prompt + manipulation_text_template.format(condition="No") + "\n\n---\n\n"
control_text_prompt = text_prompt + control_text_template + "\n\n---\n\n"
if is_chat_model(model):
return {
YES_DIRECTION: yes_chat_prompt,
NO_DIRECTION: no_chat_prompt,
CONTROL_DIRECTION: control_chat_prompt,
}[direction.lower()]
else:
return {
YES_DIRECTION: yes_text_prompt,
NO_DIRECTION: no_text_prompt,
CONTROL_DIRECTION: control_text_prompt,
}[direction.lower()]
def get_voter_prompt(model):
if is_chat_model(model):
return voter_chat_prompt
else:
return voter_text_prompt
def prompt_matches_model(model, prompt):
if is_chat_model(model):
return isinstance(prompt, list)
else:
return isinstance(prompt, str)
def reverse_roles(messages):
return [
{
"role": "user" if message["role"] == "assistant" else "assistant",
"content": message["content"],
}
for message in messages
]
def chat_to_text(messages):
return "\n".join(
[f"{message['role'].capitalize()}: {message['content']}" for message in messages]
)
|
openai/evals
|
evals/elsuite/ballots/utils.py
|
utils.py
|
py
| 3,804 |
python
|
en
|
code
| 12,495 |
github-code
|
6
|
34346316993
|
'''
Exercises of the book "Think python"
13.1.8 Exercise:
'''
# Markov analysis:
#
# 1. Write a program to read a text from a file and perform Markov analysis. The result
# should be a dictionary that maps from prefixes to a collection of possible suffixes.
# The collection might be a list, tuple, or dictionary; it is up to you to make an
# appropriate choice. You can test your program with prefix length two, but you
# should write the program in a way that makes it easy to try other lengths.
# 2. Add a function to the previous program to generate random text based on the Markov
# analysis. Here is an example from Emma with prefix length 2:
# He was very clever, be it sweetness or be angry, ashamed or only amused, at such a
# stroke. She had never thought of Hannah till you were never meant for me?" "I
# cannot make speeches, Emma:" he soon cut it all himself.
# For this example, I left the punctuation attached to the words. The result is
# almost syntactically correct, but not quite. Semantically, it almost makes
# sense, but not quite.
#
# What happens if you increase the prefix length? Does the random text make
# more sense?
#
# 3. Once your program is working, you might want to try a mash-up: if you combine
# text from two or more books, the random text you generate will blend the
# vocabulary and phrases from the sources in interesting ways.
# Credit: This case study is based on an example from Kernighan and
# Pike, The Practice of Programming, Addison-Wesley, 1999.
#
# You should attempt this exercise before you go on; then you can
# download my solution from http://thinkpython2.com/code/markov.py.
# You will also need http://thinkpython2.com/code/emma.txt.
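# A small illustration of the expected result (added for clarity, not part of the
# original exercise text): with prefix length 2, analysing the toy text
#     "the quick brown fox jumps over the quick red fox"
# should produce a mapping along the lines of
#     {"the quick": ["brown", "red"], "quick brown": ["fox"],
#      "brown fox": ["jumps"], "fox jumps": ["over"], ...}
# i.e. every two-word prefix maps to the collection of words seen right after it.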
import os
import random
def process_line(text):
"""Deletes whitespaces. Lower case of words. Split text into list"""
# Delete whitespaces. Lower case.
new_text = text.strip().lower()
# Split words into list
processed_text = new_text.split()
return processed_text
def get_all_words_in_book(PATH):
""""Processes book and makes list of all words from the book"""
words_in_book = []
with open(PATH, 'r') as f:
# Skip header
for line in f:
if line[:3] == "***":
break
f.readline()
# Go through the book line by line
for line in f:
# Skip empty lines
if not line:
continue
        # Strip whitespace, lower-case the words, and split the line into a list of words
words = process_line(line)
# Make list of words from the book
words_in_book.extend(words)
return words_in_book
def perform_markov_analysis(words_list):
"""Performs Markov analysis with given words"""
markov_dict = {}
# Perform Markov analysis
for i, word in enumerate(words_list):
# Last 2 words can't be a prefix
if i > len(words_list) - 3:
break
# Get prefix (2 words)
prefix = " ".join([word, words_list[i + 1]])
# Get suffix (next word after prefix)
suffix = words_list[i + 2]
# Add prefix and suffix to the dict
markov_dict.setdefault(prefix, [suffix])
if suffix not in markov_dict[prefix]:
markov_dict[prefix].append(suffix)
return markov_dict
def generate_random_text(markov_dict, length):
"""Generates random text from the dict created by Markov analysis
markov_dict: dictionary with [prefix : list of possible suffixes] pairs
length: the length of the generated text
"""
generated_text = []
# Generate words for the text
for i in range(length):
# Create empty list of words for the next choice
generated_words = []
# Get prefix
if i == 0:
# Get random prefix as a first prefix
chosen_prefix = random.choice(list(markov_dict.keys()))
else:
# Get prefix created on the last iteration
chosen_prefix = next_prefix
# Choose random suffix from all possible suffixes for current prefix
chosen_suffix = random.choice(markov_dict[chosen_prefix])
# Make a list of generated words in current iteration
generated_words = chosen_prefix.split()
generated_words.append(chosen_suffix)
# Make a prefix for the next iteration
next_prefix = " ".join(generated_words[1:])
# Append word to the generated text
generated_text.append(generated_words[0])
#Get the text
result = " ".join(generated_text)
return result
def process_book(PATH):
"""Processes book by performing Markov analysis"""
# Make words list from the book
all_words = get_all_words_in_book(PATH)
# Do Markov analysis
markov_analysis_result = perform_markov_analysis(all_words)
return markov_analysis_result
# Get a path to the file with text
PATH = os.path.sep.join(["chapter13", "gunetberg.txt"])
# Process book with Markov analysis
ANALYSIS_RESULT = process_book(PATH)
# Generate text with certain length from analysis result
TEXT = generate_random_text(ANALYSIS_RESULT, 50)
print(TEXT)
|
LiliiaMykhaliuk/think-python
|
chapter13/13.1.8.py
|
13.1.8.py
|
py
| 5,264 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3367814266
|
from datetime import date
import discord
from discord.utils import get
from commands import automoderation, send_by_bot
from constants import Channels, Members
from init_bot import bot
from utils.format import create_embed
from utils.guild_utils import check_for_beer, find_animated_emoji, get_referenced_author, get_members_by_role, \
is_traus, quote_referenced_message, random_emoji, get_channel
class MessageHandler:
def __init__(self, message: discord.Message):
self.message = message
async def if_todo(self):
todo_pattern = f'<#{Channels.TODO}> '
if self.message.content.startswith(todo_pattern) and self.message.author.id == Members.TRAUS:
todo_channel: discord.TextChannel = get(self.message.channel.guild.channels, id=Channels.TODO)
await todo_channel.send(self.message.content.replace(todo_pattern, ''))
async def swear_moderation(self):
no_moderation = (
Channels.REQUEST, Channels.JOIN, Channels.MEMES,
Channels.SEKTA, Channels.FIRE, Channels.DELETED,
Channels.TODO, Channels.REQUEST_ALIANCE
)
if self.message.channel.id not in no_moderation:
await automoderation(self.message)
async def on_mems_channel(self):
if self.message.channel.id == Channels.MEMES:
if self.message.content:
await self.message.delete()
async def on_join_to_guild_channel(self):
        if self.message.channel.id == Channels.JOIN:  # the "join the guild" channel
            inv_gi_channel: discord.TextChannel = get_channel(Channels.REQUEST)  # guild applications channel
embed = create_embed(description=f"{date.today()}\n{self.message.content}",
thumbnail=self.message.author.avatar_url)
await inv_gi_channel.send(f"<@{self.message.author.id}>", embed=embed)
await self.message.delete()
async def on_join_to_aliance_channel(self):
if self.message.channel.id == Channels.JOIN_ALIANCE:
inv_channel: discord.TextChannel = get_channel(Channels.REQUEST_ALIANCE)
embed = create_embed(description=f"{date.today()}\n{self.message.content}",
thumbnail=self.message.author.avatar_url)
await inv_channel.send(f"<@{self.message.author.id}>", embed=embed)
await self.message.delete()
# async def for_hellman(self):
# if self.message.author.id == members.HELLMAN:
# await self.message.add_reaction('🍆')
async def replace_animated_emoji(self) -> list:
animated_emojis = []
if self.message.author.bot:
return animated_emojis
content = self.message.content
new_content = content
if ":" in content:
words = set(content.split(':'))
for word in words:
emoji = find_animated_emoji(word)
if emoji and emoji not in content and f':{word}:' in content: # only 1 word without ::
animated_emojis.append(emoji)
new_content = new_content.replace(f':{word}:', emoji)
self.message._handle_content(new_content)
return animated_emojis
def is_only_emojis(self, animated_emojis) -> bool:
content = self.message.content
for emoji in animated_emojis:
content = content.replace(emoji, '')
return not bool(content.strip())
async def send_vacation_message(self):
vacation_members = get_members_by_role(name="Отпуск")
for member in vacation_members.members:
if str(member.id) in self.message.content:
if is_traus(member):
bot_msg = await self.message.channel.send(f"Траус не бухает, Траус отдыхает!")
else:
bot_msg = await self.message.channel.send(f"{member.display_name} отдыхает!")
await bot_msg.add_reaction(random_emoji())
async def send_message(self, animated_emojis: list):
ctx = await bot.get_context(self.message)
if animated_emojis:
await ctx.message.delete()
if not (self.is_only_emojis(animated_emojis) and self.message.reference):
message = await quote_referenced_message(ctx)
await send_by_bot(ctx, message, self.message.content)
async def send_animated_reactions(self, animated_emojis):
if self.message.reference and self.is_only_emojis(animated_emojis):
await self.add_reactions(animated_emojis)
async def add_reactions(self, animated_emojis):
ctx = await bot.get_context(self.message)
message_id = ctx.message.reference.message_id
message = await ctx.fetch_message(message_id)
for emoji in animated_emojis:
await message.add_reaction(await ctx.guild.fetch_emoji(emoji.strip(">").split(':')[-1]))
@bot.event
async def on_message(message: discord.Message):
handler = MessageHandler(message)
check_for_beer(message.content)
animated_emojis = await handler.replace_animated_emoji()
await handler.if_todo()
await handler.swear_moderation()
await handler.on_mems_channel()
await handler.on_join_to_guild_channel()
await handler.on_join_to_aliance_channel()
# await handler.for_hellman()
await handler.send_vacation_message()
await handler.send_message(animated_emojis)
await handler.send_animated_reactions(animated_emojis)
await bot.process_commands(message)
@bot.event
async def on_raw_message_delete(payload: discord.RawMessageDeleteEvent):
message = payload.cached_message
if message is None:
return
content = message.content
files = [await attachment.to_file() for attachment in message.attachments]
author: discord.Member = message.author
channel: discord.TextChannel = message.channel
deleted: discord.TextChannel = get_channel(Channels.DELETED)
embed = create_embed(description=content,
fields=[
('автор', author.display_name),
('канал', channel.mention),
])
await deleted.send(embed=embed, files=files)
|
Traus/discord_bot
|
events/messages.py
|
messages.py
|
py
| 6,311 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35416705607
|
#-*- coding: utf-8 -*-
from __future__ import print_function
import pandas as pd
from apriori import *
inputfile = 'C:/Users/zhou/Desktop/rrecognized affected infrastructure entities in each news set.xls'
outputfile = 'C:/Users/zhou/Desktop/apriori_rules.xls'
data = pd.read_excel(inputfile, header = None)
print(u'\nstart')
ct = lambda x : pd.Series(1, index = x[pd.notnull(x)])
b = map(ct, data.as_matrix())
data = pd.DataFrame(list(b)).fillna(0)
print(u'\nfinish')
del b
support = 0.00
confidence = 0.00
ms = '---'
find_rule(data, support, confidence, ms).to_excel(outputfile)
|
0AnonymousSite0/Raw-Data-and-Processing-Details
|
For Phase 5 Association Rule Learning for IFI chains.py
|
For Phase 5 Association Rule Learning for IFI chains.py
|
py
| 605 |
python
|
en
|
code
| 2 |
github-code
|
6
|
32185442685
|
import logging
import flask
import time
import signal
import sys
import socket
from flask import Flask
from flask_api import status
from os import environ
from kubernetes import client, config
log = logging.getLogger()
log.addHandler(logging.StreamHandler())
log.setLevel(logging.INFO)
app = Flask(__name__)
try:
config.load_incluster_config()
v1 = client.CoreV1Api()
except Exception as e:
log.exception("K8s config not loaded")
@app.route('/')
def hello():
return f"Hello World!: {flask.request.remote_addr}, handler: {socket.gethostname()}"
@app.route('/health')
def health():
log.info(f"health check from: {flask.request.remote_addr}")
return "OK"
@app.route('/broken')
def sick():
return "BAD", status.HTTP_404_NOT_FOUND
@app.route('/slow/<leng>')
def broken(leng):
time.sleep(int(leng))
return "slow", status.HTTP_200_OK
@app.route('/pods')
def k8sapi():
# with open("/var/run/secrets/kubernetes.io/serviceaccount/namespace", 'r') as f:
# ns = f.readline()
# log.info(f"Reading PODs in ns: {ns}")
    # Return just the pod names so the response is JSON-serialisable
    return [i.metadata.name for i in v1.list_pod_for_all_namespaces(watch=False).items]
def sigterm_handler(signum, frame):
log.info("SIGTERM received, stopping")
sys.exit()
if __name__ == '__main__':
if environ.get('FLASK_SETTINGS') is not None:
log.info("Found flask config")
app.config.from_envvar('FLASK_SETTINGS')
log.info(f"config: {app.config}")
signal.signal(signal.SIGTERM, sigterm_handler)
app.run(host="0.0.0.0")
|
kiemlicz/util
|
dockerfiles/experiments/web.py
|
web.py
|
py
| 1,526 |
python
|
en
|
code
| 20 |
github-code
|
6
|
1523318864
|
"""
Given an array arr[] of n integers,
construct a Product Array prod[] (of same size) such
that prod[i] is equal to the product of all the elements of arr[] except arr[i].
Solve it without division operator in O(n) time.
values = [1,2,3,4,5]
output = [120, 60, 40, 30, 24]
asked in cimpress coding round(3)
part of dynamic programming
"""
def product_on_index(values):
forward_product = [1]* len(values)
backward_product = [1]* len(values)
for i, value in enumerate(values):
if i >0:
forward_product[i] = forward_product[i-1] * values[i-1]
values = list(reversed(values))
for i, value in enumerate(values):
if i >0:
backward_product[i] = backward_product[i-1] *values[i-1]
backward_product = list(reversed(backward_product))
return [forward_product[i] * backward_product[i] for i in range(len(forward_product))]
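# Worked illustration of the two-pass technique above (added for clarity):
# for values = [1, 2, 3, 4, 5]
#   forward_product  (products of everything left of i)  = [1, 1, 2, 6, 24]
#   backward_product (products of everything right of i) = [120, 60, 20, 5, 1]
# and multiplying them element-wise gives [120, 60, 40, 30, 24] -- the product of
# all elements except the one at each index, computed in O(n) with no division.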
if __name__ == '__main__':
    values = [1, 2, 3, 4, 5]
    output = product_on_index(values)
    print(output)
|
kartikwar/programming_practice
|
dynamic_programming/product_at_index_i.py
|
product_at_index_i.py
|
py
| 1,015 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32143586237
|
import utils
import requests
import json, sys
from datetime import date, datetime, timedelta
space_key = "SVMC"
# parentTitle = "Project Report - Automatic"
# weeklyPageTitle = "Weekly Project Status Report"
# monthlyPageTitle = "CP Monthly Report"
dailyPageTitle = "Issue Status Tool"
pageUrgentPrjTitle = "Issue Tool Project List"
user = utils.open_file(".plm")[0]
pw = utils.open_file(".plm")[2]
def submitToWiki(page_title, page_content):
response = getPageContent(page_title, space_key)
if response.json()['size'] > 0:
print('update page %s' % page_title)
page_id = response.json()['results'][0]['id']
current_version = response.json()['results'][0]['version']['number']
data = {
'id': str(page_id),
'type': 'page',
'title': page_title,
'space': {'key': space_key},
'version': {'number': current_version + 1},
'body': {
'storage':
{
'value': str(page_content),
'representation': 'storage',
}
}
}
data_to_send = json.dumps(data).encode("utf-8")
response = requests.put('http://mobilerndhub.sec.samsung.net/wiki/rest/api/content/%s' % page_id,
headers={'Content-Type': 'application/json'}, data=data_to_send, auth=(user, pw))
if response.status_code == requests.codes['ok']:
print("View page at %s" % response.url)
else:
print('add page %s' % page_title)
response = requests.get('http://mobilerndhub.sec.samsung.net/wiki/rest/api/content?spaceKey=%s&title=%s' %
(space_key, parentTitle), auth=(user, pw))
parent_id = response.json()['results'][0]['id']
data = {
'type': 'page',
'title': page_title,
"ancestors": [{"id": parent_id}],
'space': {'key': space_key},
'body': {
'storage':
{
'value': str(page_content),
'representation': 'storage',
}
}
}
data_to_send = json.dumps(data).encode("utf-8")
response = requests.post('http://mobilerndhub.sec.samsung.net/wiki/rest/api/content/',
headers={'Content-Type': 'application/json'}, data=data_to_send, auth=(user, pw))
if response.status_code == requests.codes['ok']:
print("View page at %s" % response.url)
def getPageContent(pageTitle, space_key):
response = requests.get('http://mobilerndhub.sec.samsung.net/wiki/rest/api/content?spaceKey=%s&title=%s&'
'expand=space,body.view,version,container' % (space_key, pageTitle), auth=(user, pw))
if not response.status_code == requests.codes['ok']:
print("Cannot get content of page: " + pageTitle)
sys.exit(1)
return response
def getListSingleID(data):
    """
    :param data: table data (first row is the header)
    :return: list of MySingle IDs used to build the group-chat link
    """
    list_id = []
    index = data[0].index('Owner')
    for row in data:
        list_id.append(row[index])
    del list_id[0]  # drop the header row
    return list_id
def makeLinkChat(mySingleId):
"""Returns <a> tag with href from single ID"""
info_link = "mysingleim://%s"
return r"<a target='_blank' href='%s'>%s</a>" % (info_link % mySingleId, mySingleId)
def makeLinkNameChat(mySingleId, name_member):
"""Returns <a> tag with href from single ID"""
info_link = "mysingleim://%s"
return r"<a target='_blank' href='%s'>%s</a>" % (info_link % mySingleId, name_member)
def makeLinkChatGroup(listID):
"""Returns <a> tag with href from single ID"""
strListID = ""
for i in range(0, len(listID)):
strListID += str(listID[i]) + ';'
info_link = "mysingleim://%s"
return r"<a target='_blank' style='font-size: 12px; font-style: normal;' target='_blank' href='%s'>%s</a>" % (
info_link % strListID, "<br />Chat")
def makeLinkPLM(PLMCaseCode):
"""Returns <a> tag with href from mysingleID"""
return "<a target='_blank' href='http://splm.sec.samsung.net/wl/tqm/defect/defectreg/getDefectCodeSearch.do?defectCode=%s'>%s</a>" % (
PLMCaseCode, PLMCaseCode)
def make_link_chat(single_id, text):
"""Returns <a> tag with href from single ID"""
info_link = "mysingleim://%s"
return r"<a target='_blank' href='%s'>%s</a>" % (info_link % single_id, text)
def make_link_jira(jira_key):
jira_link = r"http://mobilerndhub.sec.samsung.net/its/browse/%s"
return r"<a target='_blank' href='%s'>%s</a>" % (jira_link % jira_key, jira_key)
def make_link_jira_with_summary(jira_key, text):
jira_link = r"http://mobilerndhub.sec.samsung.net/its/browse/%s"
return r"<a target='_blank' href='%s'>%s</a>" % (jira_link % jira_key, text)
def make_img_jira(link):
return r"<img src='%s' class='icon'>" % link
def make_status_jira(text):
if text.lower() == 'new':
return r"<span class='aui-lozenge aui-lozenge-subtle aui-lozenge-complete'>%s</span>" % text
else:
return r"<span class='aui-lozenge aui-lozenge-subtle aui-lozenge-current'>%s</span>" % text
def create_isssue_owner(owner_list):
html = "<head> \n </head> \n <body> \n <div> \n <p>"
for i in owner_list:
key = get_user_key(i)
html += '<ac:link><ri:user ri:userkey="%s" /></ac:link>' % key
html += ", "
html += "</p> \n </div> \n </body>"
return html
def check_time_update():
response = getPageContent(dailyPageTitle, space_key)
page_key = response.json()['results'][0]['id']
response = requests.get("http://mobilerndhub.sec.samsung.net/wiki/rest/api/content/%s/history" % str(page_key),
auth=(user, pw))
time_update = response.json()['lastUpdated']['when'][:19] # %Y-%m-%dT%H:%M:%S
    datetime_update = datetime.strptime(time_update, "%Y-%m-%dT%H:%M:%S") - timedelta(hours=2)  # HQ time is 2 hours ahead of VN time
print("latest time update page: %s" % datetime_update.strftime("%H:%M %d-%m-%Y"))
return datetime_update
def get_updated_date(pageTitle):
response = getPageContent(pageTitle, space_key)
page_key = response.json()['results'][0]['id']
response = requests.get("http://mobilerndhub.sec.samsung.net/wiki/rest/api/content/%s/history" % str(page_key),
auth=(user, pw))
return response.json()['lastUpdated']['when'][:10] # YYYY-MM-DD
def get_user_key(user_name):
request_data = requests.get("http://mobilerndhub.sec.samsung.net/wiki/rest/api/user?username=%s" % user_name,
auth=(user, pw))
return request_data.json()['userKey']
def get_all_data_jira_task_list(project_key):
    # Query tasks created within roughly the last 3 months (since startOfMonth(-2))
jql_query = "project = %s and status not in (resolved, cancelled) and created > startOfMonth(-2) order by " \
"created desc" % project_key
max_result = 1000
params = {
"jql": jql_query,
"startAt": 0,
"maxResults": max_result,
"fields": [
"key",
"summary",
"issuetype",
"created",
"duedate",
"resolutiondate",
"assignee",
"priority",
"status"
]
}
url_query = 'http://mobilerndhub.sec.samsung.net/its/rest/api/2/search'
data_task_list_json = requests.get(url_query, params=params, auth=(user, pw))
list_all_task = json.loads(data_task_list_json.text)
return list_all_task['issues']
def convert_date_time(date_time):
date_time = datetime.strptime(date_time, "%Y-%m-%d").date()
return date_time
def get_data_jira_task_list_by_team(all_data_jira_task_list, member_id_list):
num_of_jira_task_by_team = {}
info_detail_jira_task = []
data_jira_task_for_pie_chart = [["", 'Jira Tasks'], ['Done', 0], ['NEW', 0], ["In Progress", 0]]
list_all_member = []
for team, member_of_team in member_id_list.items():
num_of_jira_task_by_team[team] = [0, 0] # [open, in progress]
list_all_member += member_of_team
number_of_jira_task_by_member = {key: 0 for key in list_all_member}
for task_info in all_data_jira_task_list:
summary = task_info['fields']['summary']
if not summary.startswith('[Automatic]'):
due_date = task_info['fields']['duedate']
created = task_info['fields']['created'][:10]
resolve_date = task_info['fields']['resolutiondate']
if resolve_date is None:
resolve_date = ''
else:
resolve_date = convert_date_time(resolve_date[:10])
if due_date is None:
due_date = ''
# else:
# due_date = convert_date_time(due_date)
single_id = task_info['fields']['assignee']['key']
team = ""
status_jira = task_info['fields']['status']['name'].lower()
if status_jira == 'in progress':
data_jira_task_for_pie_chart[3][1] += 1
elif status_jira == 'new':
data_jira_task_for_pie_chart[2][1] += 1
else:
data_jira_task_for_pie_chart[1][1] += 1
if status_jira == 'done' and resolve_date == date.today():
                    # also count Jira tasks that were resolved today
number_of_jira_task_by_member[single_id] += 1
if status_jira == 'in progress' or status_jira == 'new':
try:
number_of_jira_task_by_member[single_id] += 1
except KeyError:
number_of_jira_task_by_member[single_id] = 1
for key, value in member_id_list.items():
if single_id in value:
team = key
if status_jira == 'in progress':
num_of_jira_task_by_team[key][1] = num_of_jira_task_by_team[key][1] + 1
elif status_jira == 'new':
num_of_jira_task_by_team[key][0] = num_of_jira_task_by_team[key][0] + 1
break
info = [
make_link_jira(task_info['key']),
summary,
make_img_jira(task_info['fields']['issuetype']['iconUrl']),
created,
due_date,
make_link_chat(single_id, task_info['fields']['assignee']['displayName']),
team,
make_img_jira(task_info['fields']['priority']['iconUrl']),
make_status_jira(task_info['fields']['status']['name'])
]
info_detail_jira_task.append(info)
data_chart_pie_jira = 'var dataChartPieJira = ' + str(data_jira_task_for_pie_chart) + '; \n'
return num_of_jira_task_by_team, info_detail_jira_task, number_of_jira_task_by_member, data_chart_pie_jira
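# --- Added usage sketch (not part of the original file) ---
# A minimal, hypothetical example of pushing a page with the helpers above. The page
# title and body are illustrative; it assumes the ".plm" credentials file exists and
# that the internal Confluence instance is reachable.
if __name__ == '__main__':
    demo_body = "<p>Report generated on %s</p>" % date.today().isoformat()
    submitToWiki("Issue Status Tool - Demo", demo_body)  # hypothetical page title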
| hoangdt9/hoang | WikiSubmit.py | WikiSubmit.py | py | 11,061 | python | en | code | 0 | github-code | 6 |