seq_id (string, 7-11 chars) | text (string, 156-1.7M chars) | repo_name (string, 7-125 chars) | sub_path (string, 4-132 chars) | file_name (string, 4-77 chars) | file_ext (string, 6 classes) | file_size_in_byte (int64, 156-1.7M) | program_lang (string, 1 class) | lang (string, 38 classes) | doc_type (string, 1 class) | stars (int64, 0-24.2k, nulls present) | dataset (string, 1 class) | pt (string, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---|---
32869975011
|
from fastapi import APIRouter
from api.schemes import relations, responses
from database import redis
def add_relation(rel: relations.Relation, rel_name: str) -> responses.RelationOperations:
if redis.add_relation(rel_name, rel.user_id, rel.item_id):
return responses.RelationOperations(status="successful")
return responses.RelationOperations(status="unsuccessful", action="relate")
def rem_relation(rel: relations.Relation, rel_name: str) -> responses.RelationOperations:
if redis.rem_relation(rel_name, rel.user_id, rel.item_id):
return responses.RelationOperations(status="successful")
return responses.RelationOperations(status="unsuccessful", action="unrelate")
u2u_router = APIRouter(
prefix="/u2u",
tags=["User2User API"]
)
@u2u_router.post("")
def add_user(u2u: relations.User2User) -> responses.RelationOperations:
    return add_relation(rel=u2u, rel_name="u2u")
@u2u_router.delete("")
def rem_user(u2u: relations.User2User) -> responses.RelationOperations:
    return rem_relation(rel=u2u, rel_name="u2u")
u2p_router = APIRouter(
prefix="/u2p",
tags=["User2Post API"]
)
@u2p_router.post("")
def add_post(u2p: relations.User2Post) -> responses.RelationOperations:
    return add_relation(rel=u2p, rel_name="u2p")
@u2p_router.delete("")
def rem_post(u2p: relations.User2Post) -> responses.RelationOperations:
    return rem_relation(rel=u2p, rel_name="u2p")
u2c_router = APIRouter(
prefix="/u2c",
tags=["User2Comm API"]
)
@u2c_router.post("")
def add_comm(u2c: relations.User2Comm) -> responses.RelationOperations:
    return add_relation(rel=u2c, rel_name="u2c")
@u2c_router.delete("")
def rem_comm(u2c: relations.User2Comm) -> responses.RelationOperations:
    return rem_relation(rel=u2c, rel_name="u2c")
|
Muti-Kara/sylvest_recommender
|
api/routers/relations.py
|
relations.py
|
py
| 1,755 |
python
|
en
|
code
| 2 |
github-code
|
6
|
38043006452
|
import cloudinary.uploader
import requests
# define your S3 bucket name here.
S3_BUCKET_NAME = "akshayranganath"
def get_file_name(url, transformation):
# transformation will be of the format "t_text_removed/jpg".
# remove the "/jpg" part and the "t_" part
transformation = transformation.rsplit('/',1)[0].split('t_',1)[1]
# from the URL, extract the file name. This will be of the format: 1000000010144_7GuardiansoftheTomb_portrait3x4.jpg
# For this file name, insert the transformation from above as the last component in the file name
file_name = url.rsplit('/',1)[1].replace('.jpg','')
# if the file name has the prefix s3_akshayranganath_, remove that prepended part
# by default, Cloudinary will create the file name like s3_akshayranganath_1000000010144_7GuardiansoftheTomb_portrait3x4
file_name = file_name.replace(f"s3_{S3_BUCKET_NAME}_","")
file_name = file_name + '_' + transformation + '.jpg'
print(file_name)
return file_name
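# For illustration (the URL below is a hypothetical example, not taken from the original script):
# with a url ending in ".../t_text_removed/s3_akshayranganath_1000000010144_7GuardiansoftheTomb_portrait3x4.jpg"
# and transformation "t_text_removed/jpg", the steps above produce the local file name
# "1000000010144_7GuardiansoftheTomb_portrait3x4_text_removed.jpg".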
def download_and_save(url, file_name):
# download the image and save it with the desired file name
resp = requests.get(url)
with open(file_name, 'wb') as w:
w.write(resp.content)
def delete_image(public_id):
# delete the image since transformation is now complete
resp = cloudinary.uploader.destroy(
public_id,
type='upload',
resource_type='image'
)
def main():
try:
        # upload the file. Create the necessary AI-based derivatives inline.
# no need to wait for any webhook notifications.
print("Uploading and transforming image ..")
resp = cloudinary.uploader.upload(
f's3://{S3_BUCKET_NAME}/1000000010144_7GuardiansoftheTomb_portrait3x4.jpeg',
upload_preset='ai_preset'
)
print("Done.")
# response will contain the URLs for the transformations.
# extract these URLs and download the images
for transform in resp['eager']:
tx = transform['transformation']
url = transform['secure_url']
file_name = get_file_name(url, tx)
download_and_save(url, file_name)
print("Transformations downloaded successfully")
        # optional - delete the file once the transformations are downloaded
delete_image(resp['public_id'])
print(f"Image {resp['public_id']} deleted successfully.")
except Exception as e:
print(e)
if __name__=="__main__":
main()
|
akshay-ranganath/create-and-upload
|
demo_upload_and_download.py
|
demo_upload_and_download.py
|
py
| 2,558 |
python
|
en
|
code
| 0 |
github-code
|
6
|
33488962773
|
'''
Given an array A, we can perform a pancake flip: We choose some positive integer k <= A.length, then reverse the order of the first k elements of A. We want to perform zero or more pancake flips (doing them one after another in succession) to sort the array A.
Return the k-values corresponding to a sequence of pancake flips that sort A. Any valid answer that sorts the array within 10 * A.length flips will be judged as correct.
Example 1:
Input: [3,2,4,1]
Output: [4,2,4,3]
Explanation:
We perform 4 pancake flips, with k values 4, 2, 4, and 3.
Starting state: A = [3, 2, 4, 1]
After 1st flip (k=4): A = [1, 4, 2, 3]
After 2nd flip (k=2): A = [4, 1, 2, 3]
After 3rd flip (k=4): A = [3, 2, 1, 4]
After 4th flip (k=3): A = [1, 2, 3, 4], which is sorted.
Example 2:
Input: [1,2,3]
Output: []
Explanation: The input is already sorted, so there is no need to flip anything.
Note that other answers, such as [3, 3], would also be accepted.
Note:
1 <= A.length <= 100
A[i] is a permutation of [1, 2, ..., A.length]
'''
class Solution(object):
def pancakeSort(self, A):
"""
:type A: List[int]
:rtype: List[int]
"""
A_origin = A
global_rec = []
def find_maxInd(alist):
max_ind, max_val = 0, alist[0]
for i in range(1, len(alist)):
if alist[i] > max_val:
max_ind, max_val = i, alist[i]
return max_ind
def sort_first_k(A, k):
if k == 0:
return
if A == sorted(A):
return
max_ind = find_maxInd(A[:k])
A = A[:max_ind + 1][::-1] + A[max_ind + 1:k] + A[k:]
global_rec.append(max_ind + 1)
A = A[:k][::-1] + A[k:]
global_rec.append(k)
sort_first_k(A, k - 1)
sort_first_k(A, len(A))
A = A_origin
return global_rec
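# Quick check (illustrative, not part of the original file): on the first example from the
# problem statement, Solution().pancakeSort([3, 2, 4, 1]) returns a k-sequence such as
# [3, 4, 2, 3, 1, 2]; applying each flip (reverse the first k elements) in order yields
# [1, 2, 3, 4], well within the 10 * len(A) flip budget.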
|
sxu11/Algorithm_Design
|
Contests/C118_PancakeSorting.py
|
C118_PancakeSorting.py
|
py
| 1,915 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30517336024
|
from .errors import *
class Asset:
"""Generic class representing a file asset URL."""
def __init__(self, client, url, filename):
self.client = client
self._url = url
self.filename = filename
self._response = None
def __str__(self):
return f"{self.__class__.__name__}({self.filename})"
async def read(self, url=None):
"""Downloads the file associated with this Asset.
Parameters
----------
url: :class:`str`
The URL to download the asset from, for subclasses with multiple options.
Returns
-------
:class:`bytes`
The file, downloaded from the URL.
"""
if not url:
url = self._url
async with self.client._session.get(url) as response:
if response.status != 200:
raise APIException(response.status, response.reason)
image = await response.read()
return image
async def save(self, path=None, url=None):
"""Downloads the file associated with this Asset and saves to the requested path.
Parameters
----------
url: :class:`str`
The URL to download the asset from, for subclasses with multiple options.
path:
The file path at which to save the file.
If ``None``, saves the image to the working directory using the filename from the asset url.
"""
if not url:
url = self._url
path = path if path else f"./{url.split('/')[-1]}"
async with self.client._session.get(url) as response:
if response.status != 200:
raise APIException(response.status, response.reason)
with open(path, "wb") as f:
bytes_written = 0
while True:
chunk = await response.content.read(1000)
bytes_written += len(chunk)
if not chunk:
break
f.write(chunk)
return bytes_written
async def read_chunk(self, chunk_size: int, url=None):
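        """Reads the next chunk of the file associated with this Asset.

        The underlying response is opened lazily on the first call and kept open between
        calls; once the stream is exhausted (an empty chunk), the response is closed and
        reset so a later call starts a fresh download.
        """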
if not url:
url = self._url
if not self._response:
response = await self.client._session.get(url)
if response.status != 200:
await response.close()
raise APIException(response.status, response.reason)
self._response = response
chunk = await self._response.content.read(chunk_size)
if not chunk:
await self._response.close()
self._response = None
return chunk
|
nwunderly/aionasa
|
aionasa/asset.py
|
asset.py
|
py
| 2,644 |
python
|
en
|
code
| 7 |
github-code
|
6
|
36021767355
|
import numpy as np
import sys
#file_name = "space_pulse_illumination_0.000000V.dat";
file_name = sys.argv[-1]
fname = open(file_name);
column_number = 5;
tmp_title = [];
tmp_value = [];
tmp_data = 0;
tmp_time = 0;
tmp_data_number = 0;
J = [];
time = [];
value = [];
value_space_pos = [];
i = 0;
position_number = 1;
for rows in fname:
parts = rows.split('\t') # split line into parts
if len(parts) > 1: # if at least 2 parts/columns
try:
tmp_data = float(parts[column_number]);
except:
pass;
else:
tmp_value.append(tmp_data);
if len(parts) <= 1:
tmp_title.append(parts);
if len(rows.strip()) == 0 :
tmp_data_number += 1;
# TIME
time_string = str(tmp_title[0]).split(' ');
tmp_time = float(time_string[1]);
time.append(tmp_time);
#CURRENT
current_string = str(tmp_title[2]).split(' ');
J.append(float(current_string[3]));
#VALUE
value.append(tmp_value);
value_space_pos.append(value[i][position_number]);
tmp_title = [];
tmp_value = [];
i += 1;
fname.close();
#SAVE RESULTS
save_results = open('test.txt', 'w')
for i in range(len(time)):
save_results.write(str(time[i]) + "\t" + str(value_space_pos[i]) + "\t" + str(J[i]) + "\n");
save_results.close();
|
dglowienka/drift-diffusion_mini-modules
|
Dynamic/time_J.py
|
time_J.py
|
py
| 1,400 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21211345581
|
import sys
import heapq
input = sys.stdin.readline
INF = float('inf')
V, E = map(int, input().split())
graph = [[] for _ in range(V + 1)]
for _ in range(E):
a, b, w = map(int, input().split())
graph[a].append((w, b))
graph[b].append((w, a))
v1, v2 = map(int, input().split())
def dijakstra(start):
min_dist = [INF] * (V + 1)
min_dist[start] = 0
queue = [(0, start)]
while queue:
current_dist, current = heapq.heappop(queue)
for distance_to_adjacent, adjacent in graph[current]:
new_dist = current_dist + distance_to_adjacent
if new_dist < min_dist[adjacent]:
min_dist[adjacent] = new_dist
heapq.heappush(queue, (new_dist, adjacent))
return min_dist
one = dijakstra(1)
v_one = dijakstra(v1)
v_two = dijakstra(v2)
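# The route must pass through both v1 and v2, so the answer is the cheaper of the two visit
# orders, 1 -> v1 -> v2 -> V or 1 -> v2 -> v1 -> V, each assembled from the three Dijkstra runs above.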
path1 = one[v1] + v_one[v2] + v_two[V]
path2 = one[v2] + v_two[v1] + v_one[V]
answer = min(path1, path2)
if answer == INF:
print(-1)
else:
print(answer)
|
kimkimj/Algorithm
|
python/Dijkstra/specificRoute.py
|
specificRoute.py
|
py
| 984 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25097354504
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 21 13:56:29 2022
@author: maria
"""
import numpy as np
import pandas as pd
from numpy import zeros, newaxis
import matplotlib.pyplot as plt
import scipy as sp
from scipy.signal import butter,filtfilt,medfilt
import csv
import re
import functions2022_07_15 as fun
#getting the signal, for now using the raw F
animal= 'Hedes'
date= '2022-07-19'
#note: if experiment type not known, put 'suite2p' instead
experiment = '1'
#the file number of the NiDaq file, not always experiment-1 because there might have been an issue with a previous acquisition etc
file_number = '0'
log_number = '0'
plane_number = '1'
#IMPORTANT: SPECIFY THE FRAME RATE
frame_rate = 15
#the total amount of seconds to plot
seconds = 5
#specify the cell for single cell plotting
res = ''
filePathF ='D://Suite2Pprocessedfiles//'+animal+ '//'+date+ '//'+res+'suite2p//plane'+plane_number+'//F.npy'
filePathops = 'D://Suite2Pprocessedfiles//'+animal+ '//'+date+ '//'+res+'suite2p//plane'+plane_number+'//ops.npy'
filePathmeta = 'Z://RawData//'+animal+ '//'+date+ '//'+experiment+'//NiDaqInput'+file_number+'.bin'
filePathlog = 'Z://RawData//'+animal+ '//'+date+ '//'+experiment+'//Log'+log_number+'.csv'
filePathArduino = 'Z://RawData//'+animal+ '//'+date+ '//'+experiment+'//ArduinoInput'+file_number+'.csv'
signal= np.load(filePathF, allow_pickle=True)
filePathiscell = 'D://Suite2Pprocessedfiles//'+animal+ '//'+date+ '//'+res+'suite2p//plane'+plane_number+'//iscell.npy'
iscell = np.load(filePathiscell, allow_pickle=True)
#loading ops file to get length of first experiment
ops = np.load(filePathops, allow_pickle=True)
ops = ops.item()
#printing data path to know which data was analysed
key_list = list(ops.values())
print(key_list[88])
print("frames per folder:",ops["frames_per_folder"])
exp= np.array(ops["frames_per_folder"])
#getting the first experiment, this is the length of the experiment in frames
exp1 = int(exp[0])
#getting second experiment
exp2 = int(exp[1])
#getting experiment 3
if exp.shape[0] == 3:
exp3 = int(exp[2])
"""
Step 1: getting the cell traces I need, here the traces for the first experiment
"""
#getting the F trace of cells (and not ROIs not classified as cells) using a function I wrote
signal_cells = fun.getcells(filePathF= filePathF, filePathiscell= filePathiscell).T
#%%
#
#getting the fluorescence for the first experiment
first_exp_F = signal_cells[:, 0:exp1]
# to practice will work with one cell for now from one experiment
cell = 33
F_onecell = signal[cell, 0:exp1]
# fig,ax = plt.subplots()
# plt.plot(F_onecell)
"""
Step 2: getting the times of the stimuli
"""
#getting metadata info, remember to choose the right number of channels!! for most recent data it's 5 (for data in March after the 16th it's 4, and 7 before that)
meta = fun.GetMetadataChannels(filePathmeta, numChannels=5)
#getting the photodiode info, usually the first column in the meta array
photodiode = meta[:,0]
#using the function from above to put the times of the photodiode changes (in milliseconds!)
photodiode_change = fun.DetectPhotodiodeChanges(photodiode,plot= True,lowPass=30,kernel = 101,fs=1000, waitTime=10000)
#the above is indiscriminate photodiode change, when it's on even numbers that is the stim onset
stim_on = photodiode_change[1::2]
# fig,ax = plt.subplots()
# ax.plot(stim_on)
"""
Step 3: actually aligning the stimuli with the traces (using Liad's function)
"""
tmeta= meta.T
frame_clock = tmeta[1]
frame_times = fun.AssignFrameTime(frame_clock, plot = False)
# frame_times1 = frame_times[1:]
frame_on = frame_times[::2]
frames_plane1 = frame_on[1::4]
frames_plane2 = frame_on[2::4]
#window: specify the range of the window
window= np.array([-1000, 4000]).reshape(1,-1)
aligned_all = fun.AlignStim(signal= signal_cells, time= frames_plane1, eventTimes= stim_on, window= window,timeLimit=1000)
#aligned: the traces for all the stimuli for all the cells
aligned = aligned_all[0]
#the actual time, usually 1 second before and 4 seconds after stim onset in milliseconds
time = aligned_all[1]
#%%
"""
Step 4: getting the identity of the stimuli
"""
#need to get the log info file extraction to work
#getting stimulus identity
Log_list = fun.GetStimulusInfo (filePathlog, props = ["LightOn"])
#converting the list of dictionaries into an array and adding the time of the stimulus
#worked easiest by using pandas dataframe
log = np.array(pd.DataFrame(Log_list).values).astype(np.float64)
#log[0] is the degrees, log[1] would be spatial freq etc (depending on the order in the log list)
#nr of stimuli specifies the total amount of stim shown
nr_stimuli = aligned.shape[1]
#%%
#getting one neuron for testing and plotting of a random stimulus:
neuron = 3
one_neuron = aligned[:,:,neuron]
fig,ax = plt.subplots()
ax.plot(time,one_neuron[:,])
ax.axvline(x=0, c="red", linestyle="dashed", linewidth = 1)
|
mariacozan/Analysis_and_Processing
|
code_archive/2022-07-21-neuronal_classification.py
|
2022-07-21-neuronal_classification.py
|
py
| 5,001 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35574568262
|
# This module contains the set of functions that work with the IPCA inflation index.
# IPCA is the most used inflation index in Brazil. It is calculated and published by IBGE. It is published between the 8th and 11th of the following month.
# IPCA is published both as a monthly percentage rate and a index number. We update our database getting the monthly percentage rate from the BCB's API.
import os
import pandas as pd
import ibge
import ir_calc as ir
import br_workdays as wd
def load_ipca(db_path='D:\Investiments\Databases\Indexes\IPCA.csv'):
# Reads the IPCA database to dataframe
try:
ipca = pd.read_csv(db_path, delimiter=';', dtype={'Num_IPCA':float, 'IPCA':float, 'Accum':float}, index_col='TradeDate')
ipca.index = pd.to_datetime(ipca.index,format='%Y-%m-%d')
        ipca = ipca.sort_index()
except OSError as err:
raise OSError(err)
except Exception as err:
raise Exception(err)
return(ipca)
def update_ipca_db(db_path='D:\Investiments\Databases\Indexes\IPCA.csv'):
# Updates the IPCA database with all the rates published after the last update
try:
ipca = load_ipca(db_path)
except Exception as exp:
raise Exception(exp)
# Get the IPCA values published since the last update
try:
novos_ipca = ibge.get_ipca_from_ibge(ipca.last_valid_index(),pd.to_datetime("today"))
except Exception as exp:
raise Exception(exp)
if novos_ipca.first_valid_index() == ipca.last_valid_index():
ipca.drop(ipca.last_valid_index(), inplace=True)
# Appends new rates to the dataframe
ipca = pd.concat([ipca, novos_ipca])
# Includes the month following the date of the last IPCA rate available, to calculate the last available Accum
new_date = ipca.last_valid_index() + pd.DateOffset(months=1)
new_row = pd.DataFrame(data={'Num_IPCA':[0.0],'IPCA':[0.0],'Accum':[1.00]},index=[new_date])
ipca = pd.concat([ipca, new_row])
    ipca = ipca.sort_index()
# Calculates the cumulative return
ipca['Accum'] = (1 + ipca.IPCA.shift(1)).cumprod()
    ipca.loc[ipca.first_valid_index(), 'Accum'] = 1.00
    ipca['Accum'] = ipca['Accum'].round(decimals=8)
# Saves the last version of the IPCA database in a different file
new_path = db_path[:len(db_path)-4] + ipca.last_valid_index().strftime('%Y%m%d') + '.csv'
if not os.path.exists(new_path):
os.rename(db_path,new_path)
# Saves the updated series to the csv file
ipca.to_csv(db_path, sep=';',header=['Num_IPCA','IPCA','Accum'], index_label='TradeDate')
def calc_first_accrual(start_date, end_date, reset_day, accrual_type):
# Calculates the parameters to be used in the accrual of the first rate -> may be necessary to calculate a pro-rata accrual
if start_date.day < reset_day:
month_m0 = pd.to_datetime((start_date - pd.DateOffset(months=1)).strftime('%Y%m01'))
month_m1 = pd.to_datetime(start_date.strftime('%Y%m01'))
first_date = pd.to_datetime(start_date.strftime('%Y%m')+"{0:0>2}".format(reset_day))
else:
month_m0 = pd.to_datetime(start_date.strftime('%Y%m01'))
month_m1 = pd.to_datetime((start_date + pd.DateOffset(months=1)).strftime('%Y%m01'))
first_date = pd.to_datetime(month_m1.strftime('%Y%m')+"{0:0>2}".format(reset_day))
if first_date > end_date:
first_date = end_date
if accrual_type == 'cd':
tot_days = month_m1 - month_m0
num_days = first_date - start_date
elif accrual_type == 'bd':
tot_days = wd.num_br_bdays(month_m0,month_m1)
num_days = wd.num_br_bdays(start_date, first_date)
else:
raise Exception('Accrual type must be cd (calendar days), or bd (business days)')
return {'month_m0': month_m0, 'month_m1': month_m1, 'first_date': first_date, 'tot_days': tot_days, 'num_days': num_days}
def calc_last_accrual(start_date, end_date, reset_day, accrual_type):
# Calculates the parameters to be used in the accrual of the last rate -> may be necessary to calculate a pro-rata accrual
if end_date.day <= reset_day:
month_m0 = pd.to_datetime((end_date - pd.DateOffset(months=1)).strftime('%Y%m01'))
month_m1 = pd.to_datetime(end_date.strftime('%Y%m01'))
last_date = pd.to_datetime((end_date - pd.DateOffset(months=1)).strftime('%Y%m')+"{0:0>2}".format(reset_day))
else:
month_m0 = pd.to_datetime(end_date.strftime('%Y%m01'))
month_m1 = pd.to_datetime((end_date + pd.DateOffset(months=1)).strftime('%Y%m01'))
last_date = pd.to_datetime(month_m0.strftime('%Y%m')+"{0:0>2}".format(reset_day))
if last_date < start_date:
last_date = start_date
if accrual_type == 'cd':
tot_days = month_m1 - month_m0
num_days = end_date - last_date
elif accrual_type == 'bd':
tot_days = wd.num_br_bdays(month_m0,month_m1)
num_days = wd.num_br_bdays(last_date, end_date)
else:
raise Exception('Accrual type must be cd (calendar days), or bd (business days)')
return {'month_m0': month_m0, 'month_m1': month_m1, 'last_date': last_date, 'tot_days': tot_days, 'num_days': num_days}
def ipca_accum (ipca, start_date, end_date, reset_day=0, accrual_type='cd'):
# Returns the cumulative return of the IPCA rate between start_date (inclusive) and end_date (exclusive)
# Source for the formula: https://www.b3.com.br/data/files/F6/26/EA/D2/F051F610AF4EF0F6AC094EA8/Caderno%20de%20Formulas%20-%20Debentures%20Cetip%2021.pdf
if (start_date < ipca.first_valid_index()) or (start_date > ipca.last_valid_index()) or (end_date < ipca.first_valid_index()) or (end_date > ipca.last_valid_index()):
raise Exception('Dates out of available range of IPCA dates')
if start_date > end_date:
raise Exception('Start Date must be older than End Date')
if accrual_type != 'cd' and accrual_type != 'bd':
raise Exception('Accrual type must be cd (calendar days), or bd (business days)')
if reset_day == 0:
reset_day = end_date.day
first_accrual = calc_first_accrual(start_date, end_date, reset_day, accrual_type)
last_accrual = calc_last_accrual(start_date, end_date, reset_day, accrual_type)
try:
# First accrual may be pro-rata
ipca_accum = (ipca.loc[first_accrual['month_m1']].Accum / ipca.loc[first_accrual['month_m0']].Accum) ** (first_accrual['num_days'] / first_accrual['tot_days'])
# Intermediate accruals are never pro-rata
ipca_accum = ipca_accum * (ipca.loc[last_accrual['month_m0']].Accum / ipca.loc[first_accrual['month_m1']].Accum)
# Last accrual can be pro-rata
ipca_accum = ipca_accum * (ipca.loc[last_accrual['month_m1']].Accum / ipca.loc[last_accrual['month_m0']].Accum) ** (last_accrual['num_days'] / last_accrual['tot_days'])
except Exception as exp:
raise Exception(exp)
return(ipca_accum)
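# Usage sketch (not part of the original module; the dates below and reliance on the default
# database path are illustrative assumptions):
#
#   update_ipca_db()                            # refresh the CSV with the latest IBGE rates
#   ipca = load_ipca()                          # read the updated series into a dataframe
#   factor = ipca_accum(ipca,
#                       pd.to_datetime('2022-01-15'),
#                       pd.to_datetime('2022-07-15'),
#                       reset_day=15,
#                       accrual_type='cd')
#   print('IPCA accumulated over the period: {:.4%}'.format(factor - 1))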
|
ReiNog/CurryInv
|
ipca.py
|
ipca.py
|
py
| 6,923 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74182080829
|
#!/usr/bin/env python
from __future__ import print_function
import boto3
from botocore.exceptions import ClientError
import json
import argparse
import time
import random
import uuid
ALL_POLICY = '''{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "Stmt''' + str(random.randint(100000, 999999)) +'''",
"Effect": "Allow",
"Action": "*",
"Resource": "*"
}
]
}
'''
def main(args):
if args.user_name:
attach_policy(args.user_name, 'UserName')
put_policy(args.user_name, 'UserName')
elif args.role_name:
attach_policy(args.role_name, 'RoleName')
put_policy(args.role_name, 'RoleName')
elif args.group_name:
attach_policy(args.group_name, 'GroupName')
put_policy(args.group_name, 'GroupName')
else:
print('No user, role, or group specified. Quitting.')
def attach_policy(principal, principal_name):
result = False
client = boto3.client('iam')
attach_policy_funcs = {
'UserName': client.attach_user_policy,
'RoleName': client.attach_role_policy,
'GroupName': client.attach_group_policy
}
attach_policy_func = attach_policy_funcs[principal_name]
try:
response = attach_policy_func(**{
principal_name: principal,
'PolicyArn': 'arn:aws:iam::aws:policy/AdministratorAccess'
}
)
result = True
print('AdministratorAccess policy attached successfully to ' + principal)
except ClientError as e:
print(e.response['Error']['Message'])
return result
def put_policy(principal, principal_name):
result = False
client = boto3.client('iam')
put_policy_funcs = {
'UserName': client.put_user_policy,
'RoleName': client.put_role_policy,
'GroupName': client.put_group_policy
}
put_policy_func = put_policy_funcs[principal_name]
try:
response = put_policy_func(**{
principal_name: principal,
'PolicyName': str(uuid.uuid4()),
'PolicyDocument': ALL_POLICY
}
)
result = True
print('All action policy attached successfully to ' + principal)
except ClientError as e:
print(e.response['Error']['Message'])
return result
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Attempts to add an admin and all actions policy to the given role, user, or group.")
parser.add_argument('-u',
'--user-name')
parser.add_argument('-r',
'--role-name')
parser.add_argument('-g',
'--group-name')
args = parser.parse_args()
main(args)
|
dagrz/aws_pwn
|
elevation/add_iam_policy.py
|
add_iam_policy.py
|
py
| 2,724 |
python
|
en
|
code
| 1,106 |
github-code
|
6
|
14698252975
|
from flask import Flask, request, jsonify
from SSAPI import app, api, db, guard
from flask_restplus import Resource, reqparse, inputs
import flask_praetorian
from SSAPI.models import *
@api.route('/Scrimmages')
class ScrimmageList(Resource):
@flask_praetorian.auth_required
def get(self):
""" Returns a list of Scrimmages """
current_id = flask_praetorian.current_user().id
current_user_roles = flask_praetorian.current_user().roles
# Filtering/sorting
parser = reqparse.RequestParser()
parser.add_argument('role', type=str) # role (advisor or presenter)
parser.add_argument('all', type=inputs.boolean) # all admin only
parser.add_argument('scrimmage_complete', type=inputs.boolean) # Completed?
args = parser.parse_args()
query = None
if args["all"]:
if "admin" in current_user_roles:
query = Scrimmage.query
else:
query = Scrimmage.query.filter(
(Scrimmage.advisors.any(User.id == current_id)) |
(Scrimmage.presenters.any(User.id == current_id)))
else:
query = Scrimmage.query.filter(
(Scrimmage.advisors.any(User.id == current_id)) |
(Scrimmage.presenters.any(User.id == current_id)))
if args["role"]:
if "advisor" in args["role"]:
query = query.filter(
Scrimmage.advisors.any(User.id == current_id))
if "presenter" in args["role"]:
query = query.filter(Scrimmage.presenters.any(
User.id == current_id))
if args["scrimmage_complete"] is not None:
query = query.filter(
Scrimmage.scrimmage_complete == args["scrimmage_complete"])
ret = []
result = query.all()
for i in result:
ret.append(i.as_dict())
resp = jsonify(ret)
return resp
@flask_praetorian.auth_required
def post(self):
""" Create a new Scrimmage """
parser = reqparse.RequestParser()
parser.add_argument('subject', required=True, type=str)
parser.add_argument('schedule', required=True, type=str)
parser.add_argument('scrimmage_type', required=True, type=str)
parser.add_argument('presenters', required=True, type=list, location="json")
parser.add_argument('max_advisors', type=int)
args = parser.parse_args()
if not args["max_advisors"]:
args["max_advisors"] = 5
new_scrimmage = Scrimmage(subject=args["subject"],
schedule=args["schedule"],
scrimmage_complete=False,
scrimmage_type=args["scrimmage_type"],
max_advisors=args["max_advisors"])
for i in args["presenters"]:
scrimmage_user = User.query.filter_by(id=i).first()
if "presenter" in scrimmage_user.roles:
new_scrimmage.presenters.append(scrimmage_user)
else:
resp = jsonify({"message": "Unable to locate or invalid user for presenter"})
resp.status_code = 400
return resp
db.session.add(new_scrimmage)
db.session.commit()
resp = jsonify(new_scrimmage.as_dict())
resp.status_code = 200
return resp
@api.route('/Scrimmages/<int:id>')
class Scrimmages(Resource):
@flask_praetorian.auth_required
def get(self, id):
""" Returns info about a Scrimmage """
scrimmage = Scrimmage.query.filter_by(id=id).first()
return jsonify(scrimmage.as_dict())
@flask_praetorian.auth_required
def post(self, id):
""" Updates a scrimmage """
scrimmage = Scrimmage.query.filter_by(id=id).first()
parser = reqparse.RequestParser()
parser.add_argument('subject', type=str)
parser.add_argument('schedule', type=str)
parser.add_argument('scrimmage_type', type=str)
parser.add_argument('presenters', type=list, location="json")
parser.add_argument('advisors', type=list, location="json")
parser.add_argument('max_advisors', type=int)
parser.add_argument('scrimmage_complete', type=inputs.boolean)
args = parser.parse_args()
# If I am an admin, OR one of the presenters, I can modify
user_id = flask_praetorian.current_user().id
user = User.query.filter_by(id=user_id).first()
if (user in scrimmage.presenters or
'admin' in flask_praetorian.current_user().roles):
update_dict = {}
for param in args.keys():
if args[param]:
new_presenters = []
new_advisors = []
if "presenters" in param:
for i in args[param]:
new_presenter = User.query.filter_by(id=i).first()
if new_presenter and 'presenter' in new_presenter.roles:
new_presenters.append(new_presenter)
else:
resp = jsonify({"message": "Unable to locate or invalid user for presenter"})
resp.status_code = 400
return resp
scrimmage.presenters = new_presenters
elif "advisors" in param:
for i in args[param]:
new_advisor = User.query.filter_by(id=i).first()
if new_advisor and 'advisor' in new_advisor.roles:
new_advisors.append(new_advisor)
else:
resp = jsonify({"message": "Unable to locate or invalid user for advisor"})
resp.status_code = 400
return resp
scrimmage.advisors = new_advisors
else:
update_dict[param] = args[param]
if update_dict:
Scrimmage.query.filter_by(id=id).update(update_dict)
db.session.commit()
else:
resp = jsonify({"message": "Unauthorized to update"})
resp.status_code = 401
return resp
resp = jsonify(scrimmage.as_dict())
resp.status_code = 200
return resp
@flask_praetorian.auth_required
def delete(self, id):
""" Delete a Scrimmage """
# If I am an admin, OR one of the presenters, I can delete
user_id = flask_praetorian.current_user().id
user = User.query.filter_by(id=user_id).first()
scrimmage = Scrimmage.query.filter_by(id=id).first()
if (user in scrimmage.presenters or
'admin' in flask_praetorian.current_user().roles):
Scrimmage.query.filter_by(id=id).delete()
db.session.commit()
return 'Scrimmage Deleted', 204
return 'UNAUTHORIZED', 401
|
ktelep/SSAPI
|
SSAPI/scrimmage_views.py
|
scrimmage_views.py
|
py
| 7,157 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71588771707
|
import pytest
from pytest import approx
from brownie import chain
from brownie.test import given, strategy
from decimal import Decimal
from .utils import RiskParameter, transform_snapshot
@pytest.fixture(autouse=True)
def isolation(fn_isolation):
pass
@given(
initial_fraction=strategy('decimal', min_value='0.001', max_value='0.500',
places=3),
peek_fraction=strategy('decimal', min_value='0.001', max_value='0.500',
places=3),
dt=strategy('uint256', min_value='10', max_value='600'))
def test_volume_bid(state, market, feed, initial_fraction,
peek_fraction, dt, ovl, bob):
# have bob initially build a short to init volume
cap_notional = market.params(RiskParameter.CAP_NOTIONAL.value)
input_collateral = initial_fraction * cap_notional
input_leverage = 1000000000000000000
input_is_long = False
input_price_limit = 0
# approve max for bob
ovl.approve(market, 2**256-1, {"from": bob})
# build position for bob
market.build(input_collateral, input_leverage, input_is_long,
input_price_limit, {"from": bob})
# mine the chain forward
chain.mine(timedelta=dt)
fraction = int(peek_fraction * Decimal(1e18))
snap = market.snapshotVolumeBid()
data = feed.latest()
(_, micro_window, _, _, _, _, _, _) = data
# calculate what the volume bid should be given snapshot value
timestamp = chain[-1]['timestamp']
window = micro_window
value = fraction
snap = transform_snapshot(snap, timestamp, window, value)
(_, _, accumulator) = snap
expect = int(accumulator)
actual = int(state.volumeBid(market, fraction))
assert expect == approx(actual)
@given(
initial_fraction=strategy('decimal', min_value='0.001', max_value='0.500',
places=3),
peek_fraction=strategy('decimal', min_value='0.001', max_value='0.500',
places=3),
dt=strategy('uint256', min_value='10', max_value='600'))
def test_volume_ask(state, market, feed, initial_fraction,
peek_fraction, dt, ovl, alice):
# have alice initially build a long to init volume
cap_notional = market.params(RiskParameter.CAP_NOTIONAL.value)
input_collateral = initial_fraction * cap_notional
input_leverage = 1000000000000000000
input_is_long = True
input_price_limit = 2**256 - 1
# approve max for alice
ovl.approve(market, 2**256-1, {"from": alice})
# build position for alice
market.build(input_collateral, input_leverage, input_is_long,
input_price_limit, {"from": alice})
# mine the chain forward
chain.mine(timedelta=dt)
fraction = int(peek_fraction * Decimal(1e18))
snap = market.snapshotVolumeAsk()
data = feed.latest()
(_, micro_window, _, _, _, _, _, _) = data
# calculate what the volume ask should be given snapshot value
timestamp = chain[-1]['timestamp']
window = micro_window
value = fraction
snap = transform_snapshot(snap, timestamp, window, value)
(_, _, accumulator) = snap
expect = int(accumulator)
actual = int(state.volumeAsk(market, fraction))
assert expect == approx(actual)
@given(
initial_fraction_alice=strategy('decimal', min_value='0.001',
max_value='0.500', places=3),
initial_fraction_bob=strategy('decimal', min_value='0.001',
max_value='0.500', places=3),
dt=strategy('uint256', min_value='10', max_value='600'))
def test_volumes(state, market, feed, ovl, alice, bob,
initial_fraction_alice, initial_fraction_bob,
dt):
# have alice and bob initially build a long and short to init volume
cap_notional = market.params(RiskParameter.CAP_NOTIONAL.value)
input_collateral_alice = initial_fraction_alice * cap_notional
input_leverage_alice = 1000000000000000000
input_is_long_alice = True
input_price_limit_alice = 2**256 - 1
input_collateral_bob = initial_fraction_bob * cap_notional
input_leverage_bob = 1000000000000000000
input_is_long_bob = False
input_price_limit_bob = 0
# approve max for alice and bob
ovl.approve(market, 2**256-1, {"from": alice})
ovl.approve(market, 2**256-1, {"from": bob})
# build positions for alice and bob
market.build(input_collateral_alice, input_leverage_alice,
input_is_long_alice, input_price_limit_alice, {"from": alice})
market.build(input_collateral_bob, input_leverage_bob, input_is_long_bob,
input_price_limit_bob, {"from": bob})
# mine the chain forward
chain.mine(timedelta=dt)
data = feed.latest()
(_, micro_window, _, _, _, _, _, _) = data
# calculate what the bid should be given snapshot value
snap_bid = market.snapshotVolumeBid()
timestamp_bid = chain[-1]['timestamp']
window_bid = micro_window
snap_bid = transform_snapshot(snap_bid, timestamp_bid, window_bid, 0)
(_, _, accumulator_bid) = snap_bid
# calculate what the ask should be given snapshot value
snap_ask = market.snapshotVolumeAsk()
timestamp_ask = chain[-1]['timestamp']
window_ask = micro_window
snap_ask = transform_snapshot(snap_ask, timestamp_ask, window_ask, 0)
(_, _, accumulator_ask) = snap_ask
expect_volume_bid = int(accumulator_bid)
expect_volume_ask = int(accumulator_ask)
(actual_volume_bid, actual_volume_ask) = state.volumes(market)
assert expect_volume_bid == approx(int(actual_volume_bid))
assert expect_volume_ask == approx(int(actual_volume_ask))
|
overlay-market/v1-periphery
|
tests/state/test_volume.py
|
test_volume.py
|
py
| 5,680 |
python
|
en
|
code
| 3 |
github-code
|
6
|
27673024131
|
import torch
from torch import nn
def init_weights_(m: nn.Module,
val: float = 3e-3):
if isinstance(m, nn.Linear):
m.weight.data.uniform_(-val, val)
m.bias.data.uniform_(-val, val)
class Actor(nn.Module):
def __init__(self,
state_dim: int,
action_dim: int,
max_action: float = None,
dropout: float = None,
hidden_dim: int = 256,
uniform_initialization: bool = False) -> None:
super().__init__()
if dropout is None:
dropout = 0
self.max_action = max_action
self.actor = nn.Sequential(
nn.Linear(state_dim, hidden_dim),
nn.Dropout(dropout),
nn.ReLU(),
nn.Linear(hidden_dim, hidden_dim),
nn.Dropout(dropout),
nn.ReLU(),
nn.Linear(hidden_dim, action_dim)
)
def forward(self, state: torch.Tensor) -> torch.Tensor:
action = self.actor(state)
if self.max_action is not None:
return self.max_action * torch.tanh(action)
return action
class Critic(nn.Module):
def __init__(self,
state_dim: int,
action_dim: int,
hidden_dim: int = 256,
uniform_initialization: bool = False) -> None:
super().__init__()
self.q1_ = nn.Sequential(
nn.Linear(state_dim + action_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, 1)
)
self.q2_ = nn.Sequential(
nn.Linear(state_dim + action_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, 1)
)
def forward(self,
state: torch.Tensor,
action: torch.Tensor):
concat = torch.cat([state, action], 1)
return self.q1_(concat), self.q2_(concat)
def q1(self,
state: torch.Tensor,
action: torch.Tensor) -> torch.Tensor:
return self.q1_(torch.cat([state, action], 1))
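# Usage sketch (shapes and dimensions are illustrative assumptions, not taken from the original repo):
#
#   actor = Actor(state_dim=17, action_dim=6, max_action=1.0)
#   critic = Critic(state_dim=17, action_dim=6)
#   state = torch.randn(32, 17)
#   action = actor(state)            # (32, 6), squashed to [-max_action, max_action] by tanh
#   q1, q2 = critic(state, action)   # two independent Q estimates, each of shape (32, 1)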
|
zzmtsvv/rl_task
|
spot/modules.py
|
modules.py
|
py
| 2,231 |
python
|
en
|
code
| 8 |
github-code
|
6
|
40124065659
|
from pymongo.mongo_client import MongoClient
from pymongo.server_api import ServerApi
import certifi
from pprint import pprint
class database:
def __init__(self):
uri = "mongodb+srv://user:[email protected]/?retryWrites=true&w=majority"
# Create a new client and connect to the server
self.client = MongoClient(uri, tlsCAFile=certifi.where())
# Send a ping to confirm a successful connection
try:
self.client.admin.command('ping')
print("Pinged your deployment. You successfully connected to MongoDB!")
except Exception as e:
print(e)
self.db = self.client['Harvest-Hero']
|
SteveHuy/Harvest-Hero
|
Database+APIs/database.py
|
database.py
|
py
| 721 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43984207586
|
# gdpyt-analysis: test.test_fit_3dsphere
"""
Notes
"""
# imports
from os.path import join
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from correction import correct
from utils import fit, plotting, functions
# read dataframe
fp = '/Users/mackenzie/Desktop/gdpyt-characterization/experiments/02.07.22_membrane_characterization/analysis/tests/compare-interior-particles-per-test/' \
'df_id11.xlsx'
df = pd.read_excel(fp)
microns_per_pixel = 1.6
correctX = df.x.to_numpy()
correctY = df.y.to_numpy()
correctZ = df.z_corr.to_numpy()
raw_data = np.stack([correctX, correctY, correctZ]).T
xc = 498 * microns_per_pixel
yc = 253 * microns_per_pixel
zc = 3
r_edge = 500 * microns_per_pixel
# fit a sphere to 3D points
def fit_sphere(spX, spY, spZ):
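    # The sphere (x - x0)^2 + (y - y0)^2 + (z - z0)^2 = r^2 is linear in the unknowns once
    # rewritten as 2*x*x0 + 2*y*y0 + 2*z*z0 + c = x^2 + y^2 + z^2 with c = r^2 - x0^2 - y0^2 - z0^2,
    # so the center and c can be found with a single least-squares solve; r is recovered below
    # from r^2 = c + x0^2 + y0^2 + z0^2.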
# Assemble the A matrix
spX = np.array(spX)
spY = np.array(spY)
spZ = np.array(spZ)
A = np.zeros((len(spX), 4))
A[:, 0] = spX * 2
A[:, 1] = spY * 2
A[:, 2] = spZ * 2
A[:, 3] = 1
# Assemble the f matrix
f = np.zeros((len(spX), 1))
f[:, 0] = (spX * spX) + (spY * spY) + (spZ * spZ)
C, residules, rank, singval = np.linalg.lstsq(A, f)
# solve for the radius
t = (C[0] * C[0]) + (C[1] * C[1]) + (C[2] * C[2]) + C[3]
radius = math.sqrt(t)
return radius, C[0], C[1], C[2]
# fit a sphere to 3D points
def fit_spherexy(spX, spY, spZ, xc, yc):
# Assemble the A matrix
spX = np.array(spX)
spY = np.array(spY)
spZ = np.array(spZ)
A = np.zeros((len(spX), 2))
A[:, 0] = spZ * 2
A[:, 1] = 1
# Assemble the f matrix
f = np.zeros((len(spX), 1))
f[:, 0] = (spX * spX) + (spY * spY) + (spZ * spZ) - (2 * spX * xc) - (2 * spY * yc) # + xc ** 2 + yc ** 2
# least squares fit
C, residules, rank, singval = np.linalg.lstsq(A, f)
# solve for the radius
t = (xc**2) + (yc**2) + (C[0] * C[0]) + C[1]
radius = math.sqrt(t)
return radius, C[0]
def fit_ellipsoid_from_center(X, Y, Z, xc, yc, zc, r):
X = np.array(X)
Y = np.array(Y)
Z = np.array(Z)
f = np.zeros((len(X), 1))
f[:, 0] = -1 * ((Z * Z) - (2 * zc * Z) + (zc * zc))
A = np.zeros((len(X), 1))
A[:, 0] = ((X * X) - (2 * xc * X) + (xc * xc) + (Y * Y) - (2 * yc * Y) + (yc * yc)) / (r * r) - 1
# least squares fit
C, residules, rank, singval = np.linalg.lstsq(A, f)
# solve for radius in z-dir.
r_z = math.sqrt(C[0])
return r_z
def calc_spherical_angle(r, xyz):
"""
Given a point (x, y, z) approx. on a sphere of radius (r), return the angle phi and theta of that point.
:param r:
:param xyz:
:return:
"""
x, y, z = xyz[0], xyz[1], xyz[2]
if np.abs(z) > r:
return np.nan, np.nan
else:
phi = np.arccos(z / r)
if x < 0 and y < 0:
theta_half = np.arccos(x / (r * np.sin(phi)))
theta_diff = np.pi - theta_half
theta = np.pi + theta_diff
else:
theta = np.arccos(x / (r * np.sin(phi)))
return phi, theta
# fit 3d ellipsoid
r_z = fit_ellipsoid_from_center(correctX, correctY, correctZ, xc, yc, zc, r_edge)
# general 3d sphere fit
rr, xx0, yy0, zz0 = fit_sphere(correctX, correctY, correctZ)
# custom 3d sphere fit
r, z0 = fit_spherexy(correctX, correctY, correctZ, xc, yc)
x0, y0 = xc, yc
phis = []
thetas = []
for i in range(raw_data.shape[0]):
x, y, z, = raw_data[i, 0], raw_data[i, 1], raw_data[i, 2]
dx = x - x0
dy = y - y0
dz = z - z0
if x < x0 * 0.5:
phi, theta = calc_spherical_angle(r, xyz=(dx, dy, dz))
if any([np.isnan(phi), np.isnan(theta)]):
continue
else:
# phis.append(phi)
thetas.append(theta)
if x < x0:
phi, theta = calc_spherical_angle(r, xyz=(dx, dy, dz))
if any([np.isnan(phi), np.isnan(theta)]):
continue
else:
phis.append(phi)
phis = np.array(phis)
thetas = np.array(thetas)
# ----------------------------------- PLOTTING ELLIPSOID
custom_ellipsoid = True
if custom_ellipsoid:
u = np.linspace(thetas.min(), thetas.max(), 20)
v = np.linspace(0, np.pi/2, 20)
u, v = np.meshgrid(u, v)
xe = r_edge * np.cos(u) * np.sin(v)
ye = r_edge * np.sin(u) * np.sin(v)
ze = r_z * np.cos(v)
xe = xe.flatten() + xc
ye = ye.flatten() + yc
ze = ze.flatten() + zc
# --- plot sphere
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# ax.plot_wireframe(x, y, z, color="r")
#ax.plot_surface(xe, ye, ze, cmap='coolwarm', alpha=0.5)
ax.scatter(xe, ye, ze, zdir='z', s=20, c='r', rasterized=True)
ax.scatter(correctX, correctY, correctZ, zdir='z', s=2, c='b', rasterized=True, alpha=0.25)
ax.set_xlabel(r'$x \: (\mu m)$')
ax.set_ylabel(r'$y \: (\mu m)$')
ax.set_zlabel(r'$z \: (\mu m)$')
ax.view_init(15, 255)
plt.show()
raise ValueError('ah')
# ----------------------------------- PLOTTING SPHERES
gen_sphere, custom_sphere = True, True
# --- calculate points on sphere
if custom_sphere:
u, v = np.mgrid[thetas.min():thetas.max():20j, 0:phis.max():20j]
x=np.cos(u)*np.sin(v)*r
y=np.sin(u)*np.sin(v)*r
z=np.cos(v)*r
x = x + x0
y = y + y0
z = z + z0
# --- plot sphere
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# ax.plot_wireframe(x, y, z, color="r")
ax.plot_surface(x, y, z, cmap='coolwarm', alpha=0.5)
ax.scatter(correctX, correctY, correctZ, zdir='z', s=20, c='b', rasterized=True)
ax.set_xlabel(r'$x \: (\mu m)$')
ax.set_ylabel(r'$y \: (\mu m)$')
ax.set_zlabel(r'$z \: (\mu m)$')
ax.view_init(15, 255)
plt.show()
# plot sphere viewed from above
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(x, y, z, cmap='coolwarm', alpha=0.5)
ax.scatter(correctX, correctY, correctZ, zdir='z', s=20, c='b', rasterized=True)
ax.set_xlabel(r'$x \: (\mu m)$')
ax.set_ylabel(r'$y \: (\mu m)$')
ax.set_zlabel(r'$z \: (\mu m)$')
ax.view_init(90, 255)
plt.show()
if gen_sphere:
x2 = np.cos(u) * np.sin(v) * rr
y2 = np.sin(u) * np.sin(v) * rr
z2 = np.cos(v) * rr
x2 = x2 + xx0
y2 = y2 + yy0
z2 = z2 + zz0
# plot spheres
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(x, y, z, cmap='coolwarm', alpha=0.5)
ax.plot_surface(x2, y2, z2, cmap='cool', alpha=0.5)
# ax.scatter(correctX, correctY, correctZ, zdir='z', s=20, c='b', rasterized=True)
ax.set_xlabel(r'$x \: (\mu m)$')
ax.set_ylabel(r'$y \: (\mu m)$')
zlabel = ax.set_zlabel(r'$z \: (\mu m)$')
ax.view_init(15, 255)
plt.show()
j = 1
|
sean-mackenzie/gdpyt-analysis
|
test/test_fit_3dsphere.py
|
test_fit_3dsphere.py
|
py
| 6,764 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29788980815
|
from collections import Counter
import numpy as np
import pandas as pd
import pickle
from sklearn import svm, model_selection, neighbors
from sklearn.ensemble import VotingClassifier, RandomForestClassifier
from sklearn.model_selection import cross_validate, train_test_split
# processing data for Machine Learning
# groups of companies are likely to move together; some are going to move first
# pricing data is converted to % change - these will be our features, and the labels will be the target (buy, sell or hold)
# we ask the data a question based on the price changes - within 7 days did the price go up or not (buy if yes, sell if no)
# each model is built on a per-company basis
def process_data_for_labels(ticker):
# next 7 days if price goes up or down
hm_days = 7
df = pd.read_csv('sp500_joined_closes.csv', index_col = 0)
tickers = df.columns.values.tolist()
df.fillna(0, inplace = True)
for i in range(1, hm_days+1):
# price in 2 days from now - todays price / todays price * 100
df['{}_{}d'.format(ticker, i)] = (df[ticker].shift(-i) - df[ticker]) / df[ticker] # (shift (-i) to move future prices up in table)
df.fillna(0, inplace = True)
return tickers, df
# function to detect buy,sell or hold stocks
def buy_sell_hold(*args):
cols = [c for c in args]
requirement = 0.02
for col in cols:
if col > requirement: # buy
return 1
if col < -requirement: # sell
return -1
return 0 # hold
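# Example (illustrative): with requirement = 0.02, buy_sell_hold(0.01, 0.03, -0.01) returns 1
# because the second day's change exceeds +2%, while buy_sell_hold(0.01, -0.01, 0.015) returns 0
# since no day moves by more than 2% in either direction.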
def extract_featuresets(ticker):
tickers, df = process_data_for_labels(ticker)
# creating maps of either buy, sell or hold for 7 days
df['{}_target'.format(ticker)] = list(map( buy_sell_hold,
df['{}_1d'.format(ticker)],
df['{}_2d'.format(ticker)],
df['{}_3d'.format(ticker)],
df['{}_4d'.format(ticker)],
df['{}_5d'.format(ticker)],
df['{}_6d'.format(ticker)],
df['{}_7d'.format(ticker)],
))
# values are assigned to a list
vals = df['{}_target'.format(ticker)].values.tolist()
str_vals = [str(i) for i in vals]
    # print the data spread to see how the target values are distributed
print ('Data spread: ', Counter(str_vals))
df.fillna(0, inplace=True)
# replaces any infinite increase since it may be an IPO to a NaN
df = df.replace([np.inf,-np.inf], np.nan)
# dropping NaN
df.dropna(inplace=True)
# values are normalised in % change from yesterday
df_vals = df[[ticker for ticker in tickers ]].pct_change()
df_vals = df_vals.replace([np.inf,-np.inf], 0)
df_vals.fillna(0, inplace=True)
# x feature sets, y are labels
X = df_vals.values
y = df['{}_target'.format(ticker)].values
return X,y, df
def do_ml(ticker):
# where x is our target values and y is the value from buy_sell_hold() either 0,1,-1
X, y, df = extract_featuresets(ticker)
# training x and y using train_test_split with test_size of 25%
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size = 0.25)
# creating a classifier
# clf = neighbors.KNeighborsClassifier()
clf = VotingClassifier([('lsvc',svm.LinearSVC()), ('knn', neighbors.KNeighborsClassifier()),
('rfor', RandomForestClassifier(n_estimators=100))])
# fit x and y train into classifier
clf.fit(X_train, y_train)
# to know confidence of the data
confidence = clf.score(X_test, y_test)
print('Accuracy: ', confidence)
# predictions predicts x_test(futuresets)
predictions = clf.predict(X_test)
print('Predicted spread:', Counter(predictions))
return confidence
do_ml('TWTR')
|
mihir13/python_for_finance
|
PythonForFinance9.py
|
PythonForFinance9.py
|
py
| 3,409 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9651796880
|
import utils
from utils import *
# Arguments available
def parse_args():
parser = argparse.ArgumentParser(description='Task1')
parser.add_argument('--image_path', type=str, default=None,
help='Path to an image on which to apply Task1 (absolute or relative path)')
parser.add_argument('--save_path', type=str, default=None,
help='Path where to save the output of the algorithm (absolute or relative path)')
parser.add_argument('--path_dir', type=str, default="./dataset/Task1/",
help='Path to the directory where we want to apply Task1 (absolute or relative path)')
parser.add_argument('--save_dir', type=str, default='./dataset/predictions/Task1/',
help='Path where to save the directory where we want to apply Task1 (absolute or relative path)')
parser.add_argument('--no_file', type=str, default=None,
help='Apply the algorithm on the image specified by this number, that is located on path_dir. The output is saved on save_dir location')
parser.add_argument('--verbose', type=str, default='0',
help='Print intermediate output from the algorithm. Choose 0/1')
args = parser.parse_args()
return args
# Computes the logic behind task1.
# - first apply get_map to remove the scoring table and non-relevant ice-surfaces.
# - it finds and filters multiple circles extracted using houghCircles algorithm.
# Finally, it saves the result in the specified file.
def task1(image_path, save_path=None, verbose=0):
image = cv2.imread(image_path)
image = get_map(image=image, verbose=verbose)
image_all_circles, image_filtered_circles, circles_dict = get_hough_circles(image=image, min_radius=10, max_radius=25, minDist=30, dp=1, param1=150, param2=15,verbose=verbose)
if verbose:
utils.show_images([image, image_all_circles, image_filtered_circles], nrows=2, ncols=2)
string_to_write_in_file = "\n".join([str(len(circles_dict ["all_circles"])), str(len(circles_dict ["red_circles"])), str(len(circles_dict ["yellow_circles"]))])
if save_path != None and save_path != "":
with open(save_path, "w+") as f:
f.write(string_to_write_in_file)
print("The output was saved at location: {}!".format(save_path))
print(string_to_write_in_file)
#image_path = save_path.replace(".txt", ".png")
#cv2.imwrite(image_path, image_filtered_circles)
return circles_dict
if __name__ == "__main__":
args = parse_args()
verbose = ord(args.verbose) - ord('0')
if args.image_path != None:
try:
task1(image_path=args.image_path,
save_path=args.save_path,
verbose=verbose)
except:
            raise Exception("An exception occurred during the execution of Task1!")
else:
os.makedirs(args.save_dir, exist_ok=True)
if args.no_file != None:
try:
image_path = os.path.join(args.path_dir, "{}.png".format(args.no_file))
save_path = os.path.join(args.save_dir, "{}_predicted.txt".format(args.no_file))
print("Processing the image located at: {}".format(image_path))
task1(image_path=image_path,
verbose=verbose,
save_path=save_path)
except:
                raise Exception("An exception occurred during the execution of Task1 for the image located at: {}!".format(image_path))
else:
for no_file in range(1, 26):
try:
image_path = os.path.join(args.path_dir, "{}.png".format(no_file))
save_path = os.path.join(args.save_dir, "{}_predicted.txt".format(no_file))
print("Processing the image located at: {}".format(image_path))
task1(image_path=image_path,
verbose=verbose,
save_path=save_path)
except:
                    raise Exception("An exception occurred during the execution of Task1 for the image located at: {}!".format(image_path))
|
SebastianCojocariu/Curling-OpenCV
|
task_1.py
|
task_1.py
|
py
| 3,759 |
python
|
en
|
code
| 1 |
github-code
|
6
|
23088053555
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Code by: Magnus Øye, Dated: 12.11-2018
Contact: [email protected]
Website: https://github.com/magnusoy/Balancing-Platform
"""
# Importing packages
import numpy as np
from numpy import sqrt, sin, cos, pi, arccos
import matplotlib.pylab as plt
# Plot style
plt.style.use("bmh")
# Constants
L = 45 # Length of one side
Z0 = 8.0 # Start lifting height
A = 4.0 # Center offset
r = 9.0 # Radius
countsPerRev = 400000 # Motor counts per revolution
pitch = 0 # Movement in Y-axis
roll = 0 # Movement in X-axis
anglesPitch = np.linspace(-0.139626, 0.139626, num=50) # Array of linearly spaced angels from -8, 8 degrees
anglesRoll = np.linspace(-0.139626, 0.139626, num=50) # Array of linearly spaced angels from -8, 8 degrees
# Lists for holding simulation data
X = []
Y1 = []
Y2 = []
Y3 = []
# Simulating platform movements
for angle in anglesPitch:
deg = angle * 180 / pi
pitch = angle
roll = 0
# Motor lift height
z1 = ((sqrt(3) * L) / 6) * sin(pitch) * cos(roll) + ((L/2)*sin(roll)) + Z0
z2 = ((sqrt(3) * L) / 6) * sin(pitch) * cos(roll) - ((L/2)*sin(roll)) + Z0
z3 = -((sqrt(3) * L) / 3) * sin(pitch) * cos(roll) + Z0
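    # The arccos expressions below follow from the law of cosines for the triangle formed by the
    # offset A, the radius r and the required lift height z:
    # cos(angle) = (z^2 + A^2 - r^2) / (2 * A * z).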
# Motor angles in radians
angleM1 = arccos(((z1**2) + (A**2) - (r**2)) / (2.0 * A * z1))
angleM2 = arccos(((z2**2) + (A**2) - (r**2)) / (2.0 * A * z2))
angleM3 = arccos(((z3**2) + (A**2) - (r**2)) / (2.0 * A * z3))
# Motor angles in degrees
degreeM1 = (angleM1 * 180.0) / pi
degreeM2 = (angleM2 * 180.0) / pi
degreeM3 = (angleM3 * 180.0) / pi
# Motor position in counts
    outM1 = angleM1 * (countsPerRev / (2 * pi))
    outM2 = angleM2 * (countsPerRev / (2 * pi))
    outM3 = angleM3 * (countsPerRev / (2 * pi))
# Adding values in array for visual representation
X.append(deg)
Y1.append(z1)
Y2.append(z2)
Y3.append(z3)
# Plotting values
fig, axes = plt.subplots(1, 3, constrained_layout=True)
fig.suptitle('Pitch +/- 8 degrees | Roll +/- 0 degrees', size=16)
ax_m1 = axes[0]
ax_m2 = axes[1]
ax_m3 = axes[2]
ax_m1.set_title('Motor 1 lift height')
ax_m2.set_title('Motor 2 lift height')
ax_m3.set_title('Motor 3 lift height')
ax_m1.set_xlabel('Rotation [degrees]')
ax_m2.set_xlabel('Rotation [degrees]')
ax_m3.set_xlabel('Rotation [degrees]')
ax_m1.set_ylabel('Height [cm]')
ax_m2.set_ylabel('Height [cm]')
ax_m3.set_ylabel('Height [cm]')
ax_m1.set_xlim(-8, 8)
ax_m2.set_xlim(-8, 8)
ax_m3.set_xlim(-8, 8)
ax_m1.set_ylim(0, 15)
ax_m2.set_ylim(0, 15)
ax_m3.set_ylim(0, 15)
ax_m1.plot(X, Y1, label='M1')
ax_m2.plot(X, Y2, label='M2')
ax_m3.plot(X, Y3, label='M3')
ax_m1.legend()
ax_m2.legend()
ax_m3.legend()
# Showing values
plt.show()
|
magnusoy/Balancing-Platform
|
src/balancing_platform/util/graphs.py
|
graphs.py
|
py
| 2,702 |
python
|
en
|
code
| 7 |
github-code
|
6
|
15306305960
|
""" WENO Lax-Friedrichs
Author: Pierre-Yves Taunay
Date: November 2018
"""
import numpy as np
import matplotlib.pyplot as plt
###############
#### SETUP ####
###############
# Grid
npt = 200
L = 2
dz = L/npt
zvec = np.linspace(-L/2 + dz/2,L/2-dz/2,npt)
EPS = 1e-16
# Time
dt = dz / 1 * 0.4
tmax = 2000
tc = 0
# Scheme
# Flux can be 'LF', 'LW', 'FORCE' ,'FLIC'
order = 5
flux_type = 'FORCE'
# Data holders
#uvec = np.ones(len(zvec))
uvec = np.zeros(len(zvec))
def f_0(u):
# -0.8 < x < -0.6
b = (zvec>=-0.8) & (zvec<=-0.6)
z = zvec[b]
u[b] = np.exp(-np.log(2)*(z+0.7)**2/9e-4)
# -0.4 < x < -0.2
b = (zvec>=-0.4) & (zvec<=-0.2)
u[b] = 1
# 0 < x < 0.2
b = (zvec>=0) & (zvec<=0.2)
z = zvec[b]
u[b] = 1 - np.abs(10*z-1)
# 0.4 < x < 0.6
b = (zvec>=0.4) & (zvec<=0.6)
z = zvec[b]
u[b] = np.sqrt(1- 100*(z-0.5)**2)
# b = (zvec>=-0.5) & (zvec<=0.5)
# u[b] = 0
f_0(uvec)
u0 = uvec
#######################
#### TIME MARCHING ####
#######################
idx = 0
### WENO 3
# vi+1/2[0]^L : 1/2 v_i + 1/2 v_{i+1}
# vi+1/2[1]^L : -1/2 v_{i-1} + 3/2 v_i
# vi-1/2[0]^R : 3/2 v_i - 1/2 v_{i+1}
# vi-1/2[1]^R : 1/2 v_{i-1} + 1/2 v_i
def compute_weights(up1,u,um1):
if order == 3:
d0 = 2/3
d1 = 1/3
beta0 = (up1-u)**2
beta1 = (u-um1)**2
alpha0 = d0 / (EPS+beta0)**2
alpha1 = d1 / (EPS+beta1)**2
alphat0 = d1 / (EPS+beta0)**2
alphat1 = d0 / (EPS+beta1)**2
alphasum = alpha0+alpha1
alphatsum = alphat0 + alphat1
w0 = alpha0 / alphasum
w1 = alpha1 / alphasum
wt0 = alphat0 / alphatsum
wt1 = alphat1 / alphatsum
return w0,w1,wt0,wt1
elif order == 5:
up2 = np.roll(u,-2)
um2 = np.roll(u,2)
d0 = 3/10
d1 = 3/5
d2 = 1/10
beta0 = 13/12*(u-2*up1+up2)**2 + 1/4*(3*u-4*up1+up2)**2
beta1 = 13/12*(um1-2*u+up1)**2 + 1/4*(um1-up1)**2
beta2 = 13/12*(um2-2*um1+u)**2 + 1/4*(um2-4*um1+3*u)**2
alpha0 = d0/(EPS+beta0)**2
alpha1 = d1/(EPS+beta1)**2
alpha2 = d2/(EPS+beta2)**2
alphat0 = d2/(EPS+beta0)**2
alphat1 = d1/(EPS+beta1)**2
alphat2 = d0/(EPS+beta2)**2
alphasum = alpha0 + alpha1 + alpha2
alphatsum = alphat0 + alphat1 + alphat2
w0 = alpha0/alphasum
w1 = alpha1/alphasum
w2 = alpha2/alphasum
wt0 = alphat0/alphatsum
wt1 = alphat1/alphatsum
wt2 = alphat2/alphatsum
return w0,w1,w2,wt0,wt1,wt2
def compute_lr(up1,u,um1):
if order == 3:
u0p = 1/2*u + 1/2*up1
u1p = -1/2*um1 + 3/2*u
u0m = 3/2*u - 1/2*up1
u1m = 1/2*um1 + 1/2*u
w0,w1,wt0,wt1 = compute_weights(up1,u,um1)
uL = w0*u0p + w1*u1p
uR = wt0*u0m + wt1*u1m
elif order == 5:
up2 = np.roll(up1,-1)
um2 = np.roll(um1,1)
u0m = 11/6*u - 7/6*up1 + 1/3*up2
u1m = 1/3*um1 + 5/6*u - 1/6*up1
u2m = -1/6*um2 + 5/6*um1 + 1/3*u
u0p = 1/3*u + 5/6*up1 - 1/6*up2
u1p = -1/6*um1 + 5/6*u + 1/3*up1
u2p = 1/3*um2 -7/6*um1 + 11/6*u
w0,w1,w2,wt0,wt1,wt2 = compute_weights(up1,u,um1)
uL = w0*u0p + w1*u1p + w2*u2p
uR = wt0*u0m + wt1*u1m + wt2*u2m
return uL,uR
def flux(u):
return u
def compute_flux(u):
# u_{i+1}, u_{i-1}
up1 = np.roll(u,-1)
um1 = np.roll(u,1)
# Reconstruct the data on the stencil
uL, uR = compute_lr(up1,u,um1)
# Compute the RHS flux
up1h = np.roll(uR,-1) # This will contain u_{i+1/2}^R
um1h = np.roll(uL,1) # This will contain u_{i-1/2}^L
fpR = 0
fpL = 0
if flux_type == 'LF':
fpR = compute_flux_lf(uL,up1h)
fpL = compute_flux_lf(um1h,uR)
elif flux_type == 'LW':
fpR = compute_flux_lw(uL,up1h)
fpL = compute_flux_lw(um1h,uR)
elif flux_type == 'FORCE':
fpR = compute_flux_force(uL,up1h)
fpL = compute_flux_force(um1h,uR)
return -1/dz * (fpR-fpL)
def compute_flux_lf(uL,uR):
### Left, right
fL = flux(uL)
fR = flux(uR)
alpha = 1 # Derivative of flux
return 1/2*(fL+fR-alpha*(uR-uL))
def compute_flux_lw(uL,uR):
alpha = 1
u_lw = 1/2 * (uL+uR) - 1/2*alpha*(flux(uR)-flux(uL))
return flux(u_lw)
def compute_flux_force(uL,uR):
f_lf = compute_flux_lf(uL,uR)
f_lw = compute_flux_lw(uL,uR)
return 1/2*(f_lf + f_lw)
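# Time marching: third-order strong-stability-preserving Runge-Kutta (Shu-Osher form)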
while tc<tmax:
u = uvec
u1 = u + dt * compute_flux(u)
u2 = 3/4*u + 1/4*u1 + 1/4* dt * compute_flux(u1)
unp1 = 1/3*u + 2/3*u2 + 2/3 * dt * compute_flux(u2)
uvec = unp1
tc = tc+dt
plt.plot(zvec,u0,'-')
plt.plot(zvec,uvec,'o')
print("L1:",np.sum(np.abs(u0-uvec)/len(u0)))
|
pytaunay/weno-tests
|
python/advection_1d/weno-advection.py
|
weno-advection.py
|
py
| 5,051 |
python
|
en
|
code
| 1 |
github-code
|
6
|
71579186747
|
""" Optimizes GPST model hyperparameters via Optuna. """
import os
import time
import json
import shutil
import logging
import argparse
import tempfile
import datetime
import optuna
from train import train
from lumber import get_log
from arguments import get_args
def main() -> None:
""" Run an Optuna study. """
datestring = str(datetime.datetime.now())
datestring = datestring.replace(" ", "_")
log_path = get_log("snow")
logging.getLogger().setLevel(logging.INFO) # Setup the root logger.
logging.getLogger().addHandler(logging.FileHandler(log_path))
optuna.logging.enable_propagation() # Propagate logs to the root logger.
optuna.logging.disable_default_handler() # Stop showing logs in stderr.
study = optuna.create_study()
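    # create_study() minimizes by default, so the training loss returned by objective() is minimized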
logging.getLogger().info("Start optimization.")
study.optimize(objective, n_trials=100)
def objective(trial: optuna.Trial) -> float:
"""
Optuna objective function. Should never be called explicitly.
Parameters
----------
trial : ``optuna.Trial``, required.
The trial with which we define our hyperparameter suggestions.
Returns
-------
loss : ``float``.
The output from the model call after the timeout value specified in ``snow.sh``.
"""
parser = argparse.ArgumentParser()
parser = get_args(parser)
args = parser.parse_args()
# Set arguments.
args.num_train_epochs = 10000
args.stationarize = False
args.normalize = False
args.seq_norm = False
args.seed = 42
args.max_grad_norm = 3
args.adam_epsilon = 7.400879524874149e-08
args.warmup_proportion = 0.0
args.sep = ","
batch_size = 64
n_positions = 30
agg_size = 1
# Commented-out trial suggestions should be placed at top of block.
# args.stationarize = trial.suggest_categorical("stationarize", [True, False])
# agg_size = trial.suggest_discrete_uniform("agg_size", 1, 40, 5)
# args.warmup_proportion = trial.suggest_uniform("warmup_proportion", 0.05, 0.4)
# batch_size = trial.suggest_discrete_uniform("train_batch_size", 4, 64, 4)
args.weight_decay = trial.suggest_loguniform("weight_decay", 0.0001, 0.01)
args.learning_rate = trial.suggest_loguniform("learning_rate", 8e-7, 5e-3)
args.train_batch_size = int(batch_size)
args.aggregation_size = int(agg_size)
logging.getLogger().info(str(args))
# Set config.
config = {}
config["initializer_range"] = 0.02
config["n_head"] = 8
config["n_embd"] = 256
config["n_layer"] = 6
config["input_dim"] = 300
config["orderbook_depth"] = 6
config["horizon"] = 30
config["modes"] = [
"bid_classification",
"bid_increase",
"bid_decrease",
"ask_classification",
"ask_increase",
"ask_decrease",
]
# Commented-out trial suggestions should be placed at top of block.
# config["n_head"] = int(trial.suggest_discrete_uniform("n_head", 4, 16, 4))
# config["n_embd"] = int(trial.suggest_discrete_uniform("n_embd", 64, 128, 8))
# config["n_layer"] = trial.suggest_int("n_layer", 4, 8)
n_positions = int(trial.suggest_discrete_uniform("n_ctx", 60, 600, 30))
config["layer_norm_epsilon"] = trial.suggest_loguniform("layer_eps", 1e-5, 1e-3)
config["resid_pdrop"] = trial.suggest_loguniform("resid_pdrop", 0.01, 0.15)
config["attn_pdrop"] = trial.suggest_loguniform("attn_pdrop", 0.1, 0.3)
config["initializer_range"] = trial.suggest_loguniform("initrange", 0.005, 0.04)
config["n_positions"] = n_positions
config["n_ctx"] = n_positions
dirpath = tempfile.mkdtemp()
config_filename = str(time.time()) + ".json"
config_filepath = os.path.join(dirpath, config_filename)
with open(config_filepath, "w") as path:
json.dump(config, path)
args.gpst_model = config_filepath
args.model_name = "optuna"
args.trial = trial
loss = train(args)
shutil.rmtree(dirpath)
return loss
if __name__ == "__main__":
main()
|
langfield/spred
|
spred/gpst/optimize.py
|
optimize.py
|
py
| 4,018 |
python
|
en
|
code
| 3 |
github-code
|
6
|
37164877474
|
import torch
import numpy
import pandas
import sys
import os
import copy
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
#Global option defaults that can be changed later by command line
gcm_folder_path : str = "gcms"
target_folder_path : str = "targets"
class_index = "cat5"
use_cuda : bool = False
train_split: float = 0.8
test_split: float = 0.1
validation_split: float = 0.1
batch_size: int = 10
max_training_epochs: int = 200
CMD_HELP : str = """Options:
--cuda
uses nVidia CUDA acceleration for tensor calculations (recommended)
--batch-size <batch size>
sets the mini-batch size to use for training. Defaults to 10 if not supplied
--gcms-path <folder/directory path>
sets the path for the GCM CSV files to use as input. Defaults to ./gcms if not supplied
--targets_path <folder/directory path>
sets the path for the CSV files that contains the "cat5" class label column. Defaults to ./targets if not supplied. Note that a model is trained for each file that is found.
--validation_percentage
sets the percentage of instances to use as the validation set
--test_percentage
sets the percentage of instances to use as the final test set
"""
torch.set_printoptions(precision = 10)
def normalise(t: torch.tensor):
max: float = t.max()
min: float = t.min()
t = ((t - min) / (max - min)) #implicit broadcasting applied on scalars
return t
def parse_command_line():
i = 1 #sys.argv[0] contains the script name itself and can be ignored
while i < len(sys.argv):
if sys.argv[i] == "-h" or sys.argv[i] == "--help":
print(CMD_HELP)
sys.exit()
elif sys.argv[i] == "--gcms-path":
i += 1
global gcm_folder_path
gcm_folder_path = sys.argv[i]
elif sys.argv[i] == "--classlabel":
i += 1
global class_index
class_index = sys.argv[i]
elif sys.argv[i] == "--cuda":
global use_cuda
use_cuda = True
elif sys.argv[i] == "--targets-path":
i += 1
global target_folder_path
target_folder_path = sys.argv[i]
elif sys.argv[i] == "--test-percentage":
i += 1
global test_split
test_percentage = float(sys.argv[i]) / 100.0
elif sys.argv[i] == "--validation-percentage":
i += 1
global validation_split
validation_percentage = float(sys.argv[i]) / 100.0
elif sys.argv[i] == "--batch-size":
i += 1
global batch_size
batch_size = int(sys.argv[i])
elif sys.argv[i] == "--max-epochs":
i += 1
global max_training_epochs
max_training_epochs = int(sys.argv[i])
else:
print("Unknown argument: " + sys.argv[i] + "\n Use \"gcm-cnn -h\" to see valid commands")
sys.exit()
i += 1
global train_split
train_split = 1.0 - test_split - validation_split
assert(train_split > 0), "No instances left for training. Did the sum of your test and validation holdout percentages exceed 100%?"
assert(batch_size > 0), "Batch size can't be negative!!!"
def read_gcm_folder(path: str): #returns a folder of GCM CSVs as a 4-channel PyTorch Tensors
filenames = os.listdir(path)
files = []
for i in range(0, len(filenames)):
nextfile = pandas.read_csv((path + "/" + filenames[i]), sep=",", skiprows=3, header=None) #explicitly skip 3 rows to discard header, longitude, latitude
nextfile = nextfile.drop(nextfile.columns[0], axis=1)
nextfile = torch.from_numpy(nextfile.values).type(torch.FloatTensor)
if use_cuda == True:
nextfile = nextfile.cuda()
nextfile = nextfile.reshape(288,131,360)
nextfile = normalise(nextfile)
files.append(nextfile)
return torch.stack(files, dim=1)
def read_target_folder(path: str): #returns a folder of CSVs containing the class label as a list of PyTorch Tensors
filenames = os.listdir(path)
files = []
for i in range(0, len(filenames)):
nextfile = pandas.read_csv((path + "/" + filenames[i]), sep=",")
nextfile = nextfile[class_index] + 2
nextfile = torch.from_numpy(nextfile.values).type(torch.LongTensor)
if use_cuda == True:
nextfile = nextfile.cuda()
files.append(nextfile)
return files
class Network(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(in_channels=len(os.listdir(gcm_folder_path)), out_channels=6, kernel_size=5)
self.conv2 = nn.Conv2d(in_channels=6, out_channels=12, kernel_size=5)
self.fc1 = nn.Linear(in_features= 30276, out_features=120)
self.fc2 = nn.Linear(in_features=120, out_features=60)
self.out = nn.Linear(in_features=60, out_features=5)
        #note hyperparameter choice is arbitrary except initial in and final out
        #they are dependent on the input channels (one per GCM file) and output classes (5 since 5 classes on cat5) respectively
def forward(self, t):
# implement the forward pass
# (1) input layer
t = t #usually omitted since this is obviously trivial; size 360*131
# (2) hidden conv layer
t = self.conv1(t) #Haven't implemented wrapping - so after a 5x5 convolution, discard borders meaning feature maps are now 6 * 127 * 356 (Channels * height * width)
t = F.relu(t)
t = F.avg_pool2d(t, kernel_size=2, stride=2)
        #pooling 2x2 with stride 2 - reduces to 6 * 63 * 178 (channels * height * width)
# (3) hidden conv layer
t = self.conv2(t)
t = F.relu(t)
t = F.avg_pool2d(t, kernel_size=2, stride=2)
#pooling 2x2 with stride 2 - reduces to 12 * 29 * 87
# (4) hidden linear layer
t = t.reshape(-1, 12 * 29 * 87)
t = self.fc1(t)
t = F.relu(t)
# (5) hidden linear layer
t = self.fc2(t)
t = F.relu(t)
# (6) output layer
t = self.out(t)
#t = F.softmax(t, dim=1) #implicitly performed by F.cross_entropy()
return t
#Setting options from command line
parse_command_line()
#print(target_tensors[0].size()[0])
#Reading files from disk into PyTorch tensors
label_tensors = read_target_folder(target_folder_path)
gcm_tensor = read_gcm_folder(gcm_folder_path)
#Split the gcm_tensor into train, validation, test tensors
instances = gcm_tensor.size()[0]
train_tensor = gcm_tensor[:int(instances * train_split)] #note int() truncates/floors
validation_tensor = gcm_tensor[int(instances * train_split):int(instances * (train_split + validation_split))]
test_tensor = gcm_tensor[int(instances * (train_split + validation_split)):]
#Now we set up a loop to train a network for each label file that was present
for n in range(0, len(label_tensors)):
#Creating pytorch dataset and dataloader for easy access to minibatch sampling without replacement in randomnised order
train_set = torch.utils.data.TensorDataset(train_tensor, (label_tensors[n])[ : int(instances * train_split)])
train_loader = torch.utils.data.DataLoader(train_set, batch_size = batch_size, shuffle=True)
validation_set = torch.utils.data.TensorDataset(validation_tensor, (label_tensors[n])[int(instances * train_split) : int(instances * (train_split + validation_split))])
validation_loader = torch.utils.data.DataLoader(validation_set, batch_size=validation_tensor.size()[0], shuffle = False)
test_set = torch.utils.data.TensorDataset(test_tensor, (label_tensors[n])[int(instances * (train_split + validation_split)) : ])
test_loader = torch.utils.data.DataLoader(test_set, batch_size = test_tensor.size()[0], shuffle = False)
#Initialising the CNN and gradient descender (optimizer)
network = Network()
if use_cuda == True:
network = network.cuda()
optimizer = optim.SGD(network.parameters(), lr = 0.01)
#running the training loop
epoch_correct : int = 0
epoch_loss : float = 0
lowest_valid_loss : float = float('inf')
epochs_without_improvement = 0
best_network = copy.deepcopy(network)
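    # early stopping: keep a deep copy of the model with the lowest validation loss and stop after 10 epochs without improvement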
print("results for", os.listdir(target_folder_path)[n])
for epoch in range(0, max_training_epochs):
previous_epoch_loss = epoch_loss
epoch_correct = 0
epoch_loss = 0
for images, labels in train_loader:
#Getting predictions before any training on this batch has occurred
predictions = network(images)
loss = F.cross_entropy(predictions, labels)
#making the gradient step for this batch
optimizer.zero_grad()
loss.backward()
optimizer.step()
epoch_correct += predictions.argmax(dim=1).eq(labels).int().sum().item()
epoch_loss += loss.item()
valid_preds = network(validation_tensor)
valid_loss = F.cross_entropy(valid_preds, label_tensors[n][int(instances * train_split) : int(instances * (train_split + validation_split))])
if (lowest_valid_loss > valid_loss) :
lowest_valid_loss = valid_loss
best_network = copy.deepcopy(network)
epochs_without_improvement = 0
else:
epochs_without_improvement += 1
if (epochs_without_improvement > 10) :
print("stopping early")
break
print("epoch: ", epoch, "\ttrain_loss: ", round(epoch_loss, 5), "\ttrain_correct: ", epoch_correct, "\tvalidation_loss: ", round(valid_loss.item(),5), sep='' )
test_preds = best_network(test_tensor)
test_loss = F.cross_entropy(test_preds, label_tensors[n][int(instances * (train_split + validation_split)) : ])
test_correct = test_preds.argmax(dim=1).eq(label_tensors[n][int(instances * (train_split + validation_split)) : ]).int().sum().item()
print("test_correct: ", test_correct, "/", test_preds.size()[0], "\ttest_loss: ", round(test_loss.item(), 5), sep='' )
|
tigerwxu/gcm-cnn
|
gcm-cnn.py
|
gcm-cnn.py
|
py
| 10,296 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3145047297
|
#!/usr/bin/env python
#!/users/legarreta/opt/miniconda3/envs/sims/bin/python
#!/usr/bin/python3
# Get RMSDs from multiple files
# INPUT: a directory with one sub-directory per trajectory, each holding a .psf and a .dcd file
USAGE="rmsds-trajectory-ALL.py <input dir>"
import os, sys
def main ():
# Check command line arguments
args = sys.argv
if (len(args) < 2):
print (USAGE)
sys.exit (0)
inputDir = args [1] # trajectories
outputDir = "out-rmsds"
# Get RMSDs for each DCD file
allRMSDValuesProtein = []
allRMSDValuesLigand = []
startTime = 1
subDirs = os.listdir (inputDir)
os.system ("mkdir %s" % outputDir)
for subdir in subDirs:
subdirPath = "%s/%s" % (inputDir, subdir)
psfFile = [x for x in os.listdir (subdirPath) if ".psf" in x][0]
dcdFile = [x for x in os.listdir (subdirPath) if ".dcd" in x][0]
psfPath = "%s/%s" % (subdirPath, psfFile)
dcdPath = "%s/%s" % (subdirPath, dcdFile)
print (">>> ", subdir, psfPath, dcdPath)
outFile = "%s/rmsds-protein-%s.csv" % (outputDir, os.path.basename (subdir))
rmsdsProtein = getRMSDsFile (psfPath, dcdPath, startTime, "protein", outFile)
outFilename = "%s/RMSDs-protein-%s.csv" % (outputDir, subdir)
writeRMSDsToFile (rmsdsProtein, outFilename)
outFile = "%s/rmsds-ligand-%s.csv" % (outputDir, os.path.basename (subdir))
rmsdsLigand = getRMSDsFile (psfPath, dcdPath, startTime, "ligand", outFile)
outFilename = "%s/RMSDs-%s-ligand.csv" % (outputDir, subdir)
writeRMSDsToFile (rmsdsLigand, outFilename)
allRMSDValuesProtein.extend (rmsdsProtein)
allRMSDValuesLigand.extend (rmsdsLigand)
allRMSDValues = allRMSDValuesProtein
allRMSDValues.extend (allRMSDValuesLigand)
outFilename = "%s/RMSDs-%s.csv" % (outputDir, inputDir)
writeRMSDsToFile (allRMSDValues, outFilename)
#allRMSDsFile = open (outFilename, "w")
#allRMSDsFile.write ("POSE, FRAME, TYPE, RMSD\n")
#allRMSDsFile.writelines (allRMSDValues)
#--------------------------------------------------------------------
#--------------------------------------------------------------------
def writeRMSDsToFile (rmsds, outFilename):
allRMSDsFile = open (outFilename, "w")
allRMSDsFile.write ("POSE, FRAME, TYPE, RMSD\n")
allRMSDsFile.writelines (rmsds)
allRMSDsFile.close()
#--------------------------------------------------------------------
# Get RMSDs from trayectory using a VMD script
def getRMSDsFile (psfFile, dcdFile, startTime, rmsdType, outFile=""):
# Get and save RMSDs from namd out file
if (outFile==""):
outFile = os.path.basename ("%s_RMSD.csv" % dcdFile.split(".dcd")[0])
prefix = os.path.basename (dcdFile).split("_")[0]
else:
prefix = os.path.basename (outFile).split(".")[0].split ("-")[-1]
cmm = ""
if (rmsdType=="protein"):
cmm = "rmsds-trajectory-protein.tcl %s %s %s" % (psfFile, dcdFile, outFile)
elif (rmsdType=="ligand"):
cmm = "rmsds-trajectory-ligand.tcl %s %s %s" % (psfFile, dcdFile, outFile)
print (">>> ", cmm)
os.system (cmm)
#createRMSDTable (outFile)
values = open (outFile).readlines()[1:]
N = len (values)
lines = [",".join (x) for x in zip ([prefix]*N, values)]
return (lines)
#--------------------------------------------------------------------
# Create CSV table from RMSD values in data file
def createRMSDTable (datFile):
rmsdValues = open (datFile).readlines ()
steps = [str(x) for x in range (0, len (rmsdValues))]
stepsRMSDs = [",".join (x) for x in zip (steps, rmsdValues)]
csvFilename = "%s.csv" % datFile.split (".csv")[0]
csvFile = open (csvFilename, "w")
csvFile.write ("STEPS,RMSDs\n")
csvFile.writelines (stepsRMSDs)
csvFile.close()
#--------------------------------------------------------------------
main()
|
luisgarreta/dockingBCL2
|
scripts/rmsds-trajectory-ALL.py
|
rmsds-trajectory-ALL.py
|
py
| 3,642 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1306545281
|
import os, datetime
def call_msra():
terr = input('код территории: ')
if terr == "":
print()
call_msra()
comp = input('номер АРМа: ')
if comp == "":
print()
call_msra()
else:
os.system(r'C:\Windows\System32\msra.exe /offerra kmr-' + terr + '-' + comp)
try:
logging(1)
except BaseException:
print("Ошибка при запоси в log.txt")
print()
call_msra()
# read log.txt and update the counter of opened workstations (ARMs) in it
# cnt - number of items added to the log
def logging(cnt):
today = str(datetime.date.today())
# проверка, что файл существует. Если нет - то создается
try:
outputFile = open("log.txt", "r")
except FileNotFoundError:
outputFile = open("log.txt", "w+")
print("", file=outputFile)
    # copy the non-empty lines of the file into lines
lines = []
for line in outputFile:
if line.rstrip() != "":
lines.append(line.rstrip())
outputFile.close()
    # check that the file is not empty and that the header row is present
if len(lines) == 0:
lines.insert(0, "Date Count")
lastLine = lines[-1]
elif lines[0] != "Date Count":
lines.insert(0, "Date Count")
lastLine = lines[-1]
else:
lastLine = lines[-1]
    # check whether today's date is already in the file
    # if not, append it with a counter of 1
    # if it is, read the counter and increment it
if lastLine.split()[0] != today:
lines.append(today + " 1")
f = open("log.txt", "w")
for line in lines:
if line != "":
print(line, file=f)
f.close()
else:
        # make sure today's counter holds a number
try:
oldCount = int(lastLine.split()[1])
except ValueError:
oldCount = 0
print("\n Счетчик за сегодня сброшен из-за нечислового значения!\n")
lines[-1] = today + " " + str(oldCount + cnt)
f = open("log.txt", "w")
for line in lines:
if line != "":
print(line, file=f)
f.close()
print('Данная программа открывает msra c параметром /offerra kmr-????-???')
call_msra()
|
Aarghe/some_scripts
|
msra/msra.py
|
msra.py
|
py
| 2,782 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
20921293526
|
import numpy as np
import matplotlib.pyplot as plt
def plot_results(results, range_param, label='', color='r', marker='o'):
mean_results = np.mean(results, axis=1)
min_results = np.mean(results, axis=1) - np.std(results, axis=1)
max_results = np.mean(results, axis=1) + np.std(results, axis=1)
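    # plot the mean curve and a shaded band of plus/minus one standard deviation across runs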
plt.plot(range_param, mean_results, marker=marker, color=color, label=label)
plt.fill_between(range_param, min_results, max_results, facecolor=color, interpolate=True, alpha=.2)
def save_results(results, range_param, directory, file_name):
file = open(directory + file_name, "w")
for i in range_param:
file.write(str(i) + " ")
file.write("\n")
for result_list in results:
for result in result_list:
file.write(str(result) + " ")
file.write(str("\n"))
file.close()
def load_results(directory, file_name):
file = open(directory + file_name, "r")
range_param = []
results = []
for i, line in enumerate(file):
if i == 0:
            range_param = list(map(float, line.split()))
else:
            results.append(list(map(float, line.split())))
return range_param, results
def save_clusters(G, clusters, label, directory, file_name):
clusters_sorted = sorted(clusters, key=len, reverse=True)
file = open(directory + file_name, "w")
for i, c in enumerate(clusters_sorted):
c_sorted = sorted(c, key=G.degree, reverse=True)
file.write("\n\nCluster " + str(i) + " (" + str(len(c)) +" nodes)")
for u in c_sorted:
file.write("\n" + str(u) + ": " + label[u])
file.close()
|
sharpenb/Multi-Scale-Modularity-Graph-Clustering
|
Scripts/experiments/results_manager.py
|
results_manager.py
|
py
| 1,613 |
python
|
en
|
code
| 2 |
github-code
|
6
|
70929697148
|
from tasks import Task
from workWithFiles import *
class WorkWithUser:
def __init__(self):
self.task: Task = Task.NONE
self.idNote: int = None
self.title: str = None
self.item: str = None
self.datetime_min: datetime = None
self.datetime_max: datetime = None
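        # the class behaves as a small state machine: 'task' selects the current operation and the remaining fields collect the input it still needs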
def initial_state(self):
self.task = Task.NONE
self.idNote = None
self.title = None
self.item = None
self.datetime_min = None
self.datetime_max = None
def parse_string_datetime_limits(self, string):
splitted = string.split(',')
if len(splitted) != 2:
return 'сплит по запятой должен дать 2 элемента'
parsed_left = splitted[0].strip()
if parsed_left.lower() == 'no':
self.datetime_min = None
else:
result = parse_datetime(parsed_left)
if result == None:
return 'не спарсено выражение ' + parsed_left
self.datetime_min = result
parsed_right = splitted[1].strip()
if parsed_right.lower() == 'no':
self.datetime_max = None
else:
result = parse_datetime(parsed_right)
if result == None:
return 'не спарсено выражение ' + parsed_right
self.datetime_max = result
if self.datetime_min != None and self.datetime_max != None and self.datetime_min > self.datetime_max:
return 'некооректно заданы границы: нижняя превышает верхнюю'
return None
def print_offer(self):
if self.task == Task.NONE:
print('\nВыберите операцию:')
print('1. Вывести список заметок')
print('2. Создать новую заметку')
print('3. Отредактировать заметку')
print('4. Просмотреть заметку')
print('5. Удалить заметку')
elif self.task == Task.GET_LIST:
print('Для формирования выборки заметок укажите границы дат слева и справа, через запятую в формате "%m-%d-%Y %H:%M:%S"')
print("Если дата не установлена, введите 'no'")
print('Примеры: 08-01-2023 17:47:35, 08-10-2023 23:59:59')
print(' no, 08-10-2023 23:59:59')
print(' no, no (будут выведены все заметки)')
elif self.task == Task.ADD:
if self.title == None:
print('Введите название заметки:')
else:
print('Введите текст заметки:')
elif self.task == Task.EDIT:
if self.idNote == None:
print(
'Введите id заметки для редактирования. Посмотреть id заметок можно в выборе п.1')
else:
print('введите новое содержимое заметки:')
elif self.task == Task.GET:
print('Введите id заметки для просмотра. Посмотреть id заметок можно в выборе п.1')
elif self.task == Task.DELETE:
print('Введите id заметки для удаления. Посмотреть id заметок можно в выборе п.1')
def handler_message_user(self, message_user):
if message_user == '':
print('Введена пустая строка')
return
if self.task == Task.NONE:
if message_user == '1':
self.task = Task.GET_LIST
elif message_user == '2':
self.task = Task.ADD
elif message_user == '3':
self.task = Task.EDIT
elif message_user == '4':
self.task = Task.GET
elif message_user == '5':
self.task = Task.DELETE
else:
print('Некорректный ввод')
elif self.task == Task.GET_LIST:
errorMessage = self.parse_string_datetime_limits(message_user)
if errorMessage != None:
print('Ошибка ввода данных: ' + errorMessage)
else:
list_notes, errorMessage = get_notes(self.datetime_min, self.datetime_max)
if errorMessage != None:
print('Ошибка get_notes: ' + errorMessage)
else:
if len(list_notes) == 0:
print('заметок нет')
else:
print('Список заметок (отсортирован по id):')
for el in list_notes:
print(
f"id:{el['id']}, '{el['title']}', создана: {el['datetime']}")
self.initial_state()
elif self.task == Task.ADD:
if self.title == None:
self.title = message_user
else:
self.item = message_user
errorMessage = handler_files(self.task, self.title, self.item, self.idNote)
if errorMessage == None:
print('заметка успешно создана')
self.initial_state()
else:
print(errorMessage)
elif self.task == Task.EDIT:
list_notes, errorMessage = get_notes(None, None)
if errorMessage != None:
print('Ошибка get_notes: ' + errorMessage)
else:
if self.idNote == None:
for el in list_notes:
if message_user == str(el['id']):
self.idNote = el['id']
print(f'Выбранная заметка: id = {self.idNote}')
print(f"Заголовок(название): {el['title']}")
print(f"Содержимое: {el['item']}")
print(f"Дата-время создания: {el['datetime']}")
break
if self.idNote == None:
print('Неверно указан id')
else:
self.item = message_user
errorMessage = handler_files(self.task, self.title, self.item, self.idNote)
if errorMessage == None:
print('заметка успешно отредактирована')
self.initial_state()
else:
print(errorMessage)
elif self.task == Task.GET:
list_notes, errorMessage = get_notes(None, None)
if errorMessage != None:
print('Ошибка get_notes: ' + errorMessage)
else:
found = False
for el in list_notes:
if message_user == str(el['id']):
print('Выбранная заметка: id = ' + message_user)
print(f"Заголовок(название): {el['title']}")
print(f"Содержимое: {el['item']}")
print(f"Дата-время создания: {el['datetime']}")
self.initial_state()
found = True
break
if not found:
print('Неверно указан id')
elif self.task == Task.DELETE:
list_notes, errorMessage = get_notes(None, None)
if errorMessage != None:
print('Ошибка get_notes: ' + errorMessage)
else:
found = False
for el in list_notes:
if message_user == str(el['id']):
found = True
self.idNote = el['id']
errorMessage = handler_files(self.task, self.title, self.item, self.idNote)
if errorMessage == None:
print('заметка успешно удалена')
self.initial_state()
else:
print(errorMessage)
break
if not found:
print('Неверно указан id')
|
galkinnikolay/HomeworkPython
|
workWithUser.py
|
workWithUser.py
|
py
| 8,880 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
13305672302
|
import cv2
#image read
"""
img = cv2.imread("Resources/lena.png")
cv2.imshow("Output",img)
cv2.waitKey(0)
"""
#video read
"""
cap = cv2.VideoCapture("Resources/test_video.mp4")
while True:
success, img = cap.read()
cv2.imshow("Video",img)
if cv2.waitKey(1) & 0xFF ==ord('q'):
break
"""
#webcam
cap = cv2.VideoCapture(2)
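# property ids: 3 = frame width, 4 = frame height, 10 = brightness (cv2.CAP_PROP_*)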
cap.set(3,640)
cap.set(4,480)
cap.set(10,100)
while True:
    success, img = cap.read()
    cv2.imshow("Video",img)
    if cv2.waitKey(1) & 0xFF ==ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
|
Umang-Seth/General_OpenCV_Fn
|
Load_Webcam.py
|
Load_Webcam.py
|
py
| 543 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8499616042
|
import os
import sys
user_path = os.environ.get("USER_PATH")
sys.path.append(user_path)
from data_structures.min_heap import MinHeap
def k_largest_elements(arr, k):
temp = []
heap = MinHeap(temp)
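    # temp is the list backing MinHeap, so temp[0] is assumed to always hold the current minimum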
for elmnt in arr:
if len(heap) < k:
heap.insert(elmnt)
else:
if elmnt > temp[0]:
heap.extract_min()
heap.insert(elmnt)
output = []
for _ in range(0, k):
output.append(heap.extract_min())
output.reverse()
return output
if __name__ == "__main__":
arr = [10,20,30,50,100,15]
k = 3
output = k_largest_elements(arr, k)
print(output)
|
mathivanansoft/algorithms_and_data_structures
|
data_structures/k_largest_elements.py
|
k_largest_elements.py
|
py
| 654 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5405379024
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import random # imports relevant libraries
import operator
import matplotlib.pyplot
import agentframework
import csv
import matplotlib.animation
num_of_agents = 10
num_of_iterations = 100
neighbourhood = 20
f = open('datain.txt') # opens csv file from directory
reader = csv.reader(f, quoting=csv.QUOTE_NONNUMERIC) # read the csv, converting unquoted values to floats
environment = [] #creates empty list to hold all data for environment
agents = [] #make list called agents
for row in reader: # A list of rows
rowlist = [] #creates empty list for rows
for value in row: # A list of value
rowlist.append(value) #move row values to row list
environment.append(rowlist) # append row lists to environment list
#print(environment) # Floats
matplotlib.pyplot.imshow(environment) #use this library to display raster values from environment list
matplotlib.pyplot.show()
f.close() # closes reader
def distance_between(agent0, agent1): #new function for the Pythagorean distance between two looped agents
return (((agent0.x - agent1.x)**2) + ((agent0.y - agent1.y)**2))**0.5
# Make the agents.
for i in range(num_of_agents):
agents.append(agentframework.Agent(environment,agents))
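# each agent gets references to the shared environment raster and the agent list, used later for eating and sharing with neighbours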
# Move the agents.
for j in range(num_of_iterations):
for i in range(num_of_agents):
agents[i].move()
agents[i].eat()
agents[i].share_with_neighbours(neighbourhood)
matplotlib.pyplot.ylim(0, 99)
matplotlib.pyplot.xlim(0, 99)
matplotlib.pyplot.imshow(environment)
for i in range(num_of_agents):
matplotlib.pyplot.scatter(agents[i].x, agents[i].y)
matplotlib.pyplot.show()
for agent0 in agents:
for agent1 in agents:
distance = distance_between(agent0, agent1)
|
cman2000/Portfolioabm
|
model.py
|
model.py
|
py
| 1,832 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4537498830
|
from DEFINITIONS import *
from Structures import ERTree
from AF import make_create_condition, AFD, AFND, State
class ER:
def __init__(self, erid, regex):
self.id = erid
self.regex = regex
def parse_generic(self, ps):
try:
i = ps.index("-")
replace_string = ""
while True:
if ps[i-1] in DIG and ps[i+1] in DIG and DIG.index(ps[i-1]) < DIG.index(ps[i+1]):
replace_string = UNION.join(
DIG[DIG.index(ps[i-1]):DIG.index(ps[i+1])+1])
else:
raise Exception("Erro de formação")
ps = ps.replace(ps[i-1:i+2], replace_string)
i = ps.index("-")
except ValueError:
self.regex = ps
def get_entries(self):
E = []
ignore_next = False
ignore_chars = ["(", UNION, CLOSURE, ")", ONEORNONE]
add_next = False
for i in range(len(self.regex)):
if add_next:
add_next = False
E.append(self.regex[i])
if ignore_next:
ignore_next = False
continue
if self.regex[i] == "\\":
add_next = True
continue
if self.regex[i] not in ignore_chars:
if self.regex[i] == "-" and i-1 >= 0 and i+1 < len(self.regex) and self.regex[i-1] not in ignore_chars and self.regex[i+1] not in ignore_chars:
ignore_next = True
E.pop()
E.append(self.regex[i-1:i+2])
else:
E.append(self.regex[i])
return list(set(E))
def get_entries_from_group(self, group):
[start, end] = list(map(lambda x: DIG.index(x), group.split("-")))
E = []
for c in range(len(DIG)):
if start > end:
if c >= start or c <= end:
E.append(DIG[c])
else:
if c >= start and c <= end:
E.append(DIG[c])
return E
def check_entry_in_group(self, e, group):
[start, end] = list(map(lambda x: DIG.index(x), group.split("-")))
E = self.get_entries_from_group(group)
return e in E
def get_afd(self, debug=False):
# create tree from regex
tree = ERTree()
[leaves, fp_nodes] = tree.create_tree(self.regex)
if debug:
tree.pretty_print()
# get followpos
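        # classic direct regex-to-DFA construction: concatenation nodes add firstpos(right) to followpos of lastpos(left); star nodes add their own firstpos to followpos of their lastpos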
followpos = dict((str(el.nid), []) for el in leaves)
for node in fp_nodes:
if node.value == ".":
for i in node.nodes[0].lastpos():
followpos[str(i.nid)] += node.nodes[1].firstpos()
followpos[str(i.nid)] = list(set(followpos[str(i.nid)]))
elif node.value == "*":
for i in node.lastpos():
followpos[str(i.nid)] += node.firstpos()
followpos[str(i.nid)] = list(set(followpos[str(i.nid)]))
for k, v in followpos.items():
v.sort()
if debug:
print(k, list(map(lambda x: x.nid, v)))
# start the afd creation
K = []
E = self.get_entries()
E.sort()
create_condition = make_create_condition(E)
S_firstpos = list(map(lambda x: x.nid, tree.root.firstpos()))
S_firstpos.sort()
S_name = ','.join(map(str, S_firstpos))
S = State(S_name)
K.append(S)
Dstates = {}
Dstates[S_name] = False
T = {}
F = []
# get states and transitions
while False in Dstates.values():
cur_state_name = [k for k, v in Dstates.items() if v == False][0]
cur_state = [k for k in K if k == cur_state_name][0]
cur_state_entries = cur_state_name.split(',')
cur_state_transitions = []
Dstates[cur_state_name] = True
for e in E:
U_list = []
for cse in cur_state_entries:
if len([l for l in leaves if cse == str(l.nid) and l.value == e]) > 0:
U_list += followpos[cse]
if (len(U_list) == 0):
cur_state_transitions.append(None)
continue
U_list = list(set(U_list))
U_list.sort()
U_name = ','.join(map(lambda x: str(x.nid), U_list))
U = None
if U_name not in Dstates.keys():
Dstates[U_name] = False
U = State(U_name)
K.append(U)
else:
U = [k for k in K if k == U_name][0]
if str(leaves[-1].nid) in U_name and U not in F:
F.append(U)
cur_state_transitions.append(U)
T[cur_state.id] = create_condition(cur_state_transitions)
new_E = []
for e in E:
if len(e) > 1 and "-" in e:
new_E += self.get_entries_from_group(e)
else:
new_E += e
new_E = list(set(new_E))
new_E.sort()
        # expand generic character groups (i.e. a-z) into individual entries
create_condition = make_create_condition(new_E)
new_T = {}
for k in K:
transitions = []
for e in new_E:
next_states = []
if e in E:
next_states.append(T[k.id](e))
groups = [g for g in E if len(
g) > 1 and "-" in g and self.check_entry_in_group(e, g)]
for g in groups:
next_states.append(T[k.id](g))
next_states = [ns for ns in next_states if ns is not None]
if len(next_states) == 0:
next_states = None
transitions.append(next_states)
new_T[k.id] = create_condition(transitions)
for f in F:
f.regex_final_id = [self.id]
        # parsing generic groups generates an AFND (NFA), so determinization is needed
afd = AFND(K, new_E, new_T, S, F).determinize()
if debug:
afd.print_transition_table()
return afd
|
bruno-borges-2001/Analisador
|
ER/ER.py
|
ER.py
|
py
| 6,254 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71010185787
|
#!/usr/bin/python
import sys, copy
from scanner import scanner
from parser import parser
from lex_rules import build_patterns
from bookmarks import create_bookmark
from symbol_table import build_productions, build_subproductions, Symbol, Production
from scope import build_scopes, Scope, ScopeSymbol, handleScope
from handler_auto_class import handleAutoClass, tracksAutoClass
from handler_template_class import handleTemplateClass, tracksTemplateClass
from handler_reflection import handleReflection, tracksReflection
from tracks import write_track, TrackType
from end_of_line import is_end_of_line
def main( argv ):
patterns = build_patterns()
productions = build_productions()
sub_productions = build_subproductions()
scope_rules = build_scopes()
argv.pop(0)
for filename in argv:
line_count = 0
file_tracks = { TrackType.SOURCE: [], TrackType.HEADER: [] }
tokens = []
symbol_matches = []
scope_stack = []
# Read in the file contents
for line in open( filename ).readlines():
line_count += 1
# Parse the tokens
for token in scanner( patterns, line, line_count ):
tokens.append( copy.deepcopy( token ) )
# Are we at the end of a statement?
if not is_end_of_line( tokens ):
continue
# Go through the indexes, attempting to match a symbol
for production in productions:
node = parser( tokens, production.production, production.symbols, production.build, sub_productions )
if node is not None:
symbol_matches.append( node.production )
if node.production == Symbol.AUTO_KLASS:
tokens = handleAutoClass( node, tokens )
elif node.production == Symbol.TEMPLATE_KLASS:
tokens = handleTemplateClass( node, tokens )
elif node.production == Symbol.REFLECTION:
tokens = handleReflection( node, tokens )
# Copy the tokens into our output tracks
line_tokens = { TrackType.SOURCE: copy.deepcopy(tokens), TrackType.HEADER: [] }
# Increase the depth?
stack_increased = False
if token.token == '{':
stack_increased = True
# Setup the default scope rule
node = [x for x in scope_rules if x.production == ScopeSymbol.BLOCK][0]
# Attempt to find a better scope rule
for s in scope_rules:
tmp = parser( tokens, s.production, s.symbols, s.build, sub_productions )
if tmp is not None:
node = tmp
break
# Attach the generic bookmarks
tokens = handleScope( node, tokens )
ss = Scope( node.production, node, list() )
scope_stack.append( ss )
### Run through through all our track processors, allow them to update the output
line_numbers = {key: len(line_tokens[key]) for key in line_tokens.keys()}
bookmarks = create_bookmark(tokens)
line_tokens = tracksReflection( line_tokens, bookmarks, symbol_matches, scope_stack, stack_increased, line_numbers )
line_tokens = tracksTemplateClass( line_tokens, bookmarks, symbol_matches, scope_stack, stack_increased, line_numbers )
line_tokens = tracksAutoClass( line_tokens, bookmarks, symbol_matches, scope_stack, stack_increased, line_numbers )
# We've got our tracks for this line, add them to the file
for trk in line_tokens.keys():
if len(line_tokens[trk]) > 0:
file_tracks[trk].append( line_tokens[trk] )
#Reset my tokens so we can load another line
if token.token == '}':
scope_stack.pop()
symbol_matches.clear()
tokens.clear()
# Dump all the now collected tokens into the output file
for trk in file_tracks.keys():
write_track( filename, trk, file_tracks[trk] )
main( sys.argv )
|
lukedupin/C2
|
main.py
|
main.py
|
py
| 4,412 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42132347145
|
import math
import numpy as np
from scipy.stats import bernoulli
simlen = 1000000
pmf = np.full(10,0.1)
def cdf(k):
if(k>10):
return 1
elif(k<=0):
return 0
else:
return k*0.1
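# each probability below is estimated empirically by drawing simlen Bernoulli samples with the analytic p and counting successes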
print("Value equal to 7:")
p1 = pmf[7]
data_bern1 = bernoulli.rvs(size=simlen,p=p1)
err_ind1 = np.nonzero(data_bern1 == 1)
print("Probability-simulation,actual:",round(np.size(err_ind1)/simlen,4),round(p1,2))
#print("Simulated values: ", data_bern1)
print("Value greater than 7:")
p2 = cdf(10)-cdf(7)
data_bern2 = bernoulli.rvs(size=simlen ,p=p2)
err_ind2 = np.nonzero(data_bern2 == 1)
print("Probability-simulation,actual:",round(np.size(err_ind2)/simlen,4),round(p2,2))
#print("Simulated values: ", data_bern2)
print("Value less than 7:")
p3 = cdf(6)
data_bern3 = bernoulli.rvs(size=simlen ,p=p3)
err_ind3 = np.nonzero(data_bern3 == 1)
print("Probability-simulation,actual:",round(np.size(err_ind3)/simlen, 4),round(p3,2))
#print("Simulated values: ", data_bern3)
|
gadepall/digital-communication
|
exemplar/10/13/3/30/codes/code.py
|
code.py
|
py
| 984 |
python
|
en
|
code
| 7 |
github-code
|
6
|
16325138014
|
from abc import abstractmethod
import threading
from types import MethodType
from typing import Any, Dict, Generic, NoReturn
from jsonIO.Serializable import SerializableType
from protocol.interface import (
AbstractRequestClient,
AbstractRequestServer,
AbstractSerializableHandler,
AbstractSerializableRequest,
CONTEXT_TYPE,
Promise,
RequestData,
request,
)
import protocol
from socketIO import (
GeneralPurposeSocket,
NullPackageType,
Package,
RequestPackage,
ResponsePackage,
)
from socketIO.interface.socket.AbstractPackageSocket import (
PackageSocketShutdownException,
)
from util.threading import Thread
class PackageSocketRequestServer(AbstractRequestServer[SerializableType, SerializableType, CONTEXT_TYPE]):
def __init__(self, socket: GeneralPurposeSocket):
super().__init__()
self.is_alive = True
self.request_socket: GeneralPurposeSocket[
RequestPackage[SerializableType]
] = socket.duplicate(filter=lambda x: socket.filter(x) and PackageSocketRequestServer.is_request(x))
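        # the duplicated socket composes the original filter with is_request, so this server only sees RequestPackage traffic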
self.init_request_handlers()
self.request_handler_thread = self.start_request_handler_thread()
def get_request(self) -> RequestData[SerializableType]:
request_package = self.request_socket.recv()
if isinstance(request_package, RequestPackage):
type = request_package._type
payload = request_package.payload
uuid = request_package._uuid
return RequestData(type, payload, uuid)
raise Exception
def dispatch_response(
self, request: RequestData[SerializableType], response
) -> None:
response_package = ResponsePackage(response, request.id)
self.request_socket.send(response_package)
def start_request_handler_thread(self) -> Thread:
def request_handler_loop():
try:
self.serve()
except PackageSocketShutdownException:
self.is_alive = False
handler_thread = Thread(target=request_handler_loop, daemon=True)
handler_thread.start()
return handler_thread
def init_request_handlers(self):
handlers = list(
i(context = self.get_context()) for i in AbstractSerializableHandler[CONTEXT_TYPE]._handler_types.values()
)
self.register_handler(*handlers)
@abstractmethod
def get_context(self) -> CONTEXT_TYPE:
raise NotImplementedError
def shutdown(self):
self.request_socket.close()
self.wait_until_termination()
def detach(self):
self.request_handler_thread.join(0)
self.wait_until_termination()
def wait_until_termination(self):
self.request_handler_thread.join()
self.is_alive = False
@staticmethod
def is_request(package: Package):
return isinstance(package, RequestPackage)
|
MysteriousChallenger/nat-holepunch
|
server/PackageSocketRequestServer.py
|
PackageSocketRequestServer.py
|
py
| 2,929 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2502699508
|
# To manage matrices correctly
# At deployment, check if new matrices have been added to old batch sizes
import grid
import orjson
import sys
# VERSION_FILE
VERSION_FILE = "versioning.json"
def readable_string(batch, num_infected, infection_rate):
m,n = grid.parse_batch(batch)
return f'{n} Samples (with {m} tests. Upto {num_infected} positives)'
def update_cache(mlabels, matrices, codenames, jfile):
old_data = {}
f = {}
try:
with open(jfile, 'rb') as reader:
old_data = orjson.loads(reader.read())
except Exception as e:
print(f'Error : {e}')
for batch in mlabels:
print(batch)
m,n,i = mlabels[batch]
mat = matrices[m]
g, c = grid.generate_grid_and_cell_data(batch, mat)
f[batch] = {m : {"num_infected" : n, "infection_rate" : i, "readable" : readable_string(batch, n, i), "gridData" : g, "cellData" : c, "matrix" : m, "codename" : codenames[m]}}
ob = set(old_data)
nb = set(f)
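    # reconcile old and new batches: existing batches may be deactivated or get a new matrix version; unseen batches are appended below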
for batch in old_data:
od = old_data[batch]
# Batch does not exist in new data
if batch not in f or not f[batch]:
print(f"Batch {batch} not in new matrix data, marking as inactive")
od["metadata"]["active"] = False
continue
nd = f[batch]
oa = od["metadata"]["active"]
oam = od["metadata"]["matrices"][-1]
if oam in nd:
# Currently active matrix in old data is same as new data
if not oa:
od["metadata"]["active"] = True
od[m] = nd[m]
continue
# If old batch is not active, check if there is a key in new data
if not oa:
for m in nd:
# Mark m as active, increment version, add to od
od["metadata"]["latest_version"] += 1
od["metadata"]["matrices"].append(m)
od["metadata"]["active"] = True
od[m] = nd[m]
continue
# Make matrix in new data active
for m in nd:
# Mark m as active, increment version, add to od
od["metadata"]["latest_version"] += 1
od["metadata"]["matrices"].append(m)
od["metadata"]["active"] = True
od[m] = nd[m]
# New batches can be safely added to old_data
for batch in nb - ob:
print(f"New batch added - {batch}")
od = {"metadata" : {}}
od["metadata"]["active"] = True
od["metadata"]["latest_version"] = 0
nd = f[batch]
for m in nd:
od["metadata"]["matrices"] = [m]
od[m] = nd[m]
old_data[batch] = od
jstr = orjson.dumps(old_data)
with open(jfile, "wb") as outfile:
outfile.write(jstr)
def load_cache():
data = {}
try:
with open(VERSION_FILE, 'rb') as reader:
data = orjson.loads(reader.read())
except Exception as e:
raise
active_batches = {}
all_batches = {}
for batch in data:
meta = data[batch]["metadata"]
mats = meta["matrices"]
is_active = meta["active"]
mat_names = set(data[batch]) - {"metadata"}
curr_version = len(mats) - 1
for i, m in enumerate(mats):
all_batches[f'{batch}_v{i}'] = data[batch][m]
if i == curr_version and is_active:
active_batches[f'{batch}_v{i}'] = data[batch][m]
# Active batches to be sorted by number of samples
sorted_bnames = sorted((grid.parse_batch(b)[1], b) for b in active_batches)
sorted_active_batches = {b : active_batches[b] for n, b in sorted_bnames}
bbs = {b : grid.batch_size_from_batch_name(b) for b in all_batches}
batch_size_to_batch = {}
for bn, bs in bbs.items():
batch_size_to_batch[bs] = batch_size_to_batch.get(bs, [])
batch_size_to_batch[bs].append({bn : all_batches[bn]["codename"]})
return sorted_active_batches, all_batches, batch_size_to_batch
if __name__ == '__main__':
from compute_wrapper import get_matrix_sizes_and_labels, get_matrix_labels_and_matrices, get_matrix_codenames
update_cache(get_matrix_sizes_and_labels(), get_matrix_labels_and_matrices(), get_matrix_codenames(), VERSION_FILE)
|
Aakriti28/tapestry-server
|
old-server/matrix_manager.py
|
matrix_manager.py
|
py
| 4,223 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6756372344
|
#======================
# Author: Susmita Datta
# Title: insertionSort
#
# Time Complexity of Solution:
# O(n^2)
#
# Sample input = [3, 2, 1, 4, 5, 6, 9, 8, 7]
# Sample output = [1, 2, 3, 4, 5, 6, 7, 8, 9]
#
#--------------------------------------------
def insertionSort(unsorted):
for index in range(1, len(unsorted)):
current_value = unsorted[index]
position = index
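        # shift larger elements one slot to the right until the insertion point for current_value is found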
while position>0 and unsorted[position-1]>current_value:
unsorted[position]=unsorted[position-1]
position = position-1
unsorted[position]=current_value
return unsorted
if __name__ =="__main__":
import random
unsorted_list = random.sample(range(101), 11)
print(unsorted_list)
sorted_list = insertionSort(unsorted_list)
print(sorted_list)
|
ssmtdatta/Sorting
|
insertionSort.py
|
insertionSort.py
|
py
| 763 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2026773879
|
import json
import os
import pathlib
import time
from selenium import webdriver
from selenium.webdriver import ActionChains
driver = webdriver.Chrome()
targetUrl = 'https://www.douban.com/'
username = ""
psw = ""
def login_zhi_hu():
    loginurl = targetUrl  # the login page
    # load the Chrome webdriver, used to locate elements on the login page
# driver = webdriver.Chrome()
    driver.get(loginurl)  # request the login page
# time.sleep(50)
# driver.implicitly_wait(10)
driver.switch_to.frame(driver.find_elements_by_tag_name('iframe')[0])
    bottom = driver.find_element_by_xpath('/html/body/div[1]/div[1]/ul[1]/li[2]')  # switch to the account/password login tab
# bottom = driver.find_element_by_class_name('account-tab-account on')
bottom.click()
    driver.find_element_by_name('username').send_keys(username)  # enter the username
    driver.find_element_by_name('password').clear()  # locate the password field and clear it
    driver.find_element_by_name('password').send_keys(psw)  # enter the password
# #
time.sleep(5)
bottom = driver.find_element_by_class_name('account-form-field-submit ')
bottom.click()
time.sleep(4)
auth_frame = driver.find_element_by_id('tcaptcha_iframe')
driver.switch_to.frame(auth_frame)
element = driver.find_element_by_xpath('//*[@id="tcaptcha_drag_thumb"]')
ActionChains(driver).click_and_hold(on_element=element).perform()
ActionChains(driver).move_to_element_with_offset(to_element=element, xoffset=180, yoffset=0).perform()
    tracks = get_tracks(25)  # arbitrary slide distance for the captcha, so failures are common; capturing the gap image and computing its offset (or smarter recognition) would raise the success rate
for track in tracks:
        # start moving with move_by_offset()
ActionChains(driver).move_by_offset(xoffset=track, yoffset=0).perform()
    # 7. release the mouse after a short delay: release()
time.sleep(0.5)
ActionChains(driver).release().perform()
def get_tracks(distance):
"""
    Build the slide track, imitating a human drag: uniform acceleration first, then uniform deceleration.
    Basic equations of uniformly accelerated motion:
    (1) v = v0 + a*t
    (2) s = v0*t + 1/2*a*t^2
"""
    # initial velocity
    v = 0
    # track the motion in time slices of ~0.3 s; each track entry is the displacement within one slice
    t = 0.31
    # track list; each element is the displacement for one slice
    tracks = []
    # current displacement
    current = 0
    # start decelerating once mid is reached
    mid = distance * 4 / 5
while current < distance:
if current < mid:
            # a smaller acceleration gives smaller per-slice displacements and a more detailed simulated track
a = 2.3
else:
a = -3
        # velocity at the start of this slice
        v0 = v
        # displacement within this slice
        s = v0 * t + 0.5 * a * (t ** 2)
        # current position
        current += s
        # append to the track list
        tracks.append(round(s))
        # the end-of-slice velocity becomes the initial velocity of the next slice
v = v0 + a * t
return tracks
def login_with_cookies():
driver.get(targetUrl)
with open("cookies.txt", "r") as fp:
cookies = json.load(fp)
for cookie in cookies:
driver.add_cookie(cookie)
driver.get(targetUrl)
update_cookies()
def update_cookies():
f = open("cookies.txt", 'w')
f.truncate()
cookies = driver.get_cookies()
with open("cookies.txt", "w") as fp:
json.dump(cookies, fp)
def is_file_exit():
path = pathlib.Path('cookies.txt')
if not os.path.getsize(path):
return False
return path.is_file()
if __name__ == '__main__':
if is_file_exit():
login_with_cookies()
else:
login_zhi_hu()
time.sleep(4)
cookies = driver.get_cookies()
with open("cookies.txt", "w") as fp:
json.dump(cookies, fp)
|
Nienter/mypy
|
personal/douban.py
|
douban.py
|
py
| 3,789 |
python
|
en
|
code
| 0 |
github-code
|
6
|
75163149306
|
# -*- coding: utf-8 -*-
"""
Flask Skeleton
"""
from flask import Blueprint, request, redirect, url_for, render_template, flash, session
from pymongo import errors as mongo_errors
from bson.objectid import ObjectId
from flask_login import login_required
import datetime
from app import mongo, login_manager
from app.usuario.model import Usuario
@login_manager.user_loader
def load_user(usuario_id):
return Usuario.get_by_id(usuario_id)
post = Blueprint('post', __name__)
@post.route('/blogs/<blog_id>/posts/novo', methods=['GET'])
@login_required
def get_novo(blog_id):
data_cadastro = datetime.datetime.utcnow()
return render_template('blog/form-post.html', data_cadastro=data_cadastro, blog_id=blog_id)
@post.route('/blogs/<blog_id>/posts/novo', methods=['POST'])
@login_required
def post_novo(blog_id):
data_cadastro = datetime.datetime.utcnow()
try:
post = mongo.db.blog.update_one(
{"_id": ObjectId(blog_id)},
{"$push": {
"posts": {
"_id": ObjectId(),
"titulo": request.form['titulo'],
"data_cadastro": data_cadastro,
"secoes": [{
"titulo": request.form['titulo'],
"data_cadastro": data_cadastro,
"conteudo": request.form['conteudo'],
"secoes": []
}]
}
}})
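        # $push appends the new post as an embedded document in the blog's 'posts' array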
except mongo_errors.OperationFailure as e:
return render_template('db_error.html', error=e)
return redirect(url_for('blog.get_blog', blog_id=blog_id))
# (?) @post.route('/posts/<post_id>', methods=['GET'])
@post.route('/blogs/<blog_id>/posts/<post_id>', methods=['GET'])
def get_post(blog_id, post_id):
"""Detalha um post específico
"""
try:
blog = mongo.db.blog.find_one(
{
'_id': ObjectId(blog_id),
'posts': {'$elemMatch': {'_id': ObjectId(post_id)}}
},
{'titulo': 1, 'posts.$': 1}
)
except mongo_errors.OperationFailure as e:
return render_template('db_error.html', error=e)
# print(blog)
return render_template('blog/post-detalhe.html', blog=blog, blog_id=blog_id)
|
e-ruiz/big-data
|
01-NoSQL/atividade-04/src/app/blog/posts.py
|
posts.py
|
py
| 2,268 |
python
|
en
|
code
| 1 |
github-code
|
6
|
1925325981
|
from odoo import models, fields, api
from odoo.exceptions import UserError
class LP_Crm(models.Model):
_inherit = 'crm.lead'
lp_company_id = fields.Many2one('res.partner', 'company' , compute = '_compute_company')
lp_individual_id = fields.Many2many('res.partner')
lp_OneDrive_url = fields.Char('OneDrive folder URL')
lp_client_size = fields.Char('Size of the client')
lp_industry = fields.Selection([('Automobiles_and_Components', 'Automobiles and Components'),
('banks', 'Banks'),
('Capital_Goods', 'Capital Goods'),
('Commercial_Professional_Services', 'Commercial and Professional Services'),
('Consumer_Durables_Apparel', 'Consumer Durables and Apparel'),
('Consumer_Services', 'Consumer Services'),
('Diversified_Financials', 'Diversified Financials'),
('energy', 'Energy'),
('Food_Beverage_Tobacco', ' Food, Beverage, and Tobacco'),
('Food_Staples_Retailing', 'Food and Staples Retailing'),
('Health_Care_Equipment_Services', ' Health Care Equipment and Services'),
('Household_Personal_Products', ' Household and Personal Products'),
('Hospitality', 'Hospitality'),
('insurance', 'Insurance'),
('materials', 'Materials'),
('logistics', 'Logistics'),
('Media_Entertainment', 'Media and Entertainment'),
('Pharmaceuticals_Biotechnology_LifeSciences', 'Pharmaceuticals, Biotechnology, and Life Sciences'),
('Real_Estate', 'Real Estate'),
('retailing', 'Retailing'),
('Semiconductors_Semiconductor_Equipment', ' Semiconductors and Semiconductor Equipment'),
('Software_Services', 'Software and Services'),
('Technology_Hardware_Equipment', 'Technology Hardware and Equipment'),
('Telecommunication_Services', 'Telecommunication Services'),
('transportation', 'Transportation'),
('travel', 'Travel'),
('utilities', 'Utilities'),
('others', 'Others')
],
                                   'Industry', default="others")
lp_country = fields.Many2one('res.country','country')
lp_channel_source = fields.Char('Channel From')
lp_others = fields.Text('Others Information') #description
contact_other_info = fields.Text('Others') #description
lp_opportunity = fields.Selection([('new', 'New'),
('Existing', 'Existing (e.g. CR)'),
('outsourcing_contracts', 'Outsourcing contracts'),
('maintenance', 'Maintenance')],
'Opportunity Type', default="new")
lp_budget = fields.Selection([('yes', 'Yes'),('no', 'No')],'Do they have budget for this opportunity?')
lp_budget_authority = fields.Selection([('yes', 'Yes'),('no', 'No')],'Authority to use budget ?')
lp_start_date = fields.Datetime('Start Date')
lp_end_date = fields.Datetime('Finsh Date')
lp_dept_head = fields.Many2one('res.users', string='Department head', domain=lambda self: [('id', 'in', self.env.ref('lp_crm.lp_group_crm_dept_head').users.ids)])
lp_director = fields.Many2one('res.users', string='Director', domain=lambda self: [('id', 'in', self.env.ref('lp_crm.lp_group_crm_director').users.ids)])
lp_go_ahead = fields.Boolean('GoAhead')
lp_stage_name = fields.Char(related="stage_id.name", string='Stage Name')
lp_director_viewer = fields.Boolean(compute='_driector_approve_viewer')
stage_id = fields.Many2one(
'crm.stage', string='Stage', index=True, tracking=True,
compute='_compute_stage_id', readonly=False, store=True,
copy=False, group_expand='_read_group_stage_ids', ondelete='restrict',
domain="['|', ('team_id', '=', False), ('team_id', '=', team_id)]")
@api.constrains('lp_start_date', 'lp_end_date')
def check_dates(self):
if self.lp_start_date and self.lp_end_date:
if self.lp_start_date > self.lp_end_date:
raise UserError('The date from cannot be greater than date to')
@api.depends('partner_id')
def _compute_company(self):
self.lp_company_id = self.partner_id
@api.depends('lp_director')
def _driector_approve_viewer(self):
if self.env.user.id == self.lp_director.id:
self.lp_director_viewer = True
else:
self.lp_director_viewer = False
def Director_approver(self):
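        # only the assigned director (who must be in the director group) may approve; approval sets the go-ahead flag and moves the lead to the 'Presentation' stage if it exists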
self.ensure_one()
is_da = self.env.user.id in self.env.ref('lp_crm.lp_group_crm_director').users.ids
if is_da and self.env.user.id == self.lp_director.id:
if self.lp_go_ahead==False:
self.lp_go_ahead = True
stage_presentation = self.env['crm.stage'].sudo().search([('name', '=', 'Presentation')])
if stage_presentation:
self.stage_id = stage_presentation[0].id
def notify_dept_head(self):
marketing_head=self.env['hr.department'].sudo().search([('name','=','Marketing')])
support_head = self.env['hr.department'].sudo().search([('name', '=', 'Support')])
if marketing_head and marketing_head.manager_id.user_id.partner_id.id:
notification_marketing= [(0, 0, {
'res_partner_id': marketing_head.manager_id.user_id.partner_id.id,
'notification_type': 'inbox'
})]
self.message_post(
body='Opportunity won: ' + str(self.name) +'-'
+str(self.company_id.name) +' '+' Dears, ' +'We would like to inform you that the opportunity '
+str(self.name)+
' - '
+str(self.company_id.name) +
' is won. regards', message_type="notification",
author_id=self.env.user.partner_id.id,
notification_ids=notification_marketing)
if support_head and support_head.manager_id.user_id.partner_id.id:
notification_support= [(0, 0, {
'res_partner_id': support_head.manager_id.user_id.partner_id.id,
'notification_type': 'inbox'
})]
self.message_post(
body='Opportunity won: ' + str(self.name) +'-'
+str(self.company_id.name) +' '+' Dears, ' +'We would like to inform you that the opportunity '
+str(self.name)+
' - '
+str(self.company_id.name) +
' is won. regards', message_type="notification",
author_id=self.env.user.partner_id.id,
notification_ids=notification_support)
if self.lp_dept_head.partner_id.id:
notification_delivery = [(0, 0, {
'res_partner_id': self.lp_dept_head.partner_id.id,
'notification_type': 'inbox'
})]
self.message_post(
body='Opportunity won: ' + str(self.name) +'-'
+str(self.company_id.name) +' '+' Dears, ' +'We would like to inform you that the opportunity '
+str(self.name)+
' - '
+str(self.company_id.name) +
' is won. regards', message_type="notification",
author_id=self.env.user.partner_id.id,
notification_ids=notification_delivery)
@api.onchange('stage_id')
def onchange_stage_id(self):
if self.lp_go_ahead==True:
self.lp_stage_name = 'Won'
def write(self, vals):
if self.stage_id.name=='Won':
self.notify_dept_head()
res = super(LP_Crm, self).write(vals)
return res
class LP_contact(models.Model):
_inherit = 'res.partner'
class LP_stages(models.Model):
_inherit = 'crm.stage'
|
MoathAlrefai2/lp-erp-dev001
|
lp_crm/model/lp_crm.py
|
lp_crm.py
|
py
| 8,538 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19400090459
|
from typing import List
class Solution:
def minFallingPathSum(self, A: List[List[int]]) -> int:
h = len(A)
w = len(A[0])
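        # in-place DP: after processing row i, A[i][j] holds the minimum falling path sum ending at (i, j)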
for i in range(1,h):
for j in range(w):
if j == 0:
A[i][j] = min(A[i-1][j] + A[i][j],A[i-1][j+1] + A[i][j])
elif j == w - 1:
A[i][j] = min(A[i-1][j-1] + A[i][j],A[i-1][j] + A[i][j])
else:
A[i][j] = min(A[i-1][j-1] + A[i][j],A[i-1][j] + A[i][j],A[i-1][j+1] + A[i][j])
print(A)
return min(A[-1])
A = [[51,24],[-50,82]]
r = Solution().minFallingPathSum(A)
print(r)
|
Yigang0622/LeetCode
|
minFallingPathSum.py
|
minFallingPathSum.py
|
py
| 653 |
python
|
en
|
code
| 1 |
github-code
|
6
|
38740337725
|
import pytest
import numpy as np
from uncoverml import patch
@pytest.mark.parametrize('make_multi_patch',
['make_patch_31', 'make_patch_11'],
indirect=True)
def test_grid_patch(make_multi_patch):
timg, pwidth, tpatch, tx, ty = make_multi_patch
patches = patch.grid_patches(timg, pwidth)
assert np.allclose(patches, tpatch)
def test_point_patches(make_points):
timg, pwidth, points, tpatch = make_points
patches = np.array(list(patch.point_patches(timg, pwidth, points)))
assert np.allclose(patches, tpatch)
|
GeoscienceAustralia/uncover-ml
|
tests/test_patch.py
|
test_patch.py
|
py
| 593 |
python
|
en
|
code
| 32 |
github-code
|
6
|
36066284113
|
#%%
from PIL import Image
import numpy as np
import onnxruntime
import torch
import cv2
def preprocess_image(image_path, height, width, channels=3):
image = Image.open(image_path)
image = image.resize((width, height), Image.LANCZOS)
image_data = np.asarray(image).astype(np.float32)
image_data = image_data.transpose([2, 0, 1]) # transpose to CHW
mean = np.array([0.079, 0.05, 0]) + 0.406
std = np.array([0.005, 0, 0.001]) + 0.224
for channel in range(image_data.shape[0]):
image_data[channel, :, :] = (image_data[channel, :, :] / 255 - mean[channel]) / std[channel]
image_data = np.expand_dims(image_data, 0)
return image_data
#%%
def softmax(x):
"""Compute softmax values for each sets of scores in x."""
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum()
def run_sample(session, image_file, categories):
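    # Run the ONNX session on the preprocessed image, then print the top-5
    # ImageNet classes and write them to result.txt.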
output = session.run([], {'input':preprocess_image(image_file, 224, 224)})[0]
output = output.flatten()
output = softmax(output) # this is optional
top5_catid = np.argsort(-output)[:5]
for catid in top5_catid:
print(categories[catid], output[catid])
# write the result to a file
with open("result.txt", "w") as f:
for catid in top5_catid:
f.write(categories[catid] + " " + str(output[catid]) + " \r")
#%%
# create main function
if __name__ == "__main__":
# Read the categories
with open("imagenet_classes.txt", "r") as f:
categories = [s.strip() for s in f.readlines()]
# Create Inference Session
session = onnxruntime.InferenceSession("mobilenet_v2_float.onnx")
# get image from camera
cap = cv2.VideoCapture(0)
cap.set(3,640) # set Width
cap.set(4,480) # set Height
# capture image from camera
ret, frame = cap.read()
frame = cv2.flip(frame, -1) # Flip camera vertically
cv2.imwrite('capture.jpg', frame)
cap.release()
cv2.destroyAllWindows()
run_sample(session, 'capture.jpg', categories)
# %%
|
cassiebreviu/onnxruntime-raspberrypi
|
inference_mobilenet.py
|
inference_mobilenet.py
|
py
| 2,000 |
python
|
en
|
code
| 4 |
github-code
|
6
|
7159944925
|
# Because of anti-crawler policies and the risk of getting an IP banned,
# we switch the User-Agent dynamically.
import urllib.request
import ssl
import random
# create an unverified SSL context
context = ssl._create_unverified_context()
def load_baidu():
url = "https://www.baidu.com"
header = {
''
}
    # create the list of User-Agent strings
user_agent_list = [
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',
'Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1'
]
    # a different browser User-Agent is used for each request
    random_user_agent = random.choice(user_agent_list)
    # create a Request object so that headers can be added later
    request = urllib.request.Request(url)
    # add the request header
    request.add_header('User-Agent',random_user_agent)
    # request the page data
    response = urllib.request.urlopen(request,context=context)
data = response.read().decode('utf-8')
with open("baidu1.html","w") as f:
f.write(data)
# print(response.headers)
load_baidu()
|
hengxuZ/python-crawler-lesson
|
lesson-01/随机设置代理爬取页面.py
|
随机设置代理爬取页面.py
|
py
| 1,194 |
python
|
en
|
code
| 1 |
github-code
|
6
|
8063903284
|
import logging
import subprocess
from subprocess import Popen, PIPE
def run(command: str) -> None:
"""
:param command: shell statement
:return:
"""
logging.debug(command)
subprocess.call(command, shell=True, universal_newlines=True)
def call(command: str) -> str:
"""
:param command: shell statement
    :return: the result of executing the shell statement
"""
logging.debug(command)
with Popen(command, shell=True, stdout=PIPE, stderr=PIPE, universal_newlines=True) as fd:
out, err = fd.communicate()
if fd.returncode:
raise Exception(err.strip())
logging.debug(out.strip())
return out.strip()
def ssh_call(address: str, work_dir: str, command: str) -> str:
"""
:param address: the remote server ip
:param work_dir: the remote server dir
:param command: the shell statement
    :return: the result of executing the shell statement
"""
return call(
"""
ssh -q {address} 'cd {work_dir} && {command}'
"""
.format(address=address, work_dir=work_dir, command=command)
)
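# Illustrative usage (host, directory and command below are placeholders):
#   run("mkdir -p /tmp/demo")
#   print(call("echo hello"))
#   print(ssh_call("10.0.0.5", "/opt/app", "ls -l"))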
|
leaderli/li_py
|
li/li_bash.py
|
li_bash.py
|
py
| 1,123 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4828707472
|
from fastapi.security import OAuth2PasswordBearer
from sqlalchemy.orm import Session
from models import Quote, Title, Year
from schemas import QuoteBase, QuoteCreate, TitleBase, TitleCreate, YearBase, YearCreate
import random
import auth
import models
import schemas
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")
def create_quote(db: Session, quote: QuoteCreate, title_text: str, year_text: str):
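    # Reuse the Title and Year rows if they already exist, create them otherwise,
    # then insert the quote linked to both.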
db_title = db.query(Title).filter(Title.text == title_text).first()
if not db_title:
db_title = Title(text=title_text)
db.add(db_title)
db.commit()
db.refresh(db_title)
db_year = db.query(Year).filter(Year.text == year_text).first()
if not db_year:
db_year = Year(text=year_text)
db.add(db_year)
db.commit()
db.refresh(db_year)
db_quote = Quote(text=quote.text, name=db_title, periode=db_year)
db.add(db_quote)
db.commit()
db.refresh(db_quote)
return db_quote
#get quote by id
def get_quote(db: Session, quote_id: int):
return db.query(Quote).filter(Quote.id == quote_id).first()
#get random quote between the first and the 10th
def get_quote_random(db:Session):
    random_id = random.randint(1, 10)
    return db.query(Quote).filter(Quote.id == random_id).first()
#update quote by id
def update_quote(db: Session, quote_id: int, quote: QuoteBase):
db_quote = db.query(Quote).filter(Quote.id == quote_id).first()
db_quote.text = quote.text
db.commit()
db.refresh(db_quote)
return db_quote
#delete quote by id
def delete_quote(db: Session, quote_id: int):
db_quote = db.query(Quote).filter(Quote.id == quote_id).first()
db.delete(db_quote)
db.commit()
return {"message": "Quote deleted"}
#
def get_title(db: Session, title_id: int):
return db.query(Title).filter(Title.id == title_id).first()
def delete_title(db: Session, title_id: int):
db_title = db.query(Title).filter(Title.id == title_id).first()
db.delete(db_title)
db.commit()
return {"message": "Title deleted"}
def get_year(db: Session, year_id: int):
return db.query(Year).filter(Year.id == year_id).first()
def delete_year(db: Session, year_id: int):
db_year = db.query(Year).filter(Year.id == year_id).first()
db.delete(db_year)
db.commit()
return {"message": "Year deleted"}
def get_all_quotes(db: Session,skip:int=0,limit:int=50):
all_quotes = db.query(models.Quote).offset(skip).limit(limit).all()
return all_quotes
def get_all_titles(db: Session):
return db.query(Title).all()
def get_all_years(db: Session):
return db.query(Year).all()
# create admin
def create_admin(db: Session, admin: schemas.AdminCreate):
hashed_password = auth.get_password_hash(admin.password)
db_admin = models.Admin(username=admin.username, hashed_password=hashed_password)
adminexists = db.query(models.Admin).filter(models.Admin.username == admin.username).first()
if adminexists:
adminerror = {
"username": "error",
"id": 0,
}
return adminerror
else:
db.add(db_admin)
db.commit()
db.refresh(db_admin)
return db_admin
# get admin by username
def get_admin_username(db: Session, username: str):
admin = db.query(models.Admin).filter(models.Admin.username == username).first()
return admin
# delete admin by username
def delete_admin(db: Session, admin: schemas.Admin):
admin = db.query(models.Admin).filter(models.Admin.username == admin.username).first()
db.delete(admin)
db.commit()
return admin
|
rubenpinxten/herexamen_API
|
myProject/crud.py
|
crud.py
|
py
| 3,543 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2216069250
|
class Macro:
def render(self, inv):
ctx = akat.prepare(inv, required_args = ["cond"], keywords = ["likely", "unlikely"], required_enclosing_macros = ["COROUTINE"])
if ctx.likely and ctx.unlikely:
akat.fatal_error("It can't be both likely and unlikely!")
label, state = ctx.COROUTINE.alloc_label_and_state()
return_value = ctx.COROUTINE.get_return_value()
return akat.render(self, label = label, state = state, return_value = return_value, cond = ctx.cond, likely = ctx.likely, unlikely = ctx.unlikely)
|
akshaal/akatlib4
|
akatpp/akat_wait_until.py
|
akat_wait_until.py
|
py
| 561 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30117142033
|
import numpy as np
from PIL import Image
class predict_day_night_algos:
def __init__(self,img_path,algorithm_choice):
self.img_path = img_path
self.algorithm_choice = algorithm_choice
def select_algorithm(self):
"""
the function selects which algorithm,
based on the user input
"""
algo_choices = ["intensity_based","pixel_based"]
if algo_choices[self.algorithm_choice] == "intensity_based":
print("Using Intensity based method")
intensity_value = self.intensity_algorithm()
if intensity_value >= 0.35:
return "day"
else:
return "night"
elif algo_choices[self.algorithm_choice] == "pixel_based":
print("Using pixel based method")
percentage_darker_pixels = self.pixel_percentage_algorithm()
if percentage_darker_pixels > 0.75:
return "night"
else:
return "day"
def intensity_algorithm(self):
"""
description :the function calculates the intensity based on HSI model,
intensity = (R+G+B)/3, where R,G,B are all normalised arrays/bands
input params : the image path
return : intensity value of the image(single value)
"""
### Reading the images ####
img = Image.open(self.img_path)
###converting to numpy array###
arr = np.array(img)
###normalising the bands individually###
Rn,Gn,Bn = (arr[:,:,0]/255),(arr[:,:,1]/255),(arr[:,:,2]/255)
###calculating the Intensity based on HSI model####
intensity_arr = (Rn+Gn+Bn)/3
#### taking average of the intensity array based on number of pixels in the intensity array ##
intensity_value = np.sum(intensity_arr)/(intensity_arr.shape[0]*intensity_arr.shape[1])
return intensity_value
def pixel_percentage_algorithm(self):
"""
        description : this function calculates the proportion of darker pixels;
                      the more dark pixels there are, the darker the image.
        input params : the image path
        return : proportion of dark pixels in the image (between 0 and 1)
"""
### Reading the images ####
img = Image.open(self.img_path)
###converting to numpy array###
arr = np.array(img)
### Calculating the number of pixels in the range 0--40, pixels in this range refer to darker intensity ###
        num_darker_pixels = np.sum(arr <= 40)
###Calculating the percentage ####
percentage_darker_pixels = (num_darker_pixels)/(arr.shape[0]*arr.shape[1]*arr.shape[2])
##### Rounding the percentage value #####
percentage_darker_pixels = round(percentage_darker_pixels,2)
return percentage_darker_pixels
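# Illustrative usage (the image path is a placeholder; 0 = intensity based, 1 = pixel based):
#   predictor = predict_day_night_algos("sample.jpg", 0)
#   print(predictor.select_algorithm())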
|
shivargha98/shivargha_bandopadhyay
|
predict_day_night.py
|
predict_day_night.py
|
py
| 2,888 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30669751378
|
import os
import cv2
dir = "/Users/sunxiaofei/PycharmProjects/remote-server-projects/unlabeled_dataset/data"
for i, eachVid in enumerate(os.listdir(dir)):
vPath = os.path.join(dir, eachVid)
vname = vPath.split("/")[-1][:-4]
print(vname)
print(vPath)
vidcap = cv2.VideoCapture(vPath)
success,image = vidcap.read()
count = 0
valid_count = 0
save_path = "./pic_data/"+vname
if not os.path.exists(save_path):
os.makedirs(save_path)
while success:
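        # save one frame out of every 40 to thin out the video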
if count%40==0:
valid_count += 1
cv2.imwrite("./pic_data/"+vname+"/"+str(valid_count)+".jpg", image) # save frame as JPEG file
success,image = vidcap.read()
print('Read a new frame: ', success)
count += 1
|
sxfduter/python_utils
|
video_frame_extraction.py
|
video_frame_extraction.py
|
py
| 709 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73510591867
|
class Solution:
def numSmallerByFrequency(self, queries: List[str], words: List[str]) -> List[int]:
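        # f(s) = frequency of the lexicographically smallest character of s.
        # For each query, binary search the sorted word frequencies to count
        # the words whose f value is strictly greater.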
countWord = []
countQuery = []
ans = []
for word in words:
w = sorted(word)
countWord.append(w.count(w[0]))
countWord.sort()
for query in queries:
q = sorted(query)
countQuery.append(q.count(q[0]))
for query in countQuery:
low = 0
high = len(countWord)
while low < high:
mid = low + (high - low) // 2
if query < countWord[mid]:
high = mid
else:
low = mid + 1
ans.append(len(words) - low)
return ans
|
yonaSisay/a2sv-competitive-programming
|
compare-strings-by-frequency-of-the-smallest-character.py
|
compare-strings-by-frequency-of-the-smallest-character.py
|
py
| 771 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22002934531
|
"""
Interfaces for Deep Q-Network.
"""
import os
import random
import numpy as np
import tensorflow as tf
from collections import deque
from scipy.misc import imresize
from qnet import QNet
class DeepQLearner(object):
"""
Provides wrapper around TensorFlow for Deep Q-Network.
"""
def __init__(self,
actions,
weight_save_path,
weight_restore_path,
log_path,
weight_save_frequency,
update_frequency,
log_frequency,
batch_size,
learning_rate,
burn_in_duration,
exploration_duration,
exploration_end_rate,
replay_max_size,
discount_rate,
action_repeat,
state_frames,
frame_height,
frame_width,
dueling,
pooling,
training):
"""
Intializes the TensorFlow graph.
Args:
actions: List of viable actions learner can make. (Must be PyGame constants.)
checkpoint_path: File path to store saved weights.
save: If true, will save weights regularly.
restore: If true, will restore weights right away from checkpoint_path.
"""
# Save allowed actions.
self.actions = actions
# Handle network save/restore.
self.weight_save_path = weight_save_path
self.weight_restore_path = weight_restore_path
self.log_path = log_path
# Save training parameters.
self.weight_save_frequency = weight_save_frequency
self.update_frequency = update_frequency
self.log_frequency = log_frequency
self.batch_size = batch_size
self.learning_rate = learning_rate
self.burn_in_duration = burn_in_duration
self.exploration_duration = exploration_duration
self.exploration_rate = 1.
self.exploration_end_rate = exploration_end_rate
self.replay_max_size = replay_max_size
self.discount_rate = discount_rate
self.action_repeat = action_repeat
self.state_frames = state_frames
self.frame_height = frame_height
self.frame_width = frame_width
self.dueling = dueling
self.pooling = pooling
self.training = training
# Initialize variables.
self.iteration = -1
self.actions_taken = 0
self.repeating_action_rewards = 0
self.update_count = 0
# Create network.
self.net = QNet(self.state_frames,
self.frame_height,
self.frame_width,
len(actions),
self.learning_rate)
# Restore weights if needed.
if self.weight_restore_path:
try: self.__restore()
except: pass
if self.log_path:
open(self.log_path, 'w').close()
# Store all previous transitions in a deque to allow for efficient
# popping from the front and to allow for size management.
#
# Transitions are dictionaries of the following form:
# {
# 'state_in': The Q-network input state of this instance.
# 'action': The action index (indices) taken at this frame.
# 'reward': The reward from this action.
# 'terminal': True if the action led to a terminal state.
# 'state_out': The state resulting from the transition action and initial state.
# }
self.transitions = deque(maxlen=self.replay_max_size)
def __normalize_frame(self, frame):
"""
        Resizes the frame to frame_height x frame_width x 1 by averaging the
        color channels.
        Args:
            frame: The pixel values from the screen.
        Returns:
            A frame_height x frame_width x 1 floating point numpy array.
"""
return np.reshape(
np.mean(imresize(frame, (self.frame_height, self.frame_width)), axis=2),
(self.frame_height, self.frame_width, 1))
def __preprocess(self, frame):
"""
Resize image, pool across color channels, and normalize pixels.
Args:
frame: The frame to process.
Returns:
The preprocessed frame.
"""
proc_frame = self.__normalize_frame(frame)
if not len(self.transitions):
return np.repeat(proc_frame, self.state_frames, axis=2)
else:
return np.concatenate(
(proc_frame, self.transitions[-1]['state_in'][:, :, -(self.state_frames-1):]),
axis=2)
def __remember_transition(self, pre_frame, action, terminal):
"""
Returns the transition dictionary for the given data. Defer recording the
reward and resulting state until they are observed.
Args:
pre_frame: The frame at the current time.
action: The index of the action(s) taken at current time.
terminal: True if the action at current time led to episode termination.
"""
self.transitions.append({
'state_in': pre_frame,
'action': self.actions.index(action),
'terminal': terminal})
def __observe_result(self, resulting_state, reward):
"""
Records the resulting state and reward from the previous action.
Args:
resulting_state: The (preprocessed) state resulting from the previous action.
reward: The reward from the previous transition.
"""
if not len(self.transitions):
return
self.transitions[-1]['reward'] = reward
self.transitions[-1]['state_out'] = resulting_state
def __is_burning_in(self):
"""
Returns true if the network is still burning in (observing transitions)."""
return self.iteration < self.burn_in_duration
def __do_explore(self):
"""
Returns true if a random action should be taken, false otherwise.
Decays the exploration rate if the final exploration frame has not been reached.
"""
if not self.__is_burning_in() and self.exploration_rate > self.exploration_end_rate:
self.exploration_rate = max(self.exploration_end_rate, (self.exploration_duration - self.update_count) / (self.exploration_duration))
return random.random() < self.exploration_rate or self.__is_burning_in()
def __best_action(self, frame):
"""
Returns the best action to perform.
Args:
frame: The current (preprocessed) frame.
"""
return self.actions[np.argmax(self.net.compute_q(frame))]
def __random_action(self):
"""
Returns a random action to perform.
"""
return self.actions[int(random.random() * len(self.actions))]
def __compute_target_reward(self, trans):
"""
Computes the target reward for the given transition.
Args:
trans: The transition for which to compute the target reward.
Returns:
The target reward.
"""
target_reward = trans['reward']
if not trans['terminal']:
target_reward += self.discount_rate * np.amax(self.net.compute_q(trans['state_out']))
return target_reward
def step(self,
frame,
reward,
terminal,
score_ratio=None):
"""
Steps the training algorithm given the current frame and previous reward.
Assumes that the reward is a consequence of the previous action.
Args:
frame: Current game frame.
reward: Reward value from previous action.
terminal: True if the previous action was termnial.
Returns:
The next action to perform.
"""
self.iteration += 1
# Log if necessary.
if self.iteration % self.log_frequency == 0:
self.__log_status(score_ratio)
# Repeat previous action for some number of iterations.
# If we ARE repeating an action, we pretend that we did not see
# this frame and just keep doing what we're doing.
if self.iteration % self.action_repeat != 0:
self.repeating_action_rewards += reward
return [self.transitions[-1]['action']]
# Observe the previous reward.
proc_frame = self.__preprocess(frame)
self.__observe_result(proc_frame, self.repeating_action_rewards)
if self.training:
# Save network if necessary before updating.
if self.weight_save_path and self.iteration % self.weight_save_frequency == 0 and self.iteration > 0:
self.__save()
# If not burning in, update the network.
if not self.__is_burning_in() and self.actions_taken % self.update_frequency == 0:
self.update_count += 1
# Update network from the previous action.
minibatch = random.sample(self.transitions, self.batch_size)
batch_frames = [trans['state_in'] for trans in minibatch]
batch_actions = [trans['action'] for trans in minibatch]
batch_targets = [self.__compute_target_reward(trans) for trans in minibatch]
self.net.update(batch_frames, batch_actions, batch_targets)
# Select the next action.
action = self.__random_action() if self.__do_explore() else self.__best_action(proc_frame)
self.actions_taken += 1
# Remember the action and the input frames, reward to be observed later.
self.__remember_transition(proc_frame, action, terminal)
# Reset rewards counter for each group of 4 frames.
self.repeating_action_rewards = 0
return [action]
def __log_status(self, score_ratio=None):
"""
Print the current status of the Q-DQN.
Args:
score_ratio: Score ratio given by the PyGamePlayer.
"""
print(' Iteration : %d' % self.iteration)
if self.update_count > 0:
print(' Update count : %d' % self.update_count)
if self.__is_burning_in() or len(self.transitions) < self.replay_max_size:
print(' Replay capacity : %d' % len(self.transitions))
if self.exploration_rate > self.exploration_end_rate and not self.__is_burning_in():
print(' Exploration rate: %0.20f' % self.exploration_rate)
# If we're using the network, print a sample of the output.
if not self.__is_burning_in():
print(' Sample Q output :', self.net.compute_q(self.transitions[-1]['state_in']))
if score_ratio:
print(' Score ratio : %0.20f' % score_ratio)
print('==============================================================================')
# Write to log file.
open(self.log_path, "a").write(str(score_ratio) + '\n')
def __save(self):
"""
Save the current network parameters in the checkpoint path.
"""
self.net.saver.save(self.net.sess, self.weight_save_path, global_step=self.iteration)
def __restore(self):
"""
Restore the network from the checkpoint path.
"""
if not os.path.exists(self.weight_restore_path):
raise Exception('No such checkpoint path %s!' % self.weight_restore_path)
# Get path to weights.
path = tf.train.get_checkpoint_state(self.weight_restore_path).model_checkpoint_path
# Restore iteration number.
self.iteration = int(path[(path.rfind('-')+1):]) - 1
# Restore exploration rate.
self.exploration_rate = max(self.exploration_end_rate, (self.exploration_duration - self.iteration / self.update_frequency / self.action_repeat) / (self.exploration_duration))
# Restore network weights.
self.net.saver.restore(self.net.sess, path)
print("Network weights, exploration rate, and iteration number restored!")
|
TianyiWu96/DQN
|
src/qlearn.py
|
qlearn.py
|
py
| 11,988 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18100363274
|
"""
1143. Longest Common Subsequence
https://leetcode.com/problems/longest-common-subsequence/
"""
from typing import Dict, List, Tuple
from unittest import TestCase, main
class Solution:
def longestCommonSubsequence(self, text1: str, text2: str) -> int:
"""
This is a classic DP problem
text1 = "abcde", text2 = "ace"
1. Since the 1st chars are the same (= "a"), we can break it down to a subproblem.
-> text1 = "bcde", text2 = "ce" + 1
2. The next 2 chars aren't the same ("b", "c"), let's divide it into 2 subproblem.
-> text1 = "cde", text2 = "ce" + 1, or
-> text1 = "bcde", text2 = "c" + 1
2-1. The next 2 chars are the same ("c")
-> text1 = "de", text2 = "e" + 1 + 1
3. The next 2 chars are not ("d", "e"), so let's divide it in to 2 subproblem.
-> text1 = "de", text2 = "" + 1 + 1, or
-> text1 = "e", text2 = "e" + 1 + 1
3-2. The next 2 chars are the same ("e")
-> 1 + 1 + 1
4. With this approach we can start from the last to fist indexes, using DP.
"""
m, n = len(text1), len(text2)
dp = [[0] * (n + 1) for _ in range(m + 1)] # Need extra column and row
for i in range(m - 1, -1, -1): # start from the 2nd last character
for j in range(n - 1, -1, -1): # start from the 2nd last character
if text1[i] == text2[j]:
dp[i][j] = dp[i + 1][j + 1] + 1
else:
dp[i][j] = max(dp[i][j + 1], dp[i + 1][j])
return dp[0][0]
class Test(TestCase):
data_set: List[Tuple[str, str, int]] = [
("abcde", "ace", 3),
("abc", "abc", 3),
("abc", "def", 0),
]
def test_solution(self):
for a, b, expected in self.data_set:
s = Solution()
self.assertEqual(s.longestCommonSubsequence(a, b), expected)
if __name__ == "__main__":
main()
|
hirotake111/leetcode_diary
|
leetcode/1143/solution.py
|
solution.py
|
py
| 1,955 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29099504157
|
import xml.etree.ElementTree as ET
from datetime import date
from pathlib import Path
def _convert_dict(temp_dict):
"""
Convert one dict to a new one
:param temp_dict: A temporary dict
:type temp_dict: dict
    :return: The same dict in a new format that fits the database
:rtype: dict
"""
data_dict = {}
for transferee in temp_dict:
counter = 0
for transferee_counter in temp_dict[transferee]["transferees"]:
temp_name = f"{transferee}_{counter}"
data_dict[temp_name] = {}
# for info in data_dict[transferee]["transferees"][transferee_counter]:
data_dict[temp_name]["SourceBarcode"] = temp_dict[transferee]["source"]
data_dict[temp_name]["SourceWell"] = temp_dict[transferee]["transferees"][transferee_counter]["source_well"]
data_dict[temp_name]["DestinationBarcode"] = temp_dict[transferee]["destination"]
data_dict[temp_name]["DestinationWell"] = temp_dict[transferee]["transferees"][transferee_counter]["destination_well"]
data_dict[temp_name]["Volume"] = temp_dict[transferee]["transferees"][transferee_counter]["transferee_volume"]
counter += 1
return data_dict
def _get_transferee_dict(file_list):
"""
    Translate the XML files in file_list into two dicts
:param file_list: A list of files
:type file_list: list
:return:
        - transferee: what has been transferred
        - destination_plates: which plates the transfers have gone to
:rtype:
- dict
- dict
"""
transferee = {}
destination_plates = {}
# source_plate = {}
for i in file_list:
doc = ET.parse(i)
root = doc.getroot()
for dates in root.iter("transfer"):
date_running = dates.get("date")
date_str = f"plate_production_{date_running}"
transferee[date_str] = {}
# finds barcode for source and destination
for plates in root.iter("plate"):
source_destination = plates.get("type")
barcode = plates.get("barcode")
transferee[date_str][source_destination] = barcode
# if plates.get("type") == "source":
# source_plate[barcode] = {}
# source_plate[barcode]["SourceBarcode"] = barcode
# source_plate[barcode]["date"] = date.today()
if plates.get("type") == "destination":
destination_plates[barcode] = {}
destination_plates[barcode]["DestinationBarcode"] = barcode
destination_plates[barcode]["date"] = date.today()
# find source, destination and volume for each transferee
for wells_t in root.iter("printmap"):
wells_transferee = int(wells_t.get("total"))
transferee[date_str]["transferees"] = {}
for counter in range(wells_transferee):
temp_str = f"Transferee_{counter + 1}"
transferee[date_str]["transferees"][temp_str] = {}
wells_source = wells_t[counter].get("n")
wells_destination = wells_t[counter].get("dn")
transferee_volume = float(wells_t[counter].get("vt")) * 10e-6
transferee[date_str]["transferees"][temp_str]["source_well"] = wells_source
transferee[date_str]["transferees"][temp_str]["destination_well"] = wells_destination
transferee[date_str]["transferees"][temp_str]["transferee_volume"] = transferee_volume
# find source, destination and reason for each skipped well
for wells in root.iter("skippedwells"):
wells_skipped = int(wells.get("total"))
transferee[date_str]["Skipped"] = {}
# finds destination and source wells data
for z in range(wells_skipped):
temp_str = f"Skipped_{z + 1}"
transferee[date_str]["Skipped"][temp_str] = {}
wells_destination = wells[z].get("dn")
wells_source = wells[z].get("n")
reason = wells[z].get("reason")
transferee[date_str]["Skipped"][temp_str]["source_well"] = wells_source
transferee[date_str]["Skipped"][temp_str]["destination_well"] = wells_destination
transferee[date_str]["Skipped"][temp_str]["reason"] = reason
return transferee, destination_plates
def xml_controller(file_list):
"""
Controls the XML reader
:param file_list: List of files with XML data
:type file_list: list
:return:
        - transferee: what has been transferred
        - destination_plates: which plates the transfers have gone to
:rtype:
- dict
- dict
"""
transferee_dict, destination_plates = _get_transferee_dict(file_list)
data_dict = _convert_dict(transferee_dict)
return data_dict, destination_plates
def convert_echo_to_db(files):
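    # Parse the Echo "Transfer*" XML reports into a dict keyed by destination
    # barcode, with per-well entries for transferred and skipped wells.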
echo_to_db = {}
transfer_counter = 0
for file_index, files in enumerate(files):
files = Path(files)
if files.name.startswith("Transfer"):
doc = ET.parse(files)
root = doc.getroot()
# for counting plates and transferees
for plates in root.iter("plate"):
barcode = plates.get("barcode")
source_destination = plates.get("type")
if source_destination == "destination":
temp_d_barcode = barcode
if source_destination == "source":
temp_s_barcode = barcode
try:
echo_to_db[temp_d_barcode]
except KeyError:
echo_to_db[temp_d_barcode] = {"skipped_wells": {},
"transferred_wells": {}}
for wells in root.iter("printmap"):
wells_transferred = wells.get("total")
if int(wells_transferred) != 0:
for z in range(int(wells_transferred)):
destination_well = wells[z].get("dn")
source_well = wells[z].get("n")
vol = wells[z].get("vt")
echo_to_db[temp_d_barcode]["transferred_wells"][destination_well] = {
"mp_source_plate": temp_s_barcode,
"mp_source_well": source_well,
"vol": vol}
for wells in root.iter("skippedwells"):
wells_skipped = wells.get("total")
if int(wells_skipped) != 0:
transfer_counter += 1
for z in range(int(wells_skipped)):
destination_well = wells[z].get("dn")
source_well = wells[z].get("n")
reason = wells[z].get("reason")
reason = reason.split(":")[0]
echo_to_db[temp_d_barcode]["skipped_wells"][destination_well] = {
"mp_source_plate": temp_s_barcode,
"mp_source_well": source_well,
"reason": reason}
return echo_to_db
if __name__ == "__main__":
path = "2022-03-03"
from file_handler import get_file_list
file_list = get_file_list(path)
data, test = xml_controller(file_list)
print(data)
|
ZexiDilling/structure_search
|
xml_handler.py
|
xml_handler.py
|
py
| 7,386 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17044150914
|
# WORD SELECTION #
import random
palavrasaleatorias = ['GIRAFA', 'GATO', 'ESMALTE', 'MIOJO', 'MORANGO', 'CHOCOLATE', 'VERDE', 'CINZA', 'PYTHON', 'ABELHA', 'PUCPR', 'ALEGRIA','ESTUDAR', 'PROGRAMA', 'PIMENTA']
# pick the word at random
escolhida = palavrasaleatorias[random.randint(0,14)]
# count the number of letters in the chosen word; it is used to fill the list
numeroletras = len(escolhida)
palavrasecreta = [0] * numeroletras
# fill the list, letter by letter
i = 0
while i < numeroletras:
    for char in escolhida:
        palavrasecreta[i] = char
        i += 1
# GAME START #
tentativa = numeroletras + 5
palpite = 'A'
palavra = ["_"] * numeroletras # what is printed as the player gets letters right
print("₊˚ʚᗢ₊˚✧゚。JOGO DA FORCA ₊˚ʚᗢ₊˚✧゚。\n")
print(" ".join(palavra))
# repeat while there are attempts left and the word has not been guessed yet
while (tentativa > 0) and (palavrasecreta != palavra):
    print(f'\n\nTentativas restantes: {tentativa}')
    palpite = input("\nDigite uma letra: ")
    palpite = palpite.upper() # so the game works with both lowercase and uppercase letters
    # check whether the guessed letter appears in the word
    for posicao in range(numeroletras):
        if palpite == palavrasecreta[posicao]:
            palavra[posicao] = palpite
    # after each guess, print the updated word:
    # if the letter occurs, it fills its positions; otherwise they stay blank
print(f'\n{" ".join(palavra)}')
tentativa -= 1
# if the player loses, i.e. the word was not guessed and the attempts ran out
if (tentativa <= 0) and (palavrasecreta != palavra):
print("\n\n₊˚ʚᗢ₊˚✧゚。GAME OVER ₊˚ʚᗢ₊˚✧゚。\n")
print(f'Puxa! Você perdeu :P\nA palavra era: {" ".join(palavrasecreta)}')
# when the player wins: the word was guessed within the allowed attempts!
elif palavrasecreta == palavra:
print("\n\n₊˚ʚᗢ₊˚✧゚。VENCEDOR ₊˚ʚᗢ₊˚✧゚。\n")
print("Oba! Parabéns! Você acertou a palavra! :D\n\n")
|
micheleotta/Jogo-da-forca
|
forca.py
|
forca.py
|
py
| 2,079 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
36637137136
|
import tkinter as tk
from tkinter import ttk
from tkinter import *
import numpy as np
from PIL import ImageTk, Image
from os import listdir
from os.path import isfile, join
from PIL.Image import Resampling
from hopfield_clouds import HopfieldClouds
# root.columnconfigure(0, weight=1)
# root.columnconfigure(1, weight=3)
class GUI:
def __init__(self):
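        # Layout: original, cropped and reconstructed image panels, prev/next
        # buttons, a crop-percentage slider and a Reconstruct button.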
self.picture_size = 420
self.network = HopfieldClouds(130 ** 2)
self.root = tk.Tk()
self.root.geometry('1280x500')
self.root.title('Hopfield Clouds')
self.next_button = ttk.Button(self.root, text='>', command=self.next_image)
self.next_button.grid(row=1, column=0, sticky=tk.E)
self.prev_button = ttk.Button(self.root, text='<', command=self.prev_image)
self.prev_button.grid(row=1, column=0, sticky=tk.W)
self.original_img = self.network.get_current_image()
self.original_img = self.original_img.resize((self.picture_size, self.picture_size), Resampling.LANCZOS)
self.img_tk = ImageTk.PhotoImage(self.original_img)
original_frame = Frame(self.root, width=self.picture_size, height=self.picture_size)
original_frame.grid(row=0, columnspan=1, sticky='we')
self.original_image_label = Label(original_frame, image=self.img_tk)
self.original_image_label.grid(row=1, column=0)
self.cropped_img = Image.fromarray(np.uint8(np.zeros((self.picture_size, self.picture_size, 3))))
self.cropped_img = ImageTk.PhotoImage(self.cropped_img)
self.cropped_frame = Frame(self.root, width=self.picture_size, height=self.picture_size)
self.cropped_frame.grid(row=0, column=1, sticky='we')
self.cropped_image_label = Label(self.cropped_frame, image=self.cropped_img)
self.cropped_image_label.grid(row=1, column=1)
self.current_value = tk.DoubleVar()
# slider
self.slider = ttk.Scale(self.root, from_=1, to=99, orient='horizontal', command=self.slider_changed,
variable=self.current_value)
self.slider.set(50)
self.slider.bind('<ButtonRelease-1>', self.slider_up)
self.slider_label = Label(self.root, text='Percentage to crop:')
self.slider_label.grid(row=1, column=1, columnspan=1, sticky='we')
self.slider.grid(column=1, columnspan=1, row=2, sticky='we')
self.value_label = ttk.Label(self.root, text=self.get_current_value())
self.value_label.grid(row=3, column=1, columnspan=1, sticky='n')
self.reconstructed_img = Image.fromarray(np.uint8(np.zeros((self.picture_size, self.picture_size, 3))))
self.reconstructed_img = ImageTk.PhotoImage(self.reconstructed_img)
self.reconstructed_frame = Frame(self.root, width=self.picture_size, height=self.picture_size)
self.reconstructed_frame.grid(row=0, column=2, columnspan=1, sticky='n')
self.reconstructed_image_label = Label(self.reconstructed_frame, image=self.reconstructed_img)
self.reconstructed_image_label.grid(row=1, column=2, columnspan=1)
self.reconstruct_button = ttk.Button(self.root, text='Reconstruct', command=self.reconstruct)
self.reconstruct_button.grid(row=1, column=2, sticky='n')
self.slider_up(None)
self.root.mainloop()
def slider_changed(self, event):
self.value_label.configure(text=self.get_current_value())
def get_current_value(self):
return '{: .2f}'.format(self.current_value.get())
def next_image(self):
img = self.network.next_image()
self.original_img = img.resize((self.picture_size, self.picture_size), Resampling.LANCZOS)
self.img_tk = ImageTk.PhotoImage(self.original_img)
self.original_image_label.configure(image=self.img_tk)
self.slider_up(None)
def prev_image(self):
img = self.network.prev_image()
self.original_img = img.resize((self.picture_size,self.picture_size), Resampling.LANCZOS)
self.img_tk = ImageTk.PhotoImage(self.original_img)
self.original_image_label.configure(image=self.img_tk)
self.slider_up(None)
def reconstruct(self):
cropped, reconstructed = self.network.get_current_image_predictions(int(self.current_value.get()))
self.reconstructed_img = reconstructed
self.reconstructed_img = self.reconstructed_img.resize((self.picture_size, self.picture_size), Resampling.LANCZOS)
self.reconstructed_img = ImageTk.PhotoImage(self.reconstructed_img)
self.reconstructed_image_label.configure(image=self.reconstructed_img)
def slider_up(self, event):
cropped = self.network.get_current_cropped(int(self.current_value.get()))
self.cropped_img = cropped
self.cropped_img = self.cropped_img.resize((self.picture_size, self.picture_size), Resampling.LANCZOS)
self.cropped_img = ImageTk.PhotoImage(self.cropped_img)
self.cropped_image_label.configure(image=self.cropped_img)
gui = GUI()
|
behenate/hopfield-reconstruction
|
gui.py
|
gui.py
|
py
| 5,007 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15512669243
|
import pygame # import pygame
# Define a function that draws the grid
def draw_grid(screen, ruudu_suurus, read, veerud, joone_värv):
    for i in range(read): # first loop goes over all rows
        for j in range(veerud): # second loop goes over all columns
            rect = pygame.Rect(j * ruudu_suurus, i * ruudu_suurus, ruudu_suurus, ruudu_suurus) # create a Rect object (x coordinate, y coordinate, width and height)
            pygame.draw.rect(screen, joone_värv, rect, 1) # draw the square (surface, line colour, rect and line width)
# Create the Pygame screen
pygame.init() # initialize pygame
screen = pygame.display.set_mode((640, 480)) # set the window size
pygame.display.set_caption("Ruudustik") # set the title of the current window
# Set the parameters
ruudu_suurus = 20 # square size
read = 24 # number of rows
veerud = 32 # number of columns
joone_värv = (255, 0, 0) # line colour
# Closing from the window's X button
running = True
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
    # Draw the screen full of squares
    screen.fill((150, 255, 150)) # green background colour
    draw_grid(screen, ruudu_suurus, read, veerud, joone_värv) # draw the grid on the screen
    pygame.display.update() # update the screen
# Quit Pygame
pygame.quit()
|
KermoV/Ulesanne_3
|
Ülesanne_3.py
|
Ülesanne_3.py
|
py
| 1,403 |
python
|
et
|
code
| 0 |
github-code
|
6
|
27679859460
|
# Things to show
# Name, Orbital Radius, Gravity, Mass, Distance, Planet Type, Goldilock, Discovery Date, Mass of hoststar
from flask import Flask, jsonify, make_response
from pandas import read_csv
app = Flask(__name__)
data = read_csv("csv/display.csv")
@app.get("/")
def index():
to_send = []
i = 1
while True:
res = get_data(i)
if res[0] == False:
break
to_send.append(res[1])
i += 1
return cors(jsonify(to_send))
@app.route("/home")
def get_home():
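    # Build a lightweight listing (name, planet_type and row index) for the home page.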
to_send = []
columns = data.columns[1:]
columns_to_share = ["name", "planet_type"]
i = 0
while True:
try:
planet_data = {}
response = data.iloc[i].to_json(orient='records')[1:-1].split(",")[1:]
for j, item in enumerate(response):
if columns[j] in columns_to_share:
planet_data.update({ columns[j]: item.replace("\"", "").replace("\"", "") })
planet_data.update({ "index": i })
to_send.append(planet_data)
except:
break
i += 1
return to_send
@app.route("/get/<int:i>")
def get_data_end_point(i):
return cors(jsonify(get_data(i)))
def get_data(i):
try:
to_send = {}
columns = data.columns[1:]
response = data.iloc[i].to_json(orient='records')[1:-1].split(",")[1:]
for j, item in enumerate(response):
to_send.update({ columns[j]: item.replace("\"", "").replace("\"", "") })
return [True, to_send]
except:
return [False, {}]
def cors(res):
res.headers.add("Access-Control-Allow-Origin", "*")
return res
if __name__ == "__main__":
app.run()
|
CometConnect/python
|
api.py
|
api.py
|
py
| 1,549 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15598819292
|
import sys
sys.path.append('..')
import torch
from torch import nn
from torch.nn import functional as F
from ssd import config as cfg
from basenet.vgg import vgg_feat
from basenet.resnet import resnet101_feat
from ssd.utils_ssd.priorbox import PriorBox
from ssd.utils_ssd.L2Norm import L2Norm
from ssd.utils_ssd.detect import Detect
extras_vgg = {'300': [256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256]}
extras_res = {'300': [256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256]}
l_vgg = [23, 512]
l_res = [11, 512]
mbox_vgg = {'300': [(512, 4), (1024, 6), (512, 6), (256, 6), (256, 4), (256, 4)]}
mbox_res = {'300': [(512, 4), (2048, 6), (512, 6), (256, 6), (256, 4), (256, 4)]}
# extend vgg: 5 "additional" feature parts
def add_extras(i, cfg=extras_vgg, vgg=True):
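    # Extra SSD feature layers appended after the backbone; 'S' in the config marks
    # a stride-2 convolution. For VGG, fc6/fc7 are first converted to a dilated 3x3
    # conv and a 1x1 conv.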
fc7 = [nn.MaxPool2d(3, 1, 1), nn.Conv2d(512, 1024, 3, 1, 6, 6), nn.ReLU(inplace=True),
nn.Conv2d(1024, 1024, 1), nn.ReLU(inplace=True)] if vgg else []
layers = []
in_channels = i
flag = False
for k, v in enumerate(cfg):
if in_channels != 'S':
if v == 'S':
layers += [nn.Conv2d(in_channels, cfg[k + 1],
kernel_size=(1, 3)[flag], stride=2, padding=1)]
else:
layers += [nn.Conv2d(in_channels, v, kernel_size=(1, 3)[flag])]
flag = not flag
in_channels = v
return fc7, layers
# feature map to loc+conf
def multibox(num_classes=21, cfg=mbox_vgg):
loc_layers = []
conf_layers = []
for channel, n in cfg:
loc_layers += [nn.Conv2d(channel, n * 4, 3, 1, 1)]
conf_layers += [nn.Conv2d(channel, n * num_classes, 3, 1, 1)]
return loc_layers, conf_layers
# single shot multibox detector
class SSD(nn.Module):
def __init__(self, phase, base, extras, loc, conf, num_classes, l=l_vgg):
super(SSD, self).__init__()
self.phase = phase
self.num_classes = num_classes
self.priors = PriorBox(cfg.v2)()
self.size = 300
self.l = l[0]
self.bone = nn.ModuleList(base)
self.l2norm = L2Norm(l[1], 20)
self.extras = nn.ModuleList(extras)
self.loc, self.conf = nn.ModuleList(loc), nn.ModuleList(conf)
if phase == 'test':
self.detect = Detect(num_classes, cfg.top_k, cfg.conf_thresh, cfg.nms_thresh)
def forward(self, x):
source, loc, conf = list(), list(), list()
for k in range(self.l):
x = self.bone[k](x)
source.append(self.l2norm(x))
for k in range(self.l, len(self.bone)):
x = self.bone[k](x)
source.append(x)
for k, v in enumerate(self.extras):
x = F.relu(v(x), inplace=True)
if k % 2 == 1:
source.append(x)
# apply multibox head to source layers
for (x, l, c) in zip(source, self.loc, self.conf):
loc.append(l(x).permute(0, 2, 3, 1).contiguous())
conf.append(c(x).permute(0, 2, 3, 1).contiguous())
loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)
conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)
if not self.priors.is_cuda and loc.is_cuda:
self.priors = self.priors.cuda()
if self.phase == 'test':
output = self.detect(
loc.view(loc.size(0), -1, 4),
F.softmax(conf.view(conf.size(0), -1, self.num_classes), dim=2),
self.priors
)
else:
output = (
loc.view(loc.size(0), -1, 4),
conf.view(conf.size(0), -1, self.num_classes),
self.priors
)
return output
def build_ssd(phase, size=300, num_classes=21, bone='vgg'):
    if phase != 'test' and phase != 'train':
        raise ValueError("Error: Phase not recognized")
    if size != 300:
        raise ValueError("Error: Only SSD300 is supported")
if bone == 'vgg':
base_ = vgg_feat()
fc7_, extras_ = add_extras(1024, extras_vgg['300'])
loc_, conf_ = multibox(num_classes, mbox_vgg['300'])
l = l_vgg
elif bone == 'res101':
base_ = resnet101_feat()
fc7_, extras_ = add_extras(2048, extras_res['300'], False)
loc_, conf_ = multibox(num_classes, mbox_res['300'])
l = l_res
else:
raise IOError("only vgg or res101")
return SSD(phase, base_ + fc7_, extras_, loc_, conf_, num_classes, l)
if __name__ == '__main__':
net = build_ssd('train', bone='vgg')
img = torch.randn((1, 3, 300, 300))
out = net(img)
print(out[1])
|
AceCoooool/detection-pytorch
|
ssd/ssd300.py
|
ssd300.py
|
py
| 4,567 |
python
|
en
|
code
| 24 |
github-code
|
6
|
42896164462
|
import jax
import numpy as np
import pytest
import hilbert_sort.jax as jax_backend
import hilbert_sort.numba as np_backend
@pytest.fixture(scope="module", autouse=True)
def config_pytest():
jax.config.update("jax_enable_x64", True)
@pytest.mark.parametrize("dim_x", [2, 3, 4])
@pytest.mark.parametrize("N", [150, 250])
@pytest.mark.parametrize("seed", [0, 42, 666])
def test_random_agree(dim_x, N, seed):
np.random.seed(seed)
x = np.random.randn(N, dim_x)
np_res = np_backend.hilbert_sort(x)
jax_res = jax_backend.hilbert_sort(x)
np.testing.assert_allclose(np_res, jax_res)
@pytest.mark.parametrize("nDests", [2, 3, 4, 5])
@pytest.mark.parametrize("N", [150, 250])
@pytest.mark.parametrize("seed", [0, 42, 666])
def test_transpose_bits(nDests, N, seed):
np.random.seed(seed)
x = np.random.randint(0, 150021651, (5,))
np_res = np_backend.transpose_bits(x, nDests)
jax_res = jax_backend.transpose_bits(x, nDests)
np.testing.assert_allclose(np_res, jax_res)
@pytest.mark.parametrize("nDests", [5, 7, 12])
@pytest.mark.parametrize("N", [150, 250])
@pytest.mark.parametrize("seed", [0, 42, 666])
def test_unpack_coords(nDests, N, seed):
np.random.seed(seed)
x = np.random.randint(0, 150021651, (nDests,))
max_int = 150021651
np_res = np_backend.unpack_coords(x)
jax_res = jax_backend.unpack_coords(x, max_int)
np.testing.assert_allclose(np_res, jax_res)
def test_gray_decode():
for n in range(5, 1_000):
np_res = np_backend.gray_decode(n)
jax_res = jax_backend.gray_decode(n)
np.testing.assert_allclose(np_res, jax_res)
|
AdrienCorenflos/parallel-Hilbert
|
tests/test_agree.py
|
test_agree.py
|
py
| 1,622 |
python
|
en
|
code
| 1 |
github-code
|
6
|
36942742650
|
class Erecord:
def __init__(self, eId, eName, eAddress, eRate, eHour):
self.id = eId
self.name = eName
self.address = eAddress
self.rate = eRate
self.hour = eHour
def __str__(self):
return "ID: " + str(self.id) + " Name: " + self.name
def calc_salary(self):
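        # Hours beyond 40 are paid at 1.5x the hourly rate; 7.5% state tax and
        # 20% federal tax are then withheld from gross pay.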
if self.hour <= 40:
grossPay = self.rate * self.hour
else:
grossPay = self.rate*40 + 1.5*self.rate*(self.hour-40)
stateTax = grossPay * 0.075
fedTax = grossPay * 0.2
netPay = grossPay - stateTax - fedTax
return netPay
|
Sir-Lance/CS1400
|
EmployeeClass.py
|
EmployeeClass.py
|
py
| 615 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7873679939
|
import numpy as np
from multiprocessing import Pool
h, w = 1080, 1920
def draw_pixel():
pixel = np.zeros(24, dtype=np.uint8)
for i in range(24):
pixel[i] = np.random.randint(0, 2)
return pixel
def draw_row(p):
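    # The first column is random; every later column is redrawn with probability p,
    # otherwise it copies the previous column (horizontally correlated noise).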
row = np.zeros((24, w), dtype=np.uint8)
row[:, 0] = draw_pixel()
for j in range(1, w):
if np.random.binomial(1, p):
row[:, j] = draw_pixel()
else:
row[:, j] = row[:, j-1]
return row
def draw(p, pool_size=4, chunk_size=10):
with Pool(pool_size) as pool:
rows = pool.map(draw_row, [p]*h, chunksize=chunk_size)
imgs = np.zeros((24, h, w), dtype=np.uint8)
for i, row in enumerate(rows):
imgs[:, i, :] = row
return imgs
def draw_single_process(p):
imgs = np.zeros((24, h, w), dtype=np.uint8)
for i in range(h):
imgs[:, i, :] = draw_row(p)
return imgs
|
e841018/ERLE
|
rand_img.py
|
rand_img.py
|
py
| 888 |
python
|
en
|
code
| 2 |
github-code
|
6
|
160637604
|
import numpy as np
import pandas as pd
#Setting the recent season match
yrBefore = np.arange(1900,2023)
yrAfter = np.arange(1901,2024)
yrBefore_list = []
yrAfter_list = []
for s in yrBefore:
a = str(s)
yrBefore_list.append(a)
for j in yrAfter:
b = str(j)
yrAfter_list.append(b)
season_list = []
for f in range (len(yrBefore)):
season = yrBefore_list[f] + '/' + yrAfter_list[f]
season_list.append(season)
#Getting Table from online
df_bt = pd.read_html("https://www.soccerbase.com/teams/team.sd?team_id=2898&team2_id=376&teamTabs=h2h")
#Picking Table From Source
sdf= df_bt[2]
startingYear = sdf.columns[0]
if startingYear in season_list:
x = startingYear
else:
print ('No past record of the teams')
y = x
r = x + '.1'
n = x + '.2'
m = x + '.7'
l = x + '.8'
p = x + '.9'
new_df = sdf[sdf[r].apply(lambda x: x[4])!= '/']
new_df.drop(y, axis = 1, inplace = True)
new_df.set_index(r,inplace= True)
new_df.drop([n, m,l,p], axis = 1, inplace = True)
new_df.columns = ['Home', 'Scores', 'Away', 'Result']
new_df.index.names = ['Date']
new_df['ScoresH'] = new_df['Scores'].apply(lambda x: x[0])
new_df['ScoresA'] = new_df['Scores'].apply(lambda x: x[4])
new_df['ScoresH'] = new_df['ScoresH'].apply(lambda x: int(x))
new_df['ScoresA'] = new_df['ScoresA'].apply(lambda x: int(x))
new_df['ResultN'] = new_df['ScoresH'] - new_df['ScoresA']
new_df['Result'][new_df['ResultN']>0]=new_df['Home']
new_df['Result'][new_df['ResultN']<0]=new_df['Away']
new_df['Result'][new_df['ResultN']==0]='Draw'
new_df['Result']= new_df['Result'] + ' Wins'
Result = pd.get_dummies(new_df['Result'])
Home = pd.get_dummies(new_df['Home'])
Away = pd.get_dummies(new_df['Away'])
new_df.drop(['Home','Scores', 'Away'], axis = 1,inplace = True)
ddf= pd.concat([new_df,Result,Home,Away],axis = 1)
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report,confusion_matrix
for i in Result:
x = i
print(x.upper())
X_train, X_test, y_train, y_test = train_test_split(ddf.drop([x,'Result'],axis=1),
ddf[x], test_size=0.30,
random_state=101)
logmodel = LogisticRegression()
logmodel.fit(X_train,y_train)
predictions = logmodel.predict(X_test)
print(classification_report(y_test,predictions))
print(confusion_matrix(y_test,predictions))
|
Taofeek26/Taofeek26
|
btttt.py
|
btttt.py
|
py
| 2,577 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42134577155
|
import numpy as np
# Define the number of simulations
num_simulations = 100000
# Initialize a counter for successful outcomes
successful_outcomes = 0
for _ in range(num_simulations):
# Initialize the bag with 5 red and 3 blue balls
bag = np.array(['red', 'red', 'red', 'red', 'red', 'blue', 'blue', 'blue'])
# Draw 3 balls without replacement
drawn_balls = np.random.choice(bag, size=3, replace=False)
# Check if exactly 2 of the 3 balls are red and the first ball is red
if np.sum(drawn_balls == 'red') == 2 and drawn_balls[0] == 'red':
successful_outcomes += 1
# Calculate the probability
probability = successful_outcomes / num_simulations
print(f"The probability that exactly two of the three balls were red, with the first ball being red, is approximately: {probability:.4f}")
|
gadepall/digital-communication
|
exemplar/12/13/3/75/codes/main.py
|
main.py
|
py
| 822 |
python
|
en
|
code
| 7 |
github-code
|
6
|
71186923
|
import boto3
import uuid
import json
from jwcrypto import jwt, jwk
DDB_CLIENT = boto3.client('dynamodb')
ddb_table = "iowt-devices"
def create_new_device():
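    # Generate a device id (UUID4), a random 256-bit symmetric key, and an
    # encrypted JWT that carries the device id as a claim.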
id = str(uuid.uuid4())
key = jwk.JWK(generate="oct", size=256)
key_data = json.loads(key.export())['k']
token = jwt.JWT(header={"alg": "A256KW", "enc": "A256CBC-HS512"},
claims={"device_id": id})
token.make_encrypted_token(key)
return id, key_data, token.serialize()
device_id, key, token = create_new_device()
db_item = dict()
db_item['id'] = {'S': device_id}
db_item['deviceLocation'] = {'S': "Not Set"}
db_item['deviceName'] = {'S': "Not Set"}
db_item['deviceKey'] = {'S': key}
db_item['deviceToken'] = {'S': token}
db_item['deviceStatus'] = {'S': "new"}
db_item['deviceOwner'] = {'S': "none"}
DDB_CLIENT.put_item(TableName=ddb_table,
Item=db_item)
|
wilsonc101/iowt
|
www/create_device.py
|
create_device.py
|
py
| 886 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12064414796
|
from sys import argv
def next_nb(ls, visited, iterator, element):
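    # Recursive DFS over the adjacency list: label every unvisited neighbour
    # with the current component id (vertex labels are 1-based).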
for i in range(len(ls[iterator])):
print(f"{visited} {ls[iterator][i]}")
if visited[ls[iterator][i] - 1] == 0:
visited[ls[iterator][i] - 1] = element
next_nb(ls, visited, ls[iterator][i] - 1, element)
if len(argv) < 2:
print("Podaj input")
exit()
ls = []
visited = []
vertices = 0
# read graph from file
with open(argv[1]) as f:
line = (str(f.readline()).rstrip())
if line != "LS":
print("Podaj liste sasiedztwa")
for line in f:
ls.append(line.rstrip().split(' '))
vertices += 1
for i in range(len(ls)):
if len(ls[i]) > 0 and ls[i][0] != '':
ls[i] = list(map(int, ls[i]))
else:
ls[i].remove('')
f.close()
# set visited list to work
for i in range(vertices):
visited.append(0)
# print(vertices)
element = 1
i = 0
for i in range(len(visited)):
if visited[i] == 0:
visited[i] = element
next_nb(ls, visited, i, element)
else:
if element in visited:
element += 1
i += 1
print(ls)
#print(visited)
|
patyen/GiIZ
|
Set2/Task3/kopia.py
|
kopia.py
|
py
| 1,145 |
python
|
en
|
code
| 1 |
github-code
|
6
|
33613649647
|
'''
--- Day 2: Dive! ---
Now, you need to figure out how to pilot this thing.
It seems like the submarine can take a series of commands like forward 1, down 2, or up 3:
forward X increases the horizontal position by X units.
down X increases the depth by X units.
up X decreases the depth by X units.
Note that since you're on a submarine, down and up affect your depth, and so they have the opposite result of what you might expect.
The submarine seems to already have a planned course (your puzzle input). You should probably figure out where it's going. For example:
forward 5
down 5
forward 8
up 3
down 8
forward 2
Your horizontal position and depth both start at 0. The steps above would then modify them as follows:
forward 5 adds 5 to your horizontal position, a total of 5.
down 5 adds 5 to your depth, resulting in a value of 5.
forward 8 adds 8 to your horizontal position, a total of 13.
up 3 decreases your depth by 3, resulting in a value of 2.
down 8 adds 8 to your depth, resulting in a value of 10.
forward 2 adds 2 to your horizontal position, a total of 15.
After following these instructions, you would have a horizontal position of 15 and a depth of 10. (Multiplying these together produces 150.)
Calculate the horizontal position and depth you would have after following the planned course. What do you get if you multiply your final horizontal position by your final depth?
'''
from aoc.helpers import read_file_to_list
part1_test_set = read_file_to_list("/day2/day2_part1_test.txt")
part1_input_set = read_file_to_list("/day2/day2_part1.txt")
"""
Calculate final position and depth
"""
def get_final_position(data_set: list[str]):
currentState = [0, 0] # hposition, depth
for i in range(0, len(data_set)):
line = data_set[i].split()
direction = line[0]
dist = int(line[1])
if direction == 'forward':
currentState[0] += dist
elif direction == 'up':
currentState[1] -= dist
else:
currentState[1] += dist
return currentState[0] * currentState[1]
"""
Calculate final position and depth with Aim
"""
def get_final_position_with_aim(data_set: list[str]):
currentState = [0, 0, 0] # hposition, depth, aim
for i in range(0, len(data_set)):
line = data_set[i].split()
direction = line[0]
dist = int(line[1])
if direction == 'forward':
currentState[0] += dist
if(dist != 0):
currentState[1] += (currentState[2] * dist)
elif direction == 'up':
currentState[2] -= dist
else:
currentState[2] += dist
return currentState[0] * currentState[1]
assert get_final_position(part1_test_set) == 150
print("solution 1:", get_final_position(part1_input_set))
assert get_final_position_with_aim(part1_test_set) == 900
print("solution 2:", get_final_position_with_aim(part1_input_set))
|
Patbmcdonald/Advent-Of-Code-2021
|
aoc/day2/day2.py
|
day2.py
|
py
| 3,032 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39001711691
|
import csv
import MySQLdb
mydb= MySQLdb.connect(host='localhost',
user='root',
db='celebal')
cursor=mydb.cursor()
with open('dataset1.csv', 'r') as csvfile:
csv_data1 = csv.reader(csvfile, delimiter=',')
next(csv_data1)
cursor.execute("TRUNCATE TABLE data1")
for row in csv_data1:
cursor.execute("INSERT INTO data1(ID,Cities,Pincode,Office_ID) VALUES(%s,%s,%s,%s)",row)
mydb.commit()
with open('dataset2.csv','r') as csvfile2:
csv_data2 = csv.reader(csvfile2,delimiter=',')
next(csv_data2)
cursor.execute("TRUNCATE TABLE data2")
for row in csv_data2:
cursor.execute("INSERT INTO data2(ID,Office_ID,Population) VALUES(%s,%s,%s)",row)
mydb.commit()
cursor.execute("DROP TABLE new_records")
sql=("CREATE TABLE new_records AS SELECT d.ID,d.Office_ID,d.Cities,d.Pincode,dd.population from data1 d join data2 dd on d.Office_ID=dd.Office_ID;")
cursor.execute(sql)
cursor.close()
print("Done")
|
shauryaa/CelebalAssignment1
|
try.py
|
try.py
|
py
| 910 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1360530890
|
import Utils.Data as data
from Utils.Data.DatasetUtils import is_test_or_val_set, get_train_set_id_from_test_or_val_set, \
get_test_or_val_set_id_from_train
from Utils.Data.Features.Feature import Feature
from Utils.Data.Features.Generated.EnsemblingFeature.MatrixEnsembling import ItemCBFMatrixEnsembling
from Utils.Data.Features.Generated.EnsemblingFeature.XGBEnsembling import XGBEnsembling
from Utils.Data.Features.Generated.GeneratedFeature import GeneratedFeaturePickle
import pathlib as pl
import numpy as np
import pandas as pd
import hashlib
from Utils.Data.Sparse.CSR.CreatorTweetMatrix import CreatorTweetMatrix
from Utils.Data.Sparse.CSR.HashtagMatrix import HashtagMatrix
from Utils.Data.Sparse.CSR.URM import URM
class HashtagSimilarityFoldEnsembling(GeneratedFeaturePickle):
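    """Per-fold ensembling scores based on hashtag similarity; the scores are generated externally (create_feature only raises)."""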
def __init__(self,
dataset_id: str,
label: str,
number_of_folds: int = 5
):
feature_name = f"hashtag_similarity_fold_ensembling_{label}"
super().__init__(feature_name, dataset_id)
self.pck_path = pl.Path(
f"{Feature.ROOT_PATH}/{self.dataset_id}/generated/similarity_ensembling/{self.feature_name}.pck.gz")
# self.csv_path = pl.Path(
# f"{Feature.ROOT_PATH}/{self.dataset_id}/similarity_ensembling/{self.feature_name}.csv.gz")
# self.number_of_folds = number_of_folds
# self.engager_features = [
# "mapped_feature_engager_id",
# "mapped_feature_tweet_id",
# f"tweet_feature_engagement_is_{label}"
# ]
# self.creator_features = [
# "mapped_feature_creator_id",
# "mapped_feature_tweet_id"
# ]
def create_feature(self):
raise Exception("This feature is created externally. See gen_hashtag_similarity...py")
# # Load the hashtag similarity
# sim = HashtagMatrix().load_similarity().tocsr()
#
# # Check if the dataset id is train or test
# if not is_test_or_val_set(self.dataset_id):
# # Compute train and test dataset ids
# train_dataset_id = self.dataset_id
#
# # Load the dataset and shuffle it
# X_train = data.Data.get_dataset(features=self.engager_features,
# dataset_id=train_dataset_id).sample(frac=1)
#
# creator_X_train = data.Data.get_dataset(features=self.creator_features,
# dataset_id=train_dataset_id)
#
# # Create the ctm 'creator tweet matrix'
# ctm = CreatorTweetMatrix(creator_X_train).get_as_urm().astype(np.uint8)
#
# # Compute the folds
# X_train_folds = np.array_split(X_train, self.number_of_folds)
#
# # Declare list of scores (of each folds)
# # used for aggregating results
# scores = []
#
# # Train multiple models with 1-fold out strategy
# for i in range(self.number_of_folds):
# # Compute the train set
# X_train = pd.concat([X_train_folds[x].copy() for x in range(self.number_of_folds) if x is not i])
# X_train.columns = [
# "mapped_feature_engager_id",
# "mapped_feature_tweet_id",
# "engagement"
# ]
#
#
# # Compute the test set
# X_test = X_train_folds[i].copy()
#
# # Generate the dataset id for this fold
# fold_dataset_id = f"{self.feature_name}_{self.dataset_id}_fold_{i}"
#
# # Load the urm
# urm = URM(X_train).get_as_urm().astype(np.uint8)
# urm = urm + ctm
#
# # Create the sub-feature
# feature = ItemCBFMatrixEnsembling(self.feature_name, fold_dataset_id, urm, sim, X_train)
#
# # Retrieve the scores
# scores.append(pd.DataFrame(feature.load_or_create()))
# print(X_test.index)
# print(scores.index)
#
# # Compute the resulting dataframe and sort the results
# result = pd.concat(scores).sort_index()
#
# # Save it as a feature
# self.save_feature(result)
#
# else:
# test_dataset_id = self.dataset_id
# train_dataset_id = get_train_set_id_from_test_or_val_set(test_dataset_id)
#
# creator_X_train = data.Data.get_dataset(features=self.creator_features,
# dataset_id=train_dataset_id)
# creator_X_test = data.Data.get_dataset(features=self.creator_features,
# dataset_id=test_dataset_id)
# creator_X = pd.concat([creator_X_train, creator_X_test])
#
# # Create the ctm 'creator tweet matrix'
# ctm = CreatorTweetMatrix(creator_X).get_as_urm().astype(np.uint8)
#
# # Load the train dataset
# X_train = data.Data.get_dataset(features=self.engager_features, dataset_id=train_dataset_id)
# X_train.columns = [
# "mapped_feature_engager_id",
# "mapped_feature_tweet_id",
# "engagement"
# ]
# # Load the urm
# urm = URM(X_train).get_as_urm().astype(np.uint8)
# urm = urm + ctm
#
# # Load the test dataset
# X_test = data.Data.get_dataset(features=self.engager_features, dataset_id=test_dataset_id)
# X_test.columns = ["user", "item", "engagement"]
#
# # Create the sub-feature
# feature = ItemCBFMatrixEnsembling(self.feature_name, self.dataset_id, urm, sim, X_test.copy())
#
# # Retrieve the scores
# result = pd.DataFrame(feature.load_or_create(), index=X_test.index)
#
# # Save it as a feature
# self.save_feature(result)
class DomainSimilarityFoldEnsembling(GeneratedFeaturePickle):
def __init__(self,
dataset_id: str,
label: str,
number_of_folds: int = 5
):
feature_name = f"domain_similarity_fold_ensembling_{label}"
super().__init__(feature_name, dataset_id)
self.pck_path = pl.Path(
f"{Feature.ROOT_PATH}/{self.dataset_id}/generated/similarity_ensembling/{self.feature_name}.pck.gz")
def create_feature(self):
raise Exception("This feature is created externally. See gen_hashtag_similarity...py")
class LinkSimilarityFoldEnsembling(GeneratedFeaturePickle):
def __init__(self,
dataset_id: str,
label: str,
number_of_folds: int = 5
):
feature_name = f"link_similarity_fold_ensembling_{label}"
super().__init__(feature_name, dataset_id)
self.pck_path = pl.Path(
f"{Feature.ROOT_PATH}/{self.dataset_id}/generated/similarity_ensembling/{self.feature_name}.pck.gz")
def create_feature(self):
raise Exception("This feature is created externally. See gen_hashtag_similarity...py")
|
MaurizioFD/recsys-challenge-2020-twitter
|
Utils/Data/Features/Generated/EnsemblingFeature/SimilarityFoldEnsembling.py
|
SimilarityFoldEnsembling.py
|
py
| 7,398 |
python
|
en
|
code
| 39 |
github-code
|
6
|
75136926587
|
import pytest
import tgalice
from dialog_manager import QuizDialogManager
@pytest.fixture
def default_dialog_manager():
return QuizDialogManager.from_yaml('texts/quiz.yaml')
def make_context(text='', prev_response=None, new_session=False):
if prev_response is not None:
user_object = prev_response.updated_user_object
else:
user_object = {}
if new_session:
metadata = {'new_session': True}
else:
metadata = {}
return tgalice.dialog_manager.Context(user_object=user_object, metadata=metadata, message_text=text)
def test_start(default_dialog_manager):
r0 = default_dialog_manager.respond(make_context(new_session=True))
assert 'Йоу!' in r0.text # substring in string
assert 'да' in r0.suggests # string in list of strings
assert 'нет' in r0.suggests # string in list of strings
def test_randomization(default_dialog_manager):
r0 = default_dialog_manager.respond(make_context(new_session=True))
r1 = default_dialog_manager.respond(make_context(text='да', prev_response=r0))
chosen_options = set()
for i in range(100):
r2 = default_dialog_manager.respond(
make_context(text='какая-то безумная хрень которая точно не матчится', prev_response=r1)
)
chosen_options.add(r2.updated_user_object['form']['sex'])
assert chosen_options == {'м', 'ж'}
|
avidale/musiquiz
|
test_scenarios.py
|
test_scenarios.py
|
py
| 1,432 |
python
|
en
|
code
| 0 |
github-code
|
6
|
811146806
|
'''Minimum characters that are to be inserted such that no three consecutive characters are same
Given a string str and the task is to modify the string such that no three consecutive characters are same.
In a single operation, any character can be inserted at any position in the string.
Find the minimum number of such operations required.
Examples:
Input : str = “aabbbcc”
Output: 1
“aabbdbcc” is the modified string.
Input: str = “geeksforgeeks”
Output: 0
'''
def getCount(s):
count = 0
i = 0
while i < len(s) - 2:
if s[i] == s[i + 1] and s[i] == s[i + 2]:
count += 1
i += 2
else:
i += 1
print(count)
return count
getCount('aabbbccc')     # 2  (one insertion for 'bbb', one for 'ccc')
getCount('baaaaa')       # 2
getCount('baaabbaabbba') # 2  ('aaa' and 'bbb' each need one insertion)
getCount('baabab')       # 0
|
Saima-Chaity/Leetcode
|
Interviews/NoThreeConsecutiveCharacter.py
|
NoThreeConsecutiveCharacter.py
|
py
| 791 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2542812722
|
import os
import json
import logging
from infy_bordered_table_extractor import bordered_table_extractor
from infy_bordered_table_extractor.bordered_table_extractor import OutputFileFormat
from infy_bordered_table_extractor.providers.tesseract_data_service_provider import TesseractDataServiceProvider
from infy_bordered_table_extractor.bordered_table_extractor import LineDetectionMethod
def __create_new_instance():
if not os.path.exists("./logs"):
os.makedirs("./logs")
logging.basicConfig(
filename=("./logs" + "/app_log.log"),
format="%(asctime)s- %(levelname)s- %(message)s",
level=logging.INFO,
datefmt="%d-%b-%y %H:%M:%S",
)
logger = logging.getLogger()
TESSERACT_PATH = os.environ['TESSERACT_PATH']
provider = TesseractDataServiceProvider(TESSERACT_PATH)
# input files path
temp_folderpath = './data/temp'
img_filepath = os.path.abspath(
'./data/sample_1.png')
table_object = bordered_table_extractor.BorderedTableExtractor(
provider, provider, temp_folderpath, logger, True)
return table_object, img_filepath
def test_bordered_table_extractor_bbox_RGBLineDetect():
"""test method"""
table_object, img_filepath = __create_new_instance()
save_folder_path = os.path.abspath('./data/output')
result = table_object.extract_all_fields(
img_filepath, within_bbox=[73, 2001, 4009, 937], config_param_dict={
'output': {'path': save_folder_path,
'format': [OutputFileFormat.EXCEL]}
}
)
__pretty_print(result)
assert result['error'] is None
assert __get_summary(result) == {
'table_count': 1,
'row_count': 5,
'col_count': [4, 4, 4, 4, 4]
}
def test_bordered_table_extractor_bbox_OpenCVLineDetect():
"""test method"""
table_object, img_filepath = __create_new_instance()
result = table_object.extract_all_fields(
img_filepath, within_bbox=[73, 2001, 4009, 937],
config_param_dict={'line_detection_method': [
LineDetectionMethod.OPENCV_LINE_DETECT]})
__pretty_print(result)
assert result['error'] is None
assert __get_summary(result) == {
'table_count': 1,
'row_count': 5,
'col_count': [4, 4, 4, 4, 4]
}
def test_bordered_table_extractor_with_custom_cells():
"""test method"""
table_object, img_filepath = __create_new_instance()
result = table_object.extract_all_fields(
img_filepath, within_bbox=[73, 2001, 4009, 937],
config_param_dict={
'custom_cells': [
{'rows': [1], 'columns':[1]}, {'rows': [2], 'columns':[2]}]
}
)
__pretty_print(result)
assert result['error'] is None
assert __get_summary(result) == {
'table_count': 1,
'row_count': 2,
'col_count': [3, 3]
}
def __get_summary(api_result):
row_count = -1
col_counts = []
for table in api_result['fields']:
rows = table['table_value']
row_count = len(rows)
for row in rows:
col_counts.append(len(row))
return {
'table_count': len(api_result['fields']),
'row_count': row_count,
'col_count': col_counts
}
def __pretty_print(dictionary):
p = json.dumps(dictionary, indent=4)
print(p.replace('\"', '\''))
|
Infosys/Document-Extraction-Libraries
|
infy_bordered_table_extractor/tests/test_border_table_img.py
|
test_border_table_img.py
|
py
| 3,357 |
python
|
en
|
code
| 6 |
github-code
|
6
|
40319534507
|
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ansibleguy.opnsense.plugins.module_utils.base.api import \
Session
from ansible_collections.ansibleguy.opnsense.plugins.module_utils.base.cls import GeneralModule
class General(GeneralModule):
CMDS = {
'set': 'set',
'search': 'get',
}
API_KEY_PATH = 'proxy.forward'
API_KEY_PATH_REQ = API_KEY_PATH
API_MOD = 'proxy'
API_CONT = 'settings'
API_CONT_REL = 'service'
API_CMD_REL = 'reconfigure'
FIELDS_CHANGE = [
'interfaces', 'port', 'port_ssl', 'transparent', 'ssl_inspection',
'ssl_inspection_sni_only', 'ssl_ca', 'ssl_exclude', 'ssl_cache_mb',
'ssl_workers', 'allow_interface_subnets', 'snmp', 'port_snmp',
'snmp_password', 'interfaces_ftp', 'port_ftp', 'transparent_ftp',
]
FIELDS_ALL = FIELDS_CHANGE
FIELDS_TRANSLATE = {
'port_ssl': 'sslbumpport',
'transparent': 'transparentMode',
'ssl_inspection': 'sslbump',
'ssl_inspection_sni_only': 'sslurlonly',
'ssl_ca': 'sslcertificate',
'ssl_exclude': 'sslnobumpsites',
'ssl_cache_mb': 'ssl_crtd_storage_max_size',
'ssl_workers': 'sslcrtd_children',
'allow_interface_subnets': 'addACLforInterfaceSubnets',
'snmp': 'snmp_enable',
'port_snmp': 'snmp_port',
'interfaces_ftp': 'ftpInterfaces',
'port_ftp': 'ftpPort',
'transparent_ftp': 'ftpTransparentMode',
}
FIELDS_TYPING = {
'bool': [
'transparent_ftp', 'snmp', 'allow_interface_subnets', 'ssl_inspection_sni_only',
'ssl_inspection', 'transparent',
],
'list': ['interfaces', 'ssl_exclude', 'interfaces_ftp'],
'int': ['port', 'port_ssl', 'ssl_cache_mb', 'ssl_workers', 'port_snmp'],
'select': ['ssl_ca'],
}
FIELDS_IGNORE = ['acl', 'icap', 'authentication']
INT_VALIDATIONS = {
'ssl_workers': {'min': 1, 'max': 32},
'ssl_cache_mb': {'min': 1, 'max': 65535},
'port': {'min': 1, 'max': 65535},
'port_ssl': {'min': 1, 'max': 65535},
'port_snmp': {'min': 1, 'max': 65535},
}
FIELDS_DIFF_EXCLUDE = ['snmp_password']
def __init__(self, module: AnsibleModule, result: dict, session: Session = None):
GeneralModule.__init__(self=self, m=module, r=result, s=session)
|
ansibleguy/collection_opnsense
|
plugins/module_utils/main/webproxy_forward.py
|
webproxy_forward.py
|
py
| 2,388 |
python
|
en
|
code
| 158 |
github-code
|
6
|
72255300348
|
from __future__ import annotations
import json
import re
from typing import TYPE_CHECKING
import asyncpg
import discord
import pandas as pd
from tweepy.asynchronous import AsyncClient
from ..helpers import add_prefix
if TYPE_CHECKING:
from bot import Bot
async def setup_cache(bot: Bot):
prefixes = await bot.pool.fetch("SELECT * FROM guild_prefixes")
for record in prefixes:
add_prefix(bot, record["guild_id"], record["prefix"])
guild_settings = await bot.pool.fetch("SELECT * FROM guild_settings")
for guild in guild_settings:
if guild["poketwo"]:
await bot.redis.sadd("poketwo_guilds", guild["guild_id"])
if guild["auto_download"]:
await bot.redis.sadd("auto_download_channels", guild["auto_download"])
if guild["auto_reactions"]:
await bot.redis.sadd("auto_reactions", guild["guild_id"])
blacklisted = await bot.pool.fetch("SELECT snowflake FROM block_list")
for snowflake in blacklisted:
await bot.redis.sadd("block_list", snowflake["snowflake"])
afk = await bot.pool.fetch("SELECT * FROM afk")
for row in afk:
await bot.redis.sadd("afk_users", row["user_id"])
covers = await bot.pool.fetch("SELECT * FROM nsfw_covers")
for row in covers:
await bot.redis.sadd("nsfw_covers", row["album_id"])
opted_out = await bot.pool.fetch("SELECT * FROM opted_out")
for row in opted_out:
for item in row["items"]:
await bot.redis.sadd(f"opted_out:{row['user_id']}", item)
user_settings = await bot.pool.fetch("SELECT * FROM user_settings")
for row in user_settings:
if row["fm_autoreact"]:
await bot.redis.sadd("fm_autoreactions", row["user_id"])
if row["mudae_pokemon"]:
await bot.redis.sadd("mudae_pokemon_reminders", row["user_id"])
async def setup_webhooks(bot: Bot):
for name, webhook in bot.config["webhooks"].items():
bot.webhooks[name] = discord.Webhook.from_url(url=webhook, session=bot.session)
for name, webhook in bot.config["avatar_webhooks"].items():
bot.avatar_webhooks[name] = discord.Webhook.from_url(
url=webhook, session=bot.session
)
for name, webhook in bot.config["image_webhooks"].items():
bot.image_webhooks[name] = discord.Webhook.from_url(
url=webhook, session=bot.session
)
for name, webhook in bot.config["icon-webhooks"].items():
bot.icon_webhooks[name] = discord.Webhook.from_url(
url=webhook, session=bot.session
)
async def setup_pokemon(bot: Bot):
url = "https://raw.githubusercontent.com/poketwo/data/master/csv/pokemon.csv"
data = pd.read_csv(url)
pokemon = [str(p).lower() for p in data["name.en"]]
for p in pokemon:
if re.search(r"[\U00002640\U0000fe0f|\U00002642\U0000fe0f]", p):
pokemon[pokemon.index(p)] = re.sub(
"[\U00002640\U0000fe0f|\U00002642\U0000fe0f]", "", p
)
if re.search(r"[\U000000e9]", p):
pokemon[pokemon.index(p)] = re.sub("[\U000000e9]", "e", p)
bot.pokemon = pokemon
async def setup_accounts(bot: Bot):
accounts = await bot.pool.fetch("SELECT * FROM accounts")
for record in accounts:
if record["osu"]:
await bot.redis.hset(f"accounts:{record['user_id']}", "osu", record["osu"])
if record["lastfm"]:
await bot.redis.hset(
f"accounts:{record['user_id']}", "lastfm", record["lastfm"]
)
if record["steam"]:
await bot.redis.hset(
f"accounts:{record['user_id']}", "steam", record["steam"]
)
if record["roblox"]:
await bot.redis.hset(
f"accounts:{record['user_id']}", "roblox", record["roblox"]
)
if record["genshin"]:
await bot.redis.hset(
f"accounts:{record['user_id']}", "genshin", record["genshin"]
)
async def create_pool(bot: Bot, connection_url: str):
def _encode_jsonb(value):
return json.dumps(value)
def _decode_jsonb(value):
return json.loads(value)
async def init(con):
await con.set_type_codec(
"jsonb",
schema="pg_catalog",
encoder=_encode_jsonb,
decoder=_decode_jsonb,
format="text",
)
connection = await asyncpg.create_pool(connection_url, init=init)
if connection is None:
raise Exception("Failed to connect to database")
bot.pool = connection
|
LeoCx1000/fish
|
src/utils/core/startup.py
|
startup.py
|
py
| 4,587 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31957026711
|
from __future__ import annotations
import asyncio
from typing import TYPE_CHECKING, Any, Union, Optional, TypedDict, final
from datetime import datetime
import attr
import ujson
from tomodachi.utils import helpers
from tomodachi.core.enums import ActionType
if TYPE_CHECKING:
from tomodachi.core.bot import Tomodachi
__all__ = ["Action", "ActionScheduler"]
class ReminderExtras(TypedDict):
content: str
class InfractionExtras(TypedDict):
target_id: int
reason: str
def convert_action_type(val: Any) -> ActionType:
if isinstance(val, ActionType):
return val
return ActionType(val)
def convert_extra(val: Any) -> Optional[dict]:
if val is None:
return None
if isinstance(val, dict):
return val
return ujson.loads(val)
@attr.s(slots=True, auto_attribs=True)
class Action:
id: Optional[int] = None
action_type: Optional[ActionType] = attr.ib(converter=convert_action_type, default=ActionType.REMINDER)
created_at: Optional[datetime] = attr.ib(factory=helpers.utcnow)
trigger_at: Optional[datetime] = attr.ib(factory=helpers.utcnow)
author_id: Optional[int] = None
guild_id: Optional[int] = None
channel_id: Optional[int] = None
message_id: Optional[int] = None
extra: Optional[Union[ReminderExtras, InfractionExtras]] = attr.ib(converter=convert_extra, default=None)
@final
class ActionScheduler:
def __init__(self, bot: Tomodachi):
self.bot = bot
self.cond = asyncio.Condition()
self.task = asyncio.create_task(self.dispatcher())
self.active: Optional[Action] = None
async def dispatcher(self):
async with self.cond:
action = self.active = await self.get_action()
if not action:
await self.cond.wait()
await self.redispatch()
now = helpers.utcnow()
if action.trigger_at >= now:
delta = (action.trigger_at - now).total_seconds()
await asyncio.sleep(delta)
await self.trigger_action(action)
await self.redispatch()
async def redispatch(self):
        if not (self.task.cancelled() or self.task.done()):
self.task.cancel()
self.task = asyncio.create_task(self.dispatcher())
async with self.cond:
self.cond.notify_all()
async def get_action(self):
async with self.bot.db.pool.acquire() as conn:
query = """SELECT *
FROM actions
WHERE (CURRENT_TIMESTAMP + '28 days'::interval) > actions.trigger_at
ORDER BY actions.trigger_at
LIMIT 1;"""
stmt = await conn.prepare(query)
record = await stmt.fetchrow()
if not record:
return None
return Action(**record)
async def schedule(self, a: Action):
now = helpers.utcnow()
delta = (a.trigger_at - now).total_seconds()
if delta <= 60 and a.action_type is not ActionType.INFRACTION:
asyncio.create_task(self.trigger_short_action(delta, a))
return a
async with self.bot.db.pool.acquire() as conn:
await conn.set_type_codec("jsonb", encoder=ujson.dumps, decoder=ujson.loads, schema="pg_catalog")
query = """INSERT INTO actions (action_type, trigger_at, author_id, guild_id, channel_id, message_id, extra)
VALUES ($1, $2, $3, $4, $5, $6, $7)
RETURNING *;"""
stmt = await conn.prepare(query)
record = await stmt.fetchrow(
a.action_type.name,
a.trigger_at,
a.author_id,
a.guild_id,
a.channel_id,
a.message_id,
a.extra,
)
a = Action(**record)
# Once the new action created dispatcher has to be restarted
# but only if the currently active action happens later than new
if (self.active and self.active.trigger_at >= a.trigger_at) or self.active is None:
asyncio.create_task(self.redispatch())
return a
async def trigger_action(self, action: Action):
if action.action_type is ActionType.INFRACTION:
infraction = await self.bot.infractions.get_by_action(action.id)
self.bot.dispatch("expired_infraction", infraction=infraction)
else:
self.bot.dispatch("triggered_action", action=action)
await self.bot.db.pool.execute("DELETE FROM actions WHERE id = $1;", action.id)
async def trigger_short_action(self, seconds, action: Action):
await asyncio.sleep(seconds)
self.bot.dispatch("triggered_action", action=action)
|
httpolar/tomodachi
|
tomodachi/core/actions.py
|
actions.py
|
py
| 4,732 |
python
|
en
|
code
| 4 |
github-code
|
6
|
35406045180
|
import json
import os
from elasticsearch import Elasticsearch, helpers, exceptions
client = Elasticsearch(os.getenv("ELASTICSEARCH_URL"))
f = open("dump", "r")
def main():
while True:
line = f.readline()
if len(line) == 0:
break
data = json.loads(line)
yield {
'_op_type': 'index',
'_index': 'data',
'_id': data["id"],
            '_source': data  # index the record itself as the document body ('doc' is only meaningful for update actions)
}
helpers.bulk(client, main(), stats_only=True, chunk_size=2000)
|
polianax/regex
|
upload.py
|
upload.py
|
py
| 506 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3653572970
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from skimage.color import rgb2lab
from skimage.color import lab2rgb
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer
import sys
# representative RGB colours for each label, for nice display
COLOUR_RGB = {
'red': (255, 0, 0),
'orange': (255, 114, 0),
'yellow': (255, 255, 0),
'green': (0, 230, 0),
'blue': (0, 0, 255),
'purple': (187, 0, 187),
'brown': (117, 60, 0),
'pink': (255, 187, 187),
'black': (0, 0, 0),
'grey': (150, 150, 150),
'white': (255, 255, 255),
}
name_to_rgb = np.vectorize(COLOUR_RGB.get, otypes=[np.uint8, np.uint8, np.uint8])
def plot_predictions(model, lum=71, resolution=256):
"""
Create a slice of LAB colour space with given luminance; predict with the model; plot the results.
"""
wid = resolution
hei = resolution
n_ticks = 5
# create a hei*wid grid of LAB colour values, with L=lum
ag = np.linspace(-100, 100, wid)
bg = np.linspace(-100, 100, hei)
aa, bb = np.meshgrid(ag, bg)
ll = lum * np.ones((hei, wid))
lab_grid = np.stack([ll, aa, bb], axis=2)
# convert to RGB for consistency with original input
X_grid = lab2rgb(lab_grid)
# predict and convert predictions to colours so we can see what's happening
y_grid = model.predict(X_grid.reshape((wid*hei, 3)))
pixels = np.stack(name_to_rgb(y_grid), axis=1) / 255
pixels = pixels.reshape((hei, wid, 3))
# plot input and predictions
plt.figure(figsize=(10, 5))
plt.suptitle('Predictions at L=%g' % (lum,))
plt.subplot(1, 2, 1)
plt.title('Inputs')
plt.xticks(np.linspace(0, wid, n_ticks), np.linspace(-100, 100, n_ticks))
plt.yticks(np.linspace(0, hei, n_ticks), np.linspace(-100, 100, n_ticks))
plt.xlabel('A')
plt.ylabel('B')
plt.imshow(X_grid.reshape((hei, wid, 3)))
plt.subplot(1, 2, 2)
plt.title('Predicted Labels')
plt.xticks(np.linspace(0, wid, n_ticks), np.linspace(-100, 100, n_ticks))
plt.yticks(np.linspace(0, hei, n_ticks), np.linspace(-100, 100, n_ticks))
plt.xlabel('A')
plt.imshow(pixels)
#to convert rgb to lab
def rgb_to_lab(X):
X = pd.DataFrame(X)
#print(X)
X = X.values.reshape(1, -1, 3)
X = rgb2lab(X)
X = X.reshape(-1,3)
return X
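# Illustrative check (values quoted from memory, treat as approximate): converting pure sRGB red,
# rgb_to_lab([[1.0, 0.0, 0.0]]) should give roughly [[53.2, 80.1, 67.2]] in L*a*b* space.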
def main(infile):
#def main():
    data = pd.read_csv(infile)
#print(data)
X = data[['R', 'G', 'B']] # array with shape (n, 3). Divide by 255 so components are all 0-1.
#print(X)
X = X/255
X = X.values.tolist()
#print(X)
#https://stackoverflow.com/questions/34165731/a-column-vector-y-was-passed-when-a-1d-array-was-expected
y = data[['Label']].values.ravel() # array with shape (n,) of colour words.
#print(y)
# TODO: build model_rgb to predict y from X.
# TODO: print model_rgb's accuracy score
X_train, X_valid, y_train, y_valid = train_test_split(X, y)
model_rgb = GaussianNB()
model_rgb.fit(X_train, y_train)
y_predicted = model_rgb.predict(X_valid)
print(model_rgb.score(X_valid, y_valid))
# TODO: build model_lab to predict y from X by converting to LAB colour first.
# TODO: print model_lab's accuracy score
#We can create a pipeline model where the first step is a transformer that converts from RGB to LAB, and the second is a Gaussian classifier, exactly as before.
model_lab = make_pipeline(
FunctionTransformer(rgb_to_lab, validate = False),
GaussianNB()
)
model_lab.fit(X_train, y_train)
lab_y_predicted = model_lab.predict(X_valid)
print(model_lab.score(X_valid, y_valid))
plot_predictions(model_rgb)
plt.savefig('predictions_rgb.png')
plot_predictions(model_lab)
plt.savefig('predictions_lab.png')
if __name__ == '__main__':
main(sys.argv[1])
#main()
|
injoon2019/SFU_CMPT353
|
Exercise/e7/colour_bayes.py
|
colour_bayes.py
|
py
| 4,009 |
python
|
en
|
code
| 1 |
github-code
|
6
|
14200847696
|
import discord
import asyncio
from discord.ext import commands
class Channels(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.role_bot_id = int(self.bot.config['Zone']['role_bot_id'])
self.channel_private_id = int(self.bot.config['Zone']['channel_private_id'])
self.category_private_id = int(self.bot.config['Zone']['category_private_id'])
@commands.command()
async def create(self, ctx):
guild = ctx.guild
role_bot = guild.get_role(self.role_bot_id)
category_private = guild.get_channel(self.category_private_id)
if role_bot and category_private:
if ctx.channel.id == self.channel_private_id:
if f'room-{ctx.author.name}' in [ch.name for ch in guild.text_channels]:
e_msg = discord.Embed(title=f'チャンネルは既に作成されています')
await ctx.reply(embed=e_msg, allowed_mentions=discord.AllowedMentions.none())
else:
overwrites = {
guild.default_role: discord.PermissionOverwrite(view_channel=False),
ctx.author: discord.PermissionOverwrite(view_channel=True),
role_bot: discord.PermissionOverwrite(view_channel=True)
}
channel = await guild.create_text_channel(f'room-{ctx.author.name}',
overwrites=overwrites,
category=category_private)
s_msg = discord.Embed(title='プライベートチャンネルを作成しました', description=f'チャンネル: {channel.mention}')
await ctx.reply(embed=s_msg, allowed_mentions=discord.AllowedMentions.none())
@commands.command()
async def clean(self, ctx):
guild = ctx.guild
category_private = guild.get_channel(self.category_private_id)
if category_private:
if ctx.channel.id == self.channel_private_id:
user_channel = [ch for ch in guild.text_channels if ch.name == f'room-{ctx.author.name}']
if user_channel:
e_msg = discord.Embed(title=f'チャンネの再生成',
description="再生成する場合は`y`、キャンセルする場合は`n`を送信してください")
re_msg = await ctx.reply(embed=e_msg, allowed_mentions=discord.AllowedMentions.none())
def check(message):
if message.author == ctx.author and (message.content in ["y", "n"]):
return message.content
try:
msg = await self.bot.wait_for('message', timeout=15.0, check=check)
except asyncio.TimeoutError:
                        await re_msg.edit(embed=discord.Embed(description='時間切れです'))
                        return
if msg.content == 'y':
await msg.delete()
new_channel = await user_channel[0].clone(name=f'room-{ctx.author.name}')
await user_channel[0].delete()
await re_msg.edit(embed=discord.Embed(title='再生成しました',
description=f'チャンネル: {new_channel.mention}'))
elif msg.content == 'n':
await msg.delete()
await re_msg.edit(embed=discord.Embed(description='キャンセルしました'))
else:
pass
else:
await ctx.reply(embed=discord.Embed(description="プライベートチャンネルが見つかりません"),
allowed_mentions=discord.AllowedMentions.none())
def setup(bot):
bot.add_cog(Channels(bot))
|
yutarou12/bot-zone
|
cogs/channels.py
|
channels.py
|
py
| 3,982 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40347602041
|
class Node:
def __init__(self, val, left=None, right=None):
self.val = val
self.left = left
self.right = right
def serialize(root):
queue = [root]
string = ""
while queue:
curr = queue.pop(0)
if curr is not None:
queue.append(curr.left)
queue.append(curr.right)
string += curr.val + " "
else:
string += "None "
return string[:-1] # chop off last space lol
def deserialize(s):
if s == "":
return
str_list_nodes = s.split()
node_objects = []
for x in str_list_nodes:
node_objects.append(Node(x))
for i in range(len(node_objects)):
if node_objects[i].val != "None":
node_objects[i].left = node_objects[2*i+1]
node_objects[i].right = node_objects[2*i+2]
return node_objects[0]
node = Node('root', Node('left', Node('left.left')), Node('right'))
print(deserialize("root left right left.left None None None None None").left.left.val)
assert deserialize(serialize(node)).left.left.val == 'left.left'
|
dunningkrugerkid/programming-problems
|
tree_problems.py
|
tree_problems.py
|
py
| 1,116 |
python
|
en
|
code
| 0 |
github-code
|
6
|
23247293601
|
class Person:
def __init__(self, language):
self.language = language
def hello(self):
if self.language == "ko":
print("안녕")
elif self.language == "en":
print("hey")
def hello(language):
if language == "ko":
print("안녕")
elif language == "en":
print("hey")
jihyun = Person("ko")
jihyun.hello()
eunjae = Person("en")
eunjae.hello()
hello("en")
for (i, num) in enumerate([3,8,6,10]):
print(i, num)
|
yoonej111/translation-practice-program
|
person.py
|
person.py
|
py
| 494 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17435023939
|
# coding: utf-8
"""
Simple multithread task manager
__author_ = 'naubull2 ([email protected])'
"""
import logging
import random
import json
import time
import atexit
from queue import Queue
from threading import Thread
logger = logging.getLogger("dialog-tool")
class Worker(Thread):
"""
Thread executing tasks from a given tasks queue
"""
def __init__(self, tasks):
Thread.__init__(self)
self.tasks = tasks
# run as daemon thread in background
self.daemon = True
self.start()
def run(self):
while True:
func, kwargs = self.tasks.get()
try:
func(**kwargs)
except Exception as e: # pylint: disable=broad-except
logger.error(f"Evaluator Error: {str(e)}")
finally:
# Mark this task as done, whether an exception happened or not
self.tasks.task_done()
class ThreadPool(object):
"""
Pool of threads consuming tasks from a queue
- add_task()
: Worker thread runs func(**kwargs)
: busy waiting for a task
- graceful_stop()
: Wait until all running jobs are done
"""
def __init__(self, num_threads):
self.tasks = Queue(num_threads)
for _ in range(num_threads):
Worker(self.tasks)
def add_task(self, handler, **kwargs):
"""
Add a task to the queue
"""
self.tasks.put((handler, kwargs))
def graceful_stop(self):
"""
Wait for completion of all the tasks in the queue
"""
self.tasks.join()
class EvaluationTaskManager(object):
"""
Class for centralized managing of new evaluation tasks
"""
def __init__(self, pool_size=5):
self.pool = ThreadPool(pool_size)
atexit.register(self.finalize)
def add_task(self, handler, **kwargs):
"""
Runs handler function with **kwargs
"""
self.pool.add_task(handler, **kwargs)
def finalize(self):
"""
Registered as 'atexit' handler
"""
logger.info("MANAGER: Waiting for all jobs to finish")
self.pool.graceful_stop() # wait until all evaluations are finished
logger.info("MANAGER: all jobs are finished")
if __name__ == "__main__":
import requests
###############################################################################
    # NOTE The last task finishes last; check that threads are gracefully joined
#
# Success in handler api1: Sup.
# Success in handler api2: Sleep tight.
# MANAGER: Waiting for all jobs to finish
# Success in handler api3: Yeah lets meet after lunch
# MANAGER: all jobs are finished
###############################################################################
task_manager = EvaluationTaskManager(pool_size=2)
def sample_handler(name, url, q):
"""make a delayed call to the given API url, print output response to the logger"""
time.sleep(random.random() * 10)
try:
ret = requests.get(url, params={"q": q}).json()
except Exception as e:
logger.error(f"Error in handler {name}: {str(e)}")
else:
logger.info(f'Success in handler {name}: {ret["output"]}')
    # Suppose localhost is running a conversation API on port 8988
task_manager.add_task(
sample_handler,
name="api1",
url="http://localhost:8988/chat",
q="Hey what's up"
)
task_manager.add_task(
sample_handler,
name="api2",
url="http://localhost:8988/chat",
q="Good night"
)
task_manager.add_task(
sample_handler,
name="api3",
url="http://localhost:8988/chat",
q="We had a lunch meeting tommorow?",
)
time.sleep(10)
|
naubull2/codingtests
|
frequent_subjects/task_manager.py
|
task_manager.py
|
py
| 3,836 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73743939389
|
import os
from os import walk, getcwd
from PIL import Image
""" Class label (BDD) """
# same order with yolo format class annotation
classes = [ "bike" , "bus" , "car", "motor", "person", "rider", "traffic light", "traffic sign", "train", "truck"]
""" Inverse convert function """
def i_convert(size, box):
x = box[0]*size[0]
y = box[1]*size[1]
w = box[2]*size[0]
h = box[3]*size[1]
xmin = x - w/2
xmax = x + w/2
ymin = y - h/2
ymax = y + h/2
return (xmin, xmax, ymin, ymax)
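# Illustrative example (numbers are hypothetical, not from the original script): for a 1280x720
# image, the normalized YOLO box (x=0.5, y=0.5, w=0.25, h=0.5) converts as
# i_convert((1280, 720), (0.5, 0.5, 0.25, 0.5)) == (480.0, 800.0, 180.0, 540.0),
# i.e. (xmin, xmax, ymin, ymax) in pixel coordinates.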
mypath = "./labels/100k/train/" # txt file path
wd = getcwd()
txt_outfile =open('gt_bdd_train.json','w') # output json file name
txt_outfile.write("[\n")
""" Get input text file list """
txt_name_list = []
for (dirpath, dirnames, filenames) in walk(mypath):
txt_name_list.extend(filenames)
break
""" Process """
start = 0
for txt_name in txt_name_list:
""" Open input text files """
txt_path = mypath + txt_name
txt_file = open(txt_path, "r")
lines = txt_file.read().splitlines()
""" Open input image file """
img_path = txt_path.replace("labels","images")
img_path = img_path.replace("txt", "jpg")
img = Image.open(img_path)
img_size = img.size
""" Convert the YOLO format to BDD evaluation format """
for line in lines:
if(len(line) > 0):
if start != 0:
txt_outfile.write(",\n")
else :
start = 1
elems = line.split()
cls_id = int(elems[0])
x = elems[1]
y = elems[2]
w = elems[3]
h = elems[4]
box = (float(x), float(y), float(w), float(h))
xmin, xmax, ymin, ymax = i_convert(img_size, box)
txt_outfile.write("\t{\n\t\t\"name\":\"%s\",\n\t\t\"category\":\"%s\",\n\t\t\"bbox\":[%f,%f,%f,%f]\n\t}" %(os.path.splitext(txt_name)[0],classes[cls_id],xmin,ymin,xmax,ymax))
txt_outfile.write("\n]")
txt_outfile.close()
|
jwchoi384/Gaussian_YOLOv3
|
bdd_evaluation/convert_txt_to_bdd_eval_json.py
|
convert_txt_to_bdd_eval_json.py
|
py
| 2,038 |
python
|
en
|
code
| 660 |
github-code
|
6
|
35489755246
|
# file to work with polls
import db_interface
import random
num_to_part = {
0: "noun",
1: "verb",
2: "adj",
3: "adv",
4: "other"
}
class Poll:
def __init__(self, options, correct_option_id, question, is_anonymous):
self.options = options
self.correct_option_id = correct_option_id
self.question = question
self.is_anonymous = is_anonymous
def send(self, chat_id, bot):
poll_message = bot.send_poll(chat_id=chat_id,
options=self.options,
correct_option_id=self.correct_option_id,
type='quiz',
question=self.question,
is_anonymous=self.is_anonymous)
return poll_message
# generates quiz when user types "/quiz"
def generate_quiz(dict_id='B1'):
dictionary = db_interface.get_words_by_dict_id(dict_id)
highest_number = 4
if dict_id == 'ALL':
highest_number = 0
if dict_id == 'C1':
highest_number = 3
part_number = random.randint(0, highest_number)
indexes_options = random.sample(range(0, len(dictionary[num_to_part[part_number]]['word'])), 4)
answer_options = []
for x in indexes_options:
answer_options.append({
"word": dictionary[num_to_part[part_number]]['word'][x],
"trsl": dictionary[num_to_part[part_number]]['trsl'][x],
"trsc": dictionary[num_to_part[part_number]]['trsc'][x],
})
word_number = random.randint(0, 3)
return word_number, answer_options
# Creates a poll
def create_poll(dict_id='TEST_ALL'):
word_number, answer_options = generate_quiz(dict_id)
for answer in answer_options:
answer['word'] = answer['word'].capitalize()
answer['trsl'] = answer['trsl'].capitalize()
quiz_text = f"Как переводится слово: {answer_options[word_number]['word']} [{answer_options[word_number]['trsc']}]?\n"
possible_answers = [answer['trsl'] for answer in answer_options]
return Poll(possible_answers, word_number, quiz_text, True)
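# Hypothetical usage sketch (the bot object and chat id are assumptions, not part of this module):
# with a pyTelegramBotAPI-style bot exposing send_poll(), a quiz for the B1 dictionary could be
# created and sent like this:
#
#     poll = create_poll('B1')
#     poll.send(chat_id=123456789, bot=bot)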
|
Bliznetc/tg_bot_
|
polls.py
|
polls.py
|
py
| 2,160 |
python
|
en
|
code
| 1 |
github-code
|
6
|
18091289859
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('account', '0045_auto_20150130_0558'),
]
operations = [
migrations.AlterField(
model_name='basicmemberinformation',
name='auth_key',
field=models.CharField(default='43f9a685bc7146b4ecc63bdf9bc3e5136b7543f436a42e4a2f2ae749ffb0c6db', max_length=64),
preserve_default=True,
),
]
|
hongdangodori/slehome
|
slehome/account/migrations/0046_auto_20150130_0600.py
|
0046_auto_20150130_0600.py
|
py
| 531 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39713348458
|
from os.path import join, dirname, realpath, exists
from PIL import Image, ImageDraw, ImageFont
import numpy
import base64
from io import BytesIO
# info: image (PNG, JPG) to base64 conversion (string), learn about base64 on wikipedia https://en.wikipedia.org/wiki/Base64
def image_base64(img, img_type):
with BytesIO() as buffer:
img.save(buffer, img_type)
return base64.b64encode(buffer.getvalue()).decode()
# info: formatter preps base64 string for inclusion, ie <img src=[this return value] ... />
def image_formatter(img, img_type):
return "data:image/" + img_type + ";base64," + image_base64(img, img_type)
# text on an image
def drawFile(file, img_dict):
if exists(join(dirname(realpath(__file__)), f"static/design/drawn_images/{img_dict['file']}")):
print('file exists using drawn')
return join(dirname(realpath(__file__)), f"static/design/drawn_images/{img_dict['file']}")
else:
print('making file')
new_img = Image.open(join(dirname(realpath(__file__)), file))
d1 = ImageDraw.Draw(new_img)
font = ImageFont.truetype(join(dirname(realpath(__file__)), 'static/Roboto-MediumItalic.ttf'), 20)
d1.text((0, 0), f"{img_dict['label']}", font=font, fill=(255, 0, 0))
new_img.save(join(dirname(realpath(__file__)), f"static/design/drawn_images/{img_dict['file']}"))
drawn_file = join(dirname(realpath(__file__)), f"static/design/drawn_images/{img_dict['file']}")
return drawn_file
# info: color_data prepares a series of images for data analysis
def image_data(path="static/design/", img_list=None): # info: path of static images is defaulted
if img_list is None: # info: color_dict is defined with defaults and these are the images showing up
img_list = [
{'source': "Katie's Phone", 'label': "Katie Hickman", 'file': "katiergb.jpg"},
{'source': "Shreya's Phone", 'label': "Shreya Ahuja", 'file': "banff.jpg"},
{'source': "Derek's Phone", 'label': "Derek Bokelman", 'file': "derekrgb.jpeg"},
{'source': "Kian's Phone", 'label': "Kian Pasokhi", 'file': "kianplane2.jpg"},
]
# info: gather analysis data and meta data for each image, adding attributes to each row in table
for img_dict in img_list:
# to fix static images
img_dict['path'] = '/' + path
file = path + img_dict['file']
print(file)
img_reference = Image.open(drawFile(file, img_dict))
img_data = img_reference.getdata() # https://www.geeksforgeeks.org/python-pil-image-getdata/
img_dict['format'] = img_reference.format
img_dict['mode'] = img_reference.mode
img_dict['size'] = img_reference.size
# info: Conversion of original Image to Base64, a string format that serves HTML nicely
img_dict['base64'] = image_formatter(img_reference, img_dict['format'])
# info: Numpy is used to allow easy access to data of image, python list
img_dict['data'] = numpy.array(img_data)
img_dict['hex_array'] = []
img_dict['binary_array'] = []
img_dict['gray_data'] = []
# info: 'data' is a list of RGB data, the list is traversed and hex and binary lists are calculated and formatted
for pixel in img_dict['data']:
# hexadecimal conversions
hex_value = hex(pixel[0])[-2:] + hex(pixel[1])[-2:] + hex(pixel[2])[-2:]
hex_value = hex_value.replace("x", "0")
img_dict['hex_array'].append("#" + hex_value)
# binary conversions
bin_value = bin(pixel[0])[2:].zfill(8) + " " + bin(pixel[1])[2:].zfill(8) + " " + bin(pixel[2])[2:].zfill(8)
img_dict['binary_array'].append(bin_value)
# info: create gray scale of image, ref: https://www.geeksforgeeks.org/convert-a-numpy-array-to-an-image/
# for pixel in img_dict['data']: we changed this to a # to make it more efficient based on big O notation (deleting second loop)
average = (pixel[0] + pixel[1] + pixel[2]) // 3
if len(pixel) > 3:
img_dict['gray_data'].append((average, average, average, pixel[3]))
else:
img_dict['gray_data'].append((average, average, average))
# end for loop for pixel
img_reference.putdata(img_dict['gray_data'])
img_dict['base64_GRAY'] = image_formatter(img_reference, img_dict['format'])
# for hex and binary values
img_dict['hex_array_GRAY'] = []
img_dict['binary_array_GRAY'] = []
# for grayscale binary/hex changes
for pixel in img_dict['gray_data']:
# hexadecimal conversions
hex_value = hex(pixel[0])[-2:] + hex(pixel[1])[-2:] + hex(pixel[2])[-2:]
hex_value = hex_value.replace("x", "0")
img_dict['hex_array_GRAY'].append("#" + hex_value)
# binary conversions
bin_value = bin(pixel[0])[2:].zfill(8) + " " + bin(pixel[1])[2:].zfill(8) + " " + bin(pixel[2])[2:].zfill(8)
img_dict['binary_array_GRAY'].append(bin_value)
return img_list # list is returned with all the attributes for each image dictionary
# run this as standalone tester to see data printed in terminal
# if __name__ == "__main__":
# local_path = "./static/img/"
# img_test = [
# {'source': "iconsdb.com", 'label': "Blue square", 'file': "blue-square-16.png"},
# ]
# web = False
# items = image_data(local_path, img_test, web) # path of local run
# for row in items:
# # print some details about the image so you can validate that it looks like it is working
# # meta data
# print("---- meta data -----")
# print(row['label'])
# print(row['format'])
# print(row['mode'])
# print(row['size'])
# # data
# print("---- data -----")
# print(row['data'])
# print("---- gray data -----")
# print(row['gray_data'])
# print("---- hex of data -----")
# print(row['hex_array'])
# print("---- bin of data -----")
# print(row['binary_array'])
# # base65
# print("---- base64 -----")
# print(row['base64'])
# # display image
# print("---- render and write in image -----")
# filename = local_path + row['file']
# image_ref = Image.open(filename)
# draw = ImageDraw.Draw(image_ref)
# draw.text((0, 0), "Size is {0} X {1}".format(*row['size'])) # draw in image
# image_ref.show()
# print()
|
katiehickman/m224_seals
|
image.py
|
image.py
|
py
| 6,588 |
python
|
en
|
code
| 1 |
github-code
|
6
|
16409168230
|
import random
import time
'''
Data store. In the future this will be migrated to a real, persistent database;
for now the contents below serve as initialisation data,
and one way of displaying that data is provided as well.
'''
'''
================================================================tools=================================================
'''
'''
For lists that contain only str items: concatenate them into a single string.
'''
def geting_str(lists):
lis = ''
for items in lists:
lis += items
return lis
'''
将str转换为float
'''
def eval_str(lists):
lis = list()
for items in lists:
lis.append(eval(items))
return lis
'''
get_all_in_lists is a helper for collecting the contents of an arbitrarily nested list.
Given such a list, it joins all of its contents into a single str and returns it.
'''
def get_all_in_lists(lists):
what_to_get = ''
for items in lists:
if type(items)==type(list()):
what_to_get +=get_all_in_lists(items)
else:
what_to_get +=items
return what_to_get
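# Illustrative example: get_all_in_lists(['a', ['b', ['c']], 'd']) returns 'abcd'.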
'''
choice_weight() is a weighting helper.
list1 holds the original items,
list2 holds the corresponding weights,
and the returned list3 is the item list expanded according to those weights.
'''
def choice_weight(list1,list2):
times = 0
list3 = list()
for items in list1:
#print(list2[times])
if list2[times] >= 1:
for i in range(0,list2[times]):
list3.append(items)
else:
if (time.time()%10)/10 < list2[times]:
list3.append(items)
times += 1
return list3
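# Illustrative example (hypothetical values): choice_weight(["a", "b"], [0.5, 2]) always yields
# two copies of "b" and, roughly half of the time (based on the time.time() check above), a
# leading "a" as well. Note that weights >= 1 must be ints, since they are passed to range().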
'''
show_data and show_dic are simple display helpers.
They print a plain list and a dict that contains only dict values, respectively.
'''
def show_data(lis):
print(
'''====================================================================\n'''+str(lis)+'\n====================================================================\n'
)
def show_dic(dic):
for items in dic:
if type(dic[items])==type({}):
print('==================================\n' + items + '\n==================================')
show_dic(dic[items])
else:
print(items+':'+dic[items])
'''
为生物做设定
内容包含基础存在的参数和对应权重
未来会增加,每次增加都需要更改all_list前缀的list,使之包含对应内容
'''
'''
================================================================datas=================================================
'''
#种族设定
types_list = ["神", "精灵", "平民"]
types_list_weight = [0.125,0.25,2]
sex_list = ["男","女","不涉及"]
sex_list_sex= [1,2,0.005]
height_list = ["高", "低"]
height_list_weight = [0.5,2]
zu_list = ["宗族","散人"]
zu_list_weight =[0.25,2]
waimao_list =["平平无奇","丑陋不堪","姿色极佳","倾国倾城"]
waimao_list_weight = [2,0.5,1,0.5]
all_list = [tuple(types_list),tuple(sex_list),tuple(height_list),tuple(zu_list),tuple(waimao_list)]
all_list_weight = [tuple(types_list_weight),tuple(sex_list_sex),tuple(height_list_weight),tuple(zu_list_weight),tuple(waimao_list_weight)]
all_list_name = ("种族","性别","位阶","族群","外貌")
#all_list_values = ["types","sex","height","zu"]
'''
Family names are determined by race and rank.
For commoners, the given name additionally depends on gender;
furthermore, male clan commoners get a special second character.
'''
# naming settings
first_name = {"low_JL":("孑然/白马/青丘/夜宴/寒山/风月/崆峒/轩墨/虎/错/留观".split('/')),
"high_JL":("饕餮/千年/劫/麒麟/龙/乘/忘川/修罗/貔貅/沧海".split('/')),
"god":("神隐/时烬/墨染/须臾/帝/殇酒".split('/')),
"high_M":("猫/小诗/胧胧/皮皮/丘丘/杀神/楚楚".split('/')),
"low_M":("宫/鱼/方/陈/夏/黄/萧/上官/李/公输/叶/林/南方".split('/'))}
second_name_1 = {
"man_san":("知/忘/溟/斩/留//超/空/读/修/酒///莫///虚/寒/笑/乱/秋/抄/丹/青".split('/')),
"man_zu":("天/地/玄/黄/宇/宙/洪/荒/河/清/日/海/晏".split('/')),
"woman":("初/海/湘/倩/冰//溟/空/虚/清/记/怜/海/".split('/')),
"low_JL":("非/空/秋/其/小/筑/城/时/凉///".split('/')),
"high_JL":("窥/红/临/留/钟/流/晴//弥/区".split('/')),
"god":("古/释//上/枯/旧/初//".split('/'))
}
second_name = {"man_zu":('才/德/行/渺/名/记/者/客/迷/烬/期/棋/奇'.split('/')),
"man_san":("书/名/定/克/千/终/青/必/天/生/棋".split('/')),
"woman":("倩/涵/韵/晗/菡/脉/冰/琳/林/霖/琦/期/幂/宓/姬/璃".split('/')),
"low_JL":("马/非/山/名/兽/橙/诗/时/印".split('/')),
"high_JL":("天/名/江/仙/棋/觞/明/祭/耳".split('/')),
"god":("名/天/隐/舞/斩/清/枯/冬/秋/封-".split('/')),
}
# parameter settings
types_of_job = {"low_JL":("巡林者/守夜人/指路人/导学者/堕仙".split('/')),
"high_JL":("神助/执法者/守剑者/守秘人/超位精灵/半神/高阶魔导/德鲁伊/鹰巡".split('/')),
"god":("守时人/引路人/告密者/驱命者/殇神/秘宗/除尘/熄灯者/语冰人".split('/')),
"high_M":("修客/.\"女王.\"/豪绅/伪绅/强盗/前旧约骑士/新约骑士/祭司".split('/')),
"low_M":("耕客/行商/村医/祝客/电工/初心者/隐者/无名/隐名".split('/'))}
types_of_needs_basic = ("尚武","崇文","贪财","好色","滥情","惜命","从善")
JL_need_weight =[3,1,0.5,5,2,3,2]
M_need_weight =[0.5,0.5,1,3,0.5,1,0.7]
god_need_weight =[0.1,0.1,0.5,1,1,0.3,1]
all_out_look = ["发色","刘海","后发","脸型","身材","腿型","脚型","气质"]
out_look_dict_man ={
"发色":"玄墨/银白/金黄/冥紫/骚粉/粉红/天蓝/朱红/碧绿/苍白".split('/'),
"刘海":"M字刘海/中分/高额头/覆眼".split('/'),
"后发":"短双马尾/长双马尾/及腰长发/长至脚踝的长发/及颈部的短发/马尾/盘头发/羊角辫/盘子头发/姬式长发/姬式短发/笔直的后发".split('/'),
"脸型":"瓜子脸/鹅蛋脸".split('/'),
"身材":"瘦弱的身姿/匀称的身材/圆润的身材".split('/'),
"腿型":"笔直的腿/纤细的腿/匀称的腿/粗壮的大腿".split('/'),
"气质":"文弱书生/大将军/大侠/仙风道骨/无情客/隐者".split('/')
}
out_look_dict_man_weight ={
"发色":"4/2/1/0.5/0.1/0.3/1.5/1/0.4/1".split('/'),
"刘海":"4/2/1/0.5".split('/'),
"后发":"0.1/0.1/2/0.3/2/0.2/1/0.01/0.3/0.2/0.1/5".split('/'),
"脸型":"3/5".split('/'),
"身材":"2/3/4".split('/'),
"腿型":"3/4/5/6".split('/'),
"气质":"2/3/2/5/1/1".split('/')
}
out_look_good_dict_man ={
"发色":"乌黑浓密的/飘逸的/落落大方的/清新脱俗的/飘逸的/干净利落的/笔直干练的/精神焕发的/飘飘若仙的/飘飘若仙的".split('/'),
"刘海":"/".split('/'),
"后发":"/".split('/'),
"脸型":"可人的/俊俏的".split('/'),
"身材":"迷人的/令人激赏的/讨人喜欢的".split('/'),
"腿型":"腿玩年的/强壮的/紧实的/完美的".split('/'),
"气质":"帅气的/俊秀的".split('/')
}
out_look_good_dict_man_weight ={
"发色":"4/2/1/0.5/0.1/0.3/1.5/1/0.4/1".split('/'),
"刘海":"4/2".split('/'),
"后发":"0.1/0.1".split('/'),
"脸型":"3/5".split('/'),
"身材":"2/3/4".split('/'),
"腿型":"3/4/5/6".split('/'),
"气质":"2/3".split('/')
}
out_look_bad_dict_man ={
"发色":"头发稀疏的/乱蓬蓬的/脏乱的/不整洁的/过时的".split('/'),
"刘海":"/".split('/'),
"后发":"/".split('/'),
"脸型":"很突出的/令人不快的".split('/'),
"身材":"不好看的/平常的/略有些畸形的".split('/'),
"腿型":"普通的/平常的/略有些罗圈的/略不等长的".split('/'),
"气质":"猥琐至极/不能直视".split('/')
}
out_look_bad_dict_man_weight ={
"发色":"4/2/1/0.5/2".split('/'),
"刘海":"4/2".split('/'),
"后发":"0.1/0.1".split('/'),
"脸型":"3/5".split('/'),
"身材":"2/3/4".split('/'),
"腿型":"3/4/5/6".split('/'),
"气质":"2/3".split('/')
}
out_look_dict_woman ={
"发色":"玄墨/银白/金黄/冥紫/骚粉/粉红/天蓝/朱红/碧绿/苍白".split('/'),
"刘海":"M字刘海/中分/高额头/覆眼".split('/'),
"后发":"短双马尾/长双马尾/及腰长发/长至脚踝的长发/及颈部的短发/留着马尾/盘头的长发/扎着羊角辫/扎着盘子头/姬式长发/姬式短发/笔直的后发".split('/'),
"脸型":"瓜子脸/鹅蛋脸".split('/'),
"身材":"瘦弱的身体/匀称的身材/圆润的身体".split('/'),
"腿型":"笔直的腿/纤细的腿/匀称的腿/粗壮的腿".split('/'),
"气质":"小家碧玉/闺中少女/大家闺秀/仙子/风尘女子/美人儿".split('/')
}
out_look_dict_woman_weight ={
"发色":"8/5/3/2/5/3/2/4/1/4".split('/'),
"刘海":"4/2/1/0.5".split('/'),
"后发":"6/4/4/3/3/5/1/7/3/6/6/5".split('/'),
"脸型":"5/3".split('/'),
"身材":"6/5/4".split('/'),
"腿型":"5/4/5/1".split('/'),
"气质":"2/3/2/5/1/1".split('/')
}
out_look_good_dict_woman ={
"发色":"乌黑浓密的/飘逸的/大方的/清新脱俗的/飘逸的/干净利落的/笔直干练的/精神焕发的/飘飘若仙的/飘飘若仙的".split('/'),
"刘海":"/".split('/'),
"后发":"/".split('/'),
"脸型":"娇美的/惹人怜爱的".split('/'),
"身材":"迷人的/令人激赏的/讨人喜欢的".split('/'),
"腿型":"腿玩年的/强壮的/紧实的/完美的".split('/'),
"气质":"倾国倾城的/如沐春风的".split('/')
}
out_look_good_dict_woman_weight ={
"发色":"4/2/1/0.5/0.1/0.3/1.5/1/0.4/1".split('/'),
"刘海":"4/2".split('/'),
"后发":"0.1/0.1".split('/'),
"脸型":"3/5".split('/'),
"身材":"2/3/4".split('/'),
"腿型":"3/4/5/6".split('/'),
"气质":"2/3".split('/')
}
out_look_bad_dict_woman ={
"发色":"头发稀疏的/乱蓬蓬的/脏乱的/不整洁的/过时的".split('/'),
"刘海":"/".split('/'),
"后发":"/".split('/'),
"脸型":"很突出的/令人不快的".split('/'),
"身材":"不好看的/平常的/略有些畸形的".split('/'),
"腿型":"普通的/平常的/略有些罗圈的/略不等长的".split('/'),
"气质":"挺丑的/难看的".split('/')
}
out_look_bad_dict_woman_weight ={
"发色":"4/2/1/0.5/2".split('/'),
"刘海":"4/2".split('/'),
"后发":"0.1/0.1".split('/'),
"脸型":"3/5".split('/'),
"身材":"2/3/4".split('/'),
"腿型":"3/4/5/6".split('/'),
"气质":"2/3".split('/')
}
def out_look_get(item,return_list):
#print(return_list)
if return_list[1] == '男':
if return_list[4] == '丑陋不堪' and return_list[0] !='神':
to_return = geting_str(random.choices(out_look_bad_dict_man[item],eval_str(out_look_bad_dict_man_weight[item]),k=1))
else:
to_return = geting_str(random.choices(out_look_good_dict_man[item], eval_str(out_look_good_dict_man_weight[item]), k=1))
to_return += geting_str(random.choices(out_look_dict_man[item],eval_str(out_look_dict_man_weight[item]),k=1))
else:
if return_list[4] == '丑陋不堪' and return_list[0] !='神':
to_return = geting_str(random.choices(out_look_bad_dict_woman[item], eval_str(out_look_bad_dict_woman_weight[item]), k=1))
else:
to_return = geting_str(random.choices(out_look_good_dict_woman[item], eval_str(out_look_good_dict_woman_weight[item]), k=1))
to_return += geting_str(random.choices(out_look_dict_woman[item], eval_str(out_look_dict_woman_weight[item]),k=1))
return to_return
|
masterfzb/AIreader
|
data_base.py
|
data_base.py
|
py
| 10,760 |
python
|
en
|
code
| 1 |
github-code
|
6
|
35257204680
|
import minerl
from minerl.data import BufferedBatchIter
import numpy as np
import random
from itertools import combinations
from actions import action_names
import cv2
import numpy as np
import torch
'''
The mineRL framework models actions as dictionaries of individual actions. Player recorded demonstration data has multiple
combinations of actions. The number of feasible combinations is too high and this would make it very hard for the agent
to generalize. Instead, we limit the agent to a smaller set of possible actions and their combinations. These basic actions
and their combinations are listed below. While training, we use frame skipping. Hence, one state is a combination of k frames
and their k actions. Action aggregation combines these k actions into one and action mapping maps this combined action to one
of the actions that the agent can perform.
'''
basic_actions = {'forward', 'back', 'left', 'right', 'attack', 'jump', 'look-left', 'look-right', 'look-up', 'look-down'}
action_combos = [{'forward', 'left'}, {'forward', 'right'}, {'forward', 'jump'}, {'forward', 'attack'}]
def get_aggregate_action(actions, cam_threshold=2.0):
'''
Function to aggregate actions from k transitions into one combined action
NOTE: Threshold is set to discount any micro-adjustments and only count camera movements for directional navigation
'''
# Removing spring and sneak from the actions dict
actions.pop('sneak')
actions.pop('sprint')
aggregate_action = actions
for key in aggregate_action.keys():
        # Sum up the occurrences of all actions other than the camera movement action
if not key=='camera':
aggregate_action[key] = np.sum(actions[key], axis=0)
else:
# For the camera action, instead of simply adding up movements, we compare the movement angle to a threshold
# The absolute maximum angle from one camera movement marks the direction of camera motion (l, r, u, d)
# We create a list with the camera movements from all k transitions called heading
heading = [0,0,0,0] # left, right, up, down
for i in list(actions[key]):
idx = np.argmax(np.abs(i))
if abs(i[idx]) > cam_threshold:
if idx == 0:
# Left OR Right
if i[idx] > 0:
# Left
heading[0] += 1
else:
# Right
heading[1] += 1
if idx == 1:
# Up OR Down
if i[idx] > 0:
# Up
heading[2] += 1
else:
# Down
heading[3] += 1
aggregate_action[key] = np.array(heading)
# Set camera movement to the direction that was chosen the most often. If multiple exist then choose one randomly
max_idx = [i for i, x in enumerate(heading) if x == max(heading)]
cam_dir = random.choice(max_idx) # 0,1,2,3 corresponds to l,r,u,d
            # The 'camera' key now holds the max number of direction occurrences and the direction that occurred
aggregate_action['camera'] = [max(heading) ,cam_dir]
# Popping out any action that was not chosen
noop_list = []
for key, value in aggregate_action.items():
if not key=='camera':
if value == 0:
noop_list.append(key)
else:
if value[0] == 0:
noop_list.append(key)
for key in noop_list:
aggregate_action.pop(key)
# Mapping camera directions to the movement and dropping out the 'camera' key
cam_dirs = {0:'look-left', 1:'look-right', 2:'look-up', 3:'look-down'}
if 'camera' in aggregate_action:
cam = aggregate_action.pop('camera')
aggregate_action[cam_dirs[cam[1]]] = cam[0]
# print(aggregate_action)
return aggregate_action
def map_aggregate_action(aggregate_action):
'''
Function to map an aggregate action to one of the agent's available actions
'''
# If empty then select no-operation action
if len(aggregate_action.keys()) == 0:
action = 'noop'
# If there is only one action then pick that one
elif len(aggregate_action.keys()) == 1:
if list(aggregate_action.keys())[0] in basic_actions:
action = list(aggregate_action.keys())[0]
    # If there are two actions then check if that pair is possible. Pick the pair if it is, else pick the most frequently occurring one
elif len(aggregate_action.keys()) == 2:
if set(aggregate_action.keys()) in action_combos:
action = list(aggregate_action.keys())[0] + "_" + list(aggregate_action.keys())[1]
else:
max_idx = [i for i, x in enumerate(aggregate_action.values()) if x == max(aggregate_action.values())]
action = list(aggregate_action.keys())[random.choice(max_idx)]
    # If there are more than 2 actions then check all pairs. Pick a pair with the max total occurrence count
elif len(aggregate_action.keys()) > 2:
action_pairs = list(combinations(aggregate_action.keys(), 2))
max_occurences = 0
action = None
pair_match = False
for pair in action_pairs:
if set(pair) in action_combos:
pair_match = True
if aggregate_action[pair[0]] + aggregate_action[pair[1]] > max_occurences:
max_occurences = aggregate_action[pair[0]] + aggregate_action[pair[1]]
action = pair[0] + "_" + pair[1]
if not pair_match:
max_idx = [i for i, x in enumerate(aggregate_action.values()) if x == max(aggregate_action.values())]
action = list(aggregate_action.keys())[random.choice(max_idx)]
return action
def sample_demo_batch(demo_replay_memory, batch_size, grayscale=True):
'''
Returns batch_size number of transitions containing frame_stack in-game transitions. One transition here has
frame_stack number of in-game frames (because of frame-skipping and concatenation of observation images)
'''
# Setting up empty lists and zero arrays to store batch_size number of transitions
batch_states = []
batch_next_states = []
# if grayscale == True:
# batch_states = np.zeros((batch_size, 2, 64, 64))
# batch_next_states = np.zeros((batch_size, 2, 64, 64))
# else:
# batch_states = np.zeros((batch_size, 2, 64, 64, 3))
# batch_next_states = np.zeros((batch_size, 2, 64, 64, 3))
batch_actions = []
batch_rewards = []
batch_dones = []
# batch_actions = np.zeros((batch_size))
# batch_rewards = np.zeros((batch_size))
# batch_dones = np.zeros((batch_size))
count = 0
for current_states, actions, rewards, next_states, dones in demo_replay_memory:
if count == batch_size:
break
count +=1
# for i in range(batch_size):
# current_states, actions, rewards, next_states, dones = next(demo_replay_memory)
# Grayscale
if grayscale==True:
current_states_gray = np.zeros((current_states['pov'].shape[:-1]))
next_states_gray = np.zeros((next_states['pov'].shape[:-1]))
for j in range(current_states['pov'].shape[0]):
# current_states_gray = np.zeros((current_states['pov'].shape[:-1]))
# next_states_gray = np.zeros((next_states['pov'].shape[:-1]))
current_states_gray[j] = cv2.cvtColor(current_states['pov'][j], cv2.COLOR_BGR2GRAY)
next_states_gray[j] = cv2.cvtColor(next_states['pov'][j], cv2.COLOR_BGR2GRAY)
batch_states.append(current_states_gray)
batch_next_states.append(next_states_gray)
# batch_states[i] = current_states_gray
# batch_next_states[i] = next_states_gray
else:
batch_states.append(current_states['pov'])
batch_next_states.append(next_states['pov'])
# batch_states[i] = current_states['pov']
# batch_next_states[i] = next_states['pov']
batch_rewards.append(np.sum(rewards))
# batch_rewards[i] = np.sum(rewards)
aggregate_action = get_aggregate_action(actions)
agent_action = map_aggregate_action(aggregate_action)
action_idx = action_names[agent_action]
batch_actions.append(action_idx)
# batch_actions[i] = action_idx
if np.sum(dones) > 0:
batch_dones.append(1)
# batch_dones[i] = 1
else:
batch_dones.append(0)
# batch_dones[i] = 0
batch_states = torch.tensor(np.array(batch_states), dtype=torch.float32, requires_grad=True)
batch_next_states = torch.tensor(np.array(batch_next_states), dtype=torch.float32, requires_grad=True)
batch_actions = torch.tensor(np.array(batch_actions))
batch_rewards = torch.tensor(np.array(batch_rewards), dtype=torch.float32, requires_grad=True)
batch_dones = torch.tensor(np.array(batch_dones))
# batch_states = torch.tensor(batch_states, dtype=torch.float32, requires_grad=True)
# batch_next_states = torch.tensor(batch_next_states, dtype=torch.float32, requires_grad=True)
# batch_actions = torch.tensor(batch_actions)
# batch_rewards = torch.tensor(batch_rewards, dtype=torch.float32, requires_grad=True)
# batch_dones = torch.tensor(batch_dones)
return batch_states, batch_actions, batch_rewards, batch_next_states, batch_dones
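# A minimal sketch of the grayscale frame-stack conversion used above, on dummy data. The
# (4, 64, 64, 3) observation shape and the random pixels are assumptions for illustration only.
def _demo_grayscale_stack():
    pov_stack = np.random.randint(0, 255, (4, 64, 64, 3), dtype=np.uint8)
    gray_stack = np.zeros(pov_stack.shape[:-1])
    for j in range(pov_stack.shape[0]):
        # cv2.cvtColor works on one HxWx3 frame at a time, hence the loop over the stack
        gray_stack[j] = cv2.cvtColor(pov_stack[j], cv2.COLOR_BGR2GRAY)
    assert gray_stack.shape == (4, 64, 64)
    return torch.tensor(gray_stack, dtype=torch.float32)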
|
anishhdiwan/DQfD_Minecraft
|
demo_sampling.py
|
demo_sampling.py
|
py
| 8,704 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71877607227
|
#add the pyautogui library
#pip install pyautogui
import pyautogui #dual monitors are not recognized
#print the current mouse coordinates
#pyautogui.position()
#move the mouse to the given coordinates
#pyautogui.moveTo(40, 154)
#add the image-matching library
#pip install opencv-python
#locate the on-screen region that matches the given image (result: x value, y value, width, height)
#pyautogui.locateOnScreen('')
#given coordinates and a capture size (x value, y value, width, height), take a screenshot of that region and save it under the given name
pyautogui.screenshot('1.png', region=(1584, 613, 30, 30))
#get the center of the on-screen region that matches the image at the given path (result: x value, y value)
num1 = pyautogui.locateCenterOnScreen('1.png')
num7 = pyautogui.locateCenterOnScreen('7.png')
#mouse click event (clicks the current mouse position if no value is given)
pyautogui.click(num1)
pyautogui.click(num7)
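#note (a minimal sketch, reusing the '1.png' saved above): depending on the installed
#pyautogui/pyscreeze version, a failed match either returns None or raises
#pyautogui.ImageNotFoundException, so guard the lookup before clicking
#try:
#    pos = pyautogui.locateCenterOnScreen('1.png')
#except pyautogui.ImageNotFoundException:
#    pos = None
#if pos is not None:
#    pyautogui.click(pos)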
|
BrokenMental/Python-Study
|
pyautogui.py
|
pyautogui.py
|
py
| 1,032 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
31678929018
|
# import and necessary libraries
import dask.distributed
import dask.utils
import numpy as np
import planetary_computer as pc
import xarray as xr
from IPython.display import display
from pystac_client import Client
import matplotlib.pyplot as plt
import folium
from odc.stac import configure_rio, stac_load
# Function to configure the data loading process
def configure_asset():
configuration = {
"sentinel-2-l2a": { # we specify the name of the data collection
"assets": { # call the asset dictionary under the data collection and load the sub-dictionaries
"*": {"data_type": "uint16", "nodata": 0},
"SCL": {"data_type": "uint8", "nodata": 0},
"visual": {"data_type": "uint8", "nodata": 0},
},
},
"*": {"warnings": "ignore"},# applies this to all assets within the data collection
}
return configuration
# Function to manage and coordinate distributed computation using dask
def client_info():
    client = dask.distributed.Client() # create a dask distributed client, which lets us manage and coordinate distributed computations.
configure_rio(cloud_defaults=True, client=client)
display(client) #display client
return client
# Function to pull image data collection
def get_data_collection(client, collection, date, tile_id):
data_catalog = client # client data source
query = data_catalog.search(
collections= [collection],# call the data collection, this time we want to call the sentinel 2 data collection
datetime= date, # cloudfree date
query={"s2:mgrs_tile": dict(eq= tile_id)}, # we select a specific tile from northern parts of Ghana, 'Janga'
)
    # list the datasets found; this time we only need one
    images = list(query.items())
    # print the number of datasets found
    print(f"Found: {len(images):d} datasets")
# we expect a single dataset since we selected a single day
return images
# Function to Lazy load entire bands in data collection
def load_dataset_with_resolution(images, configuration, resolution):
# specify the parameters
dataset = stac_load(
images, chunks={"x":2048, "y":2048},
stac_cfg=configuration, patch_url=pc.sign,
resolution=resolution,
)
# list the bands in the dataset
print(f"Bands: {','.join(list(dataset.data_vars))}")
#display the dataset
display(dataset)
return dataset
# Function to select specific bands
def select_bands(images, configuration, resolution):
dataset = stac_load(
images, bands=["red", "green", "blue", "nir", "SCL"],# select needed bands
chunks={"x":2048, "y":2048},
stac_cfg=configuration, patch_url=pc.sign,
resolution=resolution,
)
# List the selected bands
print(f"Bands: {','.join(list(dataset.data_vars))}")
# Display the dataset
display(dataset)
return dataset
# Function to convert data to float
def to_float(dataset):
dataset_float_1 = dataset.astype("float32")
nodata_1= dataset_float_1.attrs.pop("nodata", None)
if nodata_1 is None:
return dataset_float_1
return dataset_float_1.where(dataset != nodata_1)
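# A minimal usage sketch chaining the helpers above. The STAC endpoint, date, tile id and
# resolution below are illustrative assumptions, and running it needs network access to
# Microsoft Planetary Computer.
def example_workflow():
    catalog = Client.open("https://planetarycomputer.microsoft.com/api/stac/v1")
    configuration = configure_asset()
    client_info()  # start the dask client and configure rasterio for cloud access
    images = get_data_collection(catalog, "sentinel-2-l2a", "2021-01-05", "30NXM")
    dataset = select_bands(images, configuration, resolution=20)
    return to_float(dataset)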
|
Christobaltobbin/OpenDataCube
|
Scripts/odc_utils.py
|
odc_utils.py
|
py
| 3,287 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36818284421
|
import pytest
from database import Model, ModelAttribute
pytestmark = pytest.mark.asyncio
class A(Model):
a = ModelAttribute()
b = ModelAttribute()
c = ModelAttribute()
@pytest.mark.parametrize('count', (10, 15))
async def test_insert_find(db, count):
c_true_count = 0
for i in range(count):
is_three_mod = i % 3 == 0
await db.store(A(a=i, b=i*2, c=is_three_mod))
c_true_count += is_three_mod
assert (await db.find_one(A, b=2)).a == 1
async for item in db.find(A):
assert item.a * 2 == item.b
processed = 0
limit = count // 6
async for item in db.choose(A, {'c': True}, {'c': False}, limit_=limit):
assert item.c is False
assert item.a % 3 == 0
processed += 1
assert processed == min(limit, count)
assert await db.count(A) == count
assert await db.count(A, {'c': True}) == c_true_count - processed
assert await db.count(A, {'c': False}) == count - (c_true_count - processed)
|
AzaubaevViktor/vk_grabber
|
src/database/tests/test_no_uid.py
|
test_no_uid.py
|
py
| 1,000 |
python
|
en
|
code
| 1 |
github-code
|
6
|
4424193456
|
import sys
from utils.MyStats import *
if __name__=="__main__":
if len(sys.argv) != 2:
print("Error: number arguments")
exit()
classes_ = ["Ravenclaw", "Slytherin", "Gryffindor", "Hufflepuff"]
path = sys.argv[1]
classes_label = "Hogwarts House"
drop_columns = ["First Name", "Last Name", "Birthday", "Best Hand"] #Drop all non-numerical columns
scatter_plot(path, classes_, classes_label, drop_columns)
    #What are the two features that are similar?
    #Care of magic creatures and arithmancy
    #To reduce the computational cost and complexity of the algorithm we can eliminate one of the similar features
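    # A quick quantitative check of the "similar features" idea above, on synthetic stand-in
    # data (an assumption for illustration; the real dataset at `path` is not re-read here):
    import numpy as np
    feat_a = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
    feat_b = 2.0 * feat_a + 0.1  # a near-duplicate of feat_a up to scale and offset
    print(np.corrcoef(feat_a, feat_b)[0, 1])  # close to 1.0 -> one of the two features can be dropped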
|
artainmo/dslr
|
scatter_plot.py
|
scatter_plot.py
|
py
| 637 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35540502029
|
a, b = map(int, input().split())
if a > b:
    a, b = b, a
arr = [i for i in range(a+1, b)]
print(len(arr))
for i in arr:
print(i, end=" ")
|
BlueScreenMaker/Yeummy_Algorithm
|
BOJ/BOJ 10093 숫자.py
|
BOJ 10093 숫자.py
|
py
| 173 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21018004406
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Collection of functions for TLE propagation and estimation
===========================================================
"""
__author__ = "Mirco Calura"
__maintainer__ = "Matteo Aquilano"
__copyright__ = "Copyright 2020 by SES ENGINEERING S.A. All Rights Reserved"
__version__ = "00.00"
# Last Modified:
#
# 10-Nov-2020 MA 00.00 Adjusted for AIS 2020
import math
import numpy as _np
_GM_earth = 398600.436e9 # Earth gravitational constant [m^3/s^2]
RE = 6378.137e3 # Earth radius [m]
def eccentric_to_mean(d_eta, d_e):
d_mean = d_eta - d_e * math.sin(d_eta)
return d_mean
def eccentric_to_true(d_eta, d_e):
d_true = 2.*math.atan(math.sqrt((1. + d_e)/(1. - d_e))*math.tan(d_eta/2.))
return d_true
def mean_to_eccentric(d_mean, d_e):
d_toll = 1.e-10
d_Eta_0 = d_mean
Delta_Eta = (d_mean - d_Eta_0 + d_e*math.sin(d_Eta_0)) / \
(1. - d_e*math.cos(d_Eta_0))
Eta = d_Eta_0 + Delta_Eta
while(math.fabs(Delta_Eta) >= d_toll):
d_Eta_0 = Eta
Delta_Eta = (d_mean - d_Eta_0 + d_e*math.sin(d_Eta_0)) / \
(1. - d_e*math.cos(d_Eta_0))
Eta = d_Eta_0 + Delta_Eta
Eta = d_Eta_0 + Delta_Eta
return Eta
def true_to_eccentric(d_true, d_e):
Eta = 2.*math.atan(math.sqrt((1. - d_e)/(1. + d_e))*math.tan(d_true/2.))
return Eta
def true_to_mean(d_true, d_e):
Eta = true_to_eccentric(d_true, d_e)
return eccentric_to_mean(Eta, d_e)
def mean_to_true(d_mean, d_e):
Eta = mean_to_eccentric(d_mean, d_e)
return eccentric_to_true(Eta, d_e)
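# A minimal round-trip sketch for the anomaly conversions above; the eccentricity and mean
# anomaly values are arbitrary illustrative assumptions.
def _anomaly_round_trip_example():
    d_e, d_mean = 0.1, 1.0  # eccentricity [-], mean anomaly [rad]
    d_true = mean_to_true(d_mean, d_e)  # Kepler's equation solved by Newton iteration
    assert math.fabs(true_to_mean(d_true, d_e) - d_mean) < 1.e-9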
def cartesian_to_elements(S):
E = [0.]*7
C1 = S[1]*S[5]-S[2]*S[4]
C2 = S[2]*S[3]-S[0]*S[5]
C3 = S[0]*S[4]-S[1]*S[3]
CC12 = C1*C1+C2*C2
CC = CC12 + C3*C3
C = math.sqrt(CC)
V02 = math.pow(S[3], 2.)+math.pow(S[4], 2.)+math.pow(S[5], 2.)
R0V0 = S[0]*S[3]+S[1]*S[4]+S[2]*S[5]
R02 = math.pow(S[0], 2.)+math.pow(S[1], 2.)+math.pow(S[2], 2.)
R0 = math.sqrt(R02)
X = R0*V02/_GM_earth
CX = CC/_GM_earth
STE = R0V0*C/(R0*_GM_earth)
CTE = CX/R0-1.
E[0] = R0/(2.-X)
E[1] = math.sqrt(STE*STE+CTE*CTE)
E[2] = math.atan2(math.sqrt(CC12), C3)
if(CC12 > CC*(1.e-20)):
U = math.atan2(C*S[2], S[1]*C1-S[0]*C2)
E[3] = math.atan2(C1, -C2)
if(E[1] > 1.e-20):
E[5] = math.atan2(STE, CTE)
E[4] = U - E[5]
else:
E[5] = U
E[4] = 0.
else:
d_sign = 1.
if(C3 < 0.):
d_sign = -1.
U = math.atan2(S[1], S[0])*d_sign
E[3] = 0.
if(E[1] > 1.e-20):
E[5] = math.atan2(STE, CTE)
E[4] = U - E[5]
else:
E[5] = U
E[4] = 0.
if(E[3] < 0.):
E[3] += (2.*math.pi)
if(E[4] < 0.):
E[4] += (2.*math.pi)
if(E[5] < 0.):
E[5] += (2.*math.pi)
E[6] = S[6]
E[0] = math.sqrt(_GM_earth/math.pow(E[0], 3.))*86400./(2.*math.pi)
E[5] = true_to_mean(E[5], E[1])
return E
def elements_to_cartesian(E):
E[0] = math.pow(_GM_earth/math.pow(2.*math.pi/86400.*E[0], 2.), 1./3.)
E5_true = mean_to_true(E[5], E[1])
p = E[0]*(1.-math.pow(E[1], 2.))
if(p < (1.e-30)):
p = 1.e-30
F = math.sqrt(_GM_earth)/math.sqrt(p)
CV = math.cos(E5_true)
ECV = 1. + E[1]*CV
R = p/ECV
U = E[4] + E5_true
CU = math.cos(U)
SU = math.sin(U)
CO = math.cos(E[3])
SO = math.sin(E[3])
CI = math.cos(E[2])
SI = math.sin(E[2])
COCU = CO*CU
SOSU = SO*SU
SOCU = SO*CU
COSU = CO*SU
FX = COCU - SOSU*CI
FY = SOCU + COSU*CI
FZ = SU*SI
VR = F*E[1]*math.sin(E5_true)
VU = F*ECV
S = [0.]*7
S[0] = R*FX
S[1] = R*FY
S[2] = R*FZ
S[3] = VR*FX - VU*(COSU + SOCU*CI)
S[4] = VR*FY - VU*(SOSU - COCU*CI)
S[5] = VR*FZ + VU*CU*SI
S[6] = E[6]
return S
# Retrieve a raw estimation of orbit summary parameters
def get_orbit_summary(x):
elem = cartesian_to_elements(_np.append(x, [0.]))
d_a = math.pow(_GM_earth/math.pow(2.*math.pi/86400.*elem[0], 2.), 1./3.)
d_e = elem[1]
d_T = 2.*math.pi/math.sqrt(_GM_earth/math.pow(d_a, 3.))
# Raw estimate of the altitude of perigee (km)
d_per_alt = d_a*(1. - d_e) - RE
return d_per_alt, d_T
def get_line_code(s_line):
if(len(s_line) < 69):
return None
i_code = int(s_line[2:7])
return i_code
|
maxmartinezruts/satellite-collision-avoidance-GUI
|
DataProcess/sgp4/tle_lib.py
|
tle_lib.py
|
py
| 4,482 |
python
|
en
|
code
| 3 |
github-code
|
6
|
43372057736
|
from Node import Node
#if greater we go right, if lower we go left
class BST:
def __init__(self, head_node_value):
self.head_node = Node(head_node_value) #this is an instance of the Node
self.sorted_node_values = []
def add_node(self, value, node=None):
new_node = Node(value)
if not node: #if there is not node, we want to access the head node
node = self.head_node
if value > node.value:
#What do we want to do if there is nothing to the right
if not node.right:
node.right = new_node
else:
self.add_node(value, node.right)
elif value < node.value:
if not node.left:
node.left = new_node
else:
self.add_node(value, node.left)
def get_min(self, node=None):
if not node:
node = self.head_node
if node.left:
return self.get_min(node.left)
else:
return node
def get_max(self, node=None):
if not node:
node = self.head_node
if node.right:
return self.get_max(node.right)
else:
return node
def search_node(self, target_value, node=None):
if not node:
node = self.head_node
if target_value == node.value:
return True
elif target_value > node.value:
#return self.search_node(target_value, node.right) if node.right else False
if node.right:
return self.search_node(target_value, node.right)
else:
return False
else:
#return self.search_node(target_value, node.left) if node.left else False
if node.left:
return self.search_node(target_value, node.left)
else:
return False
def print_in_order(self, node=None):
if not node:
node = self.head_node
if node.left:
self.print_in_order(node.left)
print(node.value)
if node.right:
self.print_in_order(node.right)
def store_sorted_values(self, node=None):
if not node:
node = self.head_node
if node.left:
self.store_sorted_values(node.left)
self.sorted_node_values.append(node.value)
if node.right:
self.store_sorted_values(node.right)
if __name__ == '__main__':
bst = BST(100)
print(bst)
bst.add_node(105)
bst.add_node(130)
bst.add_node(115)
bst.add_node(75)
bst.add_node(50)
bst.add_node(60)
print(bst.head_node.right.right.left)
print(bst.head_node.left, 75)
print(bst.head_node.left.left, 50)
print(bst.head_node.left.left.right, 60)
bst.add_node(40)
print(bst.get_min())
print(bst.get_max())
print(bst.search_node(75), True)
print(bst.search_node(50), True)
print(bst.search_node(115), True)
print(bst.search_node(17), False)
print(bst.search_node(23), False)
print(bst.print_in_order())
|
simachami/Week4-Day3
|
BST.py
|
BST.py
|
py
| 3,125 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1957129894
|
# import the modules to access their methods
import random
import time
import emoji
# Defined this user defined function
def ran():
    inp = int(input("Guess the number between 1 and 10\n"))
    Guess = random.randint(1, 10)
    if inp > 0 and inp < 11:
        if inp == Guess:
            # grinning face
            print("Have you guessed the correct number?")
            time.sleep(3)
            print('Hurrah! You guessed the correct one \U0001f600')
        elif inp > Guess:
            print("Have you guessed the correct number?")
            time.sleep(3)
            print("Alas! You got it wrong, please try another time.")
            print("You have guessed too high! \U0001F61C")
        elif inp < Guess:
            print("Have you guessed the correct number?")
            time.sleep(3)
            print("Alas! You got it wrong, please try another time.")
            print("You have guessed too low! \U0001F61C")
    else:
        raise IOError("Please enter a number between 1 and 10")
return
def main():
# grinning face
print("\U0001f600")
print("Welcome \U0001f600 to Guess No Game ")
ran()
if __name__ == '__main__':
main()
|
RajeshKumar-1998/Guess-No-Game
|
Guess Num Game.py
|
Guess Num Game.py
|
py
| 1,258 |
python
|
en
|
code
| 0 |
github-code
|
6
|
44427355496
|
import os
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (assert_equal, assert_raises_rpc_error)
def read_dump(file_name, addrs, hd_master_addr_old):
"""
Read the given dump, count the addrs that match, count change and reserve.
Also check that the old hd_master is inactive
"""
with open(file_name, encoding='utf8') as inputfile:
found_addr = 0
found_addr_chg = 0
found_addr_rsv = 0
hd_master_addr_ret = None
for line in inputfile:
# only read non comment lines
if line[0] != "#" and len(line) > 10:
# split out some data
key_label, comment = line.split("#")
# key = key_label.split(" ")[0]
keytype = key_label.split(" ")[2]
if len(comment) > 1:
addr_keypath = comment.split(" addr=")[1]
addr = addr_keypath.split(" ")[0]
keypath = None
if keytype == "inactivehdmaster=1":
# ensure the old master is still available
assert(hd_master_addr_old == addr)
elif keytype == "hdmaster=1":
# ensure we have generated a new hd master key
assert(hd_master_addr_old != addr)
hd_master_addr_ret = addr
else:
keypath = addr_keypath.rstrip().split("hdkeypath=")[1]
# count key types
for addrObj in addrs:
if addrObj['address'] == addr and addrObj['hdkeypath'] == keypath and keytype == "label=":
found_addr += 1
break
elif keytype == "change=1":
found_addr_chg += 1
break
elif keytype == "reserve=1":
found_addr_rsv += 1
break
return found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_ret
class WalletDumpTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [["-keypool=90"]]
def setup_network(self, split=False):
# Use 1 minute timeout because the initial getnewaddress RPC can take
# longer than the default 30 seconds due to an expensive
# CWallet::TopUpKeyPool call, and the encryptwallet RPC made later in
# the test often takes even longer.
self.add_nodes(self.num_nodes, self.extra_args, timewait=60)
self.start_nodes()
def run_test(self):
unencrypted_dump = os.path.join(self.nodes[0].datadir, "wallet.unencrypted.dump")
encrypted_dump = os.path.join(self.nodes[0].datadir, "wallet.encrypted.dump")
# generate 20 addresses to compare against the dump
test_addr_count = 20
addrs = []
for i in range(0, test_addr_count):
addr = self.nodes[0].getnewaddress()
vaddr = self.nodes[0].validateaddress(
addr) # required to get hd keypath
addrs.append(vaddr)
# Should be a no-op:
self.nodes[0].keypoolrefill()
# dump unencrypted wallet
result = self.nodes[0].dumpwallet(
unencrypted_dump)
assert_equal(result['filename'], os.path.abspath(
unencrypted_dump))
found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_unenc = \
read_dump(unencrypted_dump, addrs, None)
# all keys must be in the dump
assert_equal(found_addr, test_addr_count)
        # 50 blocks were mined
assert_equal(found_addr_chg, 50)
# 90 keys plus 100% internal keys
assert_equal(found_addr_rsv, 90 * 2)
# encrypt wallet, restart, unlock and dump
self.nodes[0].node_encrypt_wallet('test')
self.start_node(0)
self.nodes[0].walletpassphrase('test', 10)
# Should be a no-op:
self.nodes[0].keypoolrefill()
self.nodes[0].dumpwallet(encrypted_dump)
found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_enc = \
read_dump(encrypted_dump,
addrs, hd_master_addr_unenc)
assert_equal(found_addr, test_addr_count)
# old reserve keys are marked as change now
assert_equal(found_addr_chg, 90 * 2 + 50)
assert_equal(found_addr_rsv, 90 * 2)
# Overwriting should fail
assert_raises_rpc_error(-8, "already exists",
self.nodes[0].dumpwallet, unencrypted_dump)
if __name__ == '__main__':
WalletDumpTest().main()
|
bitcoin-sv/bitcoin-sv
|
test/functional/wallet-dump.py
|
wallet-dump.py
|
py
| 4,774 |
python
|
en
|
code
| 597 |
github-code
|
6
|
6144518391
|
def number_stops(m,n,stops):
current = 0
nb =0
last = -1
while(current<=n):
last = current
while(current<=n) and (stops[current+1]-stops[last]<=m):
current += 1
if current == last:
return -1
elif(current<=n):
nb +=1
return nb
if __name__ =='__main__':
d = int(input())
m = int(input())
n = int(input())
stops = list(map(int,input().split()))
stops.insert(0,0)
stops.append(d)
print(number_stops(m,n,stops))
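    # Worked example (input values assumed for illustration):
    #   d = 950, m = 400, n = 4, stops entered as "200 375 550 750"
    #   after padding: stops = [0, 200, 375, 550, 750, 950]
    #   the greedy pass refuels at 375 and 750, so number_stops(400, 4, stops) == 2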
|
OualhaSlim/Algorithmic-Toolbox
|
greedy_algorithms/3_car_fueling.py
|
3_car_fueling.py
|
py
| 525 |
python
|
en
|
code
| 0 |
github-code
|
6
|
156567587
|
#-*- coding: utf-8 -*-
import numpy as np
from sklearn.cluster import AgglomerativeClustering as sk_AgglomerativeClustering
from sklearn.externals.joblib import Memory
from .clustering import Clustering
class AgglomerativeClustering(Clustering):
"""docstring for AgglomerativeClustering."""
def __init__(self, data, n_clusters = 2, affinity = 'euclidean',
memory = Memory(cachedir = None), connectivity = None,
compute_full_tree = 'auto', linkage = 'ward',
pooling_func = np.mean):
super(AgglomerativeClustering, self).__init__()
self.data = data
self.n_clusters = n_clusters
self.affinity = affinity
self.memory = memory
self.connectivity = connectivity
self.compute_full_tree = compute_full_tree
self.linkage = linkage
self.pooling_func = pooling_func
def execute(self):
"""Constroi o modelo de clusterizacao."""
self.model = sk_AgglomerativeClustering(n_clusters = self.n_clusters,
affinity = self.affinity,
memory = self.memory,
connectivity = self.connectivity,
compute_full_tree = self.compute_full_tree,
linkage = self.linkage,
pooling_func = self.pooling_func).fit(self.data)
self.clusters = super().make_clusters(self.data, self.model.labels_)
@property
def labels_(self):
"""Retorna os labels dos elementos do dataset."""
return self.model.labels_
@property
def clusters_(self):
"""Retorna um dicionaro onde os indices dos grupos sao as chaves."""
return self.clusters
@property
def model_(self):
"""Retorna o modelo de agrupamento."""
return self.model
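# A minimal usage sketch, assuming this module's package imports resolve; the toy array and
# n_clusters value are assumptions for illustration.
#
# import numpy as np
# data = np.array([[0.0, 0.0], [0.1, 0.2], [5.0, 5.1], [5.2, 4.9]])
# ac = AgglomerativeClustering(data, n_clusters=2)
# ac.execute()
# ac.labels_    # e.g. array([1, 1, 0, 0]) -- cluster numbering is arbitrary
# ac.clusters_  # dict whose keys are the cluster indices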
|
netoaraujjo/hal
|
clustering/agglomerative_clustering.py
|
agglomerative_clustering.py
|
py
| 1,946 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42284509571
|
class Stack:
stack = ""
    def __init__(self):
        self.stack = ""
def push(self, element):
self.stack += element
def pop(self):
element = None
if not self.is_empty():
element = self.stack[-1]
self.stack = self.stack[:-1]
return element
def __str__(self):
return self.stack
def is_empty(self):
if len(self.stack) <= 0:
return True
return False
stack_obj = Stack()
# print(stack_obj)
# stack_obj.push('a')
# stack_obj.push('b')
# stack_obj.pop()
# print(stack_obj)
# stack_obj.pop()
# stack_obj.pop()
# print(stack_obj)
closer_dct = {
"{": "}",
"(": ")",
"[": "]",
}
def check_if_popped_match(starting_b, ending_b):
    ''' check if the popped opening bracket matches the
    closing bracket that was just read'''
    if closer_dct[starting_b] == ending_b:
        return True
    return False
in_string = ""
"""
cases:
}(()){}
(()
))))))
"""
u = False
for element in in_string:
if element in ['{', '(', '[']:
stack_obj.push(element)
if element in ['}', ')', ']']:
if stack_obj.is_empty():
print('unbalanced')
u = True
break
popped_element = stack_obj.pop()
if not check_if_popped_match(popped_element, element):
print('unbalanced')
u = True
if not stack_obj.is_empty():
print('unbalanced')
u = True
if not u:
print('balanced')
"""
next_closer_str = ""
u = False
for i in range(len(in_string)):
print('--------\nitem:', in_string[i])
if i == 0 and in_string[i] not in closer_dct:
print('unbalanced')
u = True
break
if in_string[i] in closer_dct:
next_closer = closer_dct[in_string[i]]
next_closer_str += next_closer
# print('next_closer_str(opener): ', next_closer_str)
else:
# print('next_closer: ', next_closer)
# print('next_closer_str: ', next_closer_str)
if in_string[i] != next_closer:
print('unbalanced')
u = True
break
else:
if len(next_closer_str) == 0:
print('unbalanced')
u = True
break
next_closer_str = next_closer_str[:-1] if len(next_closer_str) > 1 else next_closer_str
print('next_closer_str: ', next_closer_str)
next_closer = next_closer_str[-1]
next_closer_str = "" if len(next_closer_str) == 1 else next_closer_str
# print('next_closer: ', next_closer)
if not u:
print('balanced')
"""
|
rijumone/compete_code
|
g4g/parantheses_checker.py
|
parantheses_checker.py
|
py
| 2,221 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29552040169
|
import numpy as np
import unittest
def canonize_labels(x, support=None):
'''
1. construct a dict
- if not support, construct using np.unique
- if support, check validation, then iterate to get the dict
2. use map to transform the data
'''
if type(x) == list:
x = np.array(x)
    if support is None:
u_vals = np.unique(x)
dic = dict((u_vals[i], i) for i in range(len(u_vals)))
else:
if support[0] <= np.min(x) and support[1] >= np.max(x):
dic = dict((i, i-support[0]) for i in range(support[0], support[1]+1))
else:
return None
return np.array([dic[i] for i in x])
class Test(unittest.TestCase):
def test_canonize_labels(self):
y = ['apple', 'orange', 'apple']
t = [0, 1, 0]
self.assertListEqual(canonize_labels(y).tolist(), t)
y = [20, 30, 27]
t = [0, 10, 7]
self.assertListEqual(canonize_labels(y, [20, 30]).tolist(), t)
|
rafaelxiao/infinity
|
tools/process_canonize_labels.py
|
process_canonize_labels.py
|
py
| 985 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12722977840
|
#!/usr/bin/python3
import numpy as np
import random
"""
With a decision tree we want to choose the feature to split on based on the greatest information gain (smallest entropy).
p_dot = P/(P+N)
Entropy: Imp(p_dot) = -p_dot*log_2(p_dot) - (1-p_dot)*log_2(1-p_dot)
Combine the entropy of the branches with weights to get the total entropy after the split.
Lowest entropy (and lower than before the split) = greatest information gain.
We want the feature with the greatest information gain.
"""
class decisionTree(object):
def __init__(self):
self.feature = None
self.entropy = None
self.ent1 = None
self.ent2 = None
self.left = None
self.right = None
self.parent = None
self.range = None
self.isLeaf = False
self.leafVal = None
def get_range(self):
return self.range
def setParent(self, feature, entropy, left, right, parent):
self.parent = decisionTree()
self.parent.feature = feature
self.parent.entropy = entropy
self.parent.left = left
self.parent.right = right
self.parent.parent = None
def setChildren(self, left, right):
self.left = left
self.right = right
def getIsLeaf(self):
return self.isLeaf
def getLeafVal(self):
return self.leafVal
def get_parents(tree):
parents = []
get_parent_helper(tree, parents)
return parents
def get_parent_helper(tree, parents):
if tree.parent is not None:
parents.append(tree.parent)
get_parent_helper(tree.parent, parents)
else:
return parents
def getEntropyBin(c, label):
onePos = oneNeg = zeroPos = zeroNeg = 0
size = len(label)
for i in range(c.size):
if(c[i] == 1):
if(label[i] == 1):
onePos += 1
else:
oneNeg += 1
else: # c[i]=0
if(label[i] == 1):
zeroPos += 1
else:
zeroNeg += 1
if (onePos+oneNeg) != 0:
onepdot = onePos / float(onePos + oneNeg)
else:
onepdot = 0
if (zeroPos+zeroNeg) != 0:
zeropdot = zeroPos / float(zeroPos + zeroNeg)
else:
zeropdot = 0
if onepdot == 0 or onepdot == 1:
oneEnt = 0
else:
oneEnt = -onepdot*np.log2(onepdot) - (1-onepdot)*np.log2(1-onepdot)
if zeropdot == 0 or zeropdot == 1:
zeroEnt = 0
else:
zeroEnt = -zeropdot*np.log2(zeropdot) - \
(1-zeropdot)*np.log2(1-zeropdot)
Entropy = oneEnt*(onePos+oneNeg)/size + zeroEnt*(zeroPos+zeroNeg)/size
return Entropy, 0.5, zeroEnt, oneEnt, zeroPos, zeroNeg, onePos, oneNeg
def getEntropyInt(c, label, idx):
d = np.sort(c, axis=0, kind="quicksort")
split = []
for splitVal in range(len(label)-1):
split.append((d[splitVal]+d[splitVal+1])/2)
size = len(label)
leaf1PosFinal = leaf1NegFinal = leaf2PosFinal = leaf2NegFinal = 0
smallestEntropy = ent1Final = ent2Final = 1
splitRange = None
for splitVal in split: # iterating through trying to find best split val
leaf1Pos = leaf1Neg = leaf2Pos = leaf2Neg = 0
# for each split value calculating all the positive and negative labeled values for leaf1 and leaf2
for i in range(len(label)):
# int(c[i]) #if i replace with c again so entropy of floats too high and overall score without using float features, score higher
if c[i] <= splitVal:
if(label[i] == 1): # label says true
leaf1Pos += 1
else:
leaf1Neg += 1
else: # for leaf 2 when greater than the split value
if label[i] == 1: # label says true
leaf2Pos += 1
else:
leaf2Neg += 1
if (leaf1Pos+leaf1Neg) != 0:
pdot1 = leaf1Pos/float(leaf1Pos+leaf1Neg)
else:
pdot1 = 0
if (leaf2Pos+leaf2Neg) != 0:
pdot2 = leaf2Pos/float(leaf2Pos+leaf2Neg)
else:
pdot2 = 0
if(pdot1 == 0 or pdot1 == 1):
ent1 = 0
else:
ent1 = -pdot1*np.log2(pdot1) - (1-pdot1)*np.log2(1-pdot1)
if pdot2 == 0 or pdot2 == 1:
ent2 = 0
else:
ent2 = -pdot2*np.log2(pdot2) - (1-pdot2)*np.log2(1-pdot2)
Entropy = (ent1 * ((leaf1Pos+leaf1Neg)/size)) + \
(ent2*((leaf2Pos+leaf2Neg)/size))
if Entropy < smallestEntropy:
smallestEntropy = Entropy
ent1Final,ent2Final = ent1,ent2
splitRange = splitVal
leaf1PosFinal,leaf1NegFinal,leaf2PosFinal,leaf2NegFinal = leaf1Pos,leaf1Neg,leaf2Pos,leaf2Neg
return smallestEntropy, splitRange, ent1Final, ent2Final, leaf1PosFinal, leaf1NegFinal, leaf2PosFinal, leaf2NegFinal
def transformMatrix(c, idx):
if(idx == 8):
c = c*2
if(idx == 9):
c = c*5
if(idx == 10):
c = c*3
if(idx == 11 or idx == 12):
c = c*13
if(idx == 13):
c = c*13
if(idx == 14):
c = c*70
if(idx == 15):
c = c*130
if(idx == 16):
c = c*3000
if(idx == 17):
c = c*3000
return c
def getEntropyFloat(c, label, idx):
'''
sorts c in increasing order and splits inbetween each for the split values to test
use gain ratio to remove bias for information gain to attributes with a large number of values
'''
d = np.sort(c, axis=0, kind="quicksort")
split = []
for splitVal in range(len(label)-1):
split.append((d[splitVal]+d[splitVal+1])/2)
size = len(label)
smallestEntropy = 1
ent1Final = ent2Final = splitRange = None
leaf1PosFinal = leaf1NegFinal = leaf2PosFinal = leaf2NegFinal = 0
for splitVal in split: # iterating through trying to find best split val
leaf1Pos = leaf1Neg = leaf2Pos = leaf2Neg = 0
# for each split value calculating all the positive and negative labeled values for leaf1 and leaf2
for i in range(len(label)):
if c[i] <= splitVal:
if label[i] == 1: # label says true
leaf1Pos += 1
else:
leaf1Neg += 1
else: # for leaf 2 when greater than the split value
if label[i] == 1: # label says true
leaf2Pos += 1
else:
leaf2Neg += 1
if (leaf1Pos+leaf1Neg) != 0:
pdot1 = leaf1Pos/float(leaf1Pos+leaf1Neg)
else:
pdot1 = 0
if (leaf2Pos+leaf2Neg) != 0:
pdot2 = leaf2Pos/float(leaf2Pos+leaf2Neg)
else:
pdot2 = 0
if pdot1 == 0 or pdot1 == 1:
ent1 = 0
else:
ent1 = -pdot1*np.log2(pdot1) - (1-pdot1)*np.log2(1-pdot1)
if pdot2 == 0 or pdot2 == 1:
ent2 = 0
else:
ent2 = -pdot2*np.log2(pdot2) - (1-pdot2)*np.log2(1-pdot2)
Entropy = (ent1 * ((leaf1Pos+leaf1Neg)/size)) + \
(ent2*((leaf2Pos+leaf2Neg)/size))
if Entropy < smallestEntropy:
smallestEntropy = Entropy
ent1Final,ent2Final = ent1,ent2
splitRange = splitVal
leaf1PosFinal,leaf1NegFinal,leaf2PosFinal,leaf2NegFinal = leaf1Pos,leaf1Neg,leaf2Pos,leaf2Neg
return smallestEntropy, splitRange, ent1Final, ent2Final, leaf1PosFinal, leaf1NegFinal, leaf2PosFinal, leaf2NegFinal
def getNewData(train_data, train_labels, bestFeature, rnge, section):
'''
to get smaller matrix
section is 0 or 1
if 0 means this is left tree so we are comparing <= range and removing greater than range
if section 1 means we are comparing greater range and removing <= range
'''
c = train_data.T[bestFeature] # column
# td = transformMatrix(c,bestFeature) #column of bestfeature all transformed just incase it is float
removeRows = []
if(section == 0):
for i in range(train_labels.size):
if(c[i] > rnge):
# because will remove these indexes to only get remaining ones on left
removeRows.append(i)
else: # section1
# so will go through every row of the training data
for i in range(train_labels.size):
if c[i] <= rnge:
removeRows.append(i)
train_data_new = np.delete(train_data, removeRows, 0)
train_labels_new = np.delete(train_labels, removeRows, 0)
return train_data_new, train_labels_new
def build(tree, train_data, train_labels, flg, forestfeatures):
'''
flg
-0 for being built off left attribute
-1 for being built off right attribute
'''
forestfeatures=set(forestfeatures)
'''
parents = get_parents(tree)
parentsList = []
for parent in parents:
parentsList.append(parent.feature)
'''
if flg == 2:
Ent1BeforeSplit = Ent2BeforeSplit = 1
else:
Ent1BeforeSplit = tree.parent.ent1
Ent2BeforeSplit = tree.parent.ent2
minEntropy = 1
bestFeature = -1
leaf1PosFinal = leaf1NegFinal = leaf2PosFinal = leaf2NegFinal = thernge = 0
earlyStop = 20 # 4
ent1Final = ent2Final = 1
for i in range(train_data[0].size): # length of a row
        # if(i not in parentsList): #save time because we wouldn't need a feature already used by a parent (maybe ignore this and allow more splits, but a lot worse runtime)
c = train_data.T[i] # a column
if (i <= 1 or i == 18) and i in forestfeatures:
entro, rnge, leaf1Entropy, leaf2Entropy, leaf1Pos, leaf1Neg, leaf2Pos, leaf2Neg = getEntropyBin(
c, train_labels)
elif (i >= 2 and i <= 7) and i in forestfeatures:
entro, rnge, leaf1Entropy, leaf2Entropy, leaf1Pos, leaf1Neg, leaf2Pos, leaf2Neg = getEntropyInt(
c, train_labels, i)
elif i in forestfeatures:
entro, rnge, leaf1Entropy, leaf2Entropy, leaf1Pos, leaf1Neg, leaf2Pos, leaf2Neg = getEntropyFloat(
c, train_labels, i)
else:
# not in forestfeature list so don't use (random forests implementation)
continue
if entro < minEntropy:
minEntropy = entro
thernge = rnge
bestFeature = i
leaf1PosFinal,leaf1NegFinal,leaf2PosFinal,leaf2NegFinal = leaf1Pos,leaf1Neg,leaf2Pos,leaf2Neg
ent1Final,ent2Final = leaf1Entropy,leaf2Entropy
# left branch so compare with left entropy before split
if flg == 0 and minEntropy > Ent1BeforeSplit:
tree.isLeaf = True
if(leaf1PosFinal+leaf2PosFinal > leaf1NegFinal + leaf2NegFinal):
tree.leafVal = 1
else:
tree.leafVal = 0
return
elif flg == 1 and minEntropy > Ent2BeforeSplit:
tree.isLeaf = True
if leaf1PosFinal+leaf2PosFinal > leaf1NegFinal + leaf2NegFinal:
tree.leafVal = 1
else:
tree.leafVal = 0
return
else:
tree.feature = bestFeature
tree.entropy = minEntropy
tree.ent1,tree.ent2 = ent1Final,ent2Final
tree.range = thernge
if leaf1PosFinal > leaf1NegFinal:
leaf1Prob = 1
else:
leaf1Prob = 0
if leaf2PosFinal > leaf2NegFinal:
leaf2Prob = 1
else:
leaf2Prob = 0
if minEntropy == 0: # both will be leaves
#print("both leaf1 and leaf2 leaves entrop 0 early stop")
tree.left = decisionTree()
tree.left.parent = tree
tree.left.isLeaf = True
tree.left.leafVal = leaf1Prob
tree.right = decisionTree()
tree.right.parent = tree
tree.right.isLeaf = True
tree.right.leafVal = leaf2Prob
else:
if leaf1PosFinal+leaf1NegFinal < earlyStop or ent1Final == 0:
if leaf2PosFinal+leaf2NegFinal < earlyStop or ent2Final == 0: # both leaves
#print("leaf1&2 early stop")
leafLeft = decisionTree()
leafLeft.isLeaf = True
leafLeft.leafVal = leaf1Prob
leafRight = decisionTree()
leafRight.isLeaf = True
leafRight.leafVal = leaf2Prob
tree.left = leafLeft
tree.right = leafRight
leafLeft.parent = tree
leafRight.parent = tree
else: # only left side leaf
#print("only leaf1 early stop")
leafLeft = decisionTree()
leafLeft.isLeaf = True
leafLeft.leafVal = leaf1Prob
tree.left = leafLeft
leafLeft.parent = tree
tree.right = decisionTree()
tree.right.parent = tree
trainData, trainLabels = getNewData(train_data, train_labels, bestFeature, tree.range, 1) # updates Matrix
build(tree.right, trainData, trainLabels, 1, forestfeatures)
else: # first part not leaf
if leaf2PosFinal+leaf2NegFinal < earlyStop or ent2Final == 0: # only right side leaf
#print("only leaf2 early stop")
leafRight = decisionTree()
leafRight.isLeaf = True
leafRight.leafVal = leaf2Prob
tree.right = leafRight
tree.left = decisionTree()
tree.left.parent = tree
trainData, trainLabels = getNewData(
train_data, train_labels, bestFeature, tree.range, 0) # updates Matrix
build(tree.left, trainData, trainLabels, 0, forestfeatures)
else: # both aren't leaves
#print("no early stop for either leaves")
tree.left = decisionTree()
tree.left.parent = tree
tree.right = decisionTree()
tree.right.parent = tree
trainDataOne, trainLabelsOne = getNewData(
train_data, train_labels, bestFeature, tree.range, 0) # updates Matrix
trainDataTwo, trainLabelsTwo = getNewData(
train_data, train_labels, bestFeature, tree.range, 1)
build(tree.left, trainDataOne,
trainLabelsOne, 0, forestfeatures)
build(tree.right, trainDataTwo,
trainLabelsTwo, 1, forestfeatures)
def solve(tree, test_data_row): # test_data row
if tree.getIsLeaf() == False:
transformed = test_data_row # temp seeing if don't need to transform
#transformed = transformMatrix(test_data_row,tree.feature)
if transformed[tree.feature] <= tree.get_range():
#print("transformed[tree.feature]",transformed[tree.feature],"original val",test_data_row[tree.feature],"divideval",tree.get_range())
return solve(tree.left, test_data_row)
else:
#print("when feature > range transformed[tree.feature]",transformed[tree.feature],"original val",test_data_row[tree.feature],tree.get_range())
return solve(tree.right, test_data_row)
else: # it is leaf so return val
return tree.leafVal
def print_tree(tree):
if tree.isLeaf:
print(tree.leafVal, "->leaf.")
else:
print(tree.feature, "->tree feature.")
print_tree(tree.left)
print_tree(tree.right)
def compareRandomForests(treeArr, trainData, trainLabels):
'''
get accuracy of each random forest and return tree with best accuracy
'''
accuracyFinal = 0
treeFinal = None
for tree in treeArr:
prediction = []
for row in trainData:
prediction.append(solve(tree, row))
accuracy = len([i for i in range(
len(prediction)) if prediction[i] == trainLabels[i]]) / float(len(prediction))
if accuracy > accuracyFinal:
accuracyFinal = accuracy
treeFinal = tree
#print("highest accuracy",accuracyFinal, "with",treeFinal)
return treeFinal
def run_train_test(training_data, training_labels, testing_data):
"""
Implement the training and testing procedure here. You are permitted
to use additional functions but DO NOT change this function definition.
Inputs:
training_data: List[List[float]]
training_label: List[int]
testing_data: List[List[float]]
Output:
testing_prediction: List[int]
Example:
return [1]*len(testing_data)
implement the decision tree and return the prediction
"""
trainData = np.array(training_data)
trainLabels = np.array(training_labels)
testData = np.array(testing_data)
#root = decisionTree()
forestFeatures = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
# build(root, trainData, trainLabels,2, forestFeatures) #original tree to test against
# treeArr=[root]
treeArr = []
#trainMatrix = np.insert(trainData,19,trainLabels,1)
for _ in range(8):
#trainSample = trainMatrix[np.random.choice(trainMatrix.shape[0], 100, replace=False)]
random.shuffle(forestFeatures) #For random forests, shuffling all features so it's built with 5 random features
anotherRoot = decisionTree()
build(anotherRoot, trainData, trainLabels, 2, forestFeatures[0:5])
treeArr.append(anotherRoot)
finalTree = compareRandomForests(treeArr, trainData, trainLabels)
# print_tree(root)
sol = []
for row in testData:
sol.append(solve(finalTree, row))
return sol
|
Mgla96/DiabeticRetinopathy
|
diabeticret.py
|
diabeticret.py
|
py
| 17,450 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39371296170
|
pcts = ("10", "12", "15") # Tuple
pcts_types = " or ".join(pcts)
print("Welcome to the tip calculator!")
bill = float(input("What was the total bill? $"))
tip = int(input(f"What percentage tip would you like to give? {pcts_types}? "))
people = int(input(f"How many people to split the bill? "))
amount = round(((bill * tip) / 100 + bill) / people, 2)
amount = "{:.2f}".format(amount)
print(f"Each person should pay: ${amount}")
|
carlohcs/100-days-of-code-python-pro-bootcamp-for-2022
|
days/03/tip_calculator.py
|
tip_calculator.py
|
py
| 430 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7769213718
|
import numpy as np
import torch
import random
from PIL import Image
#---------------------------------------------------------#
#   Convert the image to an RGB image to prevent grayscale images from raising errors at prediction time.
#   The code only supports prediction on RGB images, so all other image types are converted to RGB.
#---------------------------------------------------------#
def cvtColor(image):
if len(np.shape(image)) == 3 and np.shape(image)[2] == 3:
return image
else:
image = image.convert('RGB')
return image
#---------------------------------------------------#
#   Resize the input image
#---------------------------------------------------#
def resize_image(image, size, letterbox_image):
iw, ih = image.size
w, h = size
if letterbox_image:
scale = min(w/iw, h/ih)
nw = int(iw*scale)
nh = int(ih*scale)
image = image.resize((nw,nh), Image.BICUBIC)
new_image = Image.new('RGB', size, (128,128,128))
new_image.paste(image, ((w-nw)//2, (h-nh)//2))
else:
new_image = image.resize((w, h), Image.BICUBIC)
return new_image
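# Example of the letterbox branch above (the 1280x720 source size is an illustrative assumption):
# resizing a 1280x720 image to size=(640, 640) with letterbox_image=True scales it to 640x360,
# then pastes it centered on a gray (128, 128, 128) canvas with 140-pixel bars at the top and
# bottom, preserving the aspect ratio.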
def get_num_classes(annotation_path):
with open(annotation_path) as f:
dataset_path = f.readlines()
labels = []
for path in dataset_path:
path_split = path.split(";")
labels.append(int(path_split[0]))
num_classes = np.max(labels) + 1
return num_classes
#---------------------------------------------------#
#   Get the learning rate
#---------------------------------------------------#
def get_lr(optimizer):
for param_group in optimizer.param_groups:
return param_group['lr']
def preprocess_input(image):
image /= 255.0
return image
def show_config(**kwargs):
print('Configurations:')
print('-' * 70)
print('|%25s | %40s|' % ('keys', 'values'))
print('-' * 70)
for key, value in kwargs.items():
print('|%25s | %40s|' % (str(key), str(value)))
print('-' * 70)
def random_crop(image, crop_shape, padding=None):
oshape = np.shape(image)
if padding:
oshape = (oshape[2] + 2 * padding, oshape[3] + 2 * padding)
npad = ((0, 0), (0, 0), (padding, padding), (padding, padding))
image_pad = np.lib.pad(image, pad_width=npad, mode='constant', constant_values=0)
nh = random.randint(0, oshape[0] - crop_shape[0])
nw = random.randint(0, oshape[1] - crop_shape[1])
image_crop = image_pad[:, :, nh:nh + crop_shape[0], nw:nw + crop_shape[1]]
return image_crop
else:
print("WARNING!!! nothing to do!!!")
return image
def load_pretrained_model(net, resume_net):
print('Loading resume network...')
state_dict = torch.load(resume_net)
# create new OrderedDict that does not contain `module.`
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in state_dict.items():
head = k[:7]
if head == 'module.':
name = k[7:] # remove `module.`
else:
name = k
new_state_dict[name] = v
net.load_state_dict(new_state_dict)
def load_pretrained_model_Filter(net, state_dict):
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in state_dict.items():
head = k[:7]
if head == 'module.':
name = k[7:] # remove `module.`
else:
name = k
new_state_dict[name] = v
net.load_state_dict(new_state_dict)
|
yangshunzhi1994/SCD
|
object verification/utils/utils.py
|
utils.py
|
py
| 3,489 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40687305933
|
import argparse
import json
from typing import List
from google.protobuf import json_format
from load_tests.common import (
benchmark_grpc_request,
make_full_request_type,
make_output_file_path,
)
from magma.common.service_registry import ServiceRegistry
from orc8r.protos.common_pb2 import Void
from orc8r.protos.directoryd_pb2 import (
DeleteRecordRequest,
DirectoryRecord,
GetDirectoryFieldRequest,
UpdateRecordRequest,
)
from orc8r.protos.directoryd_pb2_grpc import GatewayDirectoryServiceStub
DIRECTORYD_SERVICE_NAME = 'directoryd'
DIRECTORYD_SERVICE_RPC_PATH = 'magma.orc8r.GatewayDirectoryService'
DIRECTORYD_PORT = '127.0.0.1:50067'
PROTO_PATH = 'orc8r/protos/directoryd.proto'
def _load_subs(num_subs: int) -> List[DirectoryRecord]:
"""Load directory records"""
client = GatewayDirectoryServiceStub(
ServiceRegistry.get_rpc_channel(
DIRECTORYD_SERVICE_NAME, ServiceRegistry.LOCAL,
),
)
sids = []
for i in range(num_subs):
mac_addr = (str(i) * 2 + ":") * 5 + (str(i) * 2)
ipv4_addr = str(i) * 3 + "." + str(i) * 3 + "." + str(i) * 3 + "." + str(i) * 3
fields = {"mac-addr": mac_addr, "ipv4_addr": ipv4_addr}
sid = UpdateRecordRequest(
fields=fields,
id=str(i).zfill(15),
location=str(i).zfill(15),
)
client.UpdateRecord(sid)
sids.append(sid)
return sids
def _cleanup_subs():
"""Clear directory records"""
client = GatewayDirectoryServiceStub(
ServiceRegistry.get_rpc_channel(
DIRECTORYD_SERVICE_NAME, ServiceRegistry.LOCAL,
),
)
for record in client.GetAllDirectoryRecords(Void()).records:
sid = DeleteRecordRequest(
id=record.id,
)
client.DeleteRecord(sid)
def _build_update_records_data(num_requests: int, input_file: str):
update_record_reqs = []
for i in range(num_requests):
id = str(i).zfill(15)
location = str(i).zfill(15)
request = UpdateRecordRequest(
id=id,
location=location,
)
request_dict = json_format.MessageToDict(request)
update_record_reqs.append(request_dict)
with open(input_file, 'w') as file:
json.dump(update_record_reqs, file, separators=(',', ':'))
def _build_delete_records_data(record_list: list, input_file: str):
delete_record_reqs = []
for index, record in enumerate(record_list):
request = DeleteRecordRequest(
id=record.id,
)
request_dict = json_format.MessageToDict(request)
delete_record_reqs.append(request_dict)
with open(input_file, 'w') as file:
json.dump(delete_record_reqs, file, separators=(',', ':'))
def _build_get_record_data(record_list: list, input_file: str):
get_record_reqs = []
for index, record in enumerate(record_list):
request = GetDirectoryFieldRequest(
id=record.id,
field_key="mac-addr",
)
request_dict = json_format.MessageToDict(request)
get_record_reqs.append(request_dict)
with open(input_file, 'w') as file:
json.dump(get_record_reqs, file, separators=(',', ':'))
def _build_get_all_record_data(record_list: list, input_file: str):
request = Void()
get_all_record_reqs = json_format.MessageToDict(request)
with open(input_file, 'w') as file:
json.dump(get_all_record_reqs, file, separators=(',', ':'))
def update_record_test(args):
input_file = 'update_record.json'
_build_update_records_data(args.num_of_requests, input_file)
request_type = 'UpdateRecord'
benchmark_grpc_request(
proto_path=PROTO_PATH,
full_request_type=make_full_request_type(
DIRECTORYD_SERVICE_RPC_PATH, request_type,
),
input_file=input_file,
output_file=make_output_file_path(request_type),
num_reqs=args.num_of_requests, address=DIRECTORYD_PORT,
import_path=args.import_path,
)
_cleanup_subs()
def delete_record_test(args):
input_file = 'delete_record.json'
record_list = _load_subs(args.num_of_requests)
_build_delete_records_data(record_list, input_file)
request_type = 'DeleteRecord'
benchmark_grpc_request(
proto_path=PROTO_PATH,
full_request_type=make_full_request_type(
DIRECTORYD_SERVICE_RPC_PATH, request_type,
),
input_file=input_file,
output_file=make_output_file_path(request_type),
num_reqs=args.num_of_requests, address=DIRECTORYD_PORT,
import_path=args.import_path,
)
_cleanup_subs()
def get_record_test(args):
input_file = 'get_record.json'
record_list = _load_subs(args.num_of_requests)
_build_get_record_data(record_list, input_file)
request_type = 'GetDirectoryField'
benchmark_grpc_request(
proto_path=PROTO_PATH,
full_request_type=make_full_request_type(
DIRECTORYD_SERVICE_RPC_PATH, request_type,
),
input_file=input_file,
output_file=make_output_file_path(request_type),
num_reqs=args.num_of_requests, address=DIRECTORYD_PORT,
import_path=args.import_path,
)
_cleanup_subs()
def get_all_records_test(args):
input_file = 'get_all_records.json'
record_list = _load_subs(args.num_of_requests)
_build_get_all_record_data(record_list, input_file)
request_type = 'GetAllDirectoryRecords'
benchmark_grpc_request(
proto_path=PROTO_PATH,
full_request_type=make_full_request_type(
DIRECTORYD_SERVICE_RPC_PATH, request_type,
),
input_file=input_file,
output_file=make_output_file_path(request_type),
num_reqs=2000, address=DIRECTORYD_PORT,
import_path=args.import_path,
)
_cleanup_subs()
def create_parser():
"""
Creates the argparse subparser for all args
"""
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
subparsers = parser.add_subparsers(title='subcommands', dest='cmd')
parser_update_record = subparsers.add_parser(
'update_record', help='Update record in directory',
)
parser_delete_record = subparsers.add_parser(
'delete_record', help='Delete record in directory',
)
parser_get_record = subparsers.add_parser(
'get_record', help='Get specific record in directory',
)
parser_get_all_records = subparsers.add_parser(
'get_all_records', help='Get all records in directory',
)
for subcmd in [
parser_update_record,
parser_delete_record,
parser_get_record,
parser_get_all_records,
]:
subcmd.add_argument(
'--num_of_requests', help='Number of total records in directory',
type=int, default=2000,
)
subcmd.add_argument(
'--import_path', default=None, help='Protobuf import path directory',
)
parser_update_record.set_defaults(func=update_record_test)
parser_delete_record.set_defaults(func=delete_record_test)
parser_get_record.set_defaults(func=get_record_test)
parser_get_all_records.set_defaults(func=get_all_records_test)
return parser
def main():
parser = create_parser()
# Parse the args
args = parser.parse_args()
if not args.cmd:
parser.print_usage()
exit(1)
# Execute the subcommand function
args.func(args)
if __name__ == "__main__":
main()
|
magma/magma
|
lte/gateway/python/load_tests/loadtest_directoryd.py
|
loadtest_directoryd.py
|
py
| 7,544 |
python
|
en
|
code
| 1,605 |
github-code
|
6
|
35317487695
|
from flask_socketio import send, emit, join_room
from flaskapp.blueprints.web.models.chat import ChatModel
def getChat(socketio):
# @socketio.on('connect')
# def connect():
# print('connect: ')
@socketio.on('joinPrivateGroup')
def joinPrivateGroup(data):
print('joinPrivateGroup', data)
cm = ChatModel()
prGr = cm.joinPrivateGroup(**data)
join_room(prGr['id'])
emit('joinPrivateGroup', prGr, json=True, room=prGr['id'])
@socketio.on('messagePrivate')
def messagePrivate(data):
cm = ChatModel()
msg = cm.messagePrivate(**data)
        if msg is not None:
emit('messagePrivate', msg, json=True, room=msg['chatId'])
@socketio.on_error()
def error_handler(e):
print('@socketio.on_error', e)
emit('error', {'error': str(e)}, json=True)
|
Igorok/flaskapp
|
flaskapp/blueprints/web/chat.py
|
chat.py
|
py
| 861 |
python
|
en
|
code
| 0 |
github-code
|
6
|