seq_id
stringlengths 7
11
| text
stringlengths 156
1.7M
| repo_name
stringlengths 7
125
| sub_path
stringlengths 4
132
| file_name
stringlengths 4
77
| file_ext
stringclasses 6
values | file_size_in_byte
int64 156
1.7M
| program_lang
stringclasses 1
value | lang
stringclasses 38
values | doc_type
stringclasses 1
value | stars
int64 0
24.2k
⌀ | dataset
stringclasses 1
value | pt
stringclasses 1
value |
---|---|---|---|---|---|---|---|---|---|---|---|---|
37080023999
|
def solution(n):
    """Read n's base-3 digits in reverse order and return that value in decimal.

    e.g. 45 -> base-3 "1200" -> reversed "0021" -> 7.
    """
    digits = []
    while n:
        n, remainder = divmod(n, 3)
        digits.append(str(remainder))
    # Remainders come out least-significant first, which IS the reversed
    # base-3 representation; interpret it directly as a base-3 numeral.
    reversed_base3 = ''.join(digits) or '0'
    return int(reversed_base3, 3)
|
JeonggonCho/algorithm
|
프로그래머스/lv1/68935. 3진법 뒤집기/3진법 뒤집기.py
|
3진법 뒤집기.py
|
py
| 222 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38776144324
|
import os
import webbrowser
from shutil import copyfile
import random
import cv2
import pickle
from moviepy.editor import *
from flask import Flask, render_template, redirect, url_for, request
from flaskwebgui import FlaskUI
# Directory holding the pickled app state (favourites, recent folders, resume times).
pickle_base = "C:\\Users\\AI\\AIVideo_Player\\data\\"
# Flask static images directory where thumbnails and preview gifs are written.
image_directory = 'C:\\Users\\AI\\Code\\VideoPlayer\\engine\\static\\images'
# How many recently-played folders the home page keeps around.
n_recent_files = 3
current_directory = ''
# Basenames of files copied into static/ that the 'exit' cleanup must keep.
allowed_images = []
# Extensions treated as playable media when browsing folders.
video_file_types = ['flv', 'mp4', 'avi', 'webm', 'mov', 'mpeg', 'wmv', 'mp3', 'MP4', 'mkv', 'MKV', 'AVI', 'MPEG', 'WEBM']
def pick(picklefile):
    """Unpickle *picklefile* from the data directory; return {} if it is absent."""
    full_path = pickle_base + picklefile
    if not os.path.isfile(full_path):
        return {}
    with open(full_path, 'rb') as handle:
        return pickle.load(handle)
def cache(item, picklefile):
    """Pickle *item* into the data directory under *picklefile* (overwrites)."""
    target = pickle_base + picklefile
    with open(target, 'wb') as sink:
        pickle.dump(item, sink)
# ff = FFmpeg(executable='C:\\ffmpeg\\bin\\ffmpeg.exe', inputs={folder+folders[folder]['last_file']: None}, outputs={"C:\\Users\\AI\\AIVideo_Player\\data\\recntly_played\\thumbnail"+str(count)+".png": ['-vf', 'fps=1']})
# ff.run()
app = Flask(__name__)
# Disable static-file caching so regenerated thumbnails/gifs show up immediately.
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
# do your logic as usual in Flask
@app.route("/")
def index():
    """Home page: refresh preview media for favourites and recent folders,
    then render the landing grid.

    Side effects: (re)generates .gif/.png files under the static images
    directory and rewrites favourites.pickle with 'changed' flags cleared.
    """
    favourites = pick('favourites.pickle')
    folders = pick('cache.pickle')
    backup_gif = ''
    for file in favourites:
        gif_filename = 'C:\\Users\\AI\\Code\\VideoPlayer\\engine\\static\\images\\' + os.path.basename(file) + '.gif'
        # Rebuild the preview gif when the favourite changed or the gif is missing.
        if favourites[file]['changed'] or not os.path.isfile(gif_filename):
            try:
                # 7.5 s clip starting 3.5 s before the favourited timestamp.
                seconds = favourites[file]['time'] - 3.5
                clip = (VideoFileClip(file).subclip(seconds, seconds+7.5))
                clip.write_gif(gif_filename)
            except OSError:
                # Source video missing/unreadable: keep going, preview stays stale.
                pass
            favourites[file]['changed'] = False
    cache(favourites, 'favourites.pickle')
    for folder in folders:
        filename = 'C:\\Users\\AI\\Code\\VideoPlayer\\engine\\static\\images\\' + folders[folder]['filename'] + '.png'
        backup_gif = folders[folder]['filename'] +'.gif'
        gif_filename = 'C:\\Users\\AI\\Code\\VideoPlayer\\engine\\static\\images\\' + backup_gif
        if not os.path.isfile(filename):
            # Grab frame #100 as the still thumbnail for this folder's video.
            cap = cv2.VideoCapture(folders[folder]['full_path'])
            cap.set(1, 100)
            res, frame = cap.read()
            cv2.imwrite(filename, frame)
            try:
                # Animated preview between (min, sec) marks 1:07.7 and 1:14.12.
                clip = (VideoFileClip(folders[folder]['full_path']).subclip((1, 7.7), (1, 14.12)))
                clip.write_gif(gif_filename)
            except OSError:
                pass
    print(favourites)
    if favourites != {}:
        # Feature a random favourite's gif on the landing page.
        favourite_gif = os.path.basename(random.choice(list(favourites)))+'.gif'
    else:
        # No favourites yet: fall back to the last recent-folder gif seen above.
        favourite_gif = backup_gif
    path = "index.html"
    print(favourite_gif)
    return render_template(path, folders=folders, favourite_gif=favourite_gif)
@app.route('/viewer', defaults={'_file_path': 'sample'})
@app.route('/viewer/<_file_path>')
def viewer(_file_path):
    """Play one video: record it in the recently-played caches, copy it into
    static/ so Flask can serve it, and render the player page.

    _file_path encodes Windows backslashes as '>' so the path survives the URL.
    """
    folders = pick('cache.pickle')
    time_dict = pick('time_dict.pickle')
    file_path = _file_path.replace('>', '\\')
    dirname, filename = os.path.dirname(file_path), os.path.basename(file_path)
    folders[dirname] = {
        'full_path': str(file_path),
        'filename': str(filename)
    }
    # Resume position: default to 0.0 the first time this file is played.
    try:
        last_time = time_dict[file_path]
    except KeyError:
        last_time = 0.0
        time_dict[file_path] = 0.0
    folders[dirname]['last_time'] = last_time
    # folder_stack = pick('folder_stack.pickle')
    folder_stack = list(folders)
    folder_stack.append(dirname)
    # Evict oldest entries until only n_recent_files (+ the current one) remain.
    while len(folder_stack)>n_recent_files+1:
        try:
            del folders[folder_stack[0]]
            folder_stack.remove(folder_stack[0])
        except KeyError:
            # Entry already gone from the cache; still drop it from the stack.
            folder_stack.remove(folder_stack[0])
    cache(folders, 'cache.pickle')
    cache(time_dict, 'time_dict.pickle')
    cache(folder_stack, 'folder_stack.pickle')
    # Flask only serves from static/: copy the media file next to the app.
    view_locaiton = os.getcwd()+url_for('static', filename='images/'+filename)
    allowed_images.append(os.path.basename(view_locaiton))
    try:
        copyfile(file_path, view_locaiton)
    except FileNotFoundError:
        pass
    path = "viewer.html"
    filename = os.path.basename(view_locaiton)
    # Keep at most 4 copied files alive; oldest are forgotten first.
    while len(allowed_images)>4:
        allowed_images.remove(allowed_images[0])
    print(filename)
    return render_template(path, file_name=url_for('static', filename='images/'+filename), full_file_path=_file_path, last_time=last_time, _filename=filename.replace('%20', ' '))
@app.route("/folders", defaults={'_path': '?'})
@app.route("/folders/<_path>")
def folders(_path):
    """File browser: list a directory, or redirect media files to the viewer.

    Path tokens ('>' stands in for '\\' in the URL):
      '?'   - resume from the most recently browsed folder (fall back to C:\\)
      '<<<' - jump back to the drive root
      '<<'  - suffix meaning "go up one directory"
    """
    folder_stack = pick('folder_stack.pickle')
    path = _path.replace('>', '\\')
    if any(path.endswith(ext) for ext in video_file_types):
        # A media file was picked: hand it off to the viewer route.
        return redirect("http://127.0.0.1:5000/viewer/" + path.replace('\\', '>'))
    elif path == '?':
        try:
            path = folder_stack[-1]
        except (KeyError, IndexError):
            # Fix: folder_stack is {} on a fresh install (pick() default) which
            # raises KeyError, but an emptied *list* raises IndexError -- the
            # original caught only KeyError and crashed in that case.
            path = 'C:\\'
    elif path == '<<<':
        # Fix: this reset-to-root branch must be tested BEFORE the '<<' suffix
        # check below, because '<<<' also ends with '<<' and was unreachable
        # in the original ordering.
        path = 'C:\\'
    elif path.endswith('<<'):
        path = os.path.dirname(path)
    f = lambda s: path + "\\" + s
    try:
        folders_full_path = list(map(f, os.listdir(path)))
        folders_list = os.listdir(path)
    except NotADirectoryError:
        return "AIVIDEO_PLAYER does not support this file type"
    return render_template('folders.html', folders_full_path=folders_full_path, folders_list=folders_list, directory=path)
@app.route("/changeVideo", defaults={'param': ' '})
@app.route("/changeVideo/", methods=['POST', 'GET'])
def changeVideo():
    """Persist the playback state the player reports, then dispatch a command
    (next/previous/backspace/exit) sent via query-string arguments.

    NOTE(review): the first route registers a 'param' default but the view
    takes no argument -- hitting /changeVideo without the trailing slash
    would raise a TypeError; confirm which route the frontend actually uses.
    """
    last_video = request.args.get('last_video')
    # Undo the URL encoding used by the frontend: '>' -> '\\', '<' -> ' '.
    last_video = last_video.replace('>', '\\')
    last_video = last_video.replace('<', ' ')
    last_time = request.args.get('last_time')
    favourite = request.args.get('favourite')
    favourite_time = request.args.get('favouriteTime')
    command = request.args.get('command')
    folders = pick('cache.pickle')
    time_dict = pick('time_dict.pickle')
    favourites = pick('favourites.pickle')
    directory = os.path.dirname(last_video)
    filename = os.path.basename(last_video)
    if favourite == 'true':
        # Remember the moment being watched; 'changed' makes index() rebuild the gif.
        print('adding to favourite')
        favourites[last_video] = {'time':float(favourite_time),'changed':True}
        cache(favourites, 'favourites.pickle')
    folders[directory] = {
        'full_path': str(last_video),
        'filename': str(filename),
        'last_time': float(last_time)
    }
    time_dict[last_video] = last_time
    cache(time_dict, 'time_dict.pickle')
    cache(folders, 'cache.pickle')
    # Only media files count when stepping to the next/previous sibling.
    _dir_list = os.listdir(directory)
    dir_list = [_ for _ in _dir_list if any(_.endswith(__) for __ in video_file_types)]
    if command == 'next':
        next_file = directory + "\\" + dir_list[dir_list.index(filename) + 1]
        return redirect("http://127.0.0.1:5000/viewer/" + next_file.replace('\\', '>'))
    elif command == 'previous':
        previous_file = directory + "\\" + dir_list[dir_list.index(filename) - 1]
        return redirect("http://127.0.0.1:5000/viewer/" + previous_file.replace('\\', '>'))
    elif command == 'backspace':
        return redirect('http://127.0.0.1:5000/')
    elif command == 'exit':
        # Garbage-collect static images that no favourite, cached folder or
        # recently viewed file still references, then stop the process.
        for file in os.listdir(image_directory):
            if file not in [os.path.basename(_)+'.gif' for _ in favourites] and not file.startswith('icons8') and file not in [folders[__]['filename'] for __ in folders] and file not in [folders[__]['filename']+'.gif' for __ in folders] and file not in [folders[__]['filename']+'.png' for __ in folders] and file not in allowed_images:
                os.remove(image_directory+'\\'+file)
        exit()
    return ''
# call the 'run' method
app.run()  # blocks until the dev server exits; 'done' prints only after shutdown
print('done')
|
olusegvn/VideoPlayer
|
engine/AIVideoPlayerBackend.py
|
AIVideoPlayerBackend.py
|
py
| 7,987 |
python
|
en
|
code
| 0 |
github-code
|
6
|
44042814684
|
import pandas as pd
import re
import graphlab as gl
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import NMF
from nltk.stem.wordnet import WordNetLemmatizer
from helper import *
class Registries(object):
    """Pipeline around a scraped baby-registry CSV: cleaning, registry/item
    frames, NMF-derived item categories and GraphLab SFrame export.

    NOTE(review): preprocess_registries_data calls str.decode and the module
    depends on graphlab, so this code targets Python 2; that logic is kept
    as-is. Only clear call-site bugs are fixed below.
    """
    def __init__(self, filepath):
        self.filepath = filepath  # path to the raw scraped CSV
        self.data = None          # populated by preprocess/load below

    def preprocess_registries_data(self):
        """Read the raw CSV and derive product columns; return the cleaned frame."""
        self.data = pd.read_csv(self.filepath)
        # 'product_details'/'product_att' arrive as stringified lists -> re-split.
        self.data['product_details'] = [x.strip('[]').split(',') for x in self.data['product_details']]
        self.data['product_att'] = [x.strip('[]').split(',') for x in self.data['product_att']]
        self.data['product_name'] = [p[0].strip('u\'').decode('unicode_escape').encode('ascii','ignore') for p in self.data.product_details]
        self.data['product_url'] = [x[-1].strip(' u\'') for x in self.data.product_details]
        # Product id is the digit run before '?' in the URL; rows without one are dropped.
        self.data['product_id'] = [int(re.search(r'/(\d+)\?',x).group(1)) if x!='' else '' for x in self.data.product_url]
        self.data = self.data[self.data.product_id != '']  # convert to integer for graphlab models
        self.data['color'] = [x[0].strip(' u\'') for x in self.data.product_att]
        self.data['color_scheme'] = ['NEUTRAL' if type(x) is float else 'BLUE' if 'BLUE' in x.split() else 'PINK' if 'PINK' in x.split() else 'NEUTRAL' for x in self.data.color]
        self.data['size_others'] = [x[1].strip(' u\'') if type(x) is str else '' for x in self.data.product_att]
        self.data = self.data.drop(['product_details', 'product_att'], axis=1)
        return self.data

    def load_registry_data(self, data):
        """Inject an already-cleaned dataframe instead of re-reading the CSV."""
        self.data = data

    def create_registry_df(self):
        """Return the (id, product_id, requested=1) interaction frame."""
        self.registries = self.data[['id', 'product_id']]
        self.registries['requested'] = 1
        return self.registries

    def create_items_df(self):
        """Return the item-side frame with an NMF-derived 'category' column."""
        self.items = self.data[['product_id', 'product_name','color', 'color_scheme', 'size_others','price']]
        self.get_item_category_with_NMF()
        return self.items

    def tfidf_item_desc(self):
        """TF-IDF the concatenated name + size description of every item."""
        self.items['desc'] = [x+' '+y for x, y in zip(self.items.product_name, self.items.size_others)]
        corpus = self.items['desc'].values
        wordnet = WordNetLemmatizer()
        docs_wordnet = [[wordnet.lemmatize(word) for word in re.split('\W+', words)] for words in corpus]
        stop_words = ['baby', 'child', 'infant', 'newborn', 'in', 'with', 'of', '+', '&', 'and', 'by']
        self.items.vectorizer = TfidfVectorizer(stop_words=stop_words)
        self.items.doc_term_mat = self.items.vectorizer.fit_transform(corpus)
        return self.items.doc_term_mat

    def get_item_category_with_NMF(self, num_category=4):
        """Assign each item its strongest NMF topic as 'category'."""
        self.items.doc_term_mat = self.tfidf_item_desc()
        nmf = NMF(n_components=num_category)
        W_sklearn = nmf.fit_transform(self.items.doc_term_mat)
        H_sklearn = nmf.components_
        items_cat_ind = np.argsort(W_sklearn, axis=1)
        self.items['category'] = items_cat_ind[:,-1]  # strongest topic per item
        return self.items

    def get_item_pairwise_dist(self, metric='cosine'):
        """Pairwise item distances over the dense TF-IDF matrix.

        Fix: 'metric' must be passed by keyword -- the second positional
        argument of sklearn.metrics.pairwise_distances is the Y matrix, so
        the original call handed it the string 'cosine'.
        """
        tfidf_arr = self.items.doc_term_mat.toarray()
        dist_mat = pairwise_distances(tfidf_arr, metric=metric)
        return dist_mat

    @staticmethod
    def dummify(df, column_name, drop_first=False):
        """One-hot encode *column_name* and drop the source column.

        Fixes: it was declared like a method but written (and called) as a
        plain function, so it is now an explicit staticmethod, and the
        drop_first argument is forwarded instead of being hard-coded False.
        """
        dummies = pd.get_dummies(df[column_name], prefix=column_name, drop_first=drop_first)
        df = df.drop(column_name, axis=1)
        return pd.concat([df, dummies], axis=1)

    def to_SFrame(self, categorical_cols):
        '''
        categorical_cols: list of column names for categorical variables
        '''
        items_gl = self.items.dropna()
        reg_gl = self.registries.dropna()
        for col in categorical_cols:
            items_gl = self.dummify(items_gl, col)  # fix: was an unresolved bare name
        items_gl = gl.SFrame(items_gl)
        reg_gl = gl.SFrame(reg_gl)
        return reg_gl, items_gl

    def train_test_split(self, test_proportion=0.2, categorical_cols=()):
        """Random per-user split of the interaction SFrame.

        Fixes: to_SFrame was referenced without being called (and without its
        required argument -- categorical_cols is now forwarded, with a
        backward-compatible empty default), and test_proportion was ignored
        in favour of a hard-coded 0.2.
        """
        reg_gl, _ = self.to_SFrame(categorical_cols)
        train, test = gl.recommender.util.random_split_by_user(dataset=reg_gl,
                                                               user_id='id',
                                                               item_id='product_id',
                                                               max_num_users=1000,
                                                               item_test_proportion=test_proportion,
                                                               random_seed=100)
        return train, test
|
vynguyent/Expecting-the-unexpected
|
Model/registries.py
|
registries.py
|
py
| 4,677 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40254170266
|
# Configuration file for jupyterHub...there's probably a better way of doing this
# Define the custom authentication class JupyterHub attempts to use
c.JupyterHub.authenticator_class = 'oauthenticator.LocalODROAuthenticator'
# Define the ODR server location
odr_base_url = '[[ ENTER ODR SERVER BASEURL HERE ]]'
c.ODROAuthenticator.token_url = odr_base_url + '/oauth/v2/token'
c.ODROAuthenticator.userdata_url = odr_base_url + '/api/v1/userdata.json'
# Field in the userdata response used as the JupyterHub username.
c.ODROAuthenticator.username_key = 'jupyterhub_username'
# Define the JupyterHub server location
jupyterhub_base_url = '[[ ENTER JUPYTERHUB SERVER BASEURL HERE ]]'
c.ODROAuthenticator.oauth_callback_url = jupyterhub_base_url + '/hub/oauth_callback'
# Define parameters needed for the OAuth process
c.ODROAuthenticator.client_id = '[[ ENTER OAUTH CLIENT_ID HERE ]]'
c.ODROAuthenticator.client_secret = '[[ ENTER OAUTH CLIENT_SECRET HERE ]]'
# Instruct JupyterHub to create system users based on the OAuth server
c.LocalAuthenticator.create_system_users = True
# Needed to secure a route to the OAuth token manager...can use "openssl rand -hex 32". Shouldn't match other keys.
c.ODROAuthenticator.manager_token = '[[ ENTER SOME SECRET KEY HERE ]]'
c.ODROAuthenticator.manager_port = '8094'
# API tokens to allow JupyterHub services to communicate with JupyterHub's API...can use "openssl rand -hex 32".
c.JupyterHub.service_tokens = {
    '[[ ENTER SOME OTHER SECRET KEY HERE ]]': 'odr_oauth_manager',
    '[[ ENTER YET ANOTHER SECRET KEY HERE ]]': 'odr_external',
    '[[ ENTER SECRET KEY #3 HERE ]]': 'odr_bridge',
}
# Needed to secure a route between ODR and jupyterhub
odr_bridge_token = '[[ ENTER SECRET KEY #4 HERE ]]'
odr_bridge_port = '9642'
# JupyterHub service definition
# Each entry is a managed/external service; 'command' entries are spawned by
# the Hub itself and receive their config via environment variables.
c.JupyterHub.services = [
    {
        'name': 'odr_oauth_manager',
        'admin': False,
        'command': ['python', 'odr_oauth_manager.py'],
        'url': 'http://127.0.0.1:' + c.ODROAuthenticator.manager_port,
        'environment': {
            'port_number': c.ODROAuthenticator.manager_port,
            'oauth_client_id': c.ODROAuthenticator.client_id,
            'oauth_client_secret': c.ODROAuthenticator.client_secret,
            'oauth_token_url': c.ODROAuthenticator.token_url,
            'oauth_manager_token': c.ODROAuthenticator.manager_token,
        },
    },
    {
        'name': 'odr_external',
        'admin': True,  # Needs access to jupyterhub api
    },
    {
        'name': 'odr_bridge',
        'admin': False,
        'command': ['python', 'odr_bridge.py'],
        'url': 'http://127.0.0.1:' + odr_bridge_port,
        'environment': {
            'bridge_token': odr_bridge_token,
            'port_number': odr_bridge_port,
        },
    }
]
|
OpenDataRepository/data-publisher
|
external/jupyterhub/jupyterhub_config.py
|
jupyterhub_config.py
|
py
| 2,732 |
python
|
en
|
code
| 14 |
github-code
|
6
|
36079540438
|
import atexit
import json
import logging
import os
# needs install
import websocket
from log.timeutil import *
# Root logger: emit every DEBUG-and-above record straight to stderr.
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)
import log.encoder
try:
import thread
except ImportError:
import _thread as thread
class BitWs:
    '''logging utility using bitmex realtime(websockets) API'''
    def __init__(self, log_file_dir=os.sep + "tmp", flag_file_name = os.sep + "tmp" + os.sep + "BITWS-FLG", id = None, fix_file=None):
        # Last order-book 'action' seen; used to decide whether to merge messages.
        self.last_action = None
        self.log_file_root_name = None  # final name the active log is renamed to
        self.log_file_name = None       # "<root>.current" file being appended to
        self.ws = None                  # WebSocketApp, created in start()
        self.log_file_dir = log_file_dir
        self.last_time = 0              # unix second of the batch being accumulated
        self.compress = True            # write log.encoder-compressed lines, not raw JSON
        # How many foreign-flag sightings to tolerate before shutting down
        # (lets a newer logger process take over; see check_terminate_flag).
        self.terminate_count = 200
        self.terminated_by_peer = False
        self.fix_file = fix_file        # if set, append to this one file and never rotate
        if id:
            self.pid = id
        else:
            self.pid = str(os.getpid())
        self.reset()
        self.flag_file_name = flag_file_name
        if not self.fix_file:
            self.rotate_file()
    def __del__(self):
        # Best-effort cleanup: finalize the current log and drop our flag file.
        # self.dump_message()
        self.rotate_file()
        self.remove_terminate_flag()
    def reset(self):
        """Drop the in-progress message batch and restart its timestamp."""
        self.last_message = None
        self.reset_timestamp()
    def reset_timestamp(self):
        # timestamp() comes from log.timeutil (star import) -- presumably unix
        # seconds; TODO confirm.
        self.last_time = int(timestamp())
    def get_flag_file_name(self):
        return self.flag_file_name
    def create_terminate_flag(self):
        """Write our pid into the shared flag file (atomically via rename).

        A running logger that sees someone else's pid in this file will
        eventually terminate, so the newest process wins.
        """
        self.remove_terminate_flag()
        file_name = self.get_flag_file_name()
        with open(file_name + "tmp", "w") as file:
            file.write(self.get_process_id())
            file.close()
        os.rename(file_name + "tmp", file_name)
    def check_terminate_flag(self):
        """Return True once a foreign pid has been seen terminate_count times."""
        file_name = self.get_flag_file_name()
        if os.path.isfile(file_name):
            with open(file_name, "r") as file:
                id = file.readline()
                if id != self.get_process_id():
                    self.terminate_count = self.terminate_count - 1
                    if self.terminate_count == 0:
                        return True
        return False
    def get_process_id(self):
        return self.pid
    def remove_terminate_flag(self):
        file_name = self.get_flag_file_name()
        if os.path.isfile(file_name):
            os.remove(file_name)
    def rotate_file(self):
        """Finalize the current '.current' log (rename it) and open a new name."""
        if self.log_file_name:
            if os.path.isfile(self.log_file_name):
                os.rename(self.log_file_name, self.log_file_root_name)
        # ':' and '+' are not safe in filenames on all platforms.
        timestring = time_stamp_string().replace(":", "-").replace('+', '-')
        self.log_file_root_name = self.log_file_dir + os.sep + 'BITLOG' + self.get_process_id() + '-' + timestring + ".log"
        self.log_file_name = self.log_file_root_name + ".current"
    def dump_message(self):
        """Flush the accumulated batch (if any) to disk and reset state."""
        if self.last_message is None:
            return
        self.dump_message_line(self.last_message)
        self.reset()
    def dump_message_line(self, message):
        """Append one message as a (possibly compressed) JSON line, stamped with TIME."""
        message['TIME'] = self.last_time
        if self.fix_file:
            file_name = self.fix_file
        else:
            file_name = self.log_file_name
        with open(file_name, "a") as file:
            json_string = json.dumps(message, separators=(',', ':'))
            if self.compress:
                file.write(log.encoder.encode(json_string))
            else:
                file.write(json_string)
            file.write('\n')
    def remove_symbol(self, message):
        # All subscriptions are XBTUSD-only, so the symbol field is redundant.
        for m in message['data']:
            del (m['symbol'])
    def on_message(self, ws, message):
        """websocket callback: route each table's messages to its handler."""
        message = json.loads(message)
        table = message['table'] if 'table' in message else None
        if table == "orderBookL2":
            self.remove_symbol(message)
            self.on_order_book_message(ws, message)
        elif table == "funding":
            self.remove_symbol(message)
            self.on_funding_message(ws, message)
        elif table == "trade":
            self.remove_symbol(message)
            self.on_trade_message(ws, message)
    def on_trade_message(self, ws, message):
        # logger.debug("trade")
        self.dump_message_line(self.strip_trade_message(message))
    def strip_trade_message(self, message):
        """Collapse trades sharing the first timestamp into one summed record
        and drop fields not needed for replay."""
        data = message['data']
        side = None
        price = 0
        size = 0
        last_time_stamp = data[0]['timestamp']
        for d in data:
            if last_time_stamp != d['timestamp']:
                break
            side = d['side']
            price = d['price']   # last trade's side/price win; sizes accumulate
            size += d['size']
        del(data[1:])
        data[0]['side'] = side
        data[0]['price'] = price
        data[0]['size'] = size
        del(data[0]['grossValue'], data[0]['homeNotional'], data[0]['trdMatchID'], data[0]['foreignNotional'])
        return message
    def on_funding_message(self, ws, message):
        logger.debug("funding")
        self.dump_message_line(message)
        pass
    def on_order_book_message(self, ws, message):
        """Batch same-second order-book updates; flush when second/action change.

        A 'partial' snapshot also starts a fresh log file and (re)claims the
        terminate flag so older logger processes stand down.
        """
        action = message['action'] if 'action' in message else None
        if action == 'partial':
            logger.debug("partial")
            self.rotate_file()
            self.create_terminate_flag()
        current_time = int(timestamp())
        if current_time == self.last_time and self.last_action == action and action != None:
            # Same second, same action: merge into the pending batch.
            if self.last_message != None:
                self.last_message['data'] += message['data']
            else:
                self.last_message = message
        else:
            if self.last_message != None:
                self.dump_message()
            self.last_message = message
            self.reset_timestamp()
        self.last_action = action
        if self.check_terminate_flag():
            # Another logger has taken over: close down gracefully.
            self.ws.close()
            self.rotate_file()
            self.terminated_by_peer = True
            logger.debug("terminated")
    def on_error(self, ws, error):
        logger.debug(error)
    def on_close(self, ws):
        # NOTE(review): newer websocket-client versions pass extra close args
        # (status code, message) -- confirm the pinned library version.
        logger.debug("### closed ###")
    def on_open(self, ws):
        # Subscribe to the three XBTUSD feeds this logger records.
        ws.send('{"op": "subscribe", "args": ["funding:XBTUSD", "orderBookL2:XBTUSD", "trade:XBTUSD"]}')
    def start(self):
        """Connect to BitMEX realtime and block, logging until terminated."""
        websocket.enableTrace(True)
        self.ws = websocket.WebSocketApp("wss://www.bitmex.com/realtime",
                                         on_message=self.on_message,
                                         on_error=self.on_error,
                                         on_close=self.on_close,
                                         on_open=self.on_open)
        self.ws.run_forever(ping_interval=70, ping_timeout=30)
if __name__ == "__main__":
    # Standalone mode: log into one fixed file instead of rotating per session.
    bitmex = BitWs(fix_file='/tmp/bit.log')
    atexit.register(bitmex.rotate_file)
    bitmex.start()
|
yasstake/mmf
|
log/bitws.py
|
bitws.py
|
py
| 6,831 |
python
|
en
|
code
| 1 |
github-code
|
6
|
35863858862
|
# ABC162 D: count index triples i<j<k with three distinct colours,
# excluding the equidistant ones (k - j == j - i).
n = int(input())
s = input()

# All colour-distinct triples, ignoring positions.
total = s.count('R') * s.count('G') * s.count('B')

# Remove the arithmetic-progression triples: enumerate the two endpoints
# with the same parity so the midpoint is an integer index.
for left in range(n):
    for right in range(left + 2, n, 2):
        mid = (left + right) // 2
        if s[left] != s[mid] and s[mid] != s[right] and s[left] != s[right]:
            total -= 1

print(total)
|
bokutotu/atcoder
|
ABC/162/d_.py
|
d_.py
|
py
| 589 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39688600504
|
# 55. Jump Game
# Time: O(len(nums))
# Space: O(1)
class Solution:
    def canJump(self, nums: List[int]) -> bool:
        """Greedy reachability: track the farthest index reachable so far.

        Time O(len(nums)), space O(1).
        """
        if len(nums) <= 1:
            return True
        reach = nums[0]
        for i, step in enumerate(nums):
            reach = max(reach, i + step)
            if i >= reach:
                # Stuck: nothing before or at i can move past it.
                return False
            if reach >= len(nums) - 1:
                return True
        return False
|
cmattey/leetcode_problems
|
Python/lc_55_jump_game.py
|
lc_55_jump_game.py
|
py
| 435 |
python
|
en
|
code
| 4 |
github-code
|
6
|
73675802426
|
# This script fills the newly created point geofield
# coding=utf-8
import os, sys
proj_path = "/home/webuser/webapps/tigaserver/"
# Point Django at the project settings and make its packages importable
# before the ORM imports below run.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tigaserver_project.settings")
sys.path.append(proj_path)
os.chdir(proj_path)
from django.core.wsgi import get_wsgi_application
# Instantiating the WSGI app forces django.setup(), making the ORM usable
# from this standalone script.
application = get_wsgi_application()
import csv
import string
import random
from django.contrib.auth.models import User, Group
from tigaserver_app.models import EuropeCountry
# CSV with one expert user per row: name, email, country, username, password,
# regional group, manager flag, iso3 country code.
USERS_FILE = '/home/webuser/Documents/filestigaserver/registre_usuaris_aimcost/test_users_14072020.csv'
def split_name(s):
    """Split a full name into given and family parts.

    NOTE(review): only the first two space-separated tokens are used; a
    single-token name raises IndexError -- confirm the input CSV guarantees
    two-part names.
    """
    tokens = s.split(" ")
    return {"name": tokens[0], "last_name": tokens[1]}
def get_username(s):
    """Derive a username: first initial + '.' + last name up to the first hyphen, lowercased."""
    parts = split_name(s)
    initial = parts['name'][0].lower()
    surname = parts['last_name'].lower().split("-")[0]
    return "{0}.{1}".format(initial, surname)
def generate_password(size=6, chars=string.ascii_uppercase + string.ascii_lowercase + string.digits):
    """Return a random alphanumeric password of *size* characters.

    NOTE(review): uses random, not secrets -- acceptable only for throwaway
    onboarding passwords, not anything security sensitive.
    """
    picks = [random.choice(chars) for _ in range(size)]
    return ''.join(picks)
def delete_euro_users():
    """Delete every Django user belonging to the 'eu_group_europe' group."""
    for member in User.objects.filter(groups__name='eu_group_europe'):
        member.delete()
def delete_users():
    """Delete the users listed in USERS_FILE, matched by the derived username."""
    with open(USERS_FILE) as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',')
        next(csv_reader)  # skip header row
        for row in csv_reader:
            name = row[0]
            username = get_username(name)
            try:
                user = User.objects.get(username=username)
                user.delete()
            except User.DoesNotExist:
                # Already gone (or username derivation changed); just report it.
                print("User with username {0} not found".format(name))
def make_user_regional_manager(user, country):
    """Mark *user* as national supervisor of *country* and persist immediately."""
    user.userstat.national_supervisor_of = country
    user.save()
def assign_user_to_country(user, country):
    """Record *country* as the user's native country and persist immediately."""
    user.userstat.native_of = country
    user.save()
def perform_checks():
    """Pre-flight for create_users: report any unknown country ISO code in
    USERS_FILE and ensure the two base groups exist."""
    with open(USERS_FILE) as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',')
        next(csv_reader)  # skip header row
        for row in csv_reader:
            country_iso = row[7]
            try:
                print("Looking for country {0} with iso_code {1}".format(row[2], row[7]))
                e = EuropeCountry.objects.get(iso3_code=country_iso)
                print("Exists, doing nothing")
            except EuropeCountry.DoesNotExist:
                # Only reported, not fixed -- create_users would fail on this row.
                print("{0} country with iso_code {1} does not exist".format(row[2],row[7]))
    try:
        eu_group = Group.objects.get(name="eu_group_europe")
    except Group.DoesNotExist:
        print("Eu group does not exist, create")
        eu_group = Group.objects.create(name="eu_group_europe")
        eu_group.save()
    try:
        es_group = Group.objects.get(name="eu_group_spain")
    except Group.DoesNotExist:
        print("Es group does not exist, create")
        es_group = Group.objects.create(name="eu_group_spain")
        es_group.save()
def check_users_by_email(comparison_file, output_file_name):
    """Report rows of *comparison_file* whose email has no matching User.

    NOTE(review): output_file_name is accepted but never used -- results are
    only printed; confirm whether file output was ever implemented.
    """
    # Service/system accounts that should not be expected in the CSV.
    ignore_list = ['[email protected]','[email protected]','[email protected]','[email protected]','[email protected]','[email protected]']
    with open(comparison_file) as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',')
        next(csv_reader)  # skip header row
        for row in csv_reader:
            email = row[1]
            if email not in ignore_list:
                try:
                    user = User.objects.get(email=email)
                except User.DoesNotExist:
                    print("User with name {0} - {1} is not in database".format(row[0],row[1]))
def inactivate_euro_users():
    """Deactivate (not delete) every user in the 'eu_group_europe' group."""
    euro_users = User.objects.filter(groups__name='eu_group_europe')
    for user in euro_users:
        user.is_active = False
        user.save()
def create_users(add_users_to_euro_groups=True, ignore_regional_managers = False):
    """Create one Django user per row of USERS_FILE and print the credentials.

    Row layout: name, email, country name, username, password, regional group
    name, manager flag ('1'), iso3 country code.
    add_users_to_euro_groups: also join the row's regional group.
    ignore_regional_managers: skip national-supervisor promotion.
    """
    perform_checks()
    experts_group = Group.objects.get(name="expert")
    with open(USERS_FILE) as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',')
        next(csv_reader)  # skip header row
        for row in csv_reader:
            name = row[0]
            email = row[1]
            country = row[2]  # display name; rebound to the model instance below
            sp = split_name(name)
            #username = get_username(name)
            username = row[3]  # username/password now come straight from the CSV
            password = row[4]
            country_iso = row[7]
            user = User.objects.create_user(username=username,first_name=sp['name'],last_name=sp['last_name'],email=email,password=password)
            if add_users_to_euro_groups:
                regional_group = Group.objects.get(name=row[5])
                regional_group.user_set.add(user)
            experts_group.user_set.add(user)
            country = EuropeCountry.objects.get(iso3_code=country_iso)
            assign_user_to_country(user,country)
            if not ignore_regional_managers:
                if row[6] == '1':
                    print("Making user regional manager")
                    make_user_regional_manager(user, country)
            # Echo credentials so they can be handed to the new user.
            print("{0} {1} {2}".format( username, email, password ))
# Entry point: create the accounts without euro-group membership and without
# promoting regional managers (the alternative maintenance calls are kept
# below, commented out, for manual one-off use).
create_users(add_users_to_euro_groups=False, ignore_regional_managers = True)
#perform_checks()
#delete_users()
#check_users_by_email('/home/webuser/Documents/filestigaserver/registre_usuaris_aimcost/user_check.csv','')
|
Mosquito-Alert/mosquito_alert
|
util_scripts/create_aimsurv_experts.py
|
create_aimsurv_experts.py
|
py
| 5,288 |
python
|
en
|
code
| 6 |
github-code
|
6
|
24923567054
|
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 1 18:19:38 2013
@author: matz
"""
import math
import sys
import cvtype
import datatype
import document
import generator
import package
import test
# abbreviations
DT = test.Default()
# calcHistWrapper
# C++ helper emitted into the generated package: single-channel 1-D histogram
# over [min, max) with 'size' bins. 'dcl' holds the declaration snippet and
# 'dtn' the definition; each carries its own include list.
dcl = document.Document()
dclIncludes = ["<opencv2/core/core.hpp>"]
dcl.text(
"""
void calcHist(const cv::Mat & input, cv::Mat & result, const float min, const float max, int size);
""")
dtnIncludes = ["<opencv2/imgproc/imgproc.hpp>"]
dtn = document.Document()
dtn.text(
"""
void calcHist(const cv::Mat & input, cv::Mat & result, const float min, const float max, int size)
{
int channels[] = {0};
float range[] = {min, max};
const float* ranges[] = {range};
cv::calcHist(&input, 1, channels, cv::Mat(), result, 1, &size, ranges);
}
""")
calcHistWrapper = package.Function(dcl, dclIncludes, dtn, dtnIncludes)
# minEnclosingCircleWrapper
# C++ helper: wraps cv::minEnclosingCircle and packs (center.x, center.y,
# radius) into a 1x3 float matrix so the generated operator has a single
# cv::Mat output.
dcl = document.Document()
dclIncludes = ["<opencv2/core/core.hpp>"]
dcl.text(
"""
void minEnclosingCircle(const cv::Mat & points, cv::Mat & result);
""")
dtnIncludes = ["<opencv2/imgproc/imgproc.hpp>"]
dtn = document.Document()
dtn.text(
"""
void minEnclosingCircle(const cv::Mat & points, cv::Mat & result)
{
cv::Point2f center;
float radius;
cv::minEnclosingCircle(points, center, radius);
result = cv::Mat(1, 3, CV_32F);
result.at<float>(0, 0) = center.x;
result.at<float>(0, 1) = center.y;
result.at<float>(0, 2) = radius;
}
""")
minEnclosingCircleWrapper = package.Function(dcl, dclIncludes, dtn, dtnIncludes)
# fitLineWrapper
# C++ helper: wraps cv::fitLine and converts its (vx, vy, x0, y0) output to a
# 1x2 float matrix (signed distance term, angle in degrees).
dcl = document.Document()
dclIncludes = ["<opencv2/core/core.hpp>"]
dcl.text(
"""
void fitLine(const cv::Mat & points, cv::Mat & result, const int distType,
const double param, const double reps, const double aeps);
""")
dtnIncludes = ["<cmath>", "<opencv2/imgproc/imgproc.hpp>"]
dtn = document.Document()
dtn.text(
"""
void fitLine(const cv::Mat & points, cv::Mat & result, const int distType,
const double param, const double reps, const double aeps)
{
cv::Vec4f line;
cv::fitLine(points, line, distType, param, reps, aeps);
result = cv::Mat(1, 2, CV_32F);
result.at<float>(0, 0) = (line[1]*line[2] - line[0]*line[3]);
result.at<float>(0, 1) = std::atan2(line[0], line[1]) * 180 / M_PI;
}
""")
fitLineWrapper = package.Function(dcl, dclIncludes, dtn, dtnIncludes)
# extractRectangle
# C++ helper: crops a rotated rectangle out of an image -- clamp the bounding
# box to the image, rotate the crop upright, then take the sub-pixel patch.
dcl = document.Document()
dclIncludes = ["<opencv2/core/core.hpp>"]
dcl.text(
"""
void extractRectangle(const cv::Mat & image, const cv::RotatedRect& rectangle, cv::Mat & result);
""")
dtnIncludes = ["<opencv2/imgproc/imgproc.hpp>"]
dtn = document.Document()
dtn.text(
"""
void extractRectangle(const cv::Mat & image, const cv::RotatedRect& rectangle, cv::Mat & result)
{
cv::Rect bbox = rectangle.boundingRect();
bbox.x = std::min(std::max(bbox.x, 0), image.cols - 1);
bbox.y = std::min(std::max(bbox.y, 0), image.rows - 1);
bbox.width = std::min(std::max(bbox.width, 1), image.cols - bbox.x);
bbox.height = std::min(std::max(bbox.height, 1), image.rows - bbox.y);
cv::Mat cropped = image(bbox);
float angle = rectangle.angle;
cv::Size size = rectangle.size;
if (rectangle.angle < -45.)
{
angle += 90.0;
std::swap(size.width, size.height);
}
cv::Point2f shiftedCenter = rectangle.center - cv::Point2f(bbox.x, bbox.y);
cv::Mat transform = cv::getRotationMatrix2D(shiftedCenter, angle, 1.0);
cv::Mat rotated;
cv::warpAffine(cropped, rotated, transform, cropped.size(), cv::INTER_CUBIC);
cv::getRectSubPix(rotated, rectangle.size, shiftedCenter, result);
}
""")
extractRectangleWrapper = package.Function(dcl, dclIncludes, dtn, dtnIncludes)
# initializations
# C++ snippets injected into generated operators to initialize the output
# data before/after execution.  "{0}" is the source, "{1}" the destination
# casted data pointer.
# Output image takes size, stride and pixel type from the input image.
initInCopy = document.Document((
    "{1}->initializeImage({0}->width(), {0}->height(), {0}->stride(), "
    "{1}->data(), {0}->pixelType());").format("srcCastedData", "dstCastedData"
))
# Output image keeps its own geometry but adopts the input pixel type.
initOutCopy = document.Document((
    "{1}->initializeImage({1}->width(), {1}->height(), {1}->stride(), "
    "{1}->data(), {0}->pixelType());").format("srcCastedData", "dstCastedData"
))
# Resize: explicit m_dsizex/m_dsizey win over the scale factors m_fx/m_fy.
initInResize = document.Document((
    "int width = int(m_dsizex) ? int(m_dsizex) : int(srcCastedData->width() * double(m_fx));\n"
    "int height = int(m_dsizey) ? int(m_dsizey) : int(srcCastedData->height() * double(m_fy));\n"
    "{1}->initializeImage(width, height, width * {0}->pixelSize(), "
    "{1}->data(), {0}->pixelType());").format("srcCastedData", "dstCastedData")
)
# Destination size taken verbatim from the m_dsizex/m_dsizey parameters.
initInDsize = document.Document((
    "int width = int(m_dsizex);\n"
    "int height = int(m_dsizey);\n"
    "{1}->initializeImage(width, height, width * {0}->pixelSize(), "
    "{1}->data(), {0}->pixelType());").format("srcCastedData", "dstCastedData")
)
# Destination pixel type derived from the ddepth parameter and the input.
initInDdepth = document.Document((
    "runtime::Image::PixelType pixelType = cvsupport::computeOutPixelType("
    "convertDdepth(m_ddepth), srcCastedData->pixelType());\n"
    "unsigned int stride = runtime::Image::pixelSize(pixelType) * "
    "srcCastedData->width();\n"
    "{1}->initializeImage({0}->width(), {0}->height(), stride, "
    "{1}->data(), pixelType);").format("srcCastedData", "dstCastedData"
))
initOutDdepth = document.Document((
    "runtime::Image::PixelType pixelType = cvsupport::computeOutPixelType("
    "convertDdepth(m_ddepth), srcCastedData->pixelType());\n"
    "unsigned int stride = runtime::Image::pixelSize(pixelType) * "
    "srcCastedData->width();\n"
    "{1}->initializeImage({1}->width(), {1}->height(), stride, "
    "{1}->data(), pixelType);").format("srcCastedData", "dstCastedData"
))
# Destination is a FLOAT_32 matrix with the same rows/cols as the input.
initInFloat32 = document.Document((
    "unsigned int stride = {0}->cols() * runtime::Matrix::valueSize(runtime::Matrix::FLOAT_32);\n"
    "{1}->initializeMatrix({0}->rows(), {0}->cols(), stride, "
    "{1}->data(), runtime::Matrix::FLOAT_32);").format("srcCastedData",
    "dstCastedData"
))
# arguments
# Shared argument and parameter objects reused by the method definitions
# below.  srcImg* variants restrict the accepted image variant.
srcImg = package.Argument(
    "src", "Source", cvtype.Mat(), datatype.Image()
)
srcImgMono = package.Argument(
    "src", "Source", cvtype.Mat(),
    datatype.Image("runtime::Variant::MONO_IMAGE")
)
srcImgMono8bit = package.Argument(
    "src", "Source", cvtype.Mat(),
    datatype.Image("runtime::Variant::MONO_8_IMAGE")
)
# Destination images paired with the initialization snippets defined above.
dstImg = package.Argument(
    "dst", "Destination", cvtype.Mat(), datatype.Image(), initIn = initInCopy,
    initOut = initOutCopy
)
dstImgResize = package.Argument(
    "dst", "Destination", cvtype.Mat(), datatype.Image(),
    initIn = initInResize, initOut = initOutCopy
)
dstImgDsize = package.Argument(
    "dst", "Destination", cvtype.Mat(), datatype.Image(),
    initIn = initInDsize, initOut = initOutCopy
)
dstImgFloat32 = package.Argument(
    "dst", "Destination", cvtype.Mat(), datatype.Float32Matrix(),
    initIn = initInFloat32
)
# "-1" selects the source depth in OpenCV ddepth arguments.
ddepthDefault = package.Constant(
    "-1"
)
ksizex = package.NumericParameter(
    "ksizex", "Kernel size X", cvtype.Int(), datatype.UInt32(), default = 3,
    minValue = 1
)
ksizey = package.NumericParameter(
    "ksizey", "Kernel size Y", cvtype.Int(), datatype.UInt32(), default = 3,
    minValue = 1
)
# *Odd variants force odd kernel sizes as required by Gaussian-type filters.
ksizexOdd = package.NumericParameter(
    "ksizex", "Kernel size X", cvtype.Int(), datatype.UInt32(), default = 3,
    minValue = 1, rules = [package.OddRule()]
)
ksizeyOdd = package.NumericParameter(
    "ksizey", "Kernel size Y", cvtype.Int(), datatype.UInt32(), default = 3,
    minValue = 1, rules = [package.OddRule()]
)
descriptions = [
    package.EnumDescription("MORPH_RECT", "Rectangle"),
    package.EnumDescription("MORPH_ELLIPSE", "Ellipse"),
    package.EnumDescription("MORPH_CROSS", "Cross")
]
shape = package.EnumParameter(
    "shape", "Kernel shape", descriptions = descriptions,
    default = 0
)
# Structuring element built from shape and kernel size for dilate/erode etc.
kernel = package.Call(
    "getStructuringElement(shapeCvData, cv::Size(ksizexCvData, ksizeyCvData))",
    [ksizex, ksizey, shape]
)
# cv::Point(-1, -1) means "anchor at the kernel center" in OpenCV.
anchor = package.Constant(
    "cv::Point(-1, -1)"
)
defaultSize = package.Constant(
    "cv::Size(-1, -1)"
)
# More shared numeric/enum parameters for the filter and threshold methods.
iterations = package.NumericParameter(
    "iterations", "Number of iterations", cvtype.Int(), datatype.UInt32(),
    minValue = 1, default = 1
)
ksize = package.NumericParameter(
    "ksize", "Kernel size", cvtype.Int(), datatype.UInt32(), minValue = 1,
    step = 2, default = 3, rules = [package.OddRule()]
)
# NOTE(review): title string contains a typo ("neigbourhood"); left untouched
# here because it is a runtime/display string of the generated operator.
d = package.NumericParameter(
    "d", "Pixel neigbourhood diameter", cvtype.Int(), datatype.UInt32(),
    default = 9
)
dsizex = package.NumericParameter(
    "dsizex", "Size X", cvtype.Int(), datatype.UInt32()
)
dsizey = package.NumericParameter(
    "dsizey", "Size Y", cvtype.Int(), datatype.UInt32()
)
dx = package.NumericParameter(
    "dx", "Order X derivative", cvtype.Int(), datatype.UInt32(), default = 1
)
dy = package.NumericParameter(
    "dy", "Order Y derivative", cvtype.Int(), datatype.UInt32(), default = 0
)
sigmaColor = package.NumericParameter(
    "sigmaColor", "Sigma color", cvtype.Float64(), datatype.Float64(),
    default = 50.0
)
sigmaSpace = package.NumericParameter(
    "sigmaSpace", "Sigma space", cvtype.Float64(), datatype.Float64(),
    default = 50.0
)
sigmaX = package.NumericParameter(
    "sigmaX", "Sigma X", cvtype.Float64(), datatype.Float64(), default = 0.0
)
sigmaY = package.NumericParameter(
    "sigmaY", "Sigma Y", cvtype.Float64(), datatype.Float64(), default = 0.0
)
# Destination depth choices map to OpenCV depth constants (-1 = same as input).
descriptions = [
    package.EnumDescription("SAME", "Same as input", -1),
    package.EnumDescription("DEPTH_8_BIT", "8-bit", "CV_8U"),
    package.EnumDescription("DEPTH_16_BIT", "16-bit", "CV_16U")
]
ddepth = package.EnumParameter(
    "ddepth", "Destination depth", descriptions = descriptions,
    default = 0
)
scale = package.NumericParameter(
    "scale", "Scale", cvtype.Float64(), datatype.Float64(), default = 1.0
)
delta = package.NumericParameter(
    "delta", "Delta", cvtype.Float64(), datatype.Float64(), default = 0.0
)
dstImgDdepth = package.Argument(
    "dst", "Destination", cvtype.Mat(), datatype.Image(),
    initIn = initInDdepth, initOut = initOutDdepth
)
thresh = package.NumericParameter(
    "threshold", "Threshold", cvtype.Float64(), datatype.Float64(),
    default = 127.0
)
maxval = package.NumericParameter(
    "maxval", "Maximal value", cvtype.Float64(), datatype.Float64(),
    default = 255.0
)
# Parameters for adaptiveThreshold, distanceTransform, floodFill and the
# feature/line detection methods (some are used by sections beyond this chunk).
blockSize = package.NumericParameter(
    "blockSize", "Block size", cvtype.Int(), datatype.UInt32(),
    default = 3, minValue = 1, rules = [package.OddRule()]
)
descriptions = [
    package.EnumDescription("SIZE_3", "3","3"),
    package.EnumDescription("SIZE_5", "5","5"),
    package.EnumDescription("SIZE_PRECISE", "Precise", "CV_DIST_MASK_PRECISE")
]
maskSize = package.EnumParameter(
    "maskSize", "Mask size", descriptions = descriptions,
    default = 0
)
seedPointX = package.NumericParameter(
    "seedPointX", "Seed point X", cvtype.Int(), datatype.UInt32()
)
seedPointY = package.NumericParameter(
    "seedPointY", "Seed point Y", cvtype.Int(), datatype.UInt32()
)
newVal = package.NumericParameter(
    "newVal", "New value", cvtype.Float64(), datatype.Float64()
)
harrisK = package.NumericParameter(
    "k", "Harris parameter", cvtype.Float64(), datatype.Float64(),
    default = 1
)
accumulatorThreshold = package.NumericParameter(
    "threshold", "Accumulator threshold", cvtype.Int(), datatype.UInt32(),
    default = 100
)
minLineLength = package.NumericParameter(
    "minLineLength", "Minimum line length", cvtype.Float64(), datatype.Float64(),
    default = 50
)
maxLineGap = package.NumericParameter(
    "maxLineGap", "Maximum allowed gap", cvtype.Float64(), datatype.Float64(),
    default = 5
)
pointMatrix = package.MatrixArgument(
    "pointMatrix", "Point coordinates", cvtype.Mat(), datatype.Float32Matrix(),
    cols = 2, visualization = datatype.Visualization.POINT
)
winSizeX = package.NumericParameter(
    "winSizeX", "Width of search window", cvtype.Int(), datatype.UInt32(),
    default = 5
)
winSizeY = package.NumericParameter(
    "winSizeY", "Height of search window", cvtype.Int(), datatype.UInt32(),
    default = 5
)
noArray = package.Constant(
    "cv::noArray()"
)
# test data
# Fixture files and buffers referenced by the `tests` lists of the method
# options below.  ImageBuffer sizes are in bytes; bigMemory is needed when
# the result is larger than the input (e.g. pyrUp, integral).
lenna = test.ImageFile("lenna.jpg")
lenna_bw = test.ImageFile("lenna.jpg", grayscale = True)
edges = test.ImageFile("edges.png", grayscale = True)
affine_transformation = test.MatrixFile("affine.npy")
perspective_transformation = test.MatrixFile("perspective.npy")
camera_matrix = test.MatrixFile("camera_matrix.npy")
dist_coeffs = test.MatrixFile("dist_coeffs.npy")
memory = test.ImageBuffer(1000000)
bigMemory = test.ImageBuffer(10000000)
circle = test.ImageFile("circle.png", grayscale = True)
contours = test.ImageFile("contours.png", grayscale = True)
cornerImage = test.ImageFile("corners.png", grayscale = True)
cornerCoordinates = test.MatrixFile("corners.npy")
contour_1 = test.MatrixFile("contour_1.npy") # 32-bit integer coordinates
contour_2 = test.MatrixFile("contour_2.npy") # 32-bit integer coordinates
contour_f32 = test.MatrixFile("contour_f32.npy")
contour_f64 = test.MatrixFile("contour_f64.npy")
points_i32 = test.MatrixFile("points_i32.npy")
points_f32 = test.MatrixFile("points_f32.npy")
points_f64 = test.MatrixFile("points_f64.npy")
non_convex_f32 = test.MatrixFile("non_convex_f32.npy")
contourList = test.List(contour_1, contour_2)
rotated_rect = test.MatrixFile("rotated_rect.npy")
rotated_rect_top_right = test.MatrixFile("rotated_rect_top_right.npy")
rotated_rect_bottom_left = test.MatrixFile("rotated_rect_bottom_left.npy")
# bilateralFilter
# Edge-preserving smoothing; no in-place option because cv::bilateralFilter
# does not support src == dst.  DT entries in test rows mean "use default".
manual = package.Option(
    "manual", "Manual",
    [package.Input(srcImg), package.Output(dstImg), d, sigmaColor,
     sigmaSpace],
    tests = [
        [lenna, memory, 9, 100, 75]
    ]
)
allocate = package.Option(
    "allocate", "Allocate",
    [package.Input(srcImg), package.Allocation(dstImg), d, sigmaColor,
     sigmaSpace],
    tests = [
        [lenna, DT, DT, DT, DT],
        [lenna_bw, DT, 9, 100, 75]
    ]
)
bilateralFilter = package.Method(
    "bilateralFilter", options = [manual, allocate]
)
# blur
# Normalized box filter with manual/allocate/in-place data flow variants.
manual = package.Option(
    "manual", "Manual",
    [package.Input(srcImg, True), package.Output(dstImg),
     package.Size(ksizex, ksizey)],
    tests = [
        [lenna, memory, (3, 4)],
        [lenna_bw, test.RefData(lenna), DT]
    ]
)
allocate = package.Option(
    "allocate", "Allocate",
    [package.Input(srcImg), package.Allocation(dstImg),
     package.Size(ksizex, ksizey)],
    tests = [
        [lenna, DT, DT],
        [lenna_bw, DT, DT]
    ]
)
inPlace = package.Option(
    "inPlace", "In place",
    [package.InputOutput(srcImg), package.RefInput(dstImg, srcImg),
     package.Size(ksizex, ksizey)],
    tests = [
        [lenna, DT, DT]
    ]
)
blur = package.Method(
    "blur", options = [manual, allocate, inPlace]
)
# boxFilter
# Box filter with fixed ddepth (-1 = same as source depth).
manual = package.Option(
    "manual", "Manual",
    [package.Input(srcImg, True), package.Output(dstImg), ddepthDefault,
     package.Size(ksizex, ksizey)],
    tests = [
        [lenna, memory, DT, (5, 4)],
        [lenna, test.RefData(lenna), DT, DT]
    ]
)
allocate = package.Option(
    "allocate", "Allocate",
    [package.Input(srcImg), package.Allocation(dstImg), ddepthDefault,
     package.Size(ksizex, ksizey)],
    tests = [
        [lenna_bw, DT, DT, (4, 5)],
    ]
)
inPlace = package.Option(
    "inPlace", "In place",
    [package.InputOutput(srcImg), package.RefInput(dstImg, srcImg), ddepthDefault,
     package.Size(ksizex, ksizey)],
    tests = [
        [lenna, DT, DT, DT],
    ]
)
boxFilter = package.Method(
    "boxFilter", options = [manual, allocate, inPlace]
)
# dilate and erode
# Both morphological operations share the same option list: the structuring
# element is built via the `kernel` Call from shape and kernel size.
manual = package.Option(
    "manual", "Manual",
    [package.Input(srcImg, True), package.Output(dstImg), kernel, anchor,
     iterations],
    tests = [
        [lenna, memory, (3, 4, 1), DT, 2],
        [lenna_bw, memory, DT, DT, DT]
    ]
)
allocate = package.Option(
    "allocate", "Allocate",
    [package.Input(srcImg), package.Allocation(dstImg), kernel, anchor,
     iterations],
    tests = [
        [lenna, DT, DT, DT, DT]
    ]
)
inPlace = package.Option(
    "inPlace", "In place",
    [package.InputOutput(srcImg), package.RefInput(dstImg, srcImg), kernel,
     anchor, iterations],
    tests = [
        [lenna_bw, DT, (DT, DT, 2), DT, DT]
    ]
)
dilate = package.Method(
    "dilate", options = [manual, allocate, inPlace]
)
erode = package.Method(
    "erode", options = [manual, allocate, inPlace]
)
# GaussianBlur
# Uses the odd-only kernel size parameters; sigma <= 0 lets OpenCV derive
# sigma from the kernel size.
manual = package.Option(
    "manual", "Manual",
    [package.Input(srcImg, True), package.Output(dstImg),
     package.Size(ksizexOdd, ksizeyOdd), sigmaX, sigmaY],
    tests = [
        [lenna, memory, (3, 5), 1.5, 2.5],
        [lenna, test.RefData(lenna), DT, DT, DT]
    ]
)
allocate = package.Option(
    "allocate", "Allocate",
    [package.Input(srcImg), package.Allocation(dstImg),
     package.Size(ksizexOdd, ksizeyOdd), sigmaX, sigmaY],
    tests = [
        [lenna, DT, (3, 5), -1, -1]
    ]
)
inPlace = package.Option(
    "inPlace", "In place",
    [package.InputOutput(srcImg), package.RefInput(dstImg, srcImg),
     package.Size(ksizexOdd, ksizeyOdd), sigmaX, sigmaY],
    tests = [
        [lenna, DT, DT, 0, 0]
    ]
)
GaussianBlur = package.Method(
    "GaussianBlur", options = [manual, allocate, inPlace]
)
# medianBlur
# Median filter; `ksize` is constrained to odd values by its OddRule.
manual = package.Option(
    "manual", "Manual",
    [package.Input(srcImg, True), package.Output(dstImg), ksize],
    tests = [
        [lenna, memory, 3],
        [lenna_bw, test.RefData(lenna), 5]
    ]
)
allocate = package.Option(
    "allocate", "Allocate",
    [package.Input(srcImg), package.Allocation(dstImg), ksize],
    tests = [
        [lenna_bw, DT, DT]
    ]
)
inPlace = package.Option(
    "inPlace", "In place",
    [package.InputOutput(srcImg), package.RefInput(dstImg, srcImg), ksize],
    tests = [
        [lenna, DT, DT]
    ]
)
medianBlur = package.Method(
    "medianBlur", options = [manual, allocate, inPlace]
)
# morphologyEx
# Advanced morphology; `op` selects the operation, structuring element comes
# from the shared `kernel` Call.
descriptions = [
    package.EnumDescription("MORPH_OPEN", "Open"),
    package.EnumDescription("MORPH_CLOSE", "Close"),
    package.EnumDescription("MORPH_GRADIENT", "Gradient"),
    package.EnumDescription("MORPH_TOPHAT", "Tophat"),
    package.EnumDescription("MORPH_BLACKHAT", "Blackhat")
]
op = package.EnumParameter(
    "op", "Operation", descriptions = descriptions,
    default = 1
)
manual = package.Option(
    "manual", "Manual",
    [package.Input(srcImg, True), package.Output(dstImg), op, kernel,
     anchor, iterations],
    tests = [
        [lenna, memory, 0, (3, 4, 0), DT, DT],
        [lenna, test.RefData(lenna), 2, (DT, DT, 1), DT, 3]
    ]
)
allocate = package.Option(
    "allocate", "Allocate",
    [package.Input(srcImg), package.Allocation(dstImg), op, kernel,
     anchor, iterations],
    tests = [
        [lenna_bw, DT, 0, DT, DT, DT],
        [lenna, DT, 3, (DT, DT, 2), DT, DT]
    ]
)
inPlace = package.Option(
    "inPlace", "In place",
    [package.InputOutput(srcImg), package.RefInput(dstImg, srcImg), op, kernel,
     anchor, iterations],
    tests = [
        [lenna_bw, DT, 1, (DT, DT, 1), DT, DT],
        [lenna, DT, 3, DT, DT, DT]
    ]
)
morphologyEx = package.Method(
    "morphologyEx", options = [manual, allocate, inPlace]
)
# Laplacian
# Destination depth is user-selectable, hence the ddepth-aware destination
# argument; no in-place option (output depth may differ from input).
manual = package.Option(
    "manual", "Manual",
    [package.Input(srcImg), package.Output(dstImgDdepth), ddepth,
     ksize, scale, delta],
    tests = [
        [lenna, memory, 0, 3, DT, DT],
        [lenna_bw, memory, 1, 3, 1, 0]
    ]
)
allocate = package.Option(
    "allocate", "Allocate",
    [package.Input(srcImg), package.Allocation(dstImgDdepth), ddepth,
     ksize, scale, delta],
    tests = [
        [lenna_bw, DT, 2, 5, 100, 1000],
        [lenna, DT, 2, 7, 50, 500]
    ]
)
laplacian = package.Method(
    "Laplacian", options = [manual, allocate]
)
# Sobel
# Sobel derivative; kernel size limited to the odd values 1..7 supported by
# cv::Sobel.
sobelKsize = package.NumericParameter(
    "ksize", "Kernel size", cvtype.Int(), datatype.UInt32(), minValue = 1,
    maxValue = 7, step = 2, default = 3, rules = [package.OddRule()]
)
manual = package.Option(
    "manual", "Manual",
    [package.Input(srcImg), package.Output(dstImgDdepth), ddepth,
     dx, dy, sobelKsize, scale, delta],
    tests = [
        [lenna, memory, 0, 1, 1, 1, 1, 0],
        [lenna_bw, memory, 1, 2, 0, 3, 1, 0]
    ]
)
allocate = package.Option(
    "allocate", "Allocate",
    [package.Input(srcImg), package.Allocation(dstImgDdepth), ddepth,
     dx, dy, sobelKsize, scale, delta],
    tests = [
        [lenna, DT, 0, DT, 2, 5, 2, DT],
        [lenna_bw, DT, 2, DT, DT, DT, 100, DT]
    ]
)
sobel = package.Method(
    "Sobel", options = [manual, allocate]
)
# Scharr
# Scharr derivative (fixed 3x3 kernel, no ksize parameter).
manual = package.Option(
    "manual", "Manual",
    [package.Input(srcImg), package.Output(dstImgDdepth), ddepth,
     dx, dy, scale, delta],
    tests = [
        [lenna, memory, 0, 0, 1, 1, 0],
        [lenna_bw, memory, 1, 1, 0, 1, 0]
    ]
)
allocate = package.Option(
    "allocate", "Allocate",
    [package.Input(srcImg), package.Allocation(dstImgDdepth), ddepth,
     dx, dy, scale, delta],
    tests = [
        [lenna, DT, 0, DT, DT, 2, DT],
        [lenna_bw, DT, 2, 0, 1, 100, DT]
    ]
)
scharr = package.Method(
    "Scharr", options = [manual, allocate]
)
# pyrDown
# Pyramid downsampling: output is ceil(input/2) in each dimension, matching
# cv::pyrDown's default dstsize.
initInPyrDown = document.Document((
    "int width = int((srcCastedData->width() + 1) / 2 );\n"
    "int height = int((srcCastedData->height() + 1) / 2 );\n"
    "{1}->initializeImage(width, height, width * {0}->pixelSize(), "
    "{1}->data(), {0}->pixelType());").format("srcCastedData", "dstCastedData")
)
dstImgPyr = package.Argument(
    "dst", "Destination", cvtype.Mat(), datatype.Image(),
    initIn = initInPyrDown, initOut = initOutCopy
)
manual = package.Option(
    "manual", "Manual",
    [package.Input(srcImg), package.Output(dstImgPyr)],
    tests = [
        [lenna, memory]
    ]
)
allocate = package.Option(
    "allocate", "Allocate",
    [package.Input(srcImg), package.Allocation(dstImgPyr)],
    tests = [
        [lenna_bw, DT]
    ]
)
pyrDown = package.Method(
    "pyrDown", options = [manual, allocate]
)
# pyrUp
# Pyramid upsampling: output doubles each dimension, so the manual test
# needs the larger image buffer (`bigMemory`).
initInPyrUp = document.Document((
    "int width = 2 * srcCastedData->width();\n"
    "int height = 2 * srcCastedData->height();\n"
    "{1}->initializeImage(width, height, width * {0}->pixelSize(), "
    "{1}->data(), {0}->pixelType());").format("srcCastedData", "dstCastedData")
)
dstImgPyr = package.Argument(
    "dst", "Destination", cvtype.Mat(), datatype.Image(),
    initIn = initInPyrUp, initOut = initOutCopy
)
manual = package.Option(
    "manual", "Manual",
    [package.Input(srcImg), package.Output(dstImgPyr)],
    tests = [
        [lenna, bigMemory]
    ]
)
allocate = package.Option(
    "allocate", "Allocate",
    [package.Input(srcImg), package.Allocation(dstImgPyr)],
    tests = [
        [lenna_bw, DT]
    ]
)
pyrUp = package.Method(
    "pyrUp", options = [manual, allocate]
)
# resize
# Destination size comes either from dsizex/dsizey or from the fx/fy scale
# factors (see initInResize above, which prefers the explicit size).
fx = package.NumericParameter(
    "fx", "Scale X", cvtype.Float64(), datatype.Float64(), default = 1.0
)
fy = package.NumericParameter(
    "fy", "Scale Y", cvtype.Float64(), datatype.Float64(), default = 1.0
)
descriptions = [
    package.EnumDescription("INTER_NEAREST", "Nearest neighbour"),
    package.EnumDescription("INTER_LINEAR", "Bilinear")
]
interpolation = package.EnumParameter(
    "interpolation", "Interpolation", descriptions = descriptions,
    default = 1
)
manual = package.Option(
    "manual", "Manual",
    [package.Input(srcImg), package.Output(dstImgResize),
     package.Size(dsizex, dsizey), fx, fy, interpolation],
    tests = [
        # NOTE(review): the first two rows list fewer values than there are
        # arguments — presumably the generator pads missing trailing values
        # with defaults; confirm against the test framework.
        [lenna, memory, DT, DT, DT],
        [lenna, memory, (100, 200), 0],
        [lenna_bw, memory, (100, 200), 0.5, 0.3, DT]
    ]
)
allocate = package.Option(
    "allocate", "Allocate",
    [package.Input(srcImg), package.Allocation(dstImgResize),
     package.Size(dsizex, dsizey), fx, fy, interpolation],
    tests = [
        [lenna_bw, DT, DT, 0.5, 0.3, DT]
    ]
)
resize = package.Method(
    "resize", options = [manual, allocate]
)
# warpAffine
# 2x3 affine transformation matrix; destination size is given explicitly.
affineM = package.MatrixParameter(
    "affineM", "Affine transformation", datatype.FloatMatrix(),
    default = "cvsupport::Matrix::eye(2, 3, runtime::Matrix::FLOAT_32)", rows = 2,
    cols = 3
)
manual = package.Option(
    "manual", "Manual",
    [package.Input(srcImg), package.Output(dstImgDsize), affineM,
     package.Size(dsizex, dsizey)],
    tests = [
        [lenna_bw, memory, affine_transformation, (400, 500)],
        [lenna, memory, DT, (400, 500)]
    ]
)
allocate = package.Option(
    "allocate", "Allocate",
    [package.Input(srcImg), package.Allocation(dstImgDsize), affineM,
     package.Size(dsizex, dsizey)],
    tests = [
        [lenna, DT, affine_transformation, (400, 500)]
    ]
)
warpAffine = package.Method(
    "warpAffine", options = [manual, allocate]
)
# warpPerspective
# 3x3 perspective transformation matrix.
# NOTE(review): the parameter identifier is "affineM" (likely copy-pasted
# from warpAffine above).  It is left unchanged because the identifier is
# part of the generated operator's public interface — confirm before renaming.
perspectiveM = package.MatrixParameter(
    "affineM", "Perspective transformation", datatype.FloatMatrix(),
    default = "cvsupport::Matrix::eye(3, 3, runtime::Matrix::FLOAT_32)", rows = 3,
    cols = 3
)
manual = package.Option(
    "manual", "Manual",
    [package.Input(srcImg), package.Output(dstImgDsize), perspectiveM,
     package.Size(dsizex, dsizey)],
    tests = [
        [lenna_bw, memory, perspective_transformation, (400, 500)],
        [lenna, memory, DT, (400, 500)]
    ]
)
allocate = package.Option(
    "allocate", "Allocate",
    [package.Input(srcImg), package.Allocation(dstImgDsize), perspectiveM,
     package.Size(dsizex, dsizey)],
    tests = [
        [lenna, DT, perspective_transformation, (400, 500)]
    ]
)
warpPerspective = package.Method(
    "warpPerspective", options = [manual, allocate]
)
# undistort
# Lens undistortion using a 3x3 camera matrix and 1x5 distortion
# coefficients; these parameters are shared with undistortPoints below.
cameraMatrix = package.MatrixParameter(
    "cameraMatrix", "Camera matrix", datatype.FloatMatrix(),
    default = "cvsupport::Matrix::eye(3, 3, runtime::Matrix::FLOAT_32)", rows = 3,
    cols = 3
)
distCoeffs = package.MatrixParameter(
    "distCoeffs", "Distortion coefficients", datatype.FloatMatrix(),
    default = "cvsupport::Matrix::zeros(1, 5, runtime::Matrix::FLOAT_32)",
    rows = 1, cols = 5
)
manual = package.Option(
    "manual", "Manual",
    [package.Input(srcImg), package.Output(dstImg), cameraMatrix,
     distCoeffs],
    tests = [
        [lenna_bw, memory, camera_matrix, dist_coeffs],
        [lenna, memory, DT, DT]
    ]
)
allocate = package.Option(
    "allocate", "Allocate",
    [package.Input(srcImg), package.Allocation(dstImg), cameraMatrix,
     distCoeffs],
    tests = [
        [lenna, DT, camera_matrix, dist_coeffs]
    ]
)
undistort = package.Method(
    "undistort", options = [manual, allocate]
)
# undistortPoints
# Point-wise undistortion: 2-channel float point matrices in and out;
# reuses cameraMatrix/distCoeffs defined for undistort.
srcPts = package.MatrixArgument(
    "src", "Source", cvtype.Mat(channels = 2), datatype.Float32Matrix(),
    cols = 2, visualization = datatype.Visualization.POINT
)
dstPts = package.MatrixArgument(
    "dst", "Destination", cvtype.Mat(channels = 2), datatype.Float32Matrix(),
    cols = 2, visualization = datatype.Visualization.POINT
)
allocate = package.Option(
    "allocate", "Allocate",
    [package.Input(srcPts), package.Allocation(dstPts), cameraMatrix,
     distCoeffs],
    tests = [
        [points_f32, DT, camera_matrix, dist_coeffs],
        [points_f32, DT, DT, DT]
    ]
)
undistortPoints = package.Method(
    "undistortPoints", options = [allocate]
)
# adaptiveThreshold
# Per-pixel threshold from a local block statistic; requires an 8-bit mono
# input.  The constant C subtracted from the block statistic is fixed to 0.
descriptions = [
    package.EnumDescription("THRESH_BINARY", "Binary"),
    package.EnumDescription("THRESH_BINARY_INV", "Binary inverted")
]
adaptiveThresholdType = package.EnumParameter(
    "thresholdType", "Threshold type", descriptions = descriptions,
    default = 0
)
descriptions = [
    package.EnumDescription("ADAPTIVE_THRESH_MEAN_C", "Mean of block"),
    package.EnumDescription("ADAPTIVE_THRESH_GAUSSIAN_C",
                            "Weighted sum of block")
]
adaptiveMethod = package.EnumParameter(
    "adaptiveMethod", "Adaptive method", descriptions = descriptions,
    default = 0
)
subtractedC = package.Constant("0")
manual = package.Option(
    "manual", "Manual",
    [package.Input(srcImgMono8bit, True), package.Output(dstImg), maxval,
     adaptiveMethod, adaptiveThresholdType, blockSize, subtractedC],
    tests = [
        [lenna_bw, memory, DT, DT, DT, DT, DT],
        [lenna_bw, test.RefData(lenna_bw), 128, 1, 1, 5, DT]
    ]
)
allocate = package.Option(
    "allocate", "Allocate",
    [package.Input(srcImgMono8bit, True), package.Allocation(dstImg), maxval,
     adaptiveMethod, adaptiveThresholdType, blockSize, subtractedC],
    tests = [
        [lenna_bw, DT, 200, 1, 0, 9, DT]
    ]
)
inPlace = package.Option(
    "inPlace", "In place",
    [package.InputOutput(srcImgMono8bit), package.RefInput(dstImg, srcImgMono8bit),
     maxval, adaptiveMethod, adaptiveThresholdType, blockSize, subtractedC],
    tests = [
        [lenna_bw, DT, 80, 0, 1, 7, DT]
    ]
)
adaptiveThreshold = package.Method(
    "adaptiveThreshold", options = [manual, allocate, inPlace]
)
# threshold
# Global threshold on a mono image with the five standard OpenCV modes.
descriptions = [
    package.EnumDescription("THRESH_BINARY", "Binary"),
    package.EnumDescription("THRESH_BINARY_INV", "Binary inverted"),
    package.EnumDescription("THRESH_TRUNC", "Truncate"),
    package.EnumDescription("THRESH_TOZERO", "Truncate to zero"),
    package.EnumDescription("THRESH_TOZERO_INV", "Truncate to zero inverted")
]
thresholdType = package.EnumParameter(
    "thresholdType", "Threshold type", descriptions = descriptions,
    default = 0
)
manual = package.Option(
    "manual", "Manual",
    [package.Input(srcImgMono, True), package.Output(dstImg), thresh, maxval,
     thresholdType],
    tests = [
        [lenna_bw, memory, DT, DT, DT],
        [lenna_bw, test.RefData(lenna_bw), 128, DT, 2]
    ]
)
allocate = package.Option(
    "allocate", "Allocate",
    [package.Input(srcImgMono), package.Allocation(dstImg), thresh, maxval,
     thresholdType],
    tests = [
        [lenna_bw, DT, DT, DT, 3]
    ]
)
inPlace = package.Option(
    "inPlace", "In place",
    [package.InputOutput(srcImgMono), package.RefInput(dstImg, srcImgMono),
     thresh, maxval, thresholdType],
    tests = [
        [lenna_bw, DT, DT, DT, 4]
    ]
)
threshold = package.Method(
    "threshold", options = [manual, allocate, inPlace]
)
# distanceTransform
# Distance to the nearest zero pixel; output is a FLOAT_32 matrix (see
# dstImgFloat32/initInFloat32).
descriptions = [
    package.EnumDescription("DIST_L1", "L1 distance","CV_DIST_L1"),
    package.EnumDescription("DIST_L2", "L2 distance", "CV_DIST_L2"),
    package.EnumDescription("DIST_C", "C", "CV_DIST_C")
]
distanceType = package.EnumParameter(
    "distanceType", "Distance type", descriptions = descriptions,
    default = 0
)
manual = package.Option(
    "manual", "Manual",
    [package.Input(srcImgMono), package.Output(dstImgFloat32), distanceType,
     maskSize],
    tests = [
        [circle, memory, DT, DT]
    ]
)
allocate = package.Option(
    "allocate", "Allocate",
    [package.Input(srcImgMono), package.Allocation(dstImgFloat32), distanceType,
     maskSize],
    tests = [
        [circle, DT, 2, 0],
        [circle, DT, 1, 1],
        [circle, DT, 0, 2]
    ]
)
distanceTransform = package.Method(
    "distanceTransform", options = [manual, allocate]
)
# floodFill
# Fills the connected component at the seed point with `newVal`, in place.
# The seedPointX/seedPointY parameters are defined once in the shared
# parameter section above; the byte-identical redefinitions that used to
# appear here were redundant and have been removed.
inPlace = package.Option(
    "inPlace", "In place",
    [package.InputOutput(srcImgMono), package.Point(seedPointX, seedPointY), newVal],
    tests = [
        [circle, (20, 10), 125.]
    ]
)
floodFill = package.Method(
    "floodFill", options = [inPlace]
)
# integral
# Integral image: output is an INT_32 matrix with one extra row and column,
# matching cv::integral's (rows+1) x (cols+1) sum image; the manual test
# therefore needs `bigMemory`.
initInIntegral = document.Document((
    "unsigned int stride = ({0}->cols() + 1) * runtime::Matrix::valueSize(runtime::Matrix::INT_32);\n"
    "{1}->initializeMatrix({0}->rows() + 1, {0}->cols() + 1, stride, "
    "{1}->data(), runtime::Matrix::INT_32);").format("srcCastedData",
    "dstCastedData"
))
dstImgIntegral = package.Argument(
    "dst", "Destination", cvtype.Mat(), datatype.Matrix(),
    initIn = initInIntegral
)
manual = package.Option(
    "manual", "Manual",
    [package.Input(srcImgMono), package.Output(dstImgIntegral)],
    tests = [
        [lenna_bw, bigMemory]
    ]
)
allocate = package.Option(
    "allocate", "Allocate",
    [package.Input(srcImgMono), package.Allocation(dstImgIntegral)],
    tests = [
        [circle, DT]
    ]
)
integral = package.Method(
    "integral", options = [manual, allocate]
)
# calcHist
# Single-channel histogram over [histMin, histMax) with histSize bins;
# generated with an empty namespace (custom wrapper, not the cv:: function
# signature).
histMin = package.NumericParameter(
    "histMin", "Minimum", cvtype.Float32(), datatype.Float32(),
    default = 0
)
histMax = package.NumericParameter(
    "histMax", "Maximum", cvtype.Float32(), datatype.Float32(),
    default = 256
)
histSize = package.NumericParameter(
    "histSize", "Number of bins", cvtype.Int(), datatype.UInt32(),
    default = 16
)
dstMatrix = package.Argument(
    "dst", "Destination", cvtype.Mat(), datatype.Matrix(),
    visualization = datatype.Visualization.HISTOGRAM
)
allocate = package.Option(
    "allocate", "Allocate",
    [package.Input(srcImgMono), package.Allocation(dstMatrix), histMin, histMax, histSize],
    tests = [
        [circle, DT, 0, 256, 5],
        [lenna_bw, DT, 0, 256, 20]
    ]
)
calcHist = package.Method(
    "calcHist", namespace = "", options = [allocate]
)
# equalizeHist
# Histogram equalization; cv::equalizeHist requires an 8-bit single-channel
# image, hence srcImgMono8bit everywhere.
# FIX: the in-place option's RefInput must reference the same argument
# object used as InputOutput; the original passed srcImgMono here while the
# InputOutput uses srcImgMono8bit, unlike every other inPlace option in this
# file.
manual = package.Option(
    "manual", "Manual",
    [package.Input(srcImgMono8bit, True), package.Output(dstImg)],
    tests = [
        [lenna_bw, memory],
        [lenna_bw, test.RefData(lenna_bw)]
    ]
)
allocate = package.Option(
    "allocate", "Allocate",
    [package.Input(srcImgMono8bit), package.Allocation(dstImg)],
    tests = [
        [lenna_bw, DT]
    ]
)
inPlace = package.Option(
    "inPlace", "In place",
    [package.InputOutput(srcImgMono8bit), package.RefInput(dstImg, srcImgMono8bit)],
    tests = [
        [lenna_bw, DT]
    ]
)
equalizeHist = package.Method(
    "equalizeHist", options = [manual, allocate, inPlace]
)
# findContours
# Extracts contours from an 8-bit mono image into a list of 32-bit integer
# point matrices.
descriptions = [
    package.EnumDescription("RETR_EXTERNAL", "Extreme outer contours", "CV_RETR_EXTERNAL"),
    package.EnumDescription("RETR_LIST", "All contours", "CV_RETR_LIST")
]
findContoursMode = package.EnumParameter(
    "mode", "Mode", descriptions = descriptions,
    default = 0
)
descriptions = [
    package.EnumDescription("CHAIN_APPROX_NONE", "Store all points", "CV_CHAIN_APPROX_NONE"),
    package.EnumDescription("CHAIN_APPROX_SIMPLE", "Compress straight segments", "CV_CHAIN_APPROX_SIMPLE"),
    package.EnumDescription("CHAIN_APPROX_TC89_L1", "Teh-Chin L1", "CV_CHAIN_APPROX_TC89_L1"),
    package.EnumDescription("CHAIN_APPROX_TC89_KCOS", "Teh-Chin Kcos", "CV_CHAIN_APPROX_TC89_KCOS")
]
findContoursMethod = package.EnumParameter(
    "method", "Method", descriptions = descriptions,
    default = 0
)
dstListOfMatrices = package.Argument(
    "dst", "Destination", cvtype.VectorOfMat(),
    datatype.List(datatype.Int32Matrix()),
    visualization = datatype.Visualization.POLYGON
)
allocate = package.Option(
    "allocate", "Allocate",
    [package.Input(srcImgMono8bit), package.Allocation(dstListOfMatrices),
     findContoursMode, findContoursMethod],
    tests = [
        [contours, DT, DT, DT],
        [contours, DT, DT, 1]
    ]
)
findContours = package.Method(
    "findContours", options = [allocate]
)
# drawContours
ch1 = package.NumericParameter(
"ch1", "Channel 1", cvtype.Int(), datatype.UInt8(), default = 0
)
ch2 = package.NumericParameter(
"ch2", "Channel 2", cvtype.Int(), datatype.UInt8(), default = 0
)
ch3 = package.NumericParameter(
"ch3", "Channel 3", cvtype.Int(), datatype.UInt8(), default = 0
)
thickness = package.NumericParameter(
"thickness", "Thickness", cvtype.Int(), datatype.Int32(), default = 1
)
listOfContours = package.Argument(
"contours", "Contours", cvtype.VectorOfMat(),
datatype.List(datatype.Float32Matrix()),
visualization = datatype.Visualization.POLYGON
)
drawContoursImage = package.Argument(
"img", "Image", cvtype.Mat(), datatype.Image()
)
inPlace = package.Option(
"inPlace", "In place",
[package.InputOutput(drawContoursImage), package.Input(listOfContours),
package.Constant(-1), package.Scalar(ch1, ch2, ch3), thickness],
tests = [
[lenna_bw, contourList, DT, (255, 0, 0), DT],
[lenna, contourList, DT, (255, 0, 0), -1]
]
)
drawContours = package.Method(
"drawContours", options = [inPlace]
)
# approxPolyDP
# Douglas-Peucker polygon approximation with a maximal error of `epsilon`
# pixels.
curve = package.MatrixArgument(
    "curve", "Polygon", cvtype.Mat(channels = 2), datatype.Any32BitMatrix(),
    visualization = datatype.Visualization.POLYGON_OR_POLYLINE, cols = 2
)
outCurve = package.MatrixArgument(
    "outCurve", "Polygon", cvtype.Mat(channels = 2), datatype.Any32BitMatrix(),
    visualization = datatype.Visualization.POLYGON_OR_POLYLINE, cols = 2
)
epsilon = package.NumericParameter(
    "epsilon", "Maximal error in pixels", cvtype.Float64(), datatype.Float64(),
    default = 10.0, minValue = 0.0
)
closed = package.Parameter(
    "closed", "Curve is closed", cvtype.Bool(), datatype.Bool(), default = False
)
allocate = package.Option(
    "allocate", "Allocate",
    [package.Input(curve), package.Allocation(outCurve), epsilon, closed],
    tests = [
        [contour_1, DT, DT, DT],
        [contour_f32, DT, 5.0, DT]
    ]
)
approxPolyDP = package.Method(
    "approxPolyDP", options = [allocate]
)
# boundingRect
# Axis-aligned bounding rectangle of a point set, returned as a 1x4
# integer matrix.
rect = package.MatrixArgument(
    "rect", "Rectangle", cvtype.Rect(), datatype.Int32Matrix(),
    cols = 4, rows = 1, visualization = datatype.Visualization.RECTANGLE
)
points = package.MatrixArgument(
    "points", "Point set", cvtype.Mat(channels = 2), datatype.Any32BitMatrix(),
    cols = 2, visualization = datatype.Visualization.POINT
)
allocate = package.Option(
    "allocate", "Allocate",
    [package.Input(points), package.ReturnValue(rect)],
    tests = [
        [points_i32, DT],
        [points_f32, DT]
    ]
)
boundingRect = package.Method(
    "boundingRect", options = [allocate]
)
# contourArea
# Scalar area of a contour, exposed as the wrapped function's return value.
points = package.MatrixArgument(
    "contour", "Input points", cvtype.Mat(channels = 2), datatype.Any32BitMatrix(),
    visualization = datatype.Visualization.POLYGON, cols = 2
)
area = package.Argument(
    "area", "Area", cvtype.Float64(), datatype.Float64()
)
allocate = package.Option(
    "allocate", "Allocate",
    [package.Input(points), package.ReturnValue(area)],
    tests = [
        [non_convex_f32, DT],
        [points_i32, DT]
    ]
)
contourArea = package.Method(
    "contourArea", options = [allocate]
)
# convexHull
points = package.MatrixArgument(
"curve", "Input points", cvtype.Mat(channels = 2),
datatype.Any32BitMatrix(), cols = 2,
visualization = datatype.Visualization.POINT
)
hull = package.MatrixArgument(
"outCurve", "Convex hull", cvtype.Mat(channels = 2),
datatype.Any32BitMatrix(), cols = 2,
visualization = datatype.Visualization.POLYGON
)
epsilon = package.NumericParameter(
"epsilon", "Maximal error in pixels", cvtype.Float64(), datatype.Float64(),
default = 10.0, minValue = 0.0
)
clockwise = package.Parameter(
"clockwise", "Output orientation", cvtype.Bool(), datatype.Bool(),
default = False
)
allocate = package.Option(
"allocate", "Allocate",
[package.Input(points), package.Allocation(hull), clockwise],
tests = [
[non_convex_f32, DT, DT],
[points_i32, DT, DT]
]
)
convexHull = package.Method(
"convexHull", options = [allocate]
)
# fitEllipse
ellipse = package.MatrixArgument(
"ellipse", "Bounding box", cvtype.RotatedRect(), datatype.Float32Matrix(),
cols = 5, rows = 1, visualization = datatype.Visualization.ELLIPSE
)
points = package.MatrixArgument(
"points", "Point set", cvtype.Mat(channels = 2), datatype.Any32BitMatrix(),
cols = 2, visualization = datatype.Visualization.POINT
)
allocate = package.Option(
"allocate", "Allocate",
[package.Input(points), package.ReturnValue(ellipse)],
tests = [
[points_i32, DT],
[points_f32, DT]
]
)
fitEllipse = package.Method(
"fitEllipse", options = [allocate]
)
# fitLine: robust line fit through a point set
line = package.MatrixArgument(
    "line", "Line (\\u03C1, \\u03B8)", cvtype.Mat(), datatype.Float32Matrix(),
    cols = 3, rows = 1, visualization = datatype.Visualization.LINE
)
points = package.MatrixArgument(
    "points", "Point set", cvtype.Mat(channels = 2), datatype.Any32BitMatrix(),
    cols = 2, visualization = datatype.Visualization.POINT
)
descriptions = [
    package.EnumDescription("DIST_L2", "L2", "CV_DIST_L2"),
    package.EnumDescription("DIST_L1", "L1", "CV_DIST_L1"),
    package.EnumDescription("DIST_L12", "L12", "CV_DIST_L12"),
    package.EnumDescription("DIST_FAIR", "Fair", "CV_DIST_FAIR"),
    package.EnumDescription("DIST_WELSCH", "Welsch", "CV_DIST_WELSCH"),
    package.EnumDescription("DIST_HUBER", "Huber", "CV_DIST_HUBER")
]
distType = package.EnumParameter(
    "distType", "Distance type", descriptions = descriptions,
    default = 0
)
# "param" is pinned to 0, which lets OpenCV choose an optimal value
# for the selected distance type.
param = package.Constant("0")
reps = package.NumericParameter(
    "reps", "Accuracy of \\u03C1", cvtype.Float64(), datatype.Float64(),
    default = 0.01, minValue = 0.0
)
aeps = package.NumericParameter(
    "aeps", "Accuracy of \\u03B8", cvtype.Float64(), datatype.Float64(),
    default = 0.01, minValue = 0.0
)
allocate = package.Option(
    "allocate", "Allocate",
    [package.Input(points), package.Allocation(line), distType, param, reps,
     aeps],
    tests = [
        [points_i32, DT, DT, DT, DT],
        [points_f32, DT, DT, DT, DT]
    ]
)
fitLine = package.Method(
    "fitLine", namespace = "", options = [allocate]
)
# minAreaRect: minimum-area rotated rectangle around a point set
rect = package.MatrixArgument(
    "rect", "Rectangle", cvtype.RotatedRect(), datatype.Float32Matrix(),
    cols = 5, rows = 1,
    visualization = datatype.Visualization.ROTATED_RECTANGLE
)
points = package.MatrixArgument(
    "points", "Point set", cvtype.Mat(channels = 2), datatype.Any32BitMatrix(),
    cols = 2, visualization = datatype.Visualization.POINT
)
allocate = package.Option(
    "allocate", "Allocate",
    [package.Input(points), package.ReturnValue(rect)],
    tests = [
        [points_i32, DT],
        [points_f32, DT]
    ]
)
minAreaRect = package.Method(
    "minAreaRect", options = [allocate]
)
# minEnclosingCircle: smallest circle containing a point set
# (circle row encodes center x, center y, radius)
circle = package.MatrixArgument(
    "circle", "Circle", cvtype.Mat(), datatype.Float32Matrix(),
    cols = 3, rows = 1, visualization = datatype.Visualization.CIRCLE
)
points = package.MatrixArgument(
    "points", "Point set", cvtype.Mat(channels = 2), datatype.Any32BitMatrix(),
    cols = 2, visualization = datatype.Visualization.POINT
)
allocate = package.Option(
    "allocate", "Allocate",
    [package.Input(points), package.Allocation(circle)],
    tests = [
        [points_i32, DT],
        [points_f32, DT]
    ]
)
minEnclosingCircle = package.Method(
    "minEnclosingCircle", namespace = "", options = [allocate]
)
# Canny: edge detector with hysteresis thresholds
threshold1 = package.NumericParameter(
    "threshold1", "Threshold 1", cvtype.Float64(), datatype.Float64(),
    default = 64
)
threshold2 = package.NumericParameter(
    "threshold2", "Threshold 2", cvtype.Float64(), datatype.Float64(),
    default = 128
)
manual = package.Option(
    "manual", "Manual",
    [package.Input(srcImgMono, True), package.InputOutput(dstImg), threshold1,
     threshold2],
    tests = [
        [lenna_bw, memory, DT, DT],
        [lenna_bw, test.RefData(lenna_bw), 64, 128]
    ]
)
allocate = package.Option(
    "allocate", "Allocate",
    [package.Input(srcImgMono), package.Allocation(dstImg), threshold1,
     threshold2],
    tests = [
        [lenna_bw, DT, DT, DT]
    ]
)
inPlace = package.Option(
    "inPlace", "In place",
    [package.InputOutput(srcImgMono), package.RefInput(dstImg, srcImgMono), threshold1,
     threshold2],
    tests = [
        [lenna_bw, DT, DT, DT]
    ]
)
canny = package.Method(
    "Canny", options = [manual, allocate, inPlace]
)
# cornerHarris: Harris corner response map
manual = package.Option(
    "manual", "Manual",
    [package.Input(srcImgMono, False), package.Output(dstImgFloat32), blockSize,
     ksize, harrisK],
    tests = [
        [lenna_bw, bigMemory, DT, DT, DT],
    ]
)
allocate = package.Option(
    "allocate", "Allocate",
    [package.Input(srcImgMono), package.Allocation(dstImgFloat32), blockSize,
     ksize, harrisK],
    tests = [
        [lenna_bw, DT, DT, DT, DT]
    ]
)
cornerHarris = package.Method(
    "cornerHarris", options = [manual, allocate]
)
# cornerMinEigenVal: minimal eigenvalue of the gradient covariance matrix
# NOTE(review): both test rows below list 5 entries while the options take
# only 4 arguments (no harrisK here, unlike cornerHarris) — confirm against
# the test harness whether the extra DT is ignored or a copy-paste slip.
manual = package.Option(
    "manual", "Manual",
    [package.Input(srcImgMono, False), package.Output(dstImgFloat32), blockSize,
     ksize],
    tests = [
        [lenna_bw, bigMemory, DT, DT, DT],
    ]
)
allocate = package.Option(
    "allocate", "Allocate",
    [package.Input(srcImgMono), package.Allocation(dstImgFloat32), blockSize,
     ksize],
    tests = [
        [lenna_bw, DT, DT, DT, DT]
    ]
)
cornerMinEigenVal = package.Method(
    "cornerMinEigenVal", options = [manual, allocate]
)
# cornerSubPix: refine corner locations to sub-pixel accuracy (in place)
# The -1/-1 term criteria delegate the iteration limits to OpenCV defaults.
defaultTermCriteria = package.Constant(
    "cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, -1, -1)"
)
inPlace = package.Option(
    "inPlace", "In place",
    [package.Input(srcImgMono), package.InputOutput(pointMatrix),
     package.Size(winSizeX, winSizeY), defaultSize, defaultTermCriteria],
    tests = [
        [cornerImage, cornerCoordinates, (DT, DT)]
    ]
)
cornerSubPix = package.Method(
    "cornerSubPix", options = [inPlace]
)
# goodFeaturesToTrack: strong-corner detection (Shi-Tomasi or Harris)
useHarrisDetector = package.Parameter(
    "useHarrisDetector", "Use Harris detector", cvtype.Bool(), datatype.Bool(),
    default = False
)
maxCorners = package.NumericParameter(
    "maxCorners", "Maximum number of corners", cvtype.Int(), datatype.UInt32(),
    default = 10
)
qualityLevel = package.NumericParameter(
    "qualityLevel", "Minimal accepted quality",
    cvtype.Float64(), datatype.Float64(), default = 0.01
)
minDistance = package.NumericParameter(
    "minDistance", "Minimal distance between corners",
    cvtype.Float64(), datatype.Float64(), default = 1.0
)
allocate = package.Option(
    "allocate", "Allocate",
    [package.Input(srcImgMono), package.Allocation(pointMatrix), maxCorners,
     qualityLevel, minDistance, noArray, blockSize, useHarrisDetector, harrisK],
    tests = [
        [cornerImage, DT, DT, DT, DT, DT, DT]
    ]
)
goodFeaturesToTrack = package.Method(
    "goodFeaturesToTrack", options = [allocate]
)
# HoughLinesP: probabilistic Hough transform for line segments
dstMatrixLineSegments = package.MatrixArgument(
    "dst", "Destination", cvtype.Mat(), datatype.Matrix(), cols = 4,
    visualization = datatype.Visualization.LINE_SEGMENT
)
rho = package.NumericParameter(
    "rho", "Distance resolution", cvtype.Float64(), datatype.Float64(),
    default = 1.0
)
theta = package.NumericParameter(
    "theta", "Angle resolution", cvtype.Float64(), datatype.Float64(),
    default = math.pi / 180
)
# Flatten the 1 x N 4-channel result into an N x 4 single-channel matrix.
lineSegmentsPostCall = document.Document(
    "dstCvData = dstCvData.reshape(1, dstCvData.cols);"
)
allocate = package.Option(
    "allocate", "Allocate",
    [package.Input(srcImgMono), package.Allocation(dstMatrixLineSegments), rho, theta,
     accumulatorThreshold, minLineLength, maxLineGap],
    tests = [
        [edges, DT, DT, DT, DT, DT, DT]
    ],
    postCall = lineSegmentsPostCall
)
houghLinesP = package.Method(
    "HoughLinesP", options = [allocate]
)
# preCornerDetect: corner-likelihood map for later local-maximum search
descriptions = [
    package.EnumDescription("BORDER_DEFAULT", "Default"),
    package.EnumDescription("BORDER_CONSTANT", "Constant"),
    package.EnumDescription("BORDER_REFLECT", "Reflect"),
    package.EnumDescription("BORDER_REPLICATE", "Replicate"),
]
borderType = package.EnumParameter(
    "borderType", "Border type", descriptions = descriptions,
    default = "BORDER_DEFAULT"
)
manual = package.Option(
    "manual", "Manual",
    [package.Input(srcImgMono8bit), package.Output(dstImgFloat32), sobelKsize,
     borderType],
    tests = [
        [lenna_bw, bigMemory, DT, DT]
    ]
)
allocate = package.Option(
    "allocate", "Allocate",
    [package.Input(srcImgMono8bit), package.Allocation(dstImgFloat32),
     sobelKsize, borderType],
    tests = [
        [lenna_bw, DT, 5, 2]
    ]
)
preCornerDetect = package.Method(
    "preCornerDetect", options = [manual, allocate]
)
# ExtractRectangle: custom (non-OpenCV, namespace "") operator that crops a
# rotated rectangle out of the source image
rect = package.MatrixArgument(
    "rect", "Rectangle", cvtype.RotatedRect(), datatype.Float32Matrix(),
    cols = 5, rows = 1, visualization = datatype.Visualization.ROTATED_RECTANGLE
)
allocate = package.Option(
    "allocate", "Allocate",
    [package.Input(srcImg), package.Input(rect), package.Allocation(dstImg)],
    tests = [
        [lenna, rotated_rect, DT],
        [lenna, rotated_rect_top_right, DT],
        [lenna, rotated_rect_bottom_left, DT]
    ]
)
extractRectangle = package.Method(
    "extractRectangle", namespace = "", options = [allocate]
)
# Assemble the cvimgproc package description: every generated method,
# the hand-written wrapper functions, and the data files the tests need.
imgproc = package.Package(
    "cvimgproc", 0, 1, 0,
    methods = [
        bilateralFilter,
        blur,
        boxFilter,
        dilate,
        erode,
        GaussianBlur,
        medianBlur,
        morphologyEx,
        laplacian,
        pyrDown,
        pyrUp,
        scharr,
        sobel,
        resize,
        adaptiveThreshold,
        threshold,
        warpAffine,
        warpPerspective,
        undistort,
        undistortPoints,
        distanceTransform,
        floodFill,
        integral,
        calcHist,
        equalizeHist,
        findContours,
        drawContours,
        approxPolyDP,
        boundingRect,
        contourArea,
        convexHull,
        fitEllipse,
        fitLine,
        minAreaRect,
        minEnclosingCircle,
        canny,
        cornerHarris,
        cornerMinEigenVal,
        cornerSubPix,
        goodFeaturesToTrack,
        houghLinesP,
        preCornerDetect,
        extractRectangle
    ],
    functions = [
        calcHistWrapper,
        minEnclosingCircleWrapper,
        fitLineWrapper,
        extractRectangleWrapper
    ],
    testFiles = [
        "lenna.jpg",
        "circle.png",
        "affine.npy",
        "perspective.npy",
        "camera_matrix.npy",
        "dist_coeffs.npy",
        "edges.png",
        "contours.png",
        "corners.png",
        "corners.npy",
        "contour_1.npy",
        "contour_2.npy",
        "contour_f64.npy",
        "contour_f32.npy",
        "non_convex_f32.npy",
        "points_i32.npy",
        "points_f32.npy",
        "points_f64.npy",
        "rotated_rect.npy",
        "rotated_rect_top_right.npy",
        "rotated_rect_bottom_left.npy"
    ]
)
package = imgproc
# CLI entry point: with method names as arguments, regenerate only those
# methods; without arguments, regenerate the whole package.
if __name__ == '__main__':
    if len(sys.argv) > 1:
        for arg in sys.argv[1:]:
            generator.generateMethodFiles(package, globals()[arg])
    else:
        generator.generatePackageFiles(package)
|
uboot/stromx-opencv
|
opencv/cvimgproc.py
|
cvimgproc.py
|
py
| 50,787 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34711984830
|
# Coding Math Episode 2
# Display a sine wave
import pygame
import math
import numpy as np

pygame.init()
RED = pygame.color.THECOLORS['red']
screen = pygame.display.set_mode((800, 600))
screen_rect = screen.get_rect()
print(f"Size of the screen ({screen_rect.width}, {screen_rect.height})")
screen_fonts = pygame.font.SysFont("monospace", 12)
label = screen_fonts.render("Press key up or down to change the period...",
                            1, (255,255,0))
pygame.display.set_caption("Episode 2")

running = True
scale = 200  # multiplies both the x stretch and the wave amplitude
# Sample the angle domain [0, 4*pi) in 0.01-radian steps.
sample_angles = np.arange(0.0, math.pi * 4, 0.01)
while running:
    pygame.time.delay(100)
    for event in pygame.event.get():
        wants_quit = event.type == pygame.QUIT
        pressed_escape = (event.type == pygame.KEYDOWN
                          and event.key == pygame.K_ESCAPE)
        if wants_quit or pressed_escape:
            running = False
        elif event.type == pygame.KEYDOWN:
            if event.key == pygame.K_UP:
                scale += 5
            elif event.key == pygame.K_DOWN:
                scale -= 5
    screen.fill((0,0,0))
    # Plot each sample as a tiny 2x2 outlined rectangle; flip y so the
    # wave is centred on the middle of the window.
    for angle in sample_angles:
        px = angle * scale
        py = screen_rect.height/2 - math.sin(angle) * scale
        pygame.draw.rect(screen, RED, (px, py, 2, 2), 1)
    screen.blit(label, ((screen_rect.width - label.get_rect().width) // 2,
                        (screen_rect.height - 20)))
    pygame.display.update()
pygame.quit()
|
piquesel/coding-math
|
ep2.py
|
ep2.py
|
py
| 1,397 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5671705163
|
import random
import uuid
import pytest
from aws.src.database.domain.dynamo_domain_objects import Tenure, HouseholdMember, TenuredAsset, Asset, AssetTenure, \
Patch, Person, PersonTenure
def test_generates_tenure(tenure_dict: dict):
    """Tenure.from_data should map the raw fixture dict onto a Tenure object."""
    tenure = Tenure.from_data(tenure_dict)
    assert isinstance(tenure, Tenure)
    # Bug fix: the original compared ids against a freshly generated
    # str(uuid.uuid4()), which can never equal the fixture's stored ids.
    # Compare against the fixture values instead, as the sibling tests do.
    assert tenure.id == tenure_dict.get('id')
    assert isinstance(tenure.tenuredAsset, TenuredAsset)
    assert tenure.tenuredAsset.id == tenure_dict.get('tenuredAsset').get('id')
    assert isinstance(tenure.householdMembers[0], HouseholdMember)
    assert tenure.householdMembers[0].fullName == \
        tenure_dict.get('householdMembers')[0].get('fullName')
def test_generates_asset(asset_dict: dict):
    """Asset.from_data should map the raw fixture dict onto an Asset object."""
    asset = Asset.from_data(asset_dict)
    assert isinstance(asset, Asset)
    assert asset.id == asset_dict.get('id')
    expected_line1 = asset_dict.get('assetAddress').get('addressLine1')
    assert asset.assetAddress.get('addressLine1') == expected_line1
    assert isinstance(asset.tenure, AssetTenure)
    assert asset.tenure.id == asset_dict.get('tenure').get('id')
    first_patch = asset.patches[0]
    assert isinstance(first_patch, Patch)
    assert first_patch.id == asset_dict.get('patches')[0].get('id')
def test_generates_person(person_dict: dict):
    """Person.from_data should map the raw fixture dict onto a Person object."""
    person = Person.from_data(person_dict)
    assert isinstance(person, Person)
    assert person.id == person_dict.get('id')
    first_tenure = person.tenures[0]
    assert isinstance(first_tenure, PersonTenure)
    assert first_tenure.id == person_dict.get('tenures')[0].get('id')
@pytest.fixture
def tenure_dict():
    """Raw tenure record as stored in DynamoDB, with randomised ids/references."""
    return {
        "id": str(uuid.uuid4()),
        "charges": {
            "billingFrequency": "Weekly",
            "combinedRentCharges": 0,
            "combinedServiceCharges": 0,
            "currentBalance": 3019.14,
            "originalRentCharge": 0,
            "originalServiceCharge": 0,
            "otherCharges": 0,
            "rent": 0,
            "serviceCharge": 0,
            "tenancyInsuranceCharge": 0
        },
        "endOfTenureDate": "2017-11-06",
        "evictionDate": "1900-01-01",
        "householdMembers": [
            {
                "id": str(uuid.uuid4()),
                "dateOfBirth": "1066-07-29",
                "fullName": "FAKE_First FAKE_Last",
                "isResponsible": True,
                "personTenureType": "Tenant",
                "type": "person"
            }
        ],
        "informHousingBenefitsForChanges": False,
        "isMutualExchange": False,
        "isSublet": False,
        "legacyReferences": [
            {
                "name": "uh_tag_ref",
                # random 8-digit tag reference with the usual "/01" suffix
                "value": f"{random.randint(10 ** 7, 10 ** 8 - 1)}/01"
            },
            {
                "name": "u_saff_tenancy",
                "value": ""
            }
        ],
        "notices": [
            {
                "effectiveDate": "1900-01-01",
                "endDate": None,
                "expiryDate": "1900-01-01",
                "servedDate": "1900-01-01",
                "type": ""
            }
        ],
        "paymentReference": str(random.randint(10 ** 10, 10 ** 11 - 1)),
        "potentialEndDate": "1900-01-01",
        "startOfTenureDate": "2017-05-30",
        "subletEndDate": "1900-01-01",
        "successionDate": "1900-01-01",
        "tenuredAsset": {
            "id": str(uuid.uuid4()),
            "fullAddress": "THE HACKNEY SERVICE CENTRE 1 Hackney Service Centre E8 1DY",
            "propertyReference": str(random.randint(10 ** 7, 10 ** 8 - 1)),
            "type": "Dwelling",
            "uprn": str(random.randint(10 ** 12, 10 ** 13 - 1))
        },
        "tenureType": {
            "code": "THO",
            "description": "Temp Hostel"
        },
        "terminated": {
            "isTerminated": True,
            "reasonForTermination": ""
        }
    }
@pytest.fixture
def asset_dict():
    """Raw asset record as stored in DynamoDB, with randomised ids/references."""
    return {
        "id": str(uuid.uuid4()),
        "assetAddress": {
            "addressLine1": "FLAT 10 220 TEST ROAD",
            "addressLine2": "HACKNEY",
            "addressLine3": "LONDON",
            "postCode": "E8 1AA",
            "uprn": str(random.randint(10 ** 12, 10 ** 13 - 1))
        },
        "assetCharacteristics": {
            "numberOfBedrooms": 1,
            "numberOfLifts": 0,
            "numberOfLivingRooms": 0,
            "yearConstructed": "0"
        },
        "assetId": str(random.randint(10 ** 12, 10 ** 13 - 1)),
        "assetLocation": {
            "parentAssets": [
                {
                    "id": str(uuid.uuid4()),
                    "name": "Hackney Homes",
                    "type": "NA"
                }
            ],
            "totalBlockFloors": 0
        },
        "assetManagement": {
            "isCouncilProperty": False,
            "isNoRepairsMaintenance": False,
            "isTMOManaged": False,
            "managingOrganisation": "London Borough of Hackney",
            "managingOrganisationId": str(uuid.uuid4()),
            "owner": "KUS",
            "propertyOccupiedStatus": "VR"
        },
        "assetType": "Dwelling",
        "isActive": 0,
        "parentAssetIds": str(uuid.uuid4()),
        "patches": [
            {
                "id": str(uuid.uuid4()),
                "domain": "MMH",
                "name": "SN4",
                "parentId": str(uuid.uuid4()),
                "patchType": "patch",
                "responsibleEntities": [
                    {
                        "id": str(uuid.uuid4()),
                        "name": "Fake_First Fake_Last",
                        "responsibleType": "HousingOfficer"
                    }
                ],
                "versionNumber": None
            }
        ],
        "rootAsset": "ROOT",
        "tenure": {
            "id": str(uuid.uuid4()),
            "endOfTenureDate": "2050-12-12T00:00:00Z",
            "paymentReference": str(random.randint(10 ** 12, 10 ** 13 - 1)),
            "startOfTenureDate": "2030-12-12T00:00:00Z",
            "type": "Secure"
        },
        "versionNumber": 3
    }
@pytest.fixture
def person_dict():
    """Raw person record with three tenures, randomised ids/references."""
    return {
        "id": str(uuid.uuid4()),
        "dateOfBirth": "1962-04-18T00:00:00.0000000Z",
        "firstName": "FAKE_First",
        "lastModified": "2022-09-06T06:31:03.5321566Z",
        "links": [
        ],
        "personTypes": [
            "Tenant",
            "HouseholdMember"
        ],
        "preferredFirstName": "FAKE_First",
        "preferredSurname": "FAKE_Last",
        "preferredTitle": "Reverend",
        "surname": "FAKE_Last",
        "tenures": [
            {
                "id": str(uuid.uuid4()),
                "assetFullAddress": "2 Fake Road, N16 1AA",
                "assetId": str(uuid.uuid4()),
                "endDate": None,
                "paymentReference": str(random.randint(10 ** 10, 10 ** 11 - 1)),
                "propertyReference": str(random.randint(10 ** 7, 10 ** 8 - 1)),
                "startDate": "2013-12-23",
                "type": "Secure",
                "uprn": "100021063882"
            },
            {
                "id": str(uuid.uuid4()),
                "assetFullAddress": "75 Fake Road, E5 1AA",
                "assetId": str(uuid.uuid4()),
                "endDate": "2012-10-26",
                "paymentReference": str(random.randint(10 ** 10, 10 ** 11 - 1)),
                "propertyReference": str(random.randint(10 ** 7, 10 ** 8 - 1)),
                "startDate": "2012-04-19",
                "type": "Temp Annex",
                "uprn": str(random.randint(10 ** 12, 10 ** 13 - 1))
            },
            {
                "id": str(uuid.uuid4()),
                "assetFullAddress": "15 Fake Road N16 1AA",
                "assetId": str(uuid.uuid4()),
                "endDate": None,
                "paymentReference": str(random.randint(10 ** 10, 10 ** 11 - 1)),
                "propertyReference": str(random.randint(10 ** 7, 10 ** 8 - 1)),
                "startDate": "1997-07-24T00:00:00.0000000Z",
                "type": "Leasehold (RTB)",
                "uprn": str(random.randint(10 ** 12, 10 ** 13 - 1))
            }
        ],
        "title": "Reverend",
        "versionNumber": 1
    }
|
LBHackney-IT/mtfh-scripts
|
aws/tests/domain/test_dynamo_domain_objects.py
|
test_dynamo_domain_objects.py
|
py
| 8,127 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22561698639
|
import os
from bazelrio_gentool.utils import (
TEMPLATE_BASE_DIR,
render_templates,
)
from bazelrio_gentool.dependency_helpers import BaseDependencyWriterHelper
def write_shared_root_files(
    module_directory,
    group,
    include_raspi_compiler=False,
    test_macos=True,
    include_windows_arm_compiler=True,
):
    """Render the shared top-level repository files into *module_directory*.

    Adds the auto-update workflow only when the module ships a
    ``generate/auto_update.py`` script.
    """
    workflow_templates = [
        ".github/workflows/build.yml",
        ".github/workflows/lint.yml",
        ".github/workflows/publish.yml",
    ]
    repo_templates = [
        # "generate/WORKSPACE",
        ".bazelignore",
        ".bazelrc-buildbuddy",
        ".bazelversion",
        ".bazelrc",
        ".gitignore",
        "BUILD.bazel",
        "README.md",
        "WORKSPACE.bzlmod",
        ".styleguide",
        ".styleguide-license",
    ]
    template_files = workflow_templates + repo_templates
    auto_update_script = os.path.join(module_directory, "generate", "auto_update.py")
    if os.path.exists(auto_update_script):
        template_files.append(".github/workflows/auto_update.yml")
    render_templates(
        template_files,
        module_directory,
        os.path.join(TEMPLATE_BASE_DIR, "shared"),
        group=group,
        include_raspi_compiler=include_raspi_compiler,
        include_windows_arm_compiler=include_windows_arm_compiler,
        test_macos=test_macos,
    )
def write_shared_test_files(module_directory, group):
    """Render the shared Bazel config files into *module_directory*/tests."""
    test_templates = [
        ".bazelrc-buildbuddy",
        ".bazelversion",
        ".bazelrc",
        "WORKSPACE.bzlmod",
    ]
    tests_directory = os.path.join(module_directory, "tests")
    shared_template_dir = os.path.join(TEMPLATE_BASE_DIR, "shared")
    render_templates(
        test_templates,
        tests_directory,
        shared_template_dir,
        group=group,
    )
class BazelDependencySetting(BaseDependencyWriterHelper):
    """Dependency pin for a repository hosted under github.com/bazelbuild.

    Thin wrapper over BaseDependencyWriterHelper with the bazelbuild GitHub
    organisation pre-filled.  googletest is special-cased because it lives
    under github.com/google and uses a ``v``-prefixed release tag.
    """
    # Improvement: the large commented-out "long form" http_archive code path
    # was dead and has been removed; version control keeps the history.

    def __init__(
        self,
        repo_name,
        version,
        sha,
        needs_stripped_prefix=False,
        old_release_style=False,
    ):
        BaseDependencyWriterHelper.__init__(
            self,
            repo_name,
            version,
            sha,
            "https://github.com/bazelbuild",
            old_release_style=old_release_style,
            needs_stripped_prefix=needs_stripped_prefix,
        )

    def download_repository(self, indent_num, maybe=True):
        """Return the http_archive stanza that downloads this dependency.

        Args:
            indent_num: Indentation (spaces) for the generated stanza.
            maybe: Wrap the archive in ``maybe(...)`` so it is only fetched
                when not already defined in the workspace.
        """
        # googletest is not part of the bazelbuild organisation, so its
        # archive URL must be spelled out explicitly.
        if self.repo_name == "googletest":
            return f"""http_archive(
    name = "googletest",
    sha256 = "{self.sha}",
    strip_prefix = "googletest-{self.version}",
    urls = ["https://github.com/google/googletest/archive/refs/tags/v{self.version}.tar.gz"],
)"""
        return self.http_archive(indent_num=indent_num, maybe=maybe, native=False)
def get_bazel_dependencies():
    """Return the pinned bazelbuild dependencies keyed by repository name."""
    pinned = {}

    def register(repo_name, sha="", **kwargs):
        # One BazelDependencySetting per repo, keyed by its name.
        pinned[repo_name] = BazelDependencySetting(repo_name, sha=sha, **kwargs)

    register(repo_name="platforms", version="0.0.7", sha="")
    register(
        repo_name="rules_python",
        version="0.24.0",
        sha="0a8003b044294d7840ac7d9d73eef05d6ceb682d7516781a4ec62eeb34702578",
        needs_stripped_prefix=True,
    )
    register(
        repo_name="rules_java",
        version="6.4.0",
        sha="27abf8d2b26f4572ba4112ae8eb4439513615018e03a299f85a8460f6992f6a3",
    )
    register(
        repo_name="rules_jvm_external",
        version="5.3",
        sha="d31e369b854322ca5098ea12c69d7175ded971435e55c18dd9dd5f29cc5249ac",
        needs_stripped_prefix=True,
    )
    register(repo_name="rules_cc", version="0.0.8", sha="")
    register(
        repo_name="googletest",
        version="1.14.0",
        sha="8ad598c73ad796e0d8280b082cebd82a630d73e73cd3c70057938a6501bba5d7",
    )
    register(
        repo_name="rules_proto",
        version="5.3.0-21.7",
        sha="dc3fb206a2cb3441b485eb1e423165b231235a1ea9b031b4433cf7bc1fa460dd",
        old_release_style=True,
        needs_stripped_prefix=True,
    )
    register(
        repo_name="bazel_skylib",
        version="1.4.2",
        sha="66ffd9315665bfaafc96b52278f57c7e2dd09f5ede279ea6d39b2be471e7e3aa",
    )
    return pinned
|
bzlmodRio/gentool
|
bazelrio_gentool/generate_shared_files.py
|
generate_shared_files.py
|
py
| 4,947 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70281053308
|
from typing import Dict, Any, Union, Optional, List
import torch
import numpy as np
from overrides import overrides
from transformers import ViltProcessor
from PIL import Image
from allennlp.data.fields.field import DataArray
from allennlp.data.fields.metadata_field import MetadataField
class ViltField(MetadataField):
    """
    Metadata field holding a ``{'text': ..., 'image': ...}`` pair for ViLT.

    Each instance's metadata carries a raw text string and an image file
    path.  Batching (``batch_tensors``) opens every image, runs the shared
    ``ViltProcessor`` over the whole batch with padding, and returns the
    processor's tensor dict, optionally casting float tensors to half
    precision.

    NOTE(review): the previous docstring described a generic padded tensor
    field, which did not match this implementation.
    """
    __slots__ = ["metadata", "vilt_processor", "vilt_half_precision"]
    def __init__(self, metadata: Any,
                 vilt_processor: ViltProcessor,
                 vilt_half_precision: bool = True) -> None:
        super(ViltField, self).__init__(metadata)
        self.metadata = metadata
        self.vilt_processor = vilt_processor
        self.vilt_half_precision = vilt_half_precision
    @overrides
    def batch_tensors(self, tensor_list: List[DataArray]) -> List[DataArray]:  # type: ignore
        # NOTE(review): despite the annotation this returns a dict of batched
        # tensors, not a list — confirm whether the base-class contract allows
        # that before tightening the annotation.
        texts = []
        images = []
        for tensor in tensor_list:
            # Each element is this field's metadata dict: raw text + image path.
            text = tensor['text']
            texts.append(text)
            image = tensor['image']
            image_data = Image.open(image).convert("RGB")
            images.append(image_data)
        # Tokenise + image-process the whole batch at once, padding text.
        processed = self.vilt_processor(text = texts,
                                        images=images,
                                        return_tensors='pt',
                                        padding=True)
        to_ret = {}
        for k, v in processed.items():
            # Downcast float tensors to fp16 when requested.
            # NOTE(review): isinstance checks against torch.cuda.FloatTensor
            # rely on deprecated tensor types — verify on current torch.
            if self.vilt_half_precision and (isinstance(v, torch.FloatTensor) or isinstance(v, torch.cuda.FloatTensor)):
                processed[k] = v.half()
            to_ret[k] = processed[k]
        return to_ret
|
esteng/ambiguous_vqa
|
models/allennlp/data/fields/vilt_field.py
|
vilt_field.py
|
py
| 1,820 |
python
|
en
|
code
| 5 |
github-code
|
6
|
72014782588
|
class Graph:
    """Undirected graph backed by an adjacency-list dictionary.

    ``self.dict`` maps each vertex to the list of its neighbours.  Mutating
    methods return True on success and False when the operation does not
    apply (vertex missing / already present).
    """

    def __init__(self):
        # vertex -> list of adjacent vertices
        self.dict = {}

    def addVertex(self, vertex):
        """Add *vertex* with no edges; return False if it already exists."""
        if vertex not in self.dict:
            self.dict[vertex] = []
            return True
        return False

    def BFS(self, vertex):
        """Print vertices in breadth-first order starting from *vertex*."""
        from collections import deque
        queue = deque([vertex])  # deque gives O(1) popleft vs list.pop(0)
        visited = {vertex}       # set membership is O(1) vs list's O(n)
        while queue:
            current = queue.popleft()
            print(current)
            for adjacentVertex in self.dict[current]:
                if adjacentVertex not in visited:
                    visited.add(adjacentVertex)
                    queue.append(adjacentVertex)

    def DFS(self, vertex):
        """Print vertices in depth-first order starting from *vertex*.

        Neighbours are marked visited when pushed, matching the original
        traversal order exactly.
        """
        stack = [vertex]
        visited = {vertex}
        while stack:
            current = stack.pop()
            print(current)
            for adjacentVertex in self.dict[current]:
                if adjacentVertex not in visited:
                    stack.append(adjacentVertex)
                    visited.add(adjacentVertex)

    def removeVertex(self, vertex):
        """Remove *vertex* and every edge touching it; False if absent."""
        if vertex in self.dict:
            for neighbour in self.dict[vertex]:
                self.dict[neighbour].remove(vertex)
            del self.dict[vertex]
            return True
        return False

    def print_graph(self):
        """Print one ``vertex : neighbours`` line per vertex."""
        for vertex in self.dict:
            print(vertex, ":", self.dict[vertex])

    def addEdge(self, vertex1, vertex2):
        """Add an undirected edge; return False unless both vertices exist.

        Bug fix: the original condition ``vertex1 and vertex2 in self.dict``
        only checked membership of *vertex2*, so an unknown *vertex1* raised
        KeyError instead of returning False.
        """
        if vertex1 in self.dict and vertex2 in self.dict:
            if vertex2 not in self.dict[vertex1]:
                self.dict[vertex1].append(vertex2)
            if vertex1 not in self.dict[vertex2]:
                self.dict[vertex2].append(vertex1)
            return True
        return False

    def removeEdge(self, vertex1, vertex2):
        """Remove the edge between the vertices; False unless both exist.

        Same membership-check bug fix as addEdge.  Each direction is removed
        independently so a half-present edge is still fully cleaned up.
        """
        if vertex1 in self.dict and vertex2 in self.dict:
            try:
                self.dict[vertex1].remove(vertex2)
            except ValueError:
                pass
            try:
                self.dict[vertex2].remove(vertex1)
            except ValueError:
                pass
            return True
        return False
# Demo: build a six-vertex undirected graph, print its adjacency lists,
# then print a depth-first traversal starting from "a".
graph = Graph()
graph.addVertex("a")
graph.addVertex("b")
graph.addVertex("c")
graph.addVertex("d")
graph.addVertex("e")
graph.addVertex("f")
graph.addEdge("a", "b")
graph.addEdge("a", "c")
graph.addEdge("b", "d")
graph.addEdge("b", "e")
graph.addEdge("c", "e")
graph.addEdge("d", "e")
graph.addEdge("d", "f")
graph.addEdge("e", "f")
# # graph.removeEdge("a", "c")
# graph.removeVertex("c")
graph.print_graph()
# graph.BFS("a")
graph.DFS("a")
|
jetunp/Practice
|
graph.py
|
graph.py
|
py
| 2,374 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19700262291
|
'''
NOTAS
":.0f" continua sendo valor float, apesar de mostrar um valor inteiro.
A funcionalidade do int() e do trunc() é a mesma.
Para arredondamento preciso de acordo com as regras matemáticas, usar round().
'''
def Inteiro():
    """Read a float from the user and show it converted with int()."""
    value = float(input('Digite um número quebrado: '))
    as_int = int(value)
    print('O valor transformado em inteiro é {}'.format(as_int))
    print('O tipo do número é {}'.format(type(as_int)))
def Quebrar():
    """Read a float and display it with zero decimal places (type stays float)."""
    value = float(input('Digite um número quebrado: '))
    print('O valor quebrado como inteiro é {:.0f}'.format(value))
    print('O tipo do número é {}'.format(type(value)))
def Truncar():
    """Read a float and display its integer part truncated towards zero."""
    from math import trunc
    value = float(input('Digite um número quebrado: '))
    truncated = trunc(value)
    print('O valor truncado é {}'.format(truncated))
    print('O tipo do número é {}'.format(type(truncated)))
def Arredondar():
    """Read a float and display it rounded with round() (half-to-even)."""
    value = float(input('Digite um número quebrado: '))
    rounded = round(value)
    print('O valor arredondado é {}.'.format(rounded))
    print('O tipo do número é {}'.format(type(rounded)))
def Menu():
    """Show the option menu, dispatch to the chosen routine, retry on bad input."""
    opcoes = {0: Inteiro, 1: Quebrar, 2: Truncar, 3: Arredondar}
    Escolha = int(input('''Digite a função que você quer rodar
0 = Inteiro
1 = Quebrar
2 = Truncar
3 = Arredondar
Sua escolha é: '''))
    acao = opcoes.get(Escolha)
    if acao is not None:
        acao()
    else:
        # Invalid choice: warn and show the menu again.
        print('Selecione uma opção válida')
        Menu()
# Script entry point: start the interactive menu.
Menu()
|
PR1905/Estudos-Python
|
desafio016 - Arredondamento e Menus.py
|
desafio016 - Arredondamento e Menus.py
|
py
| 1,380 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
26095879865
|
import os
import sys
import torch
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
import Optimizer
# Shared module-level training state.
writer = SummaryWriter('./runs')  # TensorBoard event writer
grad_clip = 1.0  # clip gradients at this absolute value during backprop
save_prefix=''  # checkpoint path; assigned by train(), read by save()
def clip_gradient(optimizer, grad_clip):
    """Clamp every parameter gradient in *optimizer* to [-grad_clip, grad_clip].

    Clipping the gradients computed during backpropagation guards against
    exploding gradients.

    :param optimizer: optimizer whose parameters' gradients are clipped
    :param grad_clip: absolute clip value
    """
    for param_group in optimizer.param_groups:
        for parameter in param_group['params']:
            grad = parameter.grad
            if grad is not None:
                grad.data.clamp_(-grad_clip, grad_clip)
def train(train_iter, dev_iter, model, args):
    """Train *model* on train_iter, evaluating on dev_iter every test_interval steps.

    Resumes model and optimizer from args.save_dir/args.snapshot when that
    checkpoint exists; otherwise builds a fresh Adam-based Optimizer wrapper.
    The best checkpoint (by dev accuracy) is written back to the same path.

    NOTE(review): if args.snapshot is falsy, ``optimizer`` is never assigned
    and the first optimizer.zero_grad() raises NameError — confirm callers
    always pass a snapshot name.
    """
    # global args
    global save_prefix
    save_dir = args.save_dir
    if not os.path.isdir(save_dir):
        os.makedirs(save_dir)
    filename = args.snapshot
    # Module-global checkpoint path, also read by save().
    save_prefix = os.path.join(save_dir, filename)
    if args.snapshot:
        snapshot = os.path.join(args.save_dir, args.snapshot)
        if os.path.exists(snapshot):
            # Resume: restore both model and optimizer from the checkpoint.
            print('\nLoading model from {}...\n'.format(snapshot))
            model = torch.load(snapshot)['model']
            optimizer=torch.load(snapshot)['optimizer']
        else:
            optimizer = Optimizer.Optimizer(
                torch.optim.Adam(model.parameters(), betas=(0.9, 0.98), eps=1e-09))
    if args.cuda:
        model.cuda()
    # optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    steps = 0
    best_acc = 0
    last_step = 0
    model.train()
    for epoch in range(1, args.epochs + 1):
        for batch in train_iter:
            feature, target = batch.text, batch.label
            # Transpose to batch-first and shift labels to be 0-based.
            feature.t_(), target.sub_(1)
            # w.add_graph(model, (feature,))
            if args.cuda:
                feature, target = feature.cuda(), target.cuda()
            optimizer.zero_grad()
            logits = model(feature)
            loss = F.cross_entropy(logits, target)
            loss.backward()
            # Clip gradients
            clip_gradient(optimizer.optimizer, grad_clip)
            optimizer.step()
            steps += 1
            if steps % args.log_interval == 0:
                corrects = (torch.max(logits, 1)[1].view(target.size()).data == target.data).sum()
                train_acc = corrects / batch.batch_size
                sys.stdout.write(
                    '\rBatch[{}] - loss: {:.6f} acc: {:.4f}({}/{})'.format(steps,
                                                                           loss.item(),
                                                                           train_acc,
                                                                           corrects,
                                                                           batch.batch_size))
                writer.add_scalar('Batch/train_loss', loss.item() ,optimizer.step_num)
                writer.add_scalar('Batch/learning_rate', optimizer.lr, optimizer.step_num)
            if steps % args.test_interval == 0:
                dev_acc = eval(dev_iter, model, args,optimizer)
                if dev_acc > best_acc:
                    best_acc = dev_acc
                    last_step = steps
                    if args.save_best:
                        print('Saving best model, acc: {:.4f}\n'.format(best_acc))
                        save(model, best_acc,optimizer)
                        writer.add_scalar('best/acc', best_acc, optimizer.step_num)
                elif steps - last_step >= args.early_stopping:
                    print('\nearly stop by {} steps, acc: {:.4f}'.format(args.early_stopping, best_acc))
                    raise KeyboardInterrupt
                else:
                    # When dev accuracy neither improves nor triggers early
                    # stopping, average ONLY the final fc layer with the
                    # weights stored in the checkpoint at save_prefix.
                    # NOTE(review): this reloads the checkpoint from disk on
                    # every non-improving eval and touches just model.fc —
                    # confirm this partial weight averaging is intentional.
                    # print(type(model.fc.weight),type(torch.load(save_prefix)['model'].fc.weight))
                    # print(torch.load(save_prefix)['model'].fc.weight==model.fc.weight)
                    w=model.fc.weight+ torch.load(save_prefix)['model'].fc.weight
                    # print('1')
                    b=model.fc.bias+ torch.load(save_prefix)['model'].fc.bias
                    model.fc.weight=torch.nn.Parameter(w/2)
                    model.fc.bias = torch.nn.Parameter(b / 2)
def eval(data_iter, model, args, optimizer):
    """Evaluate *model* on *data_iter* and return its accuracy.

    Also logs the average loss and current learning rate to TensorBoard
    (``optimizer`` is only used for its ``step_num`` / ``lr`` attributes).

    NOTE: this function shadows the ``eval`` builtin; the name is kept
    because train() calls it by this name.
    """
    model.eval()
    corrects, avg_loss = 0, 0
    # Improvement: gradients are not needed for evaluation; no_grad avoids
    # building the autograd graph and saves memory.
    with torch.no_grad():
        for batch in data_iter:
            feature, target = batch.text, batch.label
            # Transpose to batch-first and shift labels to be 0-based.
            feature.t_(), target.sub_(1)
            if args.cuda:
                feature, target = feature.cuda(), target.cuda()
            logits = model(feature)
            loss = F.cross_entropy(logits, target)
            avg_loss += loss.item()
            preds = torch.max(logits, 1)[1].view(target.size())
            corrects += (preds.data == target.data).sum()
    size = len(data_iter.dataset)
    avg_loss /= size
    accuracy = corrects / size
    print('\nEvaluation - loss: {:.6f} acc: {:.4f}({}/{}) \n'.format(avg_loss,
                                                                     accuracy,
                                                                     corrects,
                                                                     size))
    writer.add_scalar('Evaluation/train_loss', avg_loss, optimizer.step_num)
    writer.add_scalar('Evaluation/learning_rate', optimizer.lr, optimizer.step_num)
    # Bug fix: restore training mode so subsequent training steps keep
    # dropout/batch-norm active (the original left the model in eval mode
    # after each evaluation).
    model.train()
    return accuracy
def save(model, best_acc, optimizer):
    """Checkpoint model, optimizer, and best accuracy to the module-level save_prefix."""
    checkpoint = {
        'best_acc': best_acc,
        'model': model,
        'optimizer': optimizer,
    }
    torch.save(checkpoint, save_prefix)
|
dubochao/CNN-sentiment-analysis
|
train.py
|
train.py
|
py
| 5,496 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9369376357
|
import pyodbc

# Connect to a local SQL Server instance and dump the Customer table.
# SECURITY: credentials are hard-coded in source — move them to environment
# variables or a secrets store before sharing/committing this script.
# NOTE(review): Trusted_Connection=yes requests Windows authentication,
# which makes the UID/PWD pair redundant — confirm the intended auth mode.
cnxn = pyodbc.connect("DRIVER={ODBC Driver 17 for SQL Server};"
                      "Server=DESKTOP-0A2HT13;"
                      "Database=Databricks;"
                      "UID=prajwal;"
                      "PWD=Prajwal082;"
                      "Trusted_Connection=yes;")
cursor = cnxn.cursor()
cursor.execute('SELECT * FROM [dbo].[Customer]')
# Iterating the cursor yields one Row per result record.
for row in cursor:
    print('row = %r' % (row,))
# import pyodbc
# conn_str = pyodbc.connect(
#     'Driver={org.postgresql.Driver};'
#     'Server=localhost;'
#     'Port=5432;'
#     'Database=Test;'
#     'UID=postgres;'
#     'PWD=1234;'
# )
# conn = pyodbc.connect(conn_str, autocommit=True)  # Error occurs here
# cursor = cnxn.cursor()
# cursor.execute('select * from students')
# for row in cursor:
#     print('row = %r' % (row,))
|
Prajwal082/Main
|
postgres.py
|
postgres.py
|
py
| 814 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11415062176
|
"""
[
[
[
"M: How long have you been teaching in this middle school?",
"W: For ten years. To be frank, I'm tired of teaching the same textbook for so long though I do enjoy being a teacher. I'm considering trying something new."
],
[
{
"question": "What's the woman probably going to do?",
"choice": [
"To teach a different textbook.",
"To change her job.",
"To learn a different textbook."
],
"answer": "To change her job."
},
{
"question": "If the man and his wife go on the recommended package tour, how much should they pay?",
"choice": [
"$1,088.",
"$1,958.",
"$2,176."
],
"answer": "$1,958."
}
],
"14-349"
],
...
"""
import json
import argparse
from pathlib import Path
from typing import Dict, List, Mapping, Generator, Optional, Union
from copy import deepcopy
import itertools
import re
import logging
from .reader import DatasetReader
from .types import (Sample, SingleQuestionSample,
SingleQuestionSingleOptionSample, NLIWithOptionsSample,
PureNLISample)
from dataclasses import dataclass, asdict
logger = logging.getLogger(__name__)
class DreamReader(DatasetReader):
    """Reader for the DREAM dialogue-comprehension dataset.

    Converts raw DREAM JSON (a list of ``[dialogue, questions, id]`` triples,
    illustrated in the module docstring) into flat per-question samples.
    """

    def __init__(self,
                 input_type: str = 'DreamJSON',
                 output_type: str = 'SingleQuestionSample'):
        """Configure the reader.

        Args:
            input_type: Only ``'DreamJSON'`` is supported.
            output_type: Only ``'SingleQuestionSample'`` is supported
                (validated lazily in :meth:`read`).

        Raises:
            ValueError: If ``input_type`` is not ``'DreamJSON'``.
        """
        if input_type != 'DreamJSON':
            raise ValueError(f"{input_type} unsupported")
        self.input_type = input_type
        self.output_type = output_type
        # Collapses runs of underscores in fill-in-the-blank questions.
        self.fitb_pattern = re.compile(r'_+')

    def _read_data(self, path: Path) -> List:
        """Load and return the raw JSON payload (a list of sample triples)."""
        with open(path) as f:
            samples = json.load(f)
        return samples

    def read(self, path: Path,
             return_dict: bool = False) -> List[Union[Sample, Dict]]:
        """Read DREAM data from ``path`` into ``SingleQuestionSample`` objects.

        Args:
            path: Path to a DREAM JSON file.
            return_dict: If True, return plain dicts instead of dataclasses.

        Raises:
            ValueError: If the configured ``output_type`` is unsupported, or
                an answer string does not exactly match one of its choices.
        """

        def reader_func(p: Path) -> List[Dict]:
            # Name the positional fields of each raw [passage, questions, id] triple.
            samples = self._read_data(p)
            json_samples = []
            for s in samples:
                json_samples.append({
                    'passage': s[0],
                    'questions': s[1],
                    'id': s[2]
                })
            return json_samples

        if self.output_type == 'SingleQuestionSample':

            def sample_converter(x: Dict) -> Dict:
                # Join the dialogue turns into a single passage string.
                x['passage'] = ' '.join(x['passage'])
                # Normalise fill-in-the-blank markers to a single underscore.
                for q_n, q in enumerate(x['questions']):
                    x['questions'][q_n]['question'] = self.fitb_pattern.sub(
                        '_', x['questions'][q_n]['question'])
                # Replace each answer string with the index of the matching
                # choice; .index raises ValueError on an inexact match.
                for q_n, question in enumerate(x['questions']):
                    idx = question['choice'].index(question['answer'])
                    question['answer'] = idx
                return x

            def aggregate_converter(
                    x: List[Dict]) -> List[SingleQuestionSample]:
                # Flatten: one output sample per (passage, question) pair.
                all_res = []
                for s in x:
                    para = s['passage']
                    for q_n, q in enumerate(s['questions']):
                        all_res.append(
                            SingleQuestionSample(
                                id=s['id'] + f"_{q_n}",
                                question=q['question'],
                                article=para,
                                options=q['choice'],
                                answer=q['answer']))
                return all_res
        else:
            # Fixed typo in the error message ("outpu_type" -> "output_type").
            raise ValueError(f"output_type {self.output_type} not supported")
        input_samples = [sample_converter(s) for s in reader_func(path)]
        output_samples = aggregate_converter(input_samples)
        if return_dict:
            return [s.__dict__ for s in output_samples]
        else:
            return output_samples
|
nli-for-qa/conversion
|
qa2nli/qa_readers/dream.py
|
dream.py
|
py
| 4,251 |
python
|
en
|
code
| 1 |
github-code
|
6
|
22241072161
|
import torch
import math
import torch.nn as nn
import torch.nn.functional as F
from typing import List
class Convolution(nn.Module):
    """Two stacked 3x3 Conv-BN-ReLU layers; spatial size is preserved."""

    def __init__(self, in_ch, out_ch):
        super(Convolution, self).__init__()
        # Build the double conv block programmatically instead of listing
        # each layer literal-by-literal.
        layers = []
        for src, dst in ((in_ch, out_ch), (out_ch, out_ch)):
            layers.append(nn.Conv2d(src, dst, 3, 1, 1))
            layers.append(nn.BatchNorm2d(dst))
            layers.append(nn.ReLU(inplace=True))
        self.conv = nn.Sequential(*layers)

    def forward(self, input):
        """Apply the double convolution to a (B, C, H, W) tensor."""
        return self.conv(input)
class Curvature(torch.nn.Module):
    """Keep the top ``ratio`` fraction of channels ranked by curvature response.

    A fixed 3x3 curvature kernel is convolved over every channel; channels
    are ranked by the summed absolute response and the strongest are kept.
    """

    def __init__(self, ratio):
        super(Curvature, self).__init__()
        weights = torch.tensor([[[[-1/16, 5/16, -1/16], [5/16, -1, 5/16], [-1/16, 5/16, -1/16]]]])
        # Bug fix: the kernel was a Parameter forced onto CUDA at construction
        # time, which crashed on CPU-only machines and exposed a fixed filter
        # to the optimizer. A buffer is non-trainable and follows the module's
        # device (model.cuda()/.to() still works as before).
        self.register_buffer('weight', weights)
        self.ratio = ratio

    def forward(self, x):
        """Return the int(ratio*C) highest-curvature channels of ``x`` (B, C, H, W)."""
        B, C, H, W = x.size()
        x_origin = x
        # Fold channels into the batch so one single-channel conv scores each map.
        x = x.reshape(B * C, 1, H, W)
        out = F.conv2d(x, self.weight)
        out = torch.abs(out)
        # Per-channel curvature score: sum of absolute filter responses.
        p = torch.sum(out, dim=-1)
        p = torch.sum(p, dim=-1)
        p = p.reshape(B, C)
        _, index = torch.topk(p, int(self.ratio * C), dim=1)
        # Gather the winning channels per sample from the ORIGINAL tensor.
        selected = []
        for i in range(x_origin.shape[0]):
            selected.append(torch.index_select(x_origin[i], dim=0, index=index[i]).unsqueeze(0))
        selected = torch.cat(selected, dim=0)
        return selected
class Entropy_Hist(nn.Module):
    """Keep the top ``ratio`` fraction of channels ranked by 2-D histogram entropy."""

    def __init__(self, ratio, win_w=3, win_h=3):
        super(Entropy_Hist, self).__init__()
        self.win_w = win_w  # sliding-window width
        self.win_h = win_h  # sliding-window height
        self.ratio = ratio  # fraction of channels to keep

    def calcIJ_new(self, img_patch):
        """Encode each KxK patch as ``center_gray * 100 + mean_of_neighbours``.

        Requires an odd number of elements per patch so the center is unique;
        otherwise only prints a warning and returns None.
        """
        total_p = img_patch.shape[-1] * img_patch.shape[-2]
        if total_p % 2 != 0:
            tem = torch.flatten(img_patch, start_dim=-2, end_dim=-1)
            center_p = tem[:, :, :, int(total_p / 2)]
            mean_p = (torch.sum(tem, dim=-1) - center_p) / (total_p - 1)
            if torch.is_tensor(img_patch):
                return center_p * 100 + mean_p
            else:
                return (center_p, mean_p)
        else:
            print("modify patch size")

    def histc_fork(self, ij):
        """Histogram each of the B*C rows of ``ij`` using 16 parallel fork tasks.

        Bug fix: this method was defined without ``self`` but is always called
        as ``self.histc_fork(ij)``, which raised a TypeError at runtime.

        Note: requires ``ij.shape[0]`` to be divisible by 16.
        """
        BINS = 256
        B, C = ij.shape
        N = 16
        BB = B // N
        min_elem = ij.min()
        max_elem = ij.max()
        ij = ij.view(N, BB, C)

        def f(x):
            with torch.no_grad():
                res = []
                for e in x:
                    res.append(torch.histc(e, bins=BINS, min=min_elem, max=max_elem))
                return res

        futures: List[torch.jit.Future[torch.Tensor]] = []
        for i in range(N):
            futures.append(torch.jit.fork(f, ij[i]))
        results = []
        for future in futures:
            results += torch.jit.wait(future)
        with torch.no_grad():
            out = torch.stack(results)
        return out

    def forward(self, img):
        """Return the int(ratio*C) channels of ``img`` (B, C, H, W) with highest entropy."""
        with torch.no_grad():
            B, C, H, W = img.shape
            # Pad so every pixel gets a full win_w x win_h neighbourhood.
            ext_x = int(self.win_w / 2)
            ext_y = int(self.win_h / 2)
            new_width = ext_x + W + ext_x
            new_height = ext_y + H + ext_y
            # Extract every sliding window with nn.Unfold.
            nn_Unfold = nn.Unfold(kernel_size=(self.win_w, self.win_h), dilation=1, padding=ext_x, stride=1)
            # (B, C*K*K, L): L is the number of windows per image.
            x = nn_Unfold(img)
            x = x.view(B, C, 3, 3, -1).permute(0, 1, 4, 2, 3)  # -> (B, C, L, K, K)
            # Per window: center gray value paired with the mean of its neighbours.
            ij = self.calcIJ_new(x).reshape(B * C, -1)
            fij_packed = self.histc_fork(ij)
            p = fij_packed / (new_width * new_height)
            # Two-dimensional entropy per channel; clamp avoids log(0).
            h_tem = -p * torch.log(torch.clamp(p, min=1e-40)) / math.log(2)
            a = torch.sum(h_tem, dim=1)
            H = a.reshape(B, C)
            _, index = torch.topk(H, int(self.ratio * C), dim=1)
            # Gather the winning channels per sample from the original image.
            selected = []
            for i in range(img.shape[0]):
                selected.append(torch.index_select(img[i], dim=0, index=index[i]).unsqueeze(0))
            selected = torch.cat(selected, dim=0)
            return selected
class Network(nn.Module):
    """U-Net with optional information-filtering (IFE) skip connections.

    mode='ori'       : plain U-Net skip connections.
    mode='curvature' : Curvature-selected channels appended to skips c2/c3.
    mode='entropy'   : Entropy_Hist-selected channels appended to skips c2/c3.
    """

    def __init__(self, in_ch=3, mode='ori', ratio=None):
        super(Network, self).__init__()
        self.mode = mode
        if self.mode == 'ori':
            self.ratio = [0, 0]
        if self.mode == 'curvature':
            self.ratio = ratio
            self.ife1 = Curvature(self.ratio[0])
            self.ife2 = Curvature(self.ratio[1])
        if self.mode == 'entropy':
            self.ratio = ratio
            self.ife1 = Entropy_Hist(self.ratio[0])
            self.ife2 = Entropy_Hist(self.ratio[1])
        # ---- U-Net encoder ----
        self.conv1 = Convolution(in_ch, 64)
        self.pool1 = nn.MaxPool2d(2)   # feature map = shape(m/2,n/2,64)
        self.conv2 = Convolution(64, 128)
        self.pool2 = nn.MaxPool2d(2)   # feature map = shape(m/4,n/4,128)
        self.conv3 = Convolution(128, 256)
        self.pool3 = nn.MaxPool2d(2)   # feature map = shape(m/8,n/8,256)
        self.conv4 = Convolution(256, 512)
        self.pool4 = nn.MaxPool2d(2)   # feature map = shape(m/16,n/16,512)
        self.conv5 = Convolution(512, 1024)  # feature map = shape(m/16,n/16,1024)
        # ---- U-Net decoder; conv7/conv8 budget for the extra IFE channels ----
        self.up_conv1 = nn.ConvTranspose2d(in_channels=1024, out_channels=512, kernel_size=2, stride=2, padding=0, output_padding=0)
        self.conv6 = Convolution(1024, 512)  # feature map = shape(m/8,n/8,512)
        self.up_conv2 = nn.ConvTranspose2d(512, 256, 2, 2, 0, 0)
        self.conv7 = Convolution(int(256*(2+self.ratio[1])), 256)  # feature map = shape(m/4,n/4,256)
        self.up_conv3 = nn.ConvTranspose2d(256, 128, 2, 2, 0, 0)
        self.conv8 = Convolution(int(128*(2+self.ratio[0])), 128)  # feature map = shape(m/2,n/2,128)
        self.up_conv4 = nn.ConvTranspose2d(128, 64, 2, 2, 0, 0)
        self.conv9 = Convolution(128, 64)  # feature map = shape(m,n,64)
        self.out_conv1 = nn.Conv2d(64, 1, 1, 1, 0)

    def forward(self, x):
        """Run the U-Net; returns a single-channel prediction at input resolution."""
        c1 = self.conv1(x)
        p1 = self.pool1(c1)
        c2 = self.conv2(p1)
        p2 = self.pool2(c2)
        c3 = self.conv3(p2)
        p3 = self.pool3(c3)
        c4 = self.conv4(p3)
        p4 = self.pool4(c4)
        c5 = self.conv5(p4)
        if self.mode != 'ori':
            # Bug fix: concatenate the selected channels along the CHANNEL axis
            # (dim=1). The previous default (dim=0) stacked along the batch
            # axis, which broke the later channel-wise merges that conv7/conv8
            # size for: int(256*(2+ratio)) == 512 + int(256*ratio) channels.
            c2 = torch.cat([c2, self.ife1(c2)], dim=1)
            c3 = torch.cat([c3, self.ife2(c3)], dim=1)
        up1 = self.up_conv1(c5)
        merge1 = torch.cat([up1, c4], dim=1)
        c6 = self.conv6(merge1)
        up2 = self.up_conv2(c6)
        merge2 = torch.cat([up2, c3], dim=1)
        c7 = self.conv7(merge2)
        up3 = self.up_conv3(c7)
        merge3 = torch.cat([up3, c2], dim=1)
        c8 = self.conv8(merge3)
        up4 = self.up_conv4(c8)
        merge4 = torch.cat([up4, c1], dim=1)
        c9 = self.conv9(merge4)
        S_g_pred = self.out_conv1(c9)
        return S_g_pred
|
yezi-66/IFE
|
unet_github/lib/Network.py
|
Network.py
|
py
| 7,331 |
python
|
en
|
code
| 26 |
github-code
|
6
|
19340985778
|
class EvenTree(object):
    """Count how many even-sized forests a child->parent tree decomposes into.

    ``graph`` maps each child node to a one-element list holding its parent.

    Bug fix: ``dict.iteritems()`` (Python 2 only) is replaced with
    ``dict.items()`` so the class runs on Python 3; the mutable default
    argument is replaced with the None-sentinel idiom.
    """

    def __init__(self, graph=None):
        # None-sentinel instead of a shared mutable default dict.
        self.graph = {} if graph is None else graph
        self.visited_node = []
        self.total_forest = 0

    def calculate_forest(self):
        """Return the number of even forests obtainable from self.graph."""
        for k, v in self.graph.items():
            if k not in self.visited_node:
                key1 = k
                key_list = [key1]
                self.visited_node.append(key1)
                new_dict = defaultdict(list)
                count, new_dict = self.total_count(key_list, self.graph, self.visited_node, new_dict)
                # An even-sized component counts as one forest on its own.
                if count % 2 == 0:
                    self.total_forest += 1
                for key, values in new_dict.items():
                    for i in values:
                        if i in new_dict.keys():
                            # A node with an odd number of children roots an
                            # even subtree (children + itself), so it can be
                            # cut off as an additional even forest.
                            if len(new_dict[i]) % 2 != 0:
                                self.total_forest += 1
        return self.total_forest

    def total_count(self, key_list, graph, visited_node, new_dict, count=1):
        """Recursively count all nodes reachable below the keys in ``key_list``.

        e.g. for graph {2: [1], 5: [2], 7: [2], ...} starting at node 2 the
        count is 3, since node 2 is the parent of nodes 5 and 7.
        """
        if key_list:
            key1 = key_list.pop(0)
            for key, values in graph.items():
                if key1 == values[0]:
                    # Push the child so nodes originating from it are walked too.
                    key_list.append(key)
                    # Mark each child node as visited.
                    self.visited_node.append(key)
                    new_dict[values[0]].append(key)
                    count += 1
                    # Recurse for each child node.
                    count, new_dict = self.total_count(key_list, graph, self.visited_node, new_dict, count)
        return (count, new_dict)
class GraphTests(unittest.TestCase):
    """Regression tests for EvenTree.calculate_forest on two sample trees."""

    def test_graph1(self):
        """A 10-node sample tree yields 2 even forests."""
        small_tree = {2: [1], 3: [1], 4: [3], 5: [2], 6: [1], 7: [2], 8: [6], 9: [8], 10: [8]}
        self.assertEqual(EvenTree(small_tree).calculate_forest(), 2)

    def test_graph2(self):
        """A 20-node sample tree yields 4 even forests."""
        big_tree = {2: [1], 3: [1], 4: [3], 5: [2], 6: [5],
                    7: [1], 8: [1], 9: [2], 10: [7], 11: [10],
                    12: [3], 13: [7], 14: [8], 15: [12], 16: [6],
                    17: [6], 18: [10], 19: [1], 20: [8]}
        self.assertEqual(EvenTree(big_tree).calculate_forest(), 4)
if __name__ == "__main__":
import unittest
from collections import defaultdict, Iterable
import itertools
suite = unittest.TestLoader().loadTestsFromTestCase(GraphTests)
unittest.TextTestRunner(verbosity=2).run(suite)
|
sunilchauhan/EvenTree
|
EvenTree.py
|
EvenTree.py
|
py
| 3,088 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31272615348
|
# -*- coding: utf-8 -*-
"""Image transformation test meant to be run with pytest."""
import sys
import pytest
from confmap import ImageTransform
from confmap import HyperbolicTiling
sys.path.append("tests")
def test_tilesAndTransform():
    """Smoke-test chaining ImageTransform operations into a HyperbolicTiling.

    All numeric arguments are fixed example values from the confmap examples;
    the test passes whenever the whole pipeline runs without raising (it
    unconditionally returns True at the end).
    """
    # First pass: mirror + conformal transform of the sample image
    # (print_and_save=False, so nothing is written to disk here).
    im=ImageTransform('./examples/sample1.png',0,data=None
              ,c=1.*(1.+0.j),r=1.*(1.+0.j)
              ,d=0.08+0.55j,output_width=750
              ,output_height=1000,blur=False,smoothshift=-0,shift=0.)
    im.mirror(Y=2,X=1)
    res=im.transform(print_and_save=False)
    # Second pass: build a hyperbolic tiling from the transformed data and
    # save the rendered result under ./examples/.
    HT=HyperbolicTiling('./examples/sample1.png',prefix='./examples/',suffix='0',
               output_width=1550,output_height=640,data=res)
    im=ImageTransform(HT,d=0.04)
    im.arctan()
    im.similitude(c=1.9)
    HT.transform(c=0.95,d=0.+0.0j,backcolor=True,vanishes=False,
                 nbit=25,delta=0e-3,print_and_save=True,
                 sommets=(6,4,4,4,6,4,4,4))
    return True

if __name__ == "__main__":
    # Allow running this test module directly.
    pytest.main()
|
FCoulombeau/confmap
|
tests/test_tilesAndTransforms.py
|
test_tilesAndTransforms.py
|
py
| 1,016 |
python
|
en
|
code
| 8 |
github-code
|
6
|
21666698024
|
#https://leetcode.com/problems/valid-sudoku/
class Solution:
    """LeetCode 36: validate a partially filled 9x9 Sudoku board."""

    def isValidSudoku(self, board: list[list[str]]) -> bool:
        """Return True iff no digit repeats in any row, column, or 3x3 square.

        Non-digit cells (conventionally ".") are ignored. Runs a single pass
        over the board, keeping one seen-set per row, column, and square.

        The original code carried two helpers computing the square index; the
        efficient arithmetic form (previously the unused ``getSquare2``) is
        inlined below and the dead duplicate removed.
        """
        rowSetList = [set() for _ in range(9)]
        colSetList = [set() for _ in range(9)]
        squareSetList = [set() for _ in range(9)]
        for row in range(len(board)):
            for col in range(len(board[0])):
                value = board[row][col]
                if not value.isdigit():
                    continue  # ignore empty / non-numeric cells
                # Squares are numbered 0..8, left-to-right then top-to-bottom.
                square = (3 * (row // 3)) + (col // 3)
                if (value in rowSetList[row]) or (value in colSetList[col]) or (value in squareSetList[square]):
                    return False
                rowSetList[row].add(value)
                colSetList[col].add(value)
                squareSetList[square].add(value)
        # No duplicates found anywhere: the board is valid.
        return True
def main():
    """Demo entry point: print the validity of a sample (invalid) board."""
    sample_board = [
        ["8", "3", ".", ".", "7", ".", ".", ".", "."],
        ["6", ".", ".", "1", "9", "5", ".", ".", "."],
        [".", "9", "8", ".", ".", ".", ".", "6", "."],
        ["8", ".", ".", ".", "6", ".", ".", ".", "3"],
        ["4", ".", ".", "8", ".", "3", ".", ".", "1"],
        ["7", ".", ".", ".", "2", ".", ".", ".", "6"],
        [".", "6", ".", ".", ".", ".", "2", "8", "."],
        [".", ".", ".", "4", "1", "9", ".", ".", "5"],
        [".", ".", ".", ".", "8", ".", ".", "7", "9"],
    ]
    # Prints False: board[0][0] and board[3][0] both hold "8" in column 0.
    print(Solution().isValidSudoku(sample_board))


if __name__ == "__main__":  # Entry point
    main()
|
Adam-1776/Practice
|
DSA/validSodoku/solution.py
|
solution.py
|
py
| 2,643 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19575911667
|
def bissextile():
    """Read a year from the ``date`` entry widget and display whether it is a leap year.

    Bug fix: the old test (``n%4==0 and (n//100)%4==0``) rejected ordinary
    leap years such as 1904. The Gregorian rule is: divisible by 4, and not
    by 100 unless also divisible by 400.
    """
    try:
        n = int(date.get())
        if n % 4 == 0 and (n % 100 != 0 or n % 400 == 0):
            tmp = "Is Bissextile"
        else:
            tmp = "Is Not Bissextile"
        txt.set(tmp)
    except Exception:
        # Non-integer input ends up here (bare `except:` narrowed slightly).
        txt.set("The value isn't an integer")
from tkinter import *
from keyboard import *


def kInput():
    # Global keyboard poll: pressing Enter triggers the leap-year check.
    if is_pressed('enter'):
        bissextile()


# --- Tk window: a year entry, a result label, and a test button ---
main = Tk()
main.resizable(False, False)
main.title("Bissextile")
# Year input field read by bissextile().
date = Entry(main, width=25, justify=CENTER)
date.grid(row = 1, column = 1)
# Result text, written by bissextile().
txt = StringVar()
screenReturn = Label(main,width=25, textvariable=txt)
screenReturn.grid(row= 2, column = 1)
testButton = Button(main, text="Test if bissextile", command=bissextile)
testButton.grid(row=3, column=1)


def shutdown():
    # Window-close handler: stop the manual event loop below.
    global run
    run = False
    main.quit()


main.protocol("WM_DELETE_WINDOW", shutdown)
# Manual event loop (instead of mainloop) so the keyboard module can be
# polled between Tk updates.
run=True
while run:
    kInput()
    main.update()
|
OJddJO/NSI
|
bissextile.py
|
bissextile.py
|
py
| 934 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28654892890
|
import re
def day3_2():
    """Advent of Code 2019 day 3 part 2: fewest combined steps to a crossing.

    Reads two comma-separated wire paths from "day3 - input.txt", decomposes
    each wire into horizontal and vertical segments annotated with the
    cumulative step count at each segment's start, intersects horizontal
    segments of one wire with vertical segments of the other (both ways),
    and prints the minimum combined number of steps both wires take to
    reach a shared point (the origin crossing is discarded).
    """
    with open("day3 - input.txt") as input:
    # with open("day3 - input1.txt") as input:
    # with open("day3 - input2.txt") as input:
        wires = [num.strip() for num in input.read().split()]
        wire_0 = wires[0].split(",")
        wire_1 = wires[1].split(",")
        wire_0_hor = []
        wire_0_ver = []
        wire_1_hor = []
        wire_1_ver = []
        intersections = []

        # Go through the wire and add both the old and new xy coordinates along with
        # the total previous magnitude to its respective horizontal and vertical move lists
        def rldu(wire, wire_hor, wire_ver, pairx, pairy, total_mag):
            for each in wire:
                # Each move is a direction letter followed by a distance, e.g. "R75".
                path = re.search(r'(\w)(.*)', each)
                direction = path.group(1)
                magnitude = int(path.group(2))
                oldx = pairx
                oldy = pairy
                if direction == "R":
                    pairx += magnitude
                    wire_hor.append([(oldx, oldy), (pairx, oldy), total_mag])
                elif direction == "L":
                    pairx -= magnitude
                    wire_hor.append([(oldx, oldy), (pairx, oldy), total_mag])
                elif direction == "U":
                    pairy += magnitude
                    wire_ver.append([(oldx, oldy), (oldx, pairy), total_mag])
                elif direction == "D":
                    pairy -= magnitude
                    wire_ver.append([(oldx, oldy), (oldx, pairy), total_mag])
                total_mag +=magnitude

        # Go through wire 0's horizontal list to find the wire 1's horizontal intersections
        # Append the intersection coordinate and the total magnitude to get there
        def find_intersections( wire_x_hor, wire_y_ver, intersections):
            for hor in wire_x_hor:
                minx = min(hor[0][0], hor[1][0])
                maxx = max(hor[0][0], hor[1][0])
                for ver in wire_y_ver:
                    miny = min(ver[0][1], ver[1][1])
                    maxy = max(ver[0][1], ver[1][1])
                    x = ver[0][0]
                    y = hor[0][1]
                    # Segments cross when the vertical's x lies in the horizontal's
                    # x-range and the horizontal's y lies in the vertical's y-range.
                    if minx <= x and x <= maxx and miny <= y and y <= maxy:
                        # Steps on each wire = steps to the segment start plus
                        # the offset travelled within the segment.
                        w0_mag = hor[2]+abs(hor[0][0]-x)
                        w1_mag = ver[2]+abs(ver[0][1]-y)
                        tm = w0_mag + w1_mag
                        intersections.append((ver[0][0], hor[0][1], tm))

        rldu(wire_0, wire_0_hor, wire_0_ver, 0, 0, 0)
        rldu(wire_1, wire_1_hor, wire_1_ver, 0, 0, 0)
        # Check horizontal-vs-vertical crossings in both directions.
        find_intersections(wire_0_hor, wire_1_ver, intersections)
        find_intersections(wire_1_hor, wire_0_ver, intersections)
        # The first intersection is (0, 0), which isn't what we want
        intersections.pop(0)
        min_steps = intersections[0][2]
        for each in intersections:
            if each[2] < min_steps:
                min_steps = each[2]
        print("min amount of steps is: {}".format(min_steps))

day3_2()

# Really cool implementation by redditor jadenPete
# !/usr/bin/env python3
# with open("day3.txt", "r") as file:
#     def crawl_wire():
#         loc = [0, 0]
#         steps = 0
#         for move in file.readline().split(","):
#             delta = {"L": (0, -1), "R": (0, 1), "U": (1, 1), "D": (1, -1)}[move[0]]
#             for _ in range(int(move[1:])):
#                 loc[delta[0]] += delta[1]
#                 steps += 1
#                 yield tuple(loc), steps
#     visited = {}
#     for loc, steps in crawl_wire():
#         if loc not in visited:
#             visited[loc] = steps
#     print(min(steps + visited[loc] for loc, steps in crawl_wire() if loc in visited))
|
mellumfluous/AdventOfCode-2019
|
day3_2.py
|
day3_2.py
|
py
| 3,391 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74142898108
|
from django.http import JsonResponse
from django.shortcuts import render
# Create your views here.
from django.views.generic import View
from django_redis import get_redis_connection
from redis import StrictRedis
from apps.goods.models import GoodsSKU
from utils.common import LoginRequiredViewMixin, BaseCartView
class CartAddView(BaseCartView):
    """AJAX endpoint: add a SKU to the current user's Redis-backed cart."""

    def post(self, request, command='add'):
        """Add ``count`` units of a SKU to the cart.

        Returns JSON: ``code`` 0 with the cart's new ``total_count`` on
        success, or ``code`` 5 when the requested quantity exceeds stock.
        """
        params = super().post(request, command)
        # Parameters extracted/validated by BaseCartView: user_id, sku_id, count, sku.
        user_id, sku_id, count, sku = params['user_id'], \
                                      params['sku_id'], \
                                      params['count'], \
                                      params['sku']
        # print(user_id, sku_id, count)
        # print('+'*50)
        # If the SKU is already in the cart, accumulate onto the stored quantity.
        strict_redis = get_redis_connection()
        # strict_redis = StrictRedis()
        key = 'cart_%s' % user_id
        val = strict_redis.hget(key, sku_id)
        if val:
            count += int(val)
        # Stock check: reject if the combined quantity exceeds availability.
        # (errmsg is user-facing Chinese text: "insufficient stock".)
        if count > sku.stock:
            return JsonResponse({'code':5, 'errmsg':'库存不足'})
        # Persist the item into the per-user Redis hash.
        strict_redis.hset(key, sku_id, count)
        # Recompute the cart-wide item total for the response.
        total_count = 0
        vals = strict_redis.hvals(key)
        for val in vals:
            total_count += int(val)
        context = {
            'code':0,
            'total_count':total_count,
        }
        return JsonResponse(context)
class CartInfoView(LoginRequiredViewMixin, View):
    """Cart page (login required): renders every SKU in the user's cart."""

    def get(self, request):
        """Render cart.html with the SKU list, total count and total amount."""
        # Fetch everything the logged-in user has added to the cart.
        strict_redis = get_redis_connection()
        key = 'cart_%s' % request.user.id
        # Redis hash for this user's cart: {sku_id: count}.
        cart_dict = strict_redis.hgetall(key)
        # SKU objects collected for the template.
        skus = []
        # Total number of items across the cart.
        total_count = 0
        # Total price across the cart.
        total_amount = 0
        for sku_id, count in cart_dict.items():
            try:
                # Look the SKU up by id.
                sku = GoodsSKU.objects.get(id=sku_id)
                # Collect it for the template.
                skus.append(sku)
            except Exception as e:
                print(e)
            # NOTE(review): if the lookup above fails, `sku` still refers to
            # the previous iteration's object and gets re-counted below —
            # looks unintended; confirm and consider `continue` on failure.
            # Dynamically attach the quantity to the SKU instance.
            sku.count = int(count)
            # Dynamically attach the line total to the SKU instance.
            sku.amount = sku.price * sku.count
            # Accumulate cart-wide totals.
            total_count += sku.count
            total_amount += sku.amount
        context = {
            'skus': skus,
            'total_count': total_count,
            'total_amount': total_amount,
        }
        return render(request, 'cart.html', context)
class CartUpdateView(LoginRequiredViewMixin, BaseCartView):
    """AJAX endpoint: overwrite the quantity of one cart item."""

    def post(self, request, command='update'):
        """Set the cart quantity of ``sku_id`` to ``count``; returns JSON code 0."""
        # print(CartUpdateView.mro())
        # print('-' * 50)
        params = super().post(request, command)
        sku_id = params['sku_id']
        count = params['count']
        # print(sku_id)
        # print(count)
        # print('-' * 50)
        # Persist the new quantity into the user's Redis cart hash.
        strict_redis = get_redis_connection()
        key = 'cart_%s' % request.user.id
        strict_redis.hset(key, sku_id, count)
        # JSON response (message is user-facing Chinese: "quantity updated").
        return JsonResponse({'code': 0, 'message': '修改商品数量成功',})
class CartDeleteView(LoginRequiredViewMixin, BaseCartView):
    """AJAX endpoint: remove one SKU from the user's cart."""

    def post(self, request, command='delete'):
        """Delete ``sku_id`` from the cart; returns JSON code 0 on success."""
        # Request parameter (validated by BaseCartView): sku_id.
        sku_id = super().post(request, command)['sku_id']
        # Remove the field from the user's Redis cart hash.
        strict_redis = get_redis_connection()
        key = 'cart_%s' % request.user.id
        strict_redis.hdel(key, sku_id)
        # Respond (message is user-facing Chinese: "deleted successfully").
        return JsonResponse({'code':0, 'message':'删除成功!'})
|
xmstu/dailyfresh2
|
dailyfresh/apps/cart/views.py
|
views.py
|
py
| 4,266 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13042124891
|
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 5 16:42:07 2018
@author: lud
"""
import matplotlib
#import matplotlib.pyplot as plt
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
# implement the default mpl key bindings
from matplotlib.backend_bases import key_press_handler
from matplotlib.figure import Figure
import tkinter as Tk
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
import numpy as np
import pandas as pd
from argparse import ArgumentParser
import os
def cuboid_data2(o, size=(1, 1, 1)):
    """Return the 6 faces (4 corners each, xyz) of a cuboid at origin ``o``.

    The result is a float array of shape (6, 4, 3): a unit cube scaled
    per-axis by ``size`` and translated to ``o``.
    """
    # Unit-cube faces: one quad of 4 corners per face.
    faces = [[[0, 1, 0], [0, 0, 0], [1, 0, 0], [1, 1, 0]],
             [[0, 0, 0], [0, 0, 1], [1, 0, 1], [1, 0, 0]],
             [[1, 0, 1], [1, 0, 0], [1, 1, 0], [1, 1, 1]],
             [[0, 0, 1], [0, 0, 0], [0, 1, 0], [0, 1, 1]],
             [[0, 1, 0], [0, 1, 1], [1, 1, 1], [1, 1, 0]],
             [[0, 1, 1], [0, 0, 1], [1, 0, 1], [1, 1, 1]]]
    # Broadcast the per-axis scale and offset in one vectorized expression.
    return (np.array(faces, dtype=float) * np.asarray(size, dtype=float)
            + np.asarray(o, dtype=float))
def plotCubeAt2(positions, sizes=None, colors=None, **kwargs):
    """Build one Poly3DCollection holding a cuboid per entry of ``positions``."""
    # Fall back to unit sizes / default color when not given as sequences.
    if not isinstance(colors, (list, np.ndarray)):
        colors = ["C0"] * len(positions)
    if not isinstance(sizes, (list, np.ndarray)):
        sizes = [(1, 1, 1)] * len(positions)
    # One (6, 4, 3) face array per cuboid (color is applied below, not here).
    quads = [cuboid_data2(p, size=s) for p, s, _c in zip(positions, sizes, colors)]
    # Each cuboid contributes 6 faces, hence the 6x repeat of its color.
    return Poly3DCollection(np.concatenate(quads),
                            facecolors=np.repeat(colors, 6), **kwargs)
def main(path, width, depth, height):
    """Open a Tk window that renders box layouts from CSV files in ``path``.

    Args:
        path: Directory scanned (non-recursively) for ``*.csv`` layout files.
        width, depth, height: Axis limits of the 3-D plot.

    Each CSV row is read header-less; columns 1-3 are box sizes and
    columns 4-6 box positions (column 6 padded with 0 when absent).
    """
    # Collect all CSV layout files from the directory.
    source_files = []
    for file in os.listdir(path):
        if file.endswith(".csv"):
            source_files.append(os.path.join(path, file))

    def getData(df):
        """Build a Poly3DCollection of green cuboids from one layout DataFrame."""
        # Bug fix: this was `len(df.columns < 7)` — the length of a boolean
        # Index, which is always truthy, so column '6' was (re)written even
        # for 7-column files, clobbering real z-positions. Pad the missing
        # z-position column only when fewer than 7 columns exist.
        if len(df.columns) < 7:
            df['6'] = 0
        sizes = [tuple(x) for x in df.iloc[:, [1, 2, 3]].values]
        positions = [tuple(x) for x in df.iloc[:, [4, 5, 6]].values]
        colors = ["limegreen"] * df.shape[0]
        pc = plotCubeAt2(positions, sizes, colors=colors, edgecolor="k", linewidth=0.4)
        return pc

    # --- Tk window with an embedded matplotlib 3-D axes ---
    fig = Figure()
    root = Tk.Tk()
    root.wm_title("Plot boxes")
    canvas = FigureCanvasTkAgg(fig, master=root)
    ax = fig.add_subplot(111, projection='3d')
    ax.set_aspect('equal')
    ax.set_xlim([0, width])
    ax.set_ylim([0, depth])
    ax.set_zlim([0, height])
    # Show the first file if any, otherwise a single degenerate box.
    if len(source_files) > 0:
        box_data = pd.read_csv(source_files[0], header=None)
    else:
        box_data = pd.DataFrame(np.full((1, 6), 0, dtype=int))
    ax.add_collection3d(getData(box_data))
    canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
    # NOTE(review): NavigationToolbar2TkAgg is a legacy matplotlib name;
    # newer releases use NavigationToolbar2Tk — confirm the pinned version.
    toolbar = NavigationToolbar2TkAgg(canvas, root)
    toolbar.update()
    canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)

    def refresh(df):
        """Replace the plotted boxes with the ones described by ``df``."""
        ax.collections.clear()
        # NOTE(review): add_collection here vs add_collection3d above —
        # looks inconsistent; confirm both render identically.
        ax.add_collection(getData(df))
        canvas.draw()

    def ok():
        """Load the file currently selected in the dropdown and redraw."""
        newfile = tkvar.get()
        box_data = pd.read_csv(newfile, header=None)
        refresh(box_data)

    def option_changed(*args):
        """Dropdown trace callback: reload and redraw the selected file."""
        newfile = tkvar.get()
        box_data = pd.read_csv(newfile, header=None)
        refresh(box_data)

    # Dropdown listing the available CSV files.
    tkvar = Tk.StringVar(root)
    if len(source_files) > 0:
        tkvar.set(source_files[0])
    else:
        tkvar.set('No file')
    tkvar.trace("w", option_changed)
    popupMenu = Tk.OptionMenu(root, tkvar, '', *source_files)
    popupMenu.pack(side=Tk.TOP)

    def on_key_event(event):
        """Forward key presses to the matplotlib toolbar shortcuts."""
        print('you pressed %s' % event.key)
        key_press_handler(event, canvas, toolbar)

    canvas.mpl_connect('key_press_event', on_key_event)

    def _quit():
        root.quit()     # stops mainloop
        root.destroy()  # this is necessary on Windows to prevent
                        # Fatal Python Error: PyEval_RestoreThread: NULL tstate

    button = Tk.Button(master=root, text='Quit', command=_quit)
    button.pack(side=Tk.BOTTOM)
    root.mainloop()
# main('E:\\Projects\\BinPacking\\test',800,1200,2055)
if __name__ == "__main__":
    # Command-line entry point: directory of CSV layouts plus plot dimensions.
    parser = ArgumentParser()
    parser.add_argument("-p", "--path", dest="layer_data_path",
                        help="find data from path", metavar="PATH")
    parser.add_argument("-w", "--width", dest="width", type = int, default=800,
                        help="plane width, default 800")
    parser.add_argument("-d", "--depth", dest="depth", type = int, default=1200,
                        help="plane depth, default 1200")
    parser.add_argument("-hei", "--height", dest="height", type = int, default=2055,
                        help="bin height, default 2055")
    args = parser.parse_args()
    main(args.layer_data_path, args.width, args.depth, args.height)
|
stevenluda/cuboidPlotter
|
PlotCuboids.py
|
PlotCuboids.py
|
py
| 4,848 |
python
|
en
|
code
| 0 |
github-code
|
6
|
75177510266
|
import os
import string
import json
from collections import namedtuple
from sys import stdout
from lex.oed.languagetaxonomy import LanguageTaxonomy
from apps.tm.models import Lemma, Wordform, Definition, Language, ProperName
from apps.tm.build import buildconfig
LEMMA_FIELDS = buildconfig.LEMMA_FIELDS
BlockData = namedtuple('BlockData', LEMMA_FIELDS)
def populate_db():
    """
    Populate the database table for Language, Lemma, Wordform, and Definition

    Runs the full rebuild: empties every table, then repopulates Language,
    the lexical tables (Lemma/Wordform/Definition), and ProperName in turn,
    reporting progress on stdout.
    """
    stdout.write('Emptying the tables...\n')
    empty_tables()
    stdout.write('Populating Language records...\n')
    populate_language()
    stdout.write('Populating Lemma, Wordform, and Definition records...\n')
    populate_lexical()
    stdout.write('Populating ProperName records...\n')
    populate_proper_names()
def empty_tables():
    """Delete every existing row from all five build tables, in order."""
    for model in (Wordform, Lemma, Definition, Language, ProperName):
        model.objects.all().delete()
def populate_language():
    """
    Populate the Language table

    Two passes: first bulk-create every language row (family unset), then
    link each row to its family once all rows exist.
    """
    taxonomy = LanguageTaxonomy()
    taxonomy.families = set(buildconfig.LANGUAGE_FAMILIES)
    # Truncate names that exceed the model's column width.
    max_length = Language._meta.get_field('name').max_length
    language_objects = []
    for language in taxonomy.languages():
        name = language.name[:max_length]
        language_objects.append(Language(id=language.id, name=name, family=None))
    Language.objects.bulk_create(language_objects)
    # Second pass: wire up the self-referential family foreign key.
    for language in taxonomy.languages():
        family = taxonomy.family_of(language.name)
        if family is not None:
            src = Language.objects.get(id=language.id)
            target = Language.objects.get(id=family.id)
            src.family = target
            src.save()
def populate_lexical():
    """
    Populate the Lemma, Wordform, and Definition tables

    Reads one JSON-lines file per letter from the refined form index and
    bulk-inserts rows in batches of 1000 source blocks. Definitions are
    only stored for lemmas below the frequency cutoff; ids are assigned
    from process-local counters, so the tables must start empty.
    """
    in_dir = os.path.join(buildconfig.FORM_INDEX_DIR, 'refined')
    frequency_cutoff = buildconfig.FREQUENCY_CUTOFF
    taxonomy = LanguageTaxonomy()
    lemma_counter = 0
    definition_counter = 0
    for letter in string.ascii_lowercase:
        stdout.write('Inserting data for %s...\n' % letter)
        # Parse the whole per-letter file into BlockData namedtuples first.
        blocks = []
        in_file = os.path.join(in_dir, letter + '.json')
        with open(in_file, 'r') as filehandle:
            for line in filehandle:
                data = json.loads(line.strip())
                blocks.append(BlockData(*data))
        lemmas = []
        wordforms = []
        definitions = []
        for i, block in enumerate(blocks):
            # Resolve the language id (None when the taxonomy has no node).
            lang_node = taxonomy.node(language=block.language)
            if lang_node is None:
                language_id = None
            else:
                language_id = lang_node.id
            # Only low-frequency lemmas get a stored (truncated) definition.
            if block.definition and block.f2000 < frequency_cutoff:
                definition_counter += 1
                definitions.append(Definition(id=definition_counter,
                                              text=block.definition[:100]))
                definition_id = definition_counter
            else:
                definition_id = None
            lemma_counter += 1
            lemmas.append(Lemma(id=lemma_counter,
                                lemma=block.lemma,
                                sort=block.sort,
                                wordclass=block.wordclass,
                                firstyear=block.start,
                                lastyear=block.end,
                                refentry=block.refentry,
                                refid=block.refid,
                                thesaurus_id=block.htlink,
                                language_id=language_id,
                                definition_id=definition_id,
                                f2000=_rounder(block.f2000),
                                f1950=_rounder(block.f1950),
                                f1900=_rounder(block.f1900),
                                f1850=_rounder(block.f1850),
                                f1800=_rounder(block.f1800),
                                f1750=_rounder(block.f1750),))
            # All three type lists (standard/variant/alien) become Wordform rows.
            for typelist in (block.standard_types,
                             block.variant_types,
                             block.alien_types):
                for typeunit in typelist:
                    wordforms.append(Wordform(sort=typeunit[0],
                                              wordform=typeunit[1],
                                              wordclass=typeunit[2],
                                              lemma_id=lemma_counter,
                                              f2000=_rounder(typeunit[4]),
                                              f1900=_rounder(typeunit[5]),
                                              f1800=_rounder(typeunit[6]),))
            # Flush to the database every 1000 blocks to bound memory use.
            if i % 1000 == 0:
                Definition.objects.bulk_create(definitions)
                Lemma.objects.bulk_create(lemmas)
                Wordform.objects.bulk_create(wordforms)
                definitions = []
                lemmas = []
                wordforms = []
        # Flush whatever remains for this letter.
        Definition.objects.bulk_create(definitions)
        Lemma.objects.bulk_create(lemmas)
        Wordform.objects.bulk_create(wordforms)
def populate_proper_names():
    """
    Populate the ProperName table

    Reads tab-separated lines (sortable form, name, common-flag) from the
    proper-names index and bulk-inserts them in batches of 1000; malformed
    lines (not exactly 3 fields) are silently skipped.
    """
    in_dir = os.path.join(buildconfig.FORM_INDEX_DIR, 'proper_names')
    in_file = os.path.join(in_dir, 'all.txt')
    names = []
    counter = 0
    with open(in_file) as filehandle:
        for line in filehandle:
            data = line.strip().split('\t')
            if len(data) == 3:
                counter += 1
                sortable, name, common = data
                # The third column is the string 'true'/'false' (any case).
                if common.lower() == 'true':
                    common = True
                else:
                    common = False
                names.append(ProperName(lemma=name,
                                        sort=sortable,
                                        common=common))
                # Flush every 1000 parsed rows to bound memory use.
                if counter % 1000 == 0:
                    ProperName.objects.bulk_create(names)
                    names = []
    # Flush the final partial batch.
    ProperName.objects.bulk_create(names)
def _rounder(n):
n = float('%.2g' % n)
if n == 0 or n > 1:
return int(n)
else:
return n
|
necrop/wordrobot
|
apps/tm/build/lexicon/populatedb.py
|
populatedb.py
|
py
| 6,316 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72623372987
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import mock
import unittest
from cloudshell.networking.brocade.cli.brocade_cli_handler import BrocadeCliHandler
from cloudshell.networking.brocade.runners.brocade_state_runner import BrocadeStateRunner
class TestBrocadeStateRunner(unittest.TestCase):
    """Unit tests for BrocadeStateRunner's collaborator wiring."""

    def setUp(self):
        """Build a runner whose four collaborators are all MagicMocks."""
        super(TestBrocadeStateRunner, self).setUp()
        self.tested_instance = BrocadeStateRunner(cli=mock.MagicMock(),
                                                  logger=mock.MagicMock(),
                                                  resource_config=mock.MagicMock(),
                                                  api=mock.MagicMock())

    def tearDown(self):
        """Drop the runner created in setUp."""
        super(TestBrocadeStateRunner, self).tearDown()
        del self.tested_instance

    def test_cli_handler_property(self):
        """ Check that property return correct instance. Should return BrocadeCliHandler """
        self.assertIsInstance(self.tested_instance.cli_handler, BrocadeCliHandler)
|
QualiSystems/cloudshell-networking-brocade
|
tests/networking/brocade/runners/test_brocade_state_runner.py
|
test_brocade_state_runner.py
|
py
| 1,123 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35153932961
|
age = input("What is your current age?")
# Remaining lifetime assuming a 90-year lifespan.
years_left = 90 - int(age)
days = years_left * 365
weeks = years_left * 52   # 4680 weeks in 90 years
months = years_left * 12
print("You have " + str(days) + " days, " + str(weeks) + " weeks, and " + str(months) + " months left.")
|
georgewood749/life_in_weeks_calculator
|
main.py
|
main.py
|
py
| 329 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3277704581
|
import os
from playwright.sync_api import sync_playwright
key = "2731"
os.makedirs(f"res/{key}", exist_ok=True)
def main():
    """Open one MRI portal product page and report how many document buttons it lists."""
    with sync_playwright() as p:
        # slow_mo gives the Angular page time to render between actions.
        browser = p.chromium.launch(headless=False, slow_mo=5000)
        page = browser.new_page()
        page.goto("https://mri.cts-mrp.eu/portal/details?productnumber=NL/H/2731/001")
        # with page.expect_download() as download_info:
        #     page.get_by_text("Download excel").click()
        # download = download_info.value
        # download.save_as(f"res/{key}/{key}.xlsx")
        # Selector for document download buttons: .mat-button-base.ng-star-inserted
        # STUDY LOCATOR METHODS, esp. "nth" in iterator
        elements = page.get_by_role("listitem").get_by_role("button").all()
        # Bug fix: Locator.all() returns a plain Python list, which has no
        # zero-argument count() method (list.count requires an argument and
        # Locator.count() does not exist on lists) -- use len() instead.
        count = len(elements)
        print(f"Number of detected elements is: {count}")
        # for doc in elements:
        # for i in range(count):
        #     elements.nth(i).click(modifiers=["Control", "Shift"])
        # handles = page.query_selector_all(".documents-list .mat-button-wrapper .mat-icon-no-color")
        # with page.expect_download() as download_info:
        #     doc.click()
        # download = download_info.value
        # doc_name = download.suggested_filename
        # download.save_as(f"res/{key}/{doc_name}.pdf")
        browser.close()

main()
|
ReCodeRa/MRI_02
|
MRI/pw_down_sync_single_pdf.py
|
pw_down_sync_single_pdf.py
|
py
| 1,397 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9736948830
|
import pickle
import numpy as np
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score, accuracy_score
from tensorflow import keras
import matplotlib.pyplot as plt
import tensorflow_addons as tfa
import health_doc
import matplotlib.pyplot as plt
import gc
from imp import reload
from doc_preprocessing import get_data_from_kfold
import BERT
reload(BERT)
from BERT import make_model, model_fit, model_save, model_load
from BERT import get_tokenizer, get_tokenized_data, get_model_result, calc_score
# model
# 0: Normal multi-label classification
# 1: Knowledge Distillation (train against a teacher model's soft labels)
mode = 0
if (mode):
    # ### Get teacher model predictions, keyed by document id
    with open('id_teacher_predict','rb') as f:
        id_teacher_predict = pickle.load(f)
if __name__ == '__main__':
    # ### Loading HealthDoc dataset
    dataset_path = "../dataset/HealthDoc/"
    dataset_id, dataset_label, dataset_content, dataset_label_name = health_doc.loading(dataset_path)
    # ### Loading the precomputed K-fold split (ids and labels per fold)
    with open('k_id', 'rb') as f:
        k_id = pickle.load(f)
    with open('k_label', 'rb') as f:
        k_label = pickle.load(f)
    K = len(k_id)
    tokenizer = get_tokenizer() # get BERT tokenizer
    # Repeat the whole K-fold cross-validation 10 times and append one
    # averaged result row per repetition to the CSV below.
    for cv_times in range(10):
        cv_micro_f1 = []
        cv_macro_f1 = []
        cv_accuray = []
        cv_weighted_f1 = []
        cv_label_f1 = []
        for testing_time in range(K):
            # ### Split data: one fold held out for test, the rest for training
            subset_test = [testing_time]
            subset_train = np.delete(np.arange(K), subset_test)
            x_train, y_train = get_data_from_kfold(k_id, k_label, subset_train)
            x_test, y_test = get_data_from_kfold(k_id, k_label, subset_test)
            # NOTE(review): model_path is assigned but never used -- confirm
            # whether model_save(model_path) was intended after training.
            model_path = f'/content/model/{subset_test[0]}/'
            # ### Training Model
            #x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.15)
            # get tokenized data with BERT input format
            x_train_vec = get_tokenized_data(x_train, dataset_content, tokenizer)
            x_test = get_tokenized_data(x_test, dataset_content, tokenizer)
            #x_val = getTokenized(x_val, dataset_content, tokenizer)
            tf.keras.backend.clear_session()
            model = make_model(9)
            if (mode):
                # Build the teacher's soft-label matrix (one 9-vector per id)
                y_train_teacher = np.empty(x_train.shape+(9,))
                for i, x in enumerate(x_train):
                    y_train_teacher[i,:] = id_teacher_predict[x]
                print('Training Multi-label model with KD')
                history = model_fit(model, x_train_vec, y_train_teacher)
            else:
                print('Training Multi-label model without KD')
                history = model_fit(model, x_train_vec, y_train)
            gc.collect()
            # ### Predict result on the held-out fold
            y_pred = get_model_result(model, x_test)
            # ### Calculate predict result (micro/macro/weighted F1, subset accuracy)
            micro_f1, macro_f1, weighted_f1, subset_acc = calc_score(y_test, y_pred)
            cv_micro_f1.append(micro_f1)
            cv_macro_f1.append(macro_f1)
            cv_weighted_f1.append(weighted_f1)
            cv_accuray.append(subset_acc)
            label_f1=[]
            # Per-label F1 for this fold
            for i, label_name in enumerate(dataset_label_name):
                label_f1.append(f1_score(y_test[:,i], y_pred[:,i]))
                print(f'{label_name:<15}:{label_f1[-1]: .4f}')
            cv_label_f1.append(label_f1)
        # Append this repetition's fold-averaged metrics as one CSV row
        with open('multi-times cv result.csv', 'a') as f:
            f.write(f'{sum(cv_micro_f1)/K: .4f},')
            f.write(f'{sum(cv_macro_f1)/K: .4f},')
            f.write(f'{sum(cv_weighted_f1)/K: .4f},')
            f.write(f'{sum(cv_accuray)/K: .4f},')
            label_f1_mean = np.mean(cv_label_f1, axis=0)
            for f1_mean in label_f1_mean:
                f.write(f'{f1_mean: .4f},')
            f.write('\n')
|
Szu-Chi/NLP_Final_Hierarchical_Transfer_Learning
|
BERT_multi_student.py
|
BERT_multi_student.py
|
py
| 4,067 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25018394942
|
import datetime
import hashlib
import json
from urllib.parse import urlparse
import requests
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import padding
import config
import crypto
class Blockchain:
    """Minimal proof-of-work blockchain: signed transactions, mining against a
    configurable difficulty, and HTTP longest-chain consensus across peers."""
    def __init__(self, key_path=None):
        # Initialize a chain which will contain blocks
        self.chain = [] # a simple list containing blocks
        # Create a list which contains a list of transactions before they
        # are added to the block. Think of it as a cache of transactions which
        # happened, but are not yet written to a block in a blockchain.
        self.transactions = []
        # Create a genesis block - the first block
        # Previous hash is 0 because this is a genesis block!
        self.create_block(proof=1, previous_hash='0')
        # Create a set of nodes
        self.nodes = set()
        # Optionally load this node's signing identity from disk.
        if key_path:
            self.private_key = crypto.load_private_key(key_path)
            self.address = self.generate_address(self.private_key.public_key())
    def create_block(self, proof, previous_hash):
        """Seal the pending transactions into a new block and append it."""
        # Define block as a dictionary
        block = {'index': len(self.chain) + 1,
                 'timestamp': str(datetime.datetime.now()),
                 'proof': proof,
                 'previous_hash': previous_hash,
                 # Here we can add any additional data related to the currency
                 'transactions': self.transactions
                 }
        # Now we need to empty the transactions list, since all those transactions
        # are now contained in the block.
        self.transactions = []
        # Append block to the blockchain
        self.chain.append(block)
        return block
    def get_previous_block(self):
        """Return the most recently appended block."""
        return self.chain[-1]
    def get_address(self):
        """Return this node's address (only set when key_path was provided)."""
        return self.address
    def proof_of_work(self, previous_proof):
        """Brute-force a nonce whose problem hash clears the difficulty target."""
        new_proof = 1 # nonce value
        check_proof = False
        while check_proof is False:
            # Problem to be solved (this makes the mining hard)
            # operation has to be non-symetrical!!!
            hash_operation = hashlib.sha256(str(config.BLOCKCHAIN_PROBLEM_OPERATION_LAMBDA(
                previous_proof, new_proof)).encode()).hexdigest()
            # Check if the hash starts with the configured leading zeros
            if hash_operation[:len(config.LEADING_ZEROS)] == config.LEADING_ZEROS:
                check_proof = True
            else:
                new_proof += 1
        # Check proof is now true
        return new_proof
    def hash_of_block(self, block):
        """Return the SHA-256 hex digest of the block's canonical JSON form."""
        # Convert a dictionary to string (JSON)
        # NOTE(review): transactions built by add_transaction() carry raw bytes
        # for 'signature'/'public_key'; json.dumps cannot serialize bytes, so
        # hashing (or save_blockchain) on a block containing such transactions
        # would raise TypeError -- confirm and hex-encode if needed.
        encoded_block = json.dumps(block, sort_keys=True).encode()
        return hashlib.sha256(encoded_block).hexdigest()
    def is_chain_valid(self, chain):
        """Check hash links and proof-of-work for each block after the genesis.

        Transaction signatures are NOT verified here.
        """
        previous_block = chain[0]
        block_index = 1
        while block_index < len(chain):
            # 1 Check the previous hash
            block = chain[block_index]
            if block['previous_hash'] != self.hash_of_block(previous_block):
                return False
            # 2 Check all proofs of work
            previous_proof = previous_block['proof']
            proof = block['proof']
            hash_operation = hashlib.sha256(str(config.BLOCKCHAIN_PROBLEM_OPERATION_LAMBDA(
                previous_proof, proof)).encode()).hexdigest()
            if hash_operation[:len(config.LEADING_ZEROS)] != config.LEADING_ZEROS:
                return False
            # Update variables
            previous_block = block
            block_index += 1
        return True
    def add_transaction(self, sender, receiver, amount, private_key):
        """Sign a transfer with RSA-PSS and queue it for the next block.

        Returns the index the containing block will have once mined.
        """
        # Create a transaction dictionary
        transaction = {
            'sender': sender,
            'receiver': receiver,
            'amount': amount
        }
        # Sign the canonical JSON encoding of the transaction
        signature = private_key.sign(
            json.dumps(transaction, sort_keys=True).encode(),
            padding.PSS(
                mgf=padding.MGF1(hashes.SHA256()),
                salt_length=padding.PSS.MAX_LENGTH
            ),
            hashes.SHA256()
        )
        # Add the signature and public key to the transaction (raw bytes --
        # see the serialization caveat on hash_of_block)
        transaction['signature'] = signature
        transaction['public_key'] = private_key.public_key().public_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PublicFormat.SubjectPublicKeyInfo
        )
        # Add the transaction to the list of transactions
        self.transactions.append(transaction)
        # Return the index of the next block in the blockchain
        previous_block = self.get_previous_block()
        return previous_block['index'] + 1
    def add_node(self, address):
        """Register a peer node given its base URL."""
        parsed_url = urlparse(address)
        # Add to the list of nodes
        # parsed_url() method returns ParseResult object which has an attribute netloc
        # which is in a format adress:port eg. 127.0.0.1:5000
        self.nodes.add(parsed_url.netloc)
    def replace_chain(self):
        """Longest-chain consensus: adopt the longest valid chain among peers.

        Returns True when the local chain was replaced, False otherwise.
        """
        network = self.nodes
        longest_chain = None
        max_length = len(self.chain)
        for node in network:
            # Find the largest chain (send a request)
            response = requests.get(f'http://{node}/get-chain')
            if response.status_code == 200:
                length = response.json()['length']
                chain = response.json()['chain']
                # Check chain if it is the longest one and also a valid one
                if length > max_length and self.is_chain_valid(chain):
                    max_length = length
                    longest_chain = chain
        if longest_chain:
            # Replace the chain
            self.chain = longest_chain
            return True
        # Otherwise, the chain is not replaced
        return False
    def save_blockchain(self, filename):
        """Write the chain to disk as JSON (see bytes caveat on hash_of_block)."""
        with open(filename, 'w') as file:
            json.dump(self.chain, file, indent=4)
    def load_blockchain(self, filename):
        """Replace the in-memory chain with one read from a JSON file."""
        with open(filename, 'r') as file:
            self.chain = json.load(file)
    def generate_address(self, public_key):
        """Derive a node address: SHA-256 hex of the PEM-encoded public key."""
        public_key_bytes = public_key.public_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PublicFormat.SubjectPublicKeyInfo
        )
        return hashlib.sha256(public_key_bytes).hexdigest()
|
ivana-dodik/Blockchain
|
EP -- zadatak 03/bez master key/blockchain.py
|
blockchain.py
|
py
| 6,409 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70170907387
|
import numpy as np
#%%
def continuous_angle(x):
    """Unwrap a sequence of angles so consecutive values never jump by more
    than pi: each angle is shifted by whole multiples of 2*pi until it lies
    within pi of the previously unwrapped angle (initial reference is 0).
    Returns the unwrapped sequence as a numpy array."""
    two_pi = 2 * np.pi
    previous = 0
    unwrapped = []
    for theta in x:
        while theta < previous - np.pi:
            theta += two_pi
        while theta > previous + np.pi:
            theta -= two_pi
        unwrapped.append(theta)
        previous = theta
    return np.array(unwrapped)
#%%
def dist2agent(data):
    """Add columns ' dist1'..' dist9': Euclidean distance of each actor to
    the agent in slot 0. Mutates and returns `data`."""
    agent_x = data[' x0']
    agent_y = data[' y0']
    for idx in range(1, 10):
        dx = data[' x%d' % idx] - agent_x
        dy = data[' y%d' % idx] - agent_y
        data[' dist%d' % idx] = np.sqrt(dx ** 2 + dy ** 2)
    return data
def poly2(data):
    """Add degree-2 features for every actor i: ' x2i' = x^2, ' y2i' = y^2
    and ' xyi' = x*y. Mutates and returns `data`."""
    for idx in range(10):
        xs = data[' x%d' % idx]
        ys = data[' y%d' % idx]
        data[' x2%d' % idx] = xs ** 2
        data[' y2%d' % idx] = ys ** 2
        data[' xy%d' % idx] = xs * ys
    return data
def speed_direction(data):
    """Add per-actor ' speedI', ' sin(dir)I' and ' cos(dir)I' over the 11
    timesteps: forward/backward differences at the endpoints and central
    differences (span of 2 steps) in the interior. Mutates and returns nothing
    beyond `data` itself."""
    for idx in range(10):
        xs = data[' x%d' % idx]
        ys = data[' y%d' % idx]
        speed = np.zeros(11)
        sin_d = np.zeros(11)
        cos_d = np.zeros(11)
        for t in range(11):
            if t == 0:
                dx, dy, span = xs[1] - xs[0], ys[1] - ys[0], 1.0
            elif t == 10:
                dx, dy, span = xs[10] - xs[9], ys[10] - ys[9], 1.0
            else:
                dx, dy, span = xs[t + 1] - xs[t - 1], ys[t + 1] - ys[t - 1], 2.0
            speed[t] = np.sqrt(dx ** 2 + dy ** 2) / span
            heading = np.arctan2(dy, dx)
            sin_d[t] = np.sin(heading)
            cos_d[t] = np.cos(heading)
        data[' speed%d' % idx] = speed
        data[' sin(dir)%d' % idx] = sin_d
        data[' cos(dir)%d' % idx] = cos_d
    return data
def acceleration(data):
    """Add ' accelerationI': finite-difference derivative of ' speedI'
    (forward/backward at the endpoints, central over 2 steps inside)."""
    for idx in range(10):
        v = data[' speed%d' % idx]
        accel = np.zeros(11)
        accel[1:10] = (v[2:11] - v[0:9]) / 2  # central differences
        accel[0] = v[1] - v[0]                # forward difference
        accel[10] = v[10] - v[9]              # backward difference
        data[' acceleration%d' % idx] = accel
    return data
def turning(data):
    """Add ' turningI': angular rate reconstructed from the stored heading
    sin/cos. Headings are recovered with arctan2, unwrapped through
    continuous_angle so differences are meaningful, then differentiated the
    same way as `acceleration` (endpoints one-sided, interior central)."""
    for idx in range(10):
        heading = np.arctan2(data[' sin(dir)%d' % idx], data[' cos(dir)%d' % idx])
        heading = continuous_angle(heading)
        turn = np.zeros(11)
        turn[1:10] = (heading[2:11] - heading[0:9]) / 2
        turn[0] = heading[1] - heading[0]
        turn[10] = heading[10] - heading[9]
        data[' turning%d' % idx] = turn
    return data
def replace_agent(data):
    # Move the actor tagged ' role' == ' agent' into slot 0 by swapping all of
    # its per-actor columns with slot 0's columns.
    # NOTE(review): the swap relies on `data[[cols]] = data[[other_cols]]`
    # assigning positionally; with a pandas DataFrame this kind of assignment
    # aligns on column labels, which may not perform the intended swap --
    # confirm against the pandas version in use.
    for i in range(10):
        if data[' role%d' % i][0] == ' agent':
            temp = data[[' id0',' role0',' type0',' x0',' y0',' present0']]
            data[[' id0',' role0',' type0',' x0',' y0',' present0']] = data[[' id%d'%i,' role%d'%i,' type%d'%i,' x%d'%i,' y%d'%i,' present%d'%i]]
            data[[' id%d'%i,' role%d'%i,' type%d'%i,' x%d'%i,' y%d'%i,' present%d'%i]] = temp
    return data
def empty_fix(data, x_max=30, y_max=10):
    """Park absent actors (present flag == 0 in slots 1..9) at fixed far-away
    positions: each absent slot gets, in order of discovery, a constant
    off-field coordinate scaled by (x_max, y_max) over all 11 timesteps, plus
    generic ' others'/' car' role and type labels."""
    x_mults = (2, 2, -2, -2, 3, 3, -3, -3, 4)
    y_mults = (2, -2, 2, -2, 3, -3, 3, -3, -4)
    park_x = np.array([m * x_max for m in x_mults])
    park_y = np.array([m * y_max for m in y_mults])
    slot = 0
    for idx in range(1, 10):
        if data[' present%d' % idx][0] == 0:
            data[' x%d' % idx] = park_x[slot] * np.ones(11)
            data[' y%d' % idx] = park_y[slot] * np.ones(11)
            slot += 1
            data[' role%d' % idx] = ' others'
            data[' type%d' % idx] = ' car'
    return data
|
aliseyfi75/Autonomous-Driving
|
Codes/add_features.py
|
add_features.py
|
py
| 3,966 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8381595021
|
from os import system
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.collections import PolyCollection
from mpl_toolkits.axes_grid import make_axes_locatable
##############################################################################
# matplotlib configuration
# Shared plot styling: rcParams plus the marker cycle and default sizes used
# by the plotting helpers in this module.
linewidth = 2.0
fontsize = 12
params = { # 'backend': 'ps',
          'axes.labelsize': fontsize,
          # NOTE(review): 'text.fontsize' is not a valid rcParams key in
          # modern matplotlib ('font.size' superseded it) -- confirm against
          # the matplotlib version this project pins.
          'text.fontsize': fontsize,
          'legend.fontsize': 0.9*fontsize,
          'xtick.labelsize': 0.9*fontsize,
          'ytick.labelsize': 0.9*fontsize,
          'text.usetex': False,
          # 'figure.figsize': fig_size
          }
matplotlib.rcParams.update(params)
markers = ['o', 's', '^', 'd', 'v', '*', 'h', '<', '>']
markersize = 8
nodesize = 1000
##############################################################################
def init_plot(is_tight_layout=False, ind_fig=0, **kwargs):
    """Close every open figure and start figure `ind_fig` with one axes.

    Extra keyword arguments are forwarded to plt.figure. Returns the axes.
    """
    plt.close("all")
    figure = plt.figure(ind_fig, **kwargs)
    axes = figure.add_subplot(111)
    if is_tight_layout:
        figure.tight_layout()
    return axes
def new_plot(is_tight_layout=False, ind_fig=0):
    """Add a single-axes figure numbered `ind_fig` without closing others.

    Returns (axes, ind_fig + 1) so callers can thread the next figure number
    through repeated calls.
    """
    figure = plt.figure(ind_fig)
    axes = figure.add_subplot(111)
    if is_tight_layout:
        figure.tight_layout()
    return axes, ind_fig + 1
def save_fig(figname, is_adjust_border=False):
    """Save the current figure as '<figname>.pdf' and return that filename.

    When is_adjust_border is set, subplot margins are widened first so labels
    and colorbars are not clipped.
    """
    if is_adjust_border:
        plt.subplots_adjust(left=0.12, bottom=0.1, right=0.86, top=0.9,
                            wspace=0.2, hspace=0.2)
    pdf_name = figname + ".pdf"
    plt.savefig(pdf_name, format='PDF')
    return pdf_name
|
ngctnnnn/DRL_Traffic-Signal-Control
|
sumo-rl/sumo/tools/contributed/sumopy/agilepy/lib_misc/matplotlibtools.py
|
matplotlibtools.py
|
py
| 1,749 |
python
|
en
|
code
| 17 |
github-code
|
6
|
4495169101
|
# -*- coding: utf-8 -*-
"""
Tests for CSV Normalizer
"""
import csv
from io import StringIO
from _pytest.capture import CaptureFixture
from pytest_mock import MockFixture
from src.csv_normalizer import main
def test_outputs_normalized_csv(mocker: MockFixture, capsys: CaptureFixture[str]) -> None:
    """main() reads sample.csv from stdin and emits the expected normalized CSV."""
    with open("tests/sample.csv", encoding="utf-8", newline="") as csv_file:
        mocker.patch("sys.stdin", csv_file)  # feed the sample file through stdin
        main()
    captured = capsys.readouterr()
    assert len(captured.out) > 0
    assert len(captured.err) == 0
    written_csv = csv.reader(StringIO(captured.out))
    # Compare row by row against the golden output file.
    # NOTE(review): zip stops at the shorter iterable, so extra or missing
    # trailing rows go undetected -- consider itertools.zip_longest.
    with open("tests/output-sample.csv", encoding="utf-8", newline="") as expected_csv_file:
        expected_csv = csv.reader(expected_csv_file)
        for written_line, expected_line in zip(written_csv, expected_csv):
            assert written_line == expected_line
def test_handles_error_properly(mocker: MockFixture, capsys: CaptureFixture[str]) -> None:
    """Broken rows are reported on stderr while valid rows still reach stdout."""
    with open("tests/sample-with-broken-fields.csv", encoding="utf-8", newline="") as csv_file:
        mocker.patch("sys.stdin", csv_file)
        main()
    captured = capsys.readouterr()
    assert len(captured.err) > 0
    # Expected stderr lines, one per broken field; the replacement characters
    # are presumably produced by decoding the deliberately corrupted input --
    # verify against tests/sample-with-broken-fields.csv.
    expected_errors = [
        "Invalid timestamp: 4/1/11 11:00:00 �M",
        "invalid literal for int() with base 10: '9412�'",
        "Duration is in an invalid format: 123:32.123",
        "Duration has an invalid value: 1:a:32.123",
        "Duration is in an invalid format: 132:33.123",
        "Duration has an invalid value: 1:a:33.123",
    ]
    errors = captured.err.splitlines()
    assert len(errors) == len(expected_errors)
    for error, expected_error in zip(errors, expected_errors):
        assert error == expected_error
    assert len(captured.out) > 0
    written_csv = csv.reader(StringIO(captured.out))
    # The surviving rows must match the golden output file row by row.
    with open(
        "tests/output-sample-with-broken-fields.csv", encoding="utf-8", newline=""
    ) as expected_csv_file:
        expected_csv = csv.reader(expected_csv_file)
        for written_line, expected_line in zip(written_csv, expected_csv):
            assert written_line == expected_line
|
felipe-lee/csv_normalization
|
tests/test_csv_normalizer.py
|
test_csv_normalizer.py
|
py
| 2,253 |
python
|
en
|
code
| 0 |
github-code
|
6
|
33595739631
|
from flask import Flask, render_template, request, redirect, url_for
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from database_setup import Base, Movie
app = Flask(__name__)
# check_same_thread=False lets the single module-wide session below be used
# from Flask's worker threads (SQLite connections are otherwise thread-bound).
engine = create_engine('sqlite:///books-collection.db?check_same_thread=False')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()  # one shared session for all request handlers
@app.route('/')
@app.route('/movies')
def showMovies():
    """List every movie in the collection."""
    all_movies = session.query(Movie).all()
    return render_template("movies.html", movies=all_movies)
@app.route('/movies/new/', methods=['GET', 'POST'])
def newMovie():
    """Show the creation form (GET) or persist a new movie (POST)."""
    if request.method != 'POST':
        return render_template('newMovie.html')
    movie = Movie(title=request.form['name'],
                  author=request.form['author'],
                  cast=request.form['cast'],
                  price=request.form['price'])
    session.add(movie)
    session.commit()
    return redirect(url_for('showMovies'))
# Update an existing movie's fields and save the changes to the database.
@app.route("/movies/<int:movie_id>/edit/", methods=['GET', 'POST'])
def editMovie(movie_id):
    """Show the edit form (GET) or apply the submitted changes (POST)."""
    editedMovie = session.query(Movie).filter_by(id=movie_id).one()
    if request.method == 'POST':
        # Copy each submitted field onto its own attribute; the original
        # assigned all four form values to `title` in turn, clobbering the
        # title and never updating author/cast/price.
        if request.form['name']:
            editedMovie.title = request.form['name']
        if request.form['author']:
            editedMovie.author = request.form['author']
        if request.form['cast']:
            editedMovie.cast = request.form['cast']
        if request.form['price']:
            editedMovie.price = request.form['price']
        session.add(editedMovie)
        session.commit()  # the original never committed, so edits were lost
        return redirect(url_for('showMovies'))
    else:
        return render_template('editMovie.html', movie=editedMovie)
# Remove a movie from the collection.
@app.route('/movies/<int:movie_id>/delete/', methods=['GET', 'POST'])
def deleteMovie(movie_id):
    """Show a confirmation page (GET) or delete the movie (POST)."""
    doomed = session.query(Movie).filter_by(id=movie_id).one()
    if request.method != 'POST':
        return render_template('deleteMovie.html', movie=doomed)
    session.delete(doomed)
    session.commit()
    return redirect(url_for('showMovies', movie_id=movie_id))
if __name__ == '__main__':
    app.debug = True  # auto-reload + verbose tracebacks; disable in production
    app.run(port=4996)
|
mrSlavik22mpeitop/stepik_selenium
|
flask_app_mpei.py
|
flask_app_mpei.py
|
py
| 2,261 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29104292358
|
import numpy as np
import pandas as pd
# Demonstrate two equivalent ways of dropping outliers from one DataFrame column.
np.random.seed(123)  # fixed seed keeps the demo reproducible
data = pd.DataFrame({label: np.random.normal(0, 1, 50) for label in ('A', 'B', 'C')})
# work on a single column
col = data['C']
threshold = 0.5
within = np.abs(col) <= threshold
col_filtered = col[within]        # plain boolean-mask indexing
col_filtered1 = col.loc[within]   # identical filter through the .loc accessor
print("Original column:\n", col)
print("\nFiltered column:\n", col_filtered)
print("\nFiltered column (.loc):\n", col_filtered1)
|
shifa309/-Deep-Learning-BWF-Shifa-Imran
|
Task15/Shifa_6.py
|
Shifa_6.py
|
py
| 653 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16439987677
|
import math
from datetime import datetime, timedelta
from decimal import Decimal
from financial.input import (
FinancialDataInput,
FinancialStatisticsInput,
NullFinancialDataInput,
NullFinancialStatisticsInput,
)
from financial.model import FinancialData, db
class FinancialDataInputValidationService:
    """Validates raw query-string arguments for the financial-data endpoint.

    On success `self.financial_data` holds a populated FinancialDataInput; on
    the first failure a message is appended to `self.validation_errors` and a
    NullFinancialDataInput is stored instead.
    """
    def __init__(self, request_args):
        self.validation_errors = []  # human-readable messages; empty when valid
        self.financial_data = self.validate_and_parse_financial_data_input(request_args)
    def validate_and_parse_financial_data_input(
        self, request_args
    ) -> FinancialDataInput | NullFinancialDataInput:
        # default start_date is 14 days ago
        start_date = request_args.get(
            "start_date", (datetime.now() + timedelta(days=-14)).strftime("%Y-%m-%d")
        )
        # default end_date is today
        end_date = request_args.get("end_date", datetime.now().strftime("%Y-%m-%d"))
        # Both dates must parse as ISO dates before any comparison.
        for field_name, date in (("start_date", start_date), ("end_date", end_date)):
            try:
                datetime.strptime(date, "%Y-%m-%d")
            except ValueError:
                self.validation_errors.append(f"{field_name} is not a valid date")
                return NullFinancialDataInput()
        start_date = datetime.strptime(start_date, "%Y-%m-%d").date()
        end_date = datetime.strptime(end_date, "%Y-%m-%d").date()
        if start_date > end_date:
            self.validation_errors.append("start_date is after end_date")
            return NullFinancialDataInput()
        # use "IBM" as default symbol
        symbol = request_args.get("symbol", "IBM")
        if symbol not in ["IBM", "AAPL"]:
            self.validation_errors.append("symbol is not valid")
            return NullFinancialDataInput()
        limit = request_args.get("limit", "5")
        # Use 1 as default page number. Page 1 is the first page.
        page = request_args.get("page", "1")
        for field_name, value in [("limit", limit), ("page", page)]:
            try:
                int(value)
            except ValueError:
                self.validation_errors.append(f"{field_name} is not a valid integer")
                return NullFinancialDataInput()
        return FinancialDataInput(
            start_date=start_date,
            end_date=end_date,
            symbol=symbol,
            limit=int(limit),
            page=int(page),
        )
class FinancialStatisticsInputValidationService:
    """Validates query-string arguments for the statistics endpoint.

    Unlike the financial-data endpoint, start_date/end_date/symbol are all
    mandatory here; failures populate `self.validation_errors` and leave a
    NullFinancialStatisticsInput in `self.financial_statistics`.
    """
    def __init__(self, request_args):
        self.validation_errors = []  # human-readable messages; empty when valid
        self.financial_statistics = self.validate_and_parse_financial_statistics_input(
            request_args
        )
    def validate_and_parse_financial_statistics_input(
        self, request_args
    ) -> FinancialStatisticsInput | NullFinancialStatisticsInput:
        # check if all required fields are present
        for required_field in ("start_date", "end_date", "symbol"):
            if required_field not in request_args:
                self.validation_errors.append(f"{required_field} is required")
                return NullFinancialStatisticsInput()
        start_date = request_args.get("start_date")
        end_date = request_args.get("end_date")
        # Both dates must parse as ISO dates before any comparison.
        for field_name, date in (("start_date", start_date), ("end_date", end_date)):
            try:
                datetime.strptime(date, "%Y-%m-%d")
            except ValueError:
                self.validation_errors.append(f"{field_name} is not a valid date")
                return NullFinancialStatisticsInput()
        start_date = datetime.strptime(start_date, "%Y-%m-%d").date()
        end_date = datetime.strptime(end_date, "%Y-%m-%d").date()
        if start_date > end_date:
            self.validation_errors.append("start_date is after end_date")
            return NullFinancialStatisticsInput()
        symbol = request_args.get("symbol")
        # symbol only allows IBM and AAPL
        if symbol not in ("IBM", "AAPL"):
            self.validation_errors.append("symbol is not valid")
            return NullFinancialStatisticsInput()
        return FinancialStatisticsInput(
            start_date=start_date, end_date=end_date, symbol=symbol
        )
)
class GetFinancialDataService:
    """Service to get financial data from database"""
    def __init__(self, financial_data_input: FinancialDataInput):
        self.financial_data_input = financial_data_input
        self.financial_data_output = []  # serialized row dicts for the response
        self.pagination = {}             # pagination metadata for the response
    def get_financial_data(self) -> None:
        """Query rows in the requested window (date-ordered) and populate
        both the output rows and the pagination metadata."""
        financial_data = db.session.scalars(
            db.select(FinancialData)
            .where(
                FinancialData.symbol == self.financial_data_input.symbol,
                FinancialData.date >= self.financial_data_input.start_date,
                FinancialData.date <= self.financial_data_input.end_date,
            )
            .order_by(FinancialData.date)
        ).all()
        self.format_pagination(len(financial_data))
        self.format_financial_data(financial_data)
    def format_financial_data(self, financial_data: list[FinancialData]) -> None:
        """Slice the full result set down to the requested page and serialize
        each row to a plain dict (pagination done in Python, not SQL)."""
        start_index = (
            self.financial_data_input.page - 1
        ) * self.financial_data_input.limit
        end_index = start_index + self.financial_data_input.limit
        self.financial_data_output = [
            {
                "symbol": row.symbol,
                "date": row.date.strftime("%Y-%m-%d"),
                "open_price": row.open_price,
                "close_price": row.close_price,
                "volume": row.volume,
            }
            for row in financial_data[start_index:end_index]
        ]
    def format_pagination(self, total_length: int) -> None:
        """Record total row count, page size, current page and page count."""
        # page starts at 1
        self.pagination = {
            "total": total_length,
            "limit": self.financial_data_input.limit,
            "page": self.financial_data_input.page,
            "pages": math.ceil(total_length / self.financial_data_input.limit),
        }
class CalculateFinancialStatisticsService:
    """Service to get financial data from database and calculate financial statistics"""
    def __init__(self, financial_statistics_input: FinancialStatisticsInput):
        self.financial_statistics_input = financial_statistics_input
        self.financial_statistics_output = {}  # serialized statistics for the response
    def calculate_financial_statistics(self) -> None:
        """Fetch all rows for the symbol inside the date window and compute
        the average daily open/close price and volume."""
        financial_data = db.session.scalars(
            db.select(FinancialData).where(
                FinancialData.symbol == self.financial_statistics_input.symbol,
                FinancialData.date >= self.financial_statistics_input.start_date,
                FinancialData.date <= self.financial_statistics_input.end_date,
            )
        ).all()
        self.format_financial_statistics(financial_data)
    def format_financial_statistics(self, financial_data: list[FinancialData]) -> None:
        """Build the response dict; averages are stringified for JSON output.

        NOTE(review): the average helpers divide by len(financial_data), so an
        empty date window raises ZeroDivisionError -- confirm callers guard
        against empty result sets.
        """
        self.financial_statistics_output = {
            "symbol": self.financial_statistics_input.symbol,
            "start_date": self.financial_statistics_input.start_date.strftime(
                "%Y-%m-%d"
            ),
            "end_date": self.financial_statistics_input.end_date.strftime("%Y-%m-%d"),
            "average_daily_open_price": str(
                self.calculate_average_daily_open_price(financial_data)
            ),
            "average_daily_close_price": str(
                self.calculate_average_daily_close_price(financial_data)
            ),
            "average_daily_volume": str(
                self.calculate_average_daily_volume(financial_data)
            ),
        }
    def calculate_average_daily_volume(
        self, financial_data: list[FinancialData]
    ) -> Decimal:
        """Calculate average daily volume. Round to nearest integer"""
        return round(sum(row.volume for row in financial_data) / len(financial_data))
    def calculate_average_daily_open_price(
        self, financial_data: list[FinancialData]
    ) -> Decimal:
        """Calculate average daily open price. Round to 2 decimal places"""
        return round(
            (sum(row.open_price for row in financial_data) / len(financial_data)), 2
        )
    def calculate_average_daily_close_price(
        self, financial_data: list[FinancialData]
    ) -> Decimal:
        """Calculate average daily close price. Round to 2 decimal places"""
        return round(
            (sum(row.close_price for row in financial_data) / len(financial_data)), 2
        )
|
pevenc12/python_assignment
|
financial/services.py
|
services.py
|
py
| 8,468 |
python
|
en
|
code
| null |
github-code
|
6
|
21480418170
|
from collections import namedtuple
from functools import partial
from itertools import count, groupby, zip_longest
import bpy
import numpy as np
import re
from .log import log, logd
from .helpers import (
ensure_iterable,
get_context,
get_data_collection,
get_layers_recursive,
load_property,
reshape,
save_property,
select_only,
swap_names,
titlecase,
)
logs = partial(log, category="SAVE")  # category-tagged logger for this module
# Data-path parsers: custom properties look like `prefix["prop name"]`,
# regular RNA properties like `prefix.prop_name` (prefix optional in both).
custom_prop_pattern = re.compile(r'(.+)?\["([^"]+)"\]')
prop_pattern = re.compile(r'(?:(.+)\.)?([^"\.]+)')
# Dummy internal operator: drawn depressed with an error icon purely so the UI
# can show a "not overridable" warning next to a property. Its class docstring
# is what Blender displays as the button tooltip, so it is left untouched.
class GRET_OT_property_warning(bpy.types.Operator):
    """Changes won't be saved"""
    bl_idname = 'gret.property_warning'
    bl_label = "Not Overridable"
    bl_options = {'INTERNAL'}
def draw_warning_if_not_overridable(layout, bid, data_path):
    """Adds a warning to a layout if the requested property is not available or not overridable."""
    # Only relevant for datablocks that are library overrides; returns True
    # when a warning button was drawn.
    if bid and bid.override_library:
        try:
            if not bid.is_property_overridable_library(data_path):
                layout.operator(GRET_OT_property_warning.bl_idname,
                    icon='ERROR', text="", emboss=False, depress=True)
                return True
        except TypeError:
            # presumably raised for data paths Blender cannot resolve --
            # treated as "no warning needed"; confirm against the bpy API.
            pass
    return False
class PropertyWrapper(namedtuple('PropertyWrapper', 'struct prop_name is_custom')):
    """Provides read/write access to a property given its data path."""
    __slots__ = ()
    @classmethod
    def from_path(cls, struct, data_path):
        """Resolve `data_path` relative to `struct`.

        Returns a PropertyWrapper, or None when the path does not point at an
        existing custom or RNA property.
        """
        # To set a property given a data path it's necessary to split the struct and attribute name.
        # `struct.path_resolve(path, False)` returns a bpy_prop, and bpy_prop.data holds the struct.
        # Unfortunately it knows but doesn't expose the attribute name (see `bpy_prop.__str__`)
        # It's also necessary to determine if it's a custom property, the interface is different.
        # Just parse the data path with a regular expression instead.
        try:
            prop_match = custom_prop_pattern.fullmatch(data_path)
            if prop_match:
                if prop_match[1]:
                    struct = struct.path_resolve(prop_match[1])
                prop_name = prop_match[2]
                if prop_name not in struct:
                    return None
                return cls(struct, prop_name, True)
            prop_match = prop_pattern.fullmatch(data_path)
            if prop_match:
                if prop_match[1]:
                    struct = struct.path_resolve(prop_match[1])
                prop_name = prop_match[2]
                if not hasattr(struct, prop_name):
                    return None
                return cls(struct, prop_name, False)
        except ValueError:
            # path_resolve raises ValueError for invalid paths
            return None
    @property
    def data_path(self):
        """Data path of the property relative to the stored struct."""
        return f'["{self.prop_name}"]' if self.is_custom else self.prop_name
    @property
    def title(self):
        """Human-readable name suitable for UI labels."""
        if self.is_custom:
            return titlecase(self.prop_name) # Custom property name should be descriptive enough
        else:
            return f"{getattr(self.struct, 'name', self.struct.bl_rna.name)} {titlecase(self.prop_name)}"
    @property
    def default_value(self):
        """Default value declared for the property (array-shaped for arrays)."""
        if self.is_custom:
            return self.struct.id_properties_ui(self.prop_name).as_dict()['default']
        else:
            prop = self.struct.bl_rna.properties[self.prop_name]
            if getattr(prop, 'is_array', False):
                return reshape(prop.default_array, prop.array_dimensions)
            return getattr(prop, 'default', None)
    @property
    def value(self):
        """Current value, in a form load_property/`[]` can round-trip."""
        if self.is_custom:
            return self.struct[self.prop_name]
        else:
            return save_property(self.struct, self.prop_name)
    @value.setter
    def value(self, new_value):
        if self.is_custom:
            self.struct[self.prop_name] = new_value
        else:
            load_property(self.struct, self.prop_name, new_value)
class PropOp(namedtuple('PropOp', 'prop_wrapper value')):
    """Undo record for a single property: saves its current value, optionally
    writes a new one, and restores the saved value on revert()."""
    __slots__ = ()
    def __new__(cls, struct, data_path, value=None):
        prop_wrapper = PropertyWrapper.from_path(struct, data_path)
        if not prop_wrapper:
            raise RuntimeError(f"Couldn't resolve {data_path}")
        saved_value = prop_wrapper.value
        if value is not None:
            prop_wrapper.value = value
        return super().__new__(cls, prop_wrapper, saved_value)
    def revert(self, context):
        # `context` is unused; accepted for interface parity with other ops.
        self.prop_wrapper.value = self.value
class PropForeachOp(namedtuple('PropForeachOp', 'collection prop_name values')):
    """Bulk undo record for one property across a whole bpy collection,
    saved/restored efficiently via foreach_get/foreach_set."""
    __slots__ = ()
    def __new__(cls, collection, prop_name, value=None):
        assert isinstance(collection, bpy.types.bpy_prop_collection)
        if len(collection) == 0:
            # Can't investigate array type if there are no elements (would do nothing anyway)
            return super().__new__(cls, collection, prop_name, np.empty(0))
        # Determine element dtype and flattened length from the first element's RNA.
        prop = collection[0].bl_rna.properties[prop_name]
        element_type = type(prop.default)
        num_elements = len(collection) * prop.array_length
        saved_values = np.empty(num_elements, dtype=element_type)
        collection.foreach_get(prop_name, saved_values)
        if value is not None:
            # Broadcast the new value to every element of the collection.
            values = np.full(num_elements, value, dtype=element_type)
            collection.foreach_set(prop_name, values)
        return super().__new__(cls, collection, prop_name, saved_values)
    def revert(self, context):
        if self.values.size > 0:
            self.collection.foreach_set(self.prop_name, self.values)
class CallOp(namedtuple('CallOp', 'func args kwargs')):
__slots__ = ()
def __new__(cls, func, *args, **kwargs):
assert callable(func)
return super().__new__(cls, func, args, kwargs)
def revert(self, context):
self.func(*self.args, **self.kwargs)
class SelectionOp(namedtuple('SelectionOp', 'selected_objects active_object collection_hide '
                             'layer_hide object_hide')):
    """Snapshot of the scene's selection and visibility state; revert()
    restores it, tolerating datablocks deleted in the meantime."""
    __slots__ = ()
    def __new__(cls, context):
        return super().__new__(cls,
            selected_objects=context.selected_objects[:],
            active_object=context.view_layer.objects.active,
            collection_hide=[(cl, cl.hide_select, cl.hide_viewport, cl.hide_render)
                             for cl in bpy.data.collections],
            layer_hide=[(layer, layer.hide_viewport, layer.exclude)
                        for layer in get_layers_recursive(context.view_layer.layer_collection)],
            object_hide=[(obj, obj.hide_select, obj.hide_viewport, obj.hide_render)
                         for obj in bpy.data.objects])
    def revert(self, context):
        # ReferenceError means the datablock was removed since the snapshot;
        # each entry is restored independently so one stale reference doesn't
        # abort the rest.
        for collection, hide_select, hide_viewport, hide_render in self.collection_hide:
            try:
                collection.hide_select = hide_select
                collection.hide_viewport = hide_viewport
                collection.hide_render = hide_render
            except ReferenceError:
                pass
        for layer, hide_viewport, exclude in self.layer_hide:
            try:
                layer.hide_viewport = hide_viewport
                layer.exclude = exclude
            except ReferenceError:
                pass
        for obj, hide_select, hide_viewport, hide_render in self.object_hide:
            try:
                obj.hide_select = hide_select
                obj.hide_viewport = hide_viewport
                obj.hide_render = hide_render
            except ReferenceError:
                pass
        select_only(context, self.selected_objects)
        try:
            context.view_layer.objects.active = self.active_object
        except ReferenceError:
            pass
class CollectionOp(namedtuple('CollectionOp', 'collection remove_func_name items is_whitelist')):
    """Undo record that removes items from a bpy collection on revert():
    either everything not in the snapshot (whitelist mode, items=None) or
    exactly the given items (blacklist mode)."""
    __slots__ = ()
    def __new__(cls, collection, items=None):
        assert isinstance(collection, bpy.types.bpy_prop_collection)
        # Find out if there's a remove-like function available
        # (a single required POINTER parameter, named "remove" or "unlink").
        for func_name in ('remove', 'unlink', ''):
            func = collection.bl_rna.functions.get(func_name)
            if (func is not None
                    and sum(param.is_required for param in func.parameters) == 1
                    and func.parameters[0].type == 'POINTER'):
                break
        if not func_name:
            raise RuntimeError(f"'{collection.bl_rna.name}' is not supported")
        if items is None:
            # On reverting, remove all but the current items
            return super().__new__(cls, collection, func_name, set(collection), True)
        else:
            # On reverting, remove the specified items
            return super().__new__(cls, collection, func_name, set(items), False)
    def revert(self, context):
        # Allow passing in object names instead of object references
        # Compare types, don't use `isinstance` as that will throw on removed objects
        items = set(self.collection.get(el) if type(el) == str else el for el in self.items)
        items.discard(None)
        remove_func = getattr(self.collection, self.remove_func_name)
        if self.is_whitelist:
            # Remove items not in the set
            for item in set(self.collection) - items:
                logs("Removing", item)
                remove_func(item)
        else:
            # Remove items in the set
            for item in items:
                try:
                    logs("Removing", item)
                    remove_func(item)
                except ReferenceError:
                    # Item was already deleted elsewhere; nothing to remove.
                    pass
class RenameOp(namedtuple('RenameOp', 'bid name other_bid')):
    """Undo record for renaming an ID datablock; remembers the old name and,
    when a name collision forced a swap, the other datablock involved.
    """
    __slots__ = ()
    def __new__(cls, bid, name, start_num=0, name_format="{name}{num}"):
        data_collection = get_data_collection(bid)
        if data_collection is None:
            raise RuntimeError(f"Type {type(bid).__name__} is not supported")
        saved_name = bid.name
        bid.tag = True  # Not strictly necessary, tagging allows custom naming format to work
        # Try `name`, then `name0`, `name1`, ... until a free (or swappable) name is found
        for num in count(start=start_num):
            new_name = name if (num == start_num) else name_format.format(name=name, num=num)
            other_bid = data_collection.get(new_name)
            if not other_bid or bid == other_bid:
                bid.name = new_name
                return super().__new__(cls, bid, saved_name, None)
            elif other_bid and not other_bid.tag:
                # Name is taken by an untagged datablock: exchange names with it
                swap_names(bid, other_bid)
                return super().__new__(cls, bid, saved_name, other_bid)
    def revert(self, context):
        """Restore the original name (swapping back first if needed)."""
        if self.other_bid:
            try:
                swap_names(self.bid, self.other_bid)
            except ReferenceError:
                pass
        self.bid.name = self.name  # Ensure the name is reverted if swap_names failed
        self.bid.tag = False
class SaveState:
    """Similar to an undo stack. See SaveContext for example usage."""
    def __init__(self, context, name, refresh=False):
        # `name` identifies this save state; `refresh` forces a scene
        # re-evaluation after reverting
        self.context = context
        self.name = name
        self.refresh = refresh
        self.operations = []
    def revert(self):
        """Undo every recorded operation, most recent first."""
        while self.operations:
            self._pop_op()
        if self.refresh:
            # Might be necessary in some cases where context.scene.view_layers.update() is not enough
            self.context.scene.frame_set(self.context.scene.frame_current)
    def _push_op(self, op_cls, *args, **kwargs):
        """Record one undo operation; failures are logged, never raised."""
        try:
            self.operations.append(op_cls(*args, **kwargs))
            logs("Push", self.operations[-1], max_len=90)
        except Exception as e:
            logs(f"Error pushing {op_cls.__name__}: {e}")
    def _pop_op(self):
        """Revert and discard the most recently recorded operation."""
        op = self.operations.pop()
        try:
            logs("Pop", op, max_len=90)
            op.revert(self.context)
        except Exception as e:
            logs(f"Error reverting {op.__class__.__name__}: {e}")
    def prop(self, struct, data_paths, values=[None]):
        """Save the specified properties and optionally assign new values."""
        # NOTE: the mutable default is only read, never mutated
        if isinstance(data_paths, str):
            data_paths = data_paths.split()
        if not isinstance(values, list):
            values = [values]
        if len(values) != 1 and len(values) != len(data_paths):
            raise ValueError("Expected either a single value or as many values as data paths")
        for data_path, value in zip_longest(data_paths, values, fillvalue=values[0]):
            self._push_op(PropOp, struct, data_path, value)
    def prop_foreach(self, collection, prop_name, value=None):
        """Save the specified property for all elements in the collection."""
        self._push_op(PropForeachOp, collection, prop_name, value)
    def selection(self):
        """Save the current object selection."""
        self._push_op(SelectionOp, self.context)
    def temporary(self, collection, items):
        """Mark one or more items for deletion."""
        self._push_op(CollectionOp, collection, ensure_iterable(items))
    def temporary_bids(self, bids):
        """Mark one or more IDs for deletion."""
        # Group by concrete ID type so each goes to its own data collection
        for bid_type, bids in groupby(ensure_iterable(bids), key=lambda bid: type(bid)):
            if bid_type is not type(None):
                self._push_op(CollectionOp, get_data_collection(bid_type), bids)
    def keep_temporary_bids(self, bids):
        """Keep IDs that were previously marked for deletion."""
        bids = set(ensure_iterable(bids))
        for op in reversed(self.operations):
            if isinstance(op, CollectionOp) and not op.is_whitelist:
                op.items.difference_update(bids)
    def collection(self, collection):
        """Remember the current contents of a collection. Any items created later will be removed."""
        self._push_op(CollectionOp, collection)
    def viewports(self, header_text=None, show_overlays=None, **kwargs):
        """Save and override 3D viewport settings."""
        for area in self.context.screen.areas:
            if area.type == 'VIEW_3D':
                # Don't think there's a way to find out the current header text, reset on reverting
                self._push_op(CallOp, area.header_text_set, None)
                area.header_text_set(header_text)
                for space in area.spaces:
                    if space.type == 'VIEW_3D':
                        if show_overlays is not None:
                            self._push_op(PropOp, space.overlay, 'show_overlays', show_overlays)
                        # Remaining kwargs are applied to the shading settings
                        for field_name, field_value in kwargs.items():
                            self._push_op(PropOp, space.shading, field_name, field_value)
    def rename(self, bid, name):
        """Save the IDs current name and give it a new name."""
        self._push_op(RenameOp, bid, name)
    def clone_obj(self, obj, to_mesh=False, parent=None, reset_origin=False):
        """Clones or converts an object. Returns a new, visible scene object with unique data."""
        if to_mesh:
            dg = self.context.evaluated_depsgraph_get()
            new_data = bpy.data.meshes.new_from_object(obj, preserve_all_data_layers=True, depsgraph=dg)
            self.temporary_bids(new_data)
            new_obj = bpy.data.objects.new(obj.name + "_", new_data)
            self.temporary_bids(new_obj)
        else:
            new_data = obj.data.copy()
            self.temporary_bids(new_data)
            new_obj = obj.copy()
            self.temporary_bids(new_obj)
            new_obj.name = obj.name + "_"
            new_obj.data = new_data
        assert new_data.users == 1
        if obj.type == 'MESH':
            # Move object materials to mesh
            for mat_index, mat_slot in enumerate(obj.material_slots):
                if mat_slot.link == 'OBJECT':
                    new_data.materials[mat_index] = mat_slot.material
                    new_obj.material_slots[mat_index].link = 'DATA'
        # New objects are moved to the scene collection, ensuring they're visible
        self.context.scene.collection.objects.link(new_obj)
        new_obj.hide_set(False)
        new_obj.hide_viewport = False
        new_obj.hide_render = False
        new_obj.hide_select = False
        new_obj.parent = parent
        if reset_origin:
            new_data.transform(new_obj.matrix_world)
            bpy.ops.object.origin_set(get_context(new_obj), type='ORIGIN_GEOMETRY', center='MEDIAN')
        else:
            new_obj.matrix_world = obj.matrix_world
        return new_obj
class SaveContext:
    """
    Context-manager wrapper around SaveState: records state changes inside
    the `with` block and reverts them all on exit (even on exception).

    Example usage:
        with SaveContext(bpy.context, "test") as save:
            save.prop_foreach(bpy.context.scene.objects, 'location')
            bpy.context.active_object.location = (1, 1, 1)
    """
    def __init__(self, *args, **kwargs):
        self.save = SaveState(*args, **kwargs)
    def __enter__(self):
        return self.save
    def __exit__(self, *exc_info):
        # Always revert; returning None lets any exception propagate
        self.save.revert()
class StateMachineBaseState:
    """Base class for states used with StateMachineMixin."""
    def __init__(self, owner):
        # The state machine this state belongs to
        self.owner = owner
    def on_enter(self):
        """Called when the state becomes active. Default is a no-op."""
    def on_exit(self):
        """Called when the state is deactivated. Default is a no-op."""
class StateMachineMixin:
    """Minimal pushdown state machine.

    States live on a stack; only the topmost is active. When
    state_events_on_reentry is set, the state underneath also receives
    on_exit/on_enter notifications as the stack changes.
    """
    state_stack = None
    state_events_on_reentry = True

    @property
    def state(self):
        """The active (topmost) state, or None when the stack is empty."""
        stack = self.state_stack
        return stack[-1] if stack else None

    def pop_state(self, *args, **kwargs):
        """Deactivate and drop the top state, re-entering the one below."""
        current = self.state
        if current:
            popped = self.state_stack.pop()
            popped.on_exit(*args, **kwargs)
        if self.state_events_on_reentry and self.state:
            self.state.on_enter()

    def push_state(self, state_class, *args, **kwargs):
        """Instantiate state_class (owned by self) and make it active."""
        assert state_class
        new_state = state_class(self)
        if self.state_events_on_reentry and self.state:
            self.state.on_exit()
        if self.state_stack is None:
            self.state_stack = []
        self.state_stack.append(new_state)
        if new_state:
            new_state.on_enter(*args, **kwargs)
class DrawHooksMixin:
    """Mixin that manages registration of 3D viewport draw callbacks.

    Subclasses may define on_draw_post_pixel(context) and/or
    on_draw_post_view(context); hook() registers whichever exist and
    unhook() removes them again.
    """
    space_type = bpy.types.SpaceView3D
    draw_post_pixel_handler = None
    draw_post_view_handler = None

    def hook(self, context):
        """Register the draw callbacks implemented by the subclass."""
        if not self.draw_post_pixel_handler and hasattr(self, "on_draw_post_pixel"):
            self.draw_post_pixel_handler = self.space_type.draw_handler_add(self.on_draw_post_pixel,
                (context,), 'WINDOW', 'POST_PIXEL')
        if not self.draw_post_view_handler and hasattr(self, "on_draw_post_view"):
            # Fix: this previously assigned to draw_post_pixel_handler, which
            # clobbered the pixel handler and leaked the view handler on unhook()
            self.draw_post_view_handler = self.space_type.draw_handler_add(self.on_draw_post_view,
                (context,), 'WINDOW', 'POST_VIEW')

    def unhook(self):
        """Remove any registered draw callbacks."""
        if self.draw_post_pixel_handler:
            self.space_type.draw_handler_remove(self.draw_post_pixel_handler, 'WINDOW')
            self.draw_post_pixel_handler = None
        if self.draw_post_view_handler:
            self.space_type.draw_handler_remove(self.draw_post_view_handler, 'WINDOW')
            self.draw_post_view_handler = None
def show_window(width=0.5, height=0.5):
    """Open a window at the cursor. Size can be pixels or a fraction of the main window size."""
    # Hack from https://blender.stackexchange.com/questions/81974
    with SaveContext(bpy.context, "show_window") as save:
        render = bpy.context.scene.render
        prefs = bpy.context.preferences
        main_window = bpy.context.window_manager.windows[0]
        # Temporarily change the render output size/display, then open a
        # render preview window at that size; SaveContext restores everything
        save.prop(prefs, 'is_dirty view.render_display_type')
        save.prop(render, 'resolution_x resolution_y resolution_percentage')
        # Values <= 1.0 are fractions of the main window, larger values are pixels
        render.resolution_x = int(main_window.width * width) if width <= 1.0 else int(width)
        render.resolution_y = int(main_window.height * height) if height <= 1.0 else int(height)
        render.resolution_percentage = 100
        prefs.view.render_display_type = 'WINDOW'
        bpy.ops.render.view_show('INVOKE_DEFAULT')
    # The newly opened window is the last in the window manager's list
    return bpy.context.window_manager.windows[-1]
def show_text_window(text, title, width=0.5, height=0.5, font_size=16):
    """Open a window at the cursor displaying the given text.

    Args:
        text: the string to display.
        title: name of the (reused or newly created) text datablock.
        width, height: window size, fraction of the main window or pixels.
        font_size: editor font size; None keeps the current size.
    """
    # Open a render preview window, then modify it to show a text editor instead
    window = show_window(width, height)
    area = window.screen.areas[0]
    area.type = 'TEXT_EDITOR'
    space = area.spaces[0]
    assert isinstance(space, bpy.types.SpaceTextEditor)
    # Make a temporary text
    string = text
    text = bpy.data.texts.get(title) or bpy.data.texts.new(name=title)
    text.use_fake_user = False
    text.from_string(string)
    text.cursor_set(0)
    # Minimal interface: hide most editor chrome
    if font_size is not None:
        space.font_size = font_size
    space.show_line_highlight = True
    space.show_line_numbers = False
    space.show_margin = False
    space.show_region_footer = False
    space.show_region_header = False
    space.show_region_ui = False
    space.show_syntax_highlight = False
    space.show_word_wrap = True
    space.text = text
def register(settings, prefs):
    """Register this module's operator classes with Blender.

    The settings/prefs arguments are unused in this module.
    """
    bpy.utils.register_class(GRET_OT_property_warning)
def unregister():
    """Unregister this module's operator classes from Blender."""
    bpy.utils.unregister_class(GRET_OT_property_warning)
|
greisane/gret
|
operator.py
|
operator.py
|
py
| 21,651 |
python
|
en
|
code
| 298 |
github-code
|
6
|
12984152626
|
# 7. Given two already-created singly linked lists (PTR1 and PTR2), both
# sorted in ascending order, build a third list PTR3 sorted in descending
# order containing the elements common to both lists.
from List import List,Node
PTR1 = List(list=[1,3,5,7,9,10])
PTR2 = List(list=[1,2,4,5,6,6,7,8,9,11])
PTR3 = List()
# Walk PTR1 and copy every value that also appears in PTR2 (still ascending)
i = PTR1.first
while i:
    if PTR2.count(i.data) > 0:
        PTR3.append(i.data)
    i = i.next
# Reverse the ascending result to obtain descending order, then print it
PTR3.reverse()
PTR3.show()
|
WaffleLovesCherries/ActividadListasEnlazadas
|
ClassList/Ejercicio7.py
|
Ejercicio7.py
|
py
| 466 |
python
|
es
|
code
| 0 |
github-code
|
6
|
10422164463
|
from __future__ import annotations
import traceback
from PySide6 import QtWidgets
from randovania.games.prime2.patcher.claris_randomizer import ClarisRandomizerExportError
def create_box_for_exception(val: Exception) -> QtWidgets.QMessageBox:
    """Build a critical-error message box for an unhandled exception.

    The full traceback is placed in the detailed-text area (pre-expanded),
    copied to the clipboard, and the box is widened via a spacer item.

    Args:
        val: the exception to report.

    Returns:
        The configured (not yet shown) QMessageBox.
    """
    box = QtWidgets.QMessageBox(
        QtWidgets.QMessageBox.Critical,
        "An exception was raised",
        (
            f"An unhandled Exception occurred:\n{val}\n\n"
            "When reporting, make sure to paste the entire contents of the following box."
            "\nIt has already be copied to your clipboard."
        ),
        QtWidgets.QMessageBox.Ok,
    )
    from randovania.gui.lib import common_qt_lib
    common_qt_lib.set_default_window_icon(box)
    detailed_exception = "".join(traceback.format_exception(val))
    if isinstance(val, ClarisRandomizerExportError):
        # Exporter errors carry extra context worth surfacing
        detailed_exception += "\n\n"
        detailed_exception += val.detailed_text()
    box.setDetailedText(detailed_exception)
    common_qt_lib.set_clipboard(detailed_exception)
    # Expand the detailed text
    for button in box.buttons():
        if box.buttonRole(button) == QtWidgets.QMessageBox.ActionRole:
            button.click()
            break
    # Force a minimum width via an expanding spacer in the box's grid layout
    box_layout: QtWidgets.QGridLayout = box.layout()
    box_layout.addItem(
        QtWidgets.QSpacerItem(600, 0, QtWidgets.QSizePolicy.Policy.Minimum, QtWidgets.QSizePolicy.Policy.Expanding),
        box_layout.rowCount(),
        0,
        1,
        box_layout.columnCount(),
    )
    return box
|
randovania/randovania
|
randovania/gui/lib/error_message_box.py
|
error_message_box.py
|
py
| 1,513 |
python
|
en
|
code
| 165 |
github-code
|
6
|
32713874308
|
import scrapy
class KistaSpider(scrapy.Spider):
    """Scrapes Hemnet listing counters (sold / for sale) for Kista apartments."""
    name = "kista"

    def start_requests(self):
        # Single search-results page for bostadsratt in the Kista area
        start_url = 'https://www.hemnet.se/bostader?location_ids%5B%5D=473377&item_types%5B%5D=bostadsratt'
        yield scrapy.Request(url=start_url, callback=self.parse)

    def parse(self, response):
        # Extract the numeric counters from the result-type toggle spans
        sold_count = response.css("span.result-type-toggle__sold-count::text").re(r'\d+')
        sale_count = response.css("span.result-type-toggle__for-sale-count::text").re(r'\d+')
        yield {'sold': sold_count, 'for_sell': sale_count}
|
theone4ever/hemnet
|
hemnet/spiders/kista_bostadsratt_spider.py
|
kista_bostadsratt_spider.py
|
py
| 547 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1482920507
|
# 从爬虫生成的Excel表格中读取数据并生成词云图
import os
import sys
import PIL
import jieba
import openpyxl
import wordcloud
import configparser
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from collections import Counter
from multiprocessing import Pool
# Script parameters; see the readme.md on GitHub for details
config_file = 'config/config.ini'
config_Section_Name = 'GC_DEFAULT'  # name of the config section to read
stop_Word = ['!', '!', ':', '*', ',', ',', '?','《','》',
             '。', ' ', '的', '了', '是', '啊', '吗', '吧','这','你','我','他','就']  # stop-word list
def read_Danmu(workbook_Name, sheet_Name):
    """Read danmu rows from an Excel worksheet.

    Args:
        workbook_Name: path of the .xlsx workbook to open.
        sheet_Name: name of the worksheet to read.

    Returns:
        An iterator of row value tuples; an empty iterator on any error.
    """
    try:
        workbook = openpyxl.load_workbook(workbook_Name)
        # Look the sheet up by name (clearer than a numeric index)
        worksheet = workbook[sheet_Name]
        return worksheet.iter_rows(values_only=True)
    except openpyxl.utils.exceptions.InvalidFileException:
        print(f"输入文件的路径或格式错误,请打开{config_file}文件重新配置路径\n")
        return iter(())
    except KeyError:
        print(f"工作表页名错误,请检查Sheet的名字和{config_file}中是否一致\n")
        return iter(())
    # Fix: was a bare `except:` which also swallowed SystemExit/KeyboardInterrupt
    except Exception as e:
        print(f"发生错误: {type(e)} - {e}")
        return iter(())
def cut_words(row):
    """Tokenize one (danmu_text, occurrence_count) row into a word Counter.

    Each word's tally is multiplied by how often the danmu appeared, so the
    Counter reflects overall word frequency. Malformed rows (e.g. a None
    text cell) yield an empty Counter.
    """
    try:
        sentence = row[0]
        occurrences = row[1]
        # Segment the text with jieba and drop stop words
        tokens = [word for word in jieba.lcut(sentence) if word not in stop_Word]
        word_counter = Counter(tokens)
        # word frequency = danmu occurrence count * in-danmu word count
        for word in word_counter:
            word_counter[word] *= occurrences
        return word_counter
    except TypeError:
        # Malformed row: contribute nothing to the total
        return Counter()
def generate_Word_Cloud(counter):
    """Render a word-cloud image from a word-frequency Counter.

    Uses the module-level configuration (pic_Path, font_Path, WC_Width,
    WC_Height, output_Path). Shows the figure and writes it to output_Path.

    Returns:
        A human-readable status message describing success or the failure cause.
    """
    try:
        if not counter:  # nothing to draw
            return "输入的词频为空!"
        # Convert to RGBA up front to avoid errors on grayscale mask images
        img = PIL.Image.open(pic_Path).convert('RGBA')
        pic = np.array(img)
        image_colors = wordcloud.ImageColorGenerator(pic)
        word_Cloud = wordcloud.WordCloud(
            font_path=font_Path, mask=pic, width=WC_Width, height=WC_Height, mode="RGBA", background_color='white')
        word_Cloud.generate_from_frequencies(counter)
        plt.imshow(word_Cloud.recolor(color_func=image_colors),
                   interpolation='bilinear')
        word_Cloud.to_file(output_Path)
        plt.axis('off')
        plt.show()
        return f"词云图生成完成,请前往{output_Path}查看"
    except FileNotFoundError:  # bad pic_Path or font_Path
        return f"图片或字体路径错误,请前往{config_file}核查。"
    # Fix: `except TypeError or ValueError` evaluated to `except TypeError`,
    # so ValueError fell through to the generic handler with the wrong message
    except (TypeError, ValueError):
        return f"图片的Height与Width设置有误,请前往{config_file}核查。"
    except PIL.UnidentifiedImageError:
        return f"不支持该类型的图片,请修改图片路径。"
    except Exception as e:
        return f"生成词云图时发生错误:{e}"
def main():
    """Read danmu rows, count word frequencies in parallel, render the cloud."""
    rows = read_Danmu(workbook_Name, sheet_Name)
    word_counts = Counter()
    # Use a process pool to speed up segmentation; saves time when building
    # the cloud for the full danmu sheet
    with Pool() as pool:
        cut_words_results = pool.map(cut_words, rows)
        for result in cut_words_results:
            word_counts.update(result)
    print(generate_Word_Cloud(word_counts))
if __name__ == "__main__":
    # Load configuration parameters
    config = configparser.ConfigParser()
    if not os.path.exists(config_file):
        print(f"配置文件 {config_file} 不存在!")
        exit(1)
    config.read(config_file)
    workbook_Name = config.get(config_Section_Name, 'workbook_name',
                               fallback='output/Top_20_danmu.xlsx')  # Excel workbook to read; default is the crawler's output
    # Worksheet name to read; one of ['Top 20', '所有弹幕']
    sheet_Name = config.get(config_Section_Name, 'sheet_Name', fallback='所有弹幕')
    WC_Width = config.getint(
        config_Section_Name, 'WC_Width', fallback=1200)  # word-cloud width
    WC_Height = config.getint(
        config_Section_Name, 'WC_Height', fallback=1200)  # word-cloud height
    font_Path = config.get(config_Section_Name, 'font_Path',
                           fallback="config/msyh.ttc")  # font file path
    pic_Path = config.get(config_Section_Name, 'pic_Path',
                          fallback="config/m.png")  # background/mask image path
    output_Path = config.get(
        config_Section_Name, 'output_Path', fallback="output/word_could.png")
    main()
|
AyaGuang/bilibili-Danmu-Crawler
|
102101430/generate_Cloud.py
|
generate_Cloud.py
|
py
| 5,425 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
37731445163
|
# VPython (visual) simulation of four balls with different restitution
# coefficients bouncing along parallel wooden tracks.
import math
from visual import*
import Image
# Common drop position for all balls
dx=-50
dy=15
#Green comment block was originally user input for drop position, etc.,
#but was commented out, just not deleted.
"""
dx=-20
dy=input("please input the drop y position....recommand 10 or higher")
w=input("if you know the bounce height press '1', if you dont press '2' :")
if w==1:
    bh=input("enter the 1st bounce height")
    res=sqrt(bh/dy)##the equation for resitution
else:
    res=.89
else :
    print("try the balls we know!" )
    type1=input("press '1' for b-ball, '2' for tennis ball")
    if type1==1:#b ball
        res=0.83#restitution
    elif type1==2:#t ball
        res=0.72
    else: #no friction ball. no restitution
        res=1
"""
# Textures for the tennis ball, basketball and golf ball
im = Image.open('tennisball.jpg')
tex = materials.texture(data=im, mapping='spherical')
im2 = Image.open('BasketballColor.jpg')
tex2 = materials.texture(data=im2, mapping='spherical')
im3 = Image.open('golfball.jpg')
tex3 = materials.texture(data=im3, mapping='spherical')
# One track per ball, offset along z
floor = box(pos=(0,0,0), length=100, height=0.5, width=2, color= (1,1,1), material=materials.wood) #width=5, #edit
floor1 = box(pos=(0,0,-5), length=100, height=0.5, width=2, color= (1,1,1), material=materials.wood)
floor2 = box(pos=(0,0,5), length=100, height=0.5, width=2, color= (1,1,1), material=materials.wood)
floor3 = box(pos=(0,0,-10), length=100, height=0.5, width=2, color= (1,1,1), material=materials.wood)
pball = sphere(pos=(dx,dy,-10), radius=1.5, color=color.red, make_trail=True, material=materials.emissive) #edit
ball = sphere(pos=(dx,dy,0), radius=1, material=tex, make_trail=True, color=color.green)
ball2 = sphere(pos=(dx,dy,-5), radius=2, material=tex2, make_trail=True, color=color.orange) #the starting point of the ball .. add z value to make it 3d
ball3 = sphere(pos=(dx,dy,5), radius=.5, material=tex3, make_trail=True)
# Initial horizontal velocities
pball.velocity = vector(4,0)
ball.velocity = vector(4,0)
ball2.velocity = vector(4,0)
ball3.velocity = vector(4,0)#(4,0,0) for 3d motion
gravity = 9.81
dt = 0.01 #delta time frame of the ball
# NOTE(review): lowercase `true` comes from visual's wildcard import
# (VPython alias for True) — confirm against the installed visual version
while true:
    rate(100) #rate of speed
    pball.pos = pball.pos + pball.velocity*dt #ball.pos += velocity*dt
    ball.pos = ball.pos + ball.velocity*dt #ball.pos += velocity*dt
    ball2.pos = ball2.pos + ball2.velocity*dt #ball.pos += velocity*dt
    ball3.pos = ball3.pos + ball3.velocity*dt #ball.pos += velocity*dt
    # On floor contact, reflect the vertical velocity (scaled by the ball's
    # restitution factor); otherwise apply gravity
    if pball.y < .5:
        pball.velocity.y = abs(pball.velocity.y)
    else:
        pball.velocity.y = pball.velocity.y - gravity*dt
    if ball.y < .5:
        ball.velocity.y = abs(ball.velocity.y*0.905)
    else:
        ball.velocity.y = ball.velocity.y - gravity*dt
    if ball2.y < .5:
        ball2.velocity.y = abs(ball2.velocity.y*0.79)
    else:
        ball2.velocity.y = ball2.velocity.y - gravity*dt
    if ball3.y < .5:
        ball3.velocity.y = abs(ball3.velocity.y*0.78)
    else:
        ball3.velocity.y = ball3.velocity.y - gravity*dt
"""NOTE:
All lines are original, except for the if-else statements, which are edited, unless otherwise stated.
"""
|
emayer2/Projects
|
Python Simulation/Ball Bounce.py
|
Ball Bounce.py
|
py
| 3,190 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72474001467
|
import random
import numpy as np
from math import sqrt, log
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
x1_list = []  # history of x1 iterates, appended by the descent loop
x2_list = []  # history of x2 iterates
y_list = []   # unused in the visible code
counter = 0   # running count of objective/gradient evaluations
def drawFunc(minX, minY, maxX, maxY, ax = None):
    """Plot the objective surface over [minX,maxX]x[minY,maxY] on `ax`,
    with infeasible points (outside the constraints) zeroed out.

    NOTE(review): despite the `ax=None` default, `ax` is used unconditionally,
    so callers must always pass an axes object.
    """
    #fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
    #ax.set_xlabel('x1')
    #ax.set_ylabel('x2')
    #ax.set_zlabel('f(x1,x2)')
    x1_array = np.arange(minX, maxX, 0.1)
    x2_array = np.arange(minY, maxY, 0.1)
    # Mask out infeasible grid points, then evaluate f on the remainder
    x1_array, x2_array = fill_arrays(x1_array, x2_array)
    R = fill_z(x1_array, x2_array)
    # Rebuild a regular meshgrid for plotting
    x1_array = np.arange(minX, maxX, 0.1)
    x2_array = np.arange(minY, maxY, 0.1)
    x1_array, x2_array = np.meshgrid(x1_array, x2_array)
    #R = f(x1_array, x2_array)
    #drawBoder(ax, x1_array, g1_1)
    #drawBoder(ax, x1_array, g2_1)
    #drawBoder(ax, x1_array, g3_1)
    #drawBoder(ax, x1_array, g4_1)
    #print(R)
    ax.plot_surface(x1_array, x2_array, R, alpha = 0.6)
    #plt.show()
def fill_arrays(x, y):
    """Build two 2-D grids from the 1-D axes x and y, keeping coordinate
    values only where (x[j], y[i]) is feasible per `barier`, else 0.

    Returns (final_y, final_x) — note the swapped order in the return.
    """
    final_y = []
    final_x = []
    for i in range(len(y)):
        final_y.append([])
        for j in range(len(x)):
            if (barier(x[j], y[i])):
                #if f(x[j], y[i]) > 50:
                #print("i =", i, "j =", j)
                #print("x =", x[j], "y =", y[i], "f =", f(x[j], y[i]))
                final_y[i].append(x[j])
            else: final_y[i].append(0)
    for i in range(len(x)):
        final_x.append([])
        for j in range(len(y)):
            # NOTE(review): the feasibility test uses y[i] but the appended
            # value is y[j] — looks asymmetric with the loop above; confirm
            if (barier(x[j], y[i])):
                final_x[i].append(y[j])
            else: final_x[i].append(0)
    #for i in range(len(final_x)):
    #    print(i,")", final_x[i])
    return final_y, final_x
def fill_z(x, y):
    """Evaluate f on the masked grids produced by fill_arrays.

    Cells where either coordinate was zeroed (infeasible) get 0.0.
    Returns the result as a numpy array.
    """
    z = []
    for i in range(len(x)):
        z.append([])
        for j in range(len(x[i])):
            if (x[i][j] != 0 and y[j][i] != 0):
                z[i].append(f(x[i][j], y[j][i]))
            else: z[i].append(0.0)
            #print("i =", i, "j =", j)
            #print("x =", x[i][j], "y =", y[j][i], "z =", z[i][j])
    #for i in range(len(z)):
    #    print(i,")", z[i])
    r = np.array(z)
    #for i in range(len(z)):
    #    r.__add__(np.array[z[i]])
    return r
def fill_F2(x, y):
    """Evaluate f on 2-D coordinate grids, zeroing infeasible cells.

    Unlike fill_z, feasibility is re-tested directly with `barier`.
    Not referenced elsewhere in this file's visible code.
    """
    z = []
    for i in range(len(x)):
        z.append([])
        for j in range(len(x[i])):
            if (barier(x[i][j], y[i][j])):
                z[i].append(f(x[i][j], y[i][j]))
            else: z[i].append(0.0)
    r = np.array(z)
    #for i in range(len(z)):
    #    r.__add__(np.array[z[i]])
    #print(r)
    return r
def g1_1(x1):
    """x2 on the boundary of constraint g1 (g1 == 0) for a given x1."""
    numerator = -3*x1 + 6
    return numerator / 2
def g2_1(x1):
    """x2 on the boundary of constraint g2 (g2 == 0) for a given x1."""
    numerator = -x1 - 3
    return numerator / (-1)
def g3_1(x1):
    """x2 on the boundary of constraint g3 (g3 == 0) for a given x1."""
    numerator = x1 - 7
    return numerator / (-1)
def g4_1(x1):
    """x2 on the boundary of constraint g4 (g4 == 0) for a given x1."""
    numerator = 2*x1 - 4
    return numerator / 3
def drawBoder(ax, x1, g):
    """Draw a vertical surface along the constraint boundary x2 = g(x1).

    Args:
        ax: 3-D axes to draw on.
        x1: 1-D array of x1 samples.
        g: boundary function mapping x1 to x2 (e.g. g1_1..g4_1).
    """
    zs = np.arange(0, 80, 35)
    X, Z = np.meshgrid(x1, zs)
    Y = g(X)
    #fig = plt.figure()
    #ax = fig.add_subplot(111, projection='3d')
    ax.plot_surface(X, Y, Z, alpha = 0.4)
def show(x1_list, x2_list):
    """Plot the objective surface and the optimizer's trajectory.

    The start point is black, intermediate points red, the final point green,
    and the visited path is drawn as a black polyline. Does nothing when the
    input is empty.
    """
    N = len(x1_list)
    if N <= 0:
        return
    fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
    ax.set_xlabel('x1')
    ax.set_ylabel('x2')
    ax.set_zlabel('f(x1,x2)')
    drawFunc(0, 0, 5, 5, ax)
    x1_list2 = []
    x2_list2 = []
    f_list = []
    # Start point (black)
    ax.scatter(x1_list[0], x2_list[0], f(x1_list[0], x2_list[0]), c='black')
    x1_list2.append(x1_list[0])
    x2_list2.append(x2_list[0])
    f_list.append(f(x1_list[0], x2_list[0]))
    # Intermediate points (red)
    for n in range(1, N - 1):
        ax.scatter(x1_list[n], x2_list[n], f(x1_list[n], x2_list[n]), c='red')
        x1_list2.append(x1_list[n])
        x2_list2.append(x2_list[n])
        f_list.append(f(x1_list[n], x2_list[n]))
    # Final point (green)
    ax.scatter(x1_list[N - 1], x2_list[N - 1], f(x1_list[N - 1], x2_list[N - 1]), c='green')
    x1_list2.append(x1_list[N - 1])
    x2_list2.append(x2_list[N - 1])
    # Fix: previously used x2_list[n] here — the wrong index, and a NameError
    # whenever the loop above never ran (N == 2)
    f_list.append(f(x1_list[N - 1], x2_list[N - 1]))
    ax.plot(x1_list2, x2_list2, f_list, color="black")
    plt.show()
# <---------- f
def f(x1, x2):
    """Objective: squared distance from the point (6, 7)."""
    a = x1 - 6
    b = x2 - 7
    return a**2 + b**2
def f_x1(x1, x2):
    """Partial derivative of f with respect to x1 (x2 is unused)."""
    doubled = 2*x1
    return doubled - 12
def f_x2(x1, x2):
    """Partial derivative of f with respect to x2 (x1 is unused)."""
    doubled = 2*x2
    return doubled - 14
# -------------->
# <---------- gi
def g1(x1, x2):
    """Constraint g1; the point is feasible when the value is <= 0."""
    value = -3*x1 - 2*x2 + 6
    return value
def g2(x1, x2):
    """Constraint g2; the point is feasible when the value is <= 0."""
    value = -x1 + x2 - 3
    return value
def g3(x1, x2):
    """Constraint g3; the point is feasible when the value is <= 0."""
    value = x1 + x2 - 7
    return value
def g4(x1, x2):
    """Constraint g4; the point is feasible when the value is <= 0."""
    value = 2*x1 - 3*x2 - 4
    return value
# -------------->
# <---------- gi_bool
def g1_bool(x1, x2):
    """True when constraint g1 is satisfied at (x1, x2)."""
    value = -3*x1 - 2*x2 + 6
    return value <= 0
def g2_bool(x1, x2):
    """True when constraint g2 is satisfied at (x1, x2)."""
    value = -x1 + x2 - 3
    return value <= 0
def g3_bool(x1, x2):
    """True when constraint g3 is satisfied at (x1, x2)."""
    value = x1 + x2 - 7
    return value <= 0
def g4_bool(x1, x2):
    """True when constraint g4 is satisfied at (x1, x2)."""
    value = 2*x1 - 3*x2 - 4
    return value <= 0
def barier(x1, x2):
    """True when (x1, x2) satisfies every constraint g1..g4 (feasible point)."""
    inside = g1_bool(x1, x2) and g2_bool(x1, x2)
    inside = inside and g3_bool(x1, x2) and g4_bool(x1, x2)
    return inside
# -------------->
# <---------- X
def F(x1, x2, r):
    """Penalized objective: f plus the interior barrier term P."""
    objective = f(x1, x2)
    return objective + P(x1, x2, r)
def F_x1(x1, x2, r):
    """Partial derivative of the penalized objective F with respect to x1."""
    partial = f_x1(x1, x2)
    return partial + P_x1(x1, x2, r)
def F_x2(x1, x2, r):
    """Partial derivative of the penalized objective F with respect to x2."""
    partial = f_x2(x1, x2)
    return partial + P_x2(x1, x2, r)
# -------------->
# <-------------- P
def P(x1, x2, r):
    """Inverse-barrier penalty: -r times the sum of 1/g_i over the constraints."""
    total = 1/g1(x1, x2) + 1/g2(x1, x2) + 1/g3(x1, x2) + 1/g4(x1, x2)
    return -r*total
def P_x1(x1, x2, r):
    """Partial derivative of the barrier penalty P with respect to x1."""
    total = 3/(g1(x1, x2)**2) + 1/(g2(x1, x2)**2) - 1/(g3(x1, x2)**2) - 1/(g4(x1, x2)**2)
    return -r*total
def P_x2(x1, x2, r):
    """Partial derivative of the barrier penalty P with respect to x2."""
    total = 2/(g1(x1, x2)**2) - 1/(g2(x1, x2)**2) - 1/(g3(x1, x2)**2) + 3/(g4(x1, x2)**2)
    return -r*total
# ------------>
def gradient(x1, x2, r):
    """Gradient [dF/dx1, dF/dx2] of the penalized objective at (x1, x2)."""
    return [F_x1(x1, x2, r), F_x2(x1, x2, r)]
def module_of_gradient(grad):
    """Euclidean norm of a 2-component gradient vector."""
    gx = grad[0]
    gy = grad[1]
    return sqrt(gx**2 + gy**2)
def method_of_gradient_descent_with_a_constant_step(x1, x2, e, M, r):
    """Gradient descent on the penalized objective F with step backtracking.

    Despite the name, the step gamma (reset to 0.1 each iteration) is shrunk
    by 4x until F decreases and the trial point stays feasible. Visited points
    are appended to the module-level x1_list/x2_list, and `counter` tracks
    function evaluations.

    Args:
        x1, x2: starting point.
        e: tolerance for both stopping tests.
        M: minimum iteration count before the gradient-norm stop applies.
        r: barrier coefficient forwarded to F/gradient.

    Returns:
        (x1, x2) tuple of the found minimizer.
    """
    global counter
    k = 0
    counter += 1
    x1_next = x1
    x2_next = x2
    while True:
        counter += 2
        grad = gradient(x1, x2, r)
        module_grad = module_of_gradient(grad)
        # Stop on a small gradient, but only after at least M iterations
        if ((module_grad < e) and (k >= M)):
            return (x1_next, x2_next)
        gamma = 0.1
        x1_next = x1 - gamma * grad[0]
        x2_next = x2 - gamma * grad[1]
        counter += 2
        # Backtrack: shrink the step until F decreases and the point is feasible
        while (F(x1_next, x2_next, r) - F(x1, x2, r) >= 0 or not barier(x1_next, x2_next)):
            gamma /= 4
            x1_next = x1 - gamma * grad[0]
            x2_next = x2 - gamma * grad[1]
            counter += 1
        #print(grad, 'x1 =', x1, 'x2 =', x2, 'x1_next =', x1_next, 'x2_next =', x2_next, 'gamma =', gamma)
        x1_list.append(x1); x2_list.append(x2)
        # Stop when both the step length and the objective change are small.
        # NOTE(review): `&` is bitwise, but both operands are bool here, so it
        # behaves like a non-short-circuiting `and`
        if ((sqrt(abs(x1_next - x1)**2 + abs(x2_next - x2)**2) <= e)
            & (abs(F(x1_next, x2_next, r) - F(x1, x2, r)) <= e)):
            return (x1_next, x2_next)
        x1 = x1_next
        x2 = x2_next
        k += 1
def barrier_function_method(x1, x2, r, C, e, M, k):
    """Interior-point (barrier) method: repeated descent with shrinking r.

    Recursively minimizes F = f + P, dividing the barrier coefficient r by C
    each round, until the penalty magnitude drops below e.

    Returns:
        [(x1, x2, f(x1, x2)) rounded to `round_num` digits, outer-iteration count k]
    """
    min_x1, min_x2 = method_of_gradient_descent_with_a_constant_step(x1, x2, e, M, r)
    #print("x1 =", min_x1, "x2 =", min_x2)
    fine = P(min_x1, min_x2, r)
    #print("fine =", fine)
    if (abs(fine) <= e):
        return [(round(min_x1, round_num),
                 round(min_x2, round_num),
                 round(f(min_x1, min_x2), round_num)),
                k]
    k += 1
    r = r/C
    return barrier_function_method(min_x1, min_x2, r, C, e, M, k)
# Experiment parameters
round_num = 4  # decimal places used when rounding the reported result
x1 = 2.5       # starting point
x2 = 1
e = 0.0001     # convergence tolerance
M = 100        # minimum descent iterations before the gradient-norm stop applies
r = 1          # initial barrier coefficient
c = 10         # factor by which r shrinks each outer iteration
k = 0          # initial outer-iteration counter
result = barrier_function_method(x1, x2, r, c, e, M, k)
print(f"Barrier function method: {result[0]}; count of iteractions = {result[1]}")
print('Count of compute function =', counter + 1)
show(x1_list, x2_list)
#drawFunc(0, 0, 5, 5)
|
AlexSmirno/Learning
|
6 Семестр/Оптимизация/Lab_6_grad.py
|
Lab_6_grad.py
|
py
| 7,739 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5838127346
|
from datetime import datetime
from maico.sensor.stream import Confluence
from maico.sensor.targets.human import Human
from maico.sensor.targets.human_feature import MoveStatistics
from maico.sensor.targets.first_action import FirstActionFeature
import maico.sensor.streams.human_stream as hs
class OneToManyStream(Confluence):
    """Confluence that joins the latest Human with windowed move statistics
    and emits a FirstActionFeature when both are available.
    """
    KINECT_FPS = 30        # Kinect frame rate used for the move window
    FRAMES_FOR_MOVE = 15   # frames aggregated into one move sample
    MOVES_FOR_STAT = 4     # move samples aggregated into one statistic
    def __init__(self, human_stream):
        # Set when the first Human is observed; used for staying time
        self._observation_begin = None
        # hyper parameters (it will be arguments in future)
        self.move_threshold = 0.1  # above this speed, human act to move (not searching items)
        self.move_stream = hs.MoveStream(human_stream, self.FRAMES_FOR_MOVE, self.KINECT_FPS, self.move_threshold)
        self.move_stat_stream = hs.MoveStatisticsStream(self.move_stream, self.MOVES_FOR_STAT)
        super(OneToManyStream, self).__init__(human_stream, self.move_stat_stream)
    def notify(self, target):
        """Pool an incoming target and emit a merged feature when ready."""
        key = target.__class__
        if key is Human:
            self._pool[key] = [target]  # store only 1 (latest) human
            if self._observation_begin is None:
                self._observation_begin = datetime.utcnow()  # remember first human
        else:
            if key not in self._pool:
                self._pool[key] = []
            self._pool[key].append(target)
        if self.is_activated():
            t = self.merge()
            self.out_stream.push(t)
            self.reset()
    def is_activated(self):
        """True when exactly one Human and one MoveStatistics are pooled."""
        # NOTE: the local `hs` shadows the module alias within this method
        hs = self.get(Human)
        stats = self.get(MoveStatistics)
        if len(hs) == 1 and len(stats) == 1:
            return True
        else:
            return False
    def merge(self):
        """Combine the pooled Human and MoveStatistics into a FirstActionFeature."""
        h = self.get(Human)[0]
        stat = self.get(MoveStatistics)[0]
        # Seconds elapsed since the first Human was observed
        staying_time = (datetime.utcnow() - self._observation_begin).total_seconds()
        feature = FirstActionFeature(
            _id=h._id,
            staying_time=staying_time,
            mean_moving_rate=stat.moving_time.sum_ / stat.seconds.sum_,
            max_moving_rate=stat.moving_time.max_ / stat.seconds.mean_,
            min_moving_rate=stat.moving_time.min_ / stat.seconds.mean_,
            mean_moving_speed=stat.moving_speed.mean_
        )
        return feature
|
tech-sketch/maico
|
maico/sensor/streams/one_to_many_stream.py
|
one_to_many_stream.py
|
py
| 2,289 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74432928827
|
"""
This file is part of Candela.
Candela is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Candela is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Candela. If not, see <http://www.gnu.org/licenses/>.
"""
import curses
import sys
import signal
import threading
import textwrap
import platform
import constants
class Shell():
"""
The main Candela class
Controls the shell by taking control of the current terminal window.
Performs input and output to the user
"""
    def __init__(self, scriptfile=None):
        """
        Create an instance of a Shell
        This call takes over the current terminal by calling curses.initscr()
        Sets global shell state, including size information, menus, stickers,
        the header, and the prompt.
        Kwargs:
            scriptfile - the name of the script file to run. If not None and the
                         file exists, the script will be immediately run.
        """
        self._register_sigint_handler()
        self.script_lines = self._parse_script_file(scriptfile)
        self.script_counter = 0
        # NOTE(review): _parse_script_file already stored the filename in
        # self.scriptfile; this resets it to "" — confirm this is intentional
        self.scriptfile = ""
        self.stdscr = curses.initscr()
        self.stdscr.keypad(1)
        self.platform = self._get_platform()
        # holds the backlog of shell output
        self.backbuffer = []
        self.height,self.width = self.stdscr.getmaxyx()
        # the list of menus in the shell app
        self.menus = []
        # the currently visible stickers in the app
        self.stickers = []
        # should the command menu be shown
        self.should_show_help = True
        # for commands with only positional args, show the
        # name of the next argument as the user types
        self.should_show_hint = False
        # dictionary of functions to call on key events
        # keys are chars representing the pressed keys
        self.keyevent_hooks = {}
        # the text to stick in the upper left corner of the window
        self.header = ""
        # header layout bookkeeping, updated by _print_header
        self._header_bottom = 0
        self._header_right = 0
        self._header_right_margin = 50
        self.prompt = "> "
def _parse_script_file(self, filename):
"""
Open a file if it exists and return its contents as a list of lines
Args:
filename - the file to attempt to open
"""
self.scriptfile = filename
try:
f = open(filename, 'r')
script_lines = f.readlines()
script_lines = [a.strip('\n') for a in script_lines]
f.close()
except Exception as e:
return
return script_lines
def runscript(self, scriptfile):
"""
Set up the global shell state necessary to run a script from a file
Args:
scriptfile - the string name of the file containing the script.
paths are relative to system cwd
"""
self.script_lines = self._parse_script_file(scriptfile)
self.script_counter = 0
def get_helpstring(self):
"""
Get the help string for the current menu.
This string contains a preformatted list of commands and their
descriptions from the current menu.
"""
_menu = self.get_menu()
if not _menu:
return
helpstring = "\n\n" + _menu.title + "\n" + "-"*20 + "\n" + _menu.options()
return helpstring
def sticker(self, output, new_output="", pos=None):
    """
    Place, change, or remove a sticker from the shell window.
    Candela has the concept of a sticker - a small block of text that
    is "stuck" to the window. They can be used to convey persistent
    information to the shell user.
    If only output is specified, this creates a new sticker with the string
    output. If output and new_output are specified, and there is an existing
    sticker whose text is the same as output, this will replace that
    sticker's text with new_output.
    Args:
        output - The text of the sticker to manipulate
    Kwargs:
        new_output - The text that will replace the text of the chosen sticker
        pos - The (y, x) tuple indicating where to place the sticker
    """
    # Default vertical position: one row below the lowest existing
    # sticker, or row 3 when there are none yet.
    if len(self.stickers) > 0:
        sort = sorted(self.stickers, key=lambda x: x[1][0], reverse=True)
        ht = sort[0][1][0]+1
    else:
        ht = 3
    pos = pos or (ht, self.width - 20)
    # Look for an existing sticker with identical text so it can be
    # replaced in place, keeping its original position.
    match = None
    for text,_pos in self.stickers:
        if output == text:
            match = (text,_pos)
            break
    if match:
        self.remove_sticker(match[0])
    # A replaced sticker keeps its old position; a new one uses pos.
    sticker = (new_output or output, match[1] if match else pos)
    self.stickers.append(sticker)
    self._update_screen()
def remove_sticker(self, text):
    """
    Delete every sticker whose text equals the given string.

    Args:
        text - The text of the sticker(s) to remove
    """
    self.stickers = list(filter(lambda entry: entry[0] != text, self.stickers))
def _print_stickers(self):
    """
    Print all current stickers at the appropriate positions
    """
    for text,pos in self.stickers:
        _y,_x = pos
        # Clamp horizontally so the sticker never runs off the right edge.
        if _x + len(text) > self.width:
            _x = self.width - len(text) - 1
        self.stdscr.addstr(_y, _x, text)
def _print_header(self):
    """
    Print the header in the appropriate position
    """
    ht = 0
    # Draw the header line by line from the top-left corner, padding each
    # row with spaces so stale characters are overwritten.
    for line in self.header.split("\n"):
        self.stdscr.addstr(ht, 0, line + (" "*self._header_right_margin))
        # Track the widest header line; other layout code (help box,
        # _update_screen size checks) depends on it.
        if len(line) > self._header_right:
            self._header_right = len(line)
        ht += 1
    # Blank row under the header to separate it from the content below.
    self.stdscr.addstr(ht, 0, " "*(self._header_right+self._header_right_margin))
    self._header_bottom = ht
    self.mt_width = self._header_right + 49
def clear(self):
    """
    Remove all scrollback text from the window.

    Pushes one blank line per screen row through put(), which scrolls
    the visible backbuffer content off the top of the display.
    """
    # The original also made an unused copy of self.backbuffer; that
    # dead local has been removed.
    for _ in range(self.height):
        self.put("\n")
def _print_backbuffer(self):
    """
    Print the previously printed output above the current command line.
    candela.shell.Shell stores previously printed commands and output
    in a backbuffer. Like a normal shell, it handles printing these lines
    in reverse order to allow the user to see their past work.
    """
    rev = list(self.backbuffer)
    rev.reverse()
    # Walk backwards through history, printing each entry one row higher
    # than the last, starting just above the input line.
    for i, tup in zip(range(len(rev)), rev):
        string, iscommand = tup
        ypos = self.height-2-i
        # Stop drawing once a row would fall off the top of the window.
        if ypos > 0:
            printstring = string
            # Entries that were typed commands are re-shown with the prompt.
            if iscommand:
                printstring = "%s%s" % (self.prompt, string)
            self.stdscr.addstr(ypos,0,printstring)
def _print_help(self):
    """
    Print the menu help box for the current menu
    """
    _helpstring = self.get_helpstring()
    if not _helpstring:
        return
    # Indent every help line by one space for a small left margin.
    helpstrings = [" %s" % a for a in _helpstring.split("\n")]
    ht = 0
    longest = len(max(helpstrings, key=len))
    # Anchor the help box to the right of the header, clamped so the
    # longest line still fits inside the window.
    _x = self._header_right + self._header_right_margin
    if _x + longest > self.width:
        _x = self.width - longest - 1
    for line in helpstrings:
        # Trailing spaces overwrite any stale characters to the right.
        self.stdscr.addstr(ht, _x, line + " "*15)
        ht += 1
def put(self, output, command=False):
    """
    Print the output string on the bottom line of the shell window
    Also pushes the backbuffer up the screen by the number of lines
    in output.
    Args:
        output - The string to print. May contain newlines
    Kwargs:
        command - False if the string was not a user-entered command,
                  True otherwise (users of Candela should always use False)
    """
    self._update_screen()
    if not output:
        return
    output = str(output)
    # NOTE: despite the names, _x is the row (last line) and _y the column.
    _x,_y = (self.height-1, 0)
    lines = []
    # Wrap each logical line to the window width before printing.
    for line in output.split('\n'):
        if len(line) > self.width - 3:
            for line in textwrap.wrap(line, self.width-3):
                lines.append(line)
        else:
            lines.append(line)
    for line in lines:
        # put the line
        self.stdscr.addstr(_x, _y, line)
        # add it to backbuffer
        backbuf_string = line
        to_append = (backbuf_string, command)
        # A bare prompt is never recorded in scrollback history.
        if line != self.prompt:
            # Cap the backbuffer at 200 entries by dropping the oldest.
            index = 0
            if len(self.backbuffer) >= 200:
                index = 1
            self.backbuffer = self.backbuffer[index:] + [to_append]
def _input(self, prompt):
    """
    Handle user input on the shell window.
    Works similarly to python's raw_input().
    Takes a prompt and returns the raw string entered before the return key
    by the user.
    The input is returned with newlines stripped.
    Args:
        prompt - The text to display prompting the user to enter text
    """
    self.put(prompt)
    keyin = ''
    buff = ''
    hist_counter = 1
    # Read one keypress at a time until Enter (keycode 10) is pressed.
    while keyin != 10:
        keyin = self.stdscr.getch()
        _y,_x = self.stdscr.getyx()
        # Cursor position within the buffer, excluding the prompt.
        index = _x - len(self.prompt)
        #self.stdscr.addstr(20, 70, str(keyin)) # for debugging
        # Registered key hooks get first crack at the keypress; a hook
        # returning False consumes the event entirely.
        try:
            if chr(keyin) in self.keyevent_hooks.keys():
                cont = self.keyevent_hooks[chr(keyin)](chr(keyin), buff)
                if cont == False:
                    continue
        except:
            pass
        if keyin in [127, 263]: # backspaces
            # Platform-dependent slice offsets (see _get_backspace_indices).
            del_lo, del_hi = self._get_backspace_indices()
            buff = buff[:index+del_lo] + buff[index+del_hi:]
            self._redraw_buffer(buff)
            self.stdscr.move(_y, max(_x+del_lo, len(self.prompt)))
        elif keyin in [curses.KEY_UP, curses.KEY_DOWN]: # up and down arrows
            # Recall older/newer commands from history.
            hist_counter,buff = self._process_history_command(keyin, hist_counter)
        elif keyin in [curses.KEY_LEFT, curses.KEY_RIGHT]: # left, right arrows
            # Move the cursor, clamped between the prompt and buffer end.
            if keyin == curses.KEY_LEFT:
                newx = max(_x - 1, len(self.prompt))
            elif keyin == curses.KEY_RIGHT:
                newx = min(_x + 1, len(buff) + len(self.prompt))
            self.stdscr.move(_y, newx)
        elif keyin == curses.KEY_F1: # F1
            # F1 hard-exits the whole application.
            curses.endwin()
            sys.exit()
        elif keyin in [9]: # tab
            choices = self._tabcomplete(buff)
            if len(choices) == 1:
                # Exactly one candidate: splice it into the buffer,
                # replacing the partially-typed final token if any.
                if len(buff.split()) == 1 and not buff.endswith(' '):
                    buff = choices[0]
                else:
                    if len(buff.split()) != 1 and not buff.endswith(' '):
                        buff = ' '.join(buff.split()[:-1])
                    if buff.endswith(' '):
                        buff += choices[0]
                    else:
                        buff += ' ' + choices[0]
            elif len(choices) > 1:
                # Ambiguous: list all candidates like a normal shell.
                self.put(" ".join(choices))
            elif len(choices) == 0:
                pass
            self._redraw_buffer(buff)
        elif keyin >= 32 and keyin <= 126: # ascii input
            # Insert the typed character at the cursor position.
            buff = buff[:index-1] + chr(keyin) + buff[index-1:]
            self._redraw_buffer(buff)
            self.stdscr.move(_y, min(_x, len(buff) + len(self.prompt)))
            # After a space, optionally hint the next positional arg name
            # for commands with purely positional definitions.
            if self.should_show_hint and keyin == 32:
                command = self._get_command(buff)
                if hasattr(command, 'definition') and '-' not in command.definition:
                    try:
                        nextarg = command.definition.split()[len(buff.split())]
                        self.stdscr.addstr(_y, _x+1, nextarg)
                        self.stdscr.move(_y, _x)
                    except:
                        pass
    # Echo the finished command into scrollback and return it.
    self.put(buff, command=True)
    self.stdscr.refresh()
    return buff
def _get_backspace_indices(self):
if self.platform == "Linux":
return (0, 1)
elif self.platform == "Darwin":
return (-len(self.prompt)-1, -len(self.prompt))
def _tabcomplete(self, buff):
"""
Get a list of possible completions for the current buffer
If the current buffer doesn't contain a valid command, see if the
buffer is a prefix of any valid commands. If so, return those as possible
completions. Otherwise, delegate the completion finding to the command object.
Args:
buff - The string buffer representing the current unfinished command input
Return:
A list of completion strings for the current token in the command
"""
menu = self.get_menu()
commands = []
if menu:
commands = menu.commands
output = []
if len(buff.split()) <= 1 and ' ' not in buff:
for command in commands:
if command.name.startswith(buff):
output.append(command.name)
for alias in command.aliases:
if alias.startswith(buff):
output.append(alias)
else:
command = self._get_command(buff)
if command:
output = command._tabcomplete(buff)
return output
def _get_command(self, buff):
"""
Get the command instance referenced by string in the current input buffer
Args:
buff - The string version of the current command input buffer
Return:
The Command instance corresponding to the buffer command
"""
menu = self.get_menu()
commands = []
if menu:
commands = menu.commands
if len(commands) == 0:
self.put("No commands found. Maybe you forgot to set self.menus or self.menu?")
self.put("Hint: use F1 to quit")
for command in commands:
if command.name == buff.split()[0] or buff.split()[0] in command.aliases:
return command
return None
def _redraw_buffer(self, buff):
    """
    Clear the bottom line and re-print the given string on that line
    Args:
        buff - The line to print on the cleared bottom line
    """
    # Overwrite the whole input line with spaces, then redraw prompt+buffer.
    self.stdscr.addstr(self.height-1, 0, " "*(self.width-3))
    self.stdscr.addstr(self.height-1, 0, "%s%s" % (self.prompt, buff))
def _process_history_command(self, keyin, hist_counter):
    """
    Get the next command from the backbuffer and return it
    Also return the modified buffer counter.
    Args:
        keyin - The key just pressed
        hist_counter - The current position in the backbuffer
    """
    # Only entries flagged as typed commands participate in history.
    hist_commands = [(s,c) for s,c in self.backbuffer if c]
    if not hist_commands:
        return hist_counter, ""
    # hist_counter counts backwards from the most recent command.
    buff = hist_commands[-hist_counter][0]
    # Clear the input line and show the recalled command after the prompt.
    self.stdscr.addstr(self.height-1, 0, " "*(self.width-3))
    self.stdscr.addstr(self.height-1, 0, "%s%s" % (self.prompt, buff))
    # UP steps further back in history, DOWN steps forward, both clamped.
    if keyin == curses.KEY_UP and hist_counter < len(hist_commands):
        hist_counter += 1
    elif keyin == curses.KEY_DOWN and hist_counter > 0:
        hist_counter -= 1
    return hist_counter, buff
def _script_in(self):
"""
Substitute for _input used when reading from a script.
Returns the next command from the script being read.
"""
if not self.script_lines:
return None
if self.script_counter < len(self.script_lines):
command = self.script_lines[self.script_counter]
self.script_counter += 1
else:
command = None
return command
def main_loop(self):
    """
    The main shell IO loop.
    The sequence of events is as follows:
        get an input command
        split into tokens
        find matching command
        validate tokens for command
        run command
    This loop can be broken out of only with by a command returning
    constants.CHOICE_QUIT or by pressing F1
    """
    ret_choice = None
    while ret_choice != constants.CHOICE_QUIT:
        success = True
        ret_choice = constants.CHOICE_INVALID
        # Prefer scripted input when a script is loaded; otherwise
        # prompt the user interactively.
        choice = self._script_in()
        if choice:
            self.put("%s%s" % (self.prompt, choice))
        else:
            choice = self._input(self.prompt)
        tokens = choice.split()
        if len(tokens) == 0:
            self.put("\n")
            continue
        command = self._get_command(choice)
        if not command:
            self.put("Invalid command - no match")
            continue
        try:
            # Parse, validate, then run the command with its arguments.
            args, kwargs = command.parse_command(tokens)
            success, message = command.validate(*args, **kwargs)
            if not success:
                self.put(message)
            else:
                ret_choice = command.run(*args, **kwargs)
                if ret_choice == constants.CHOICE_INVALID:
                    self.put("Invalid command")
                else:
                    # A command may return the name of a menu to switch to.
                    menus = [a.name for a in self.menus]
                    if str(ret_choice).lower() in menus:
                        self.menu = ret_choice.lower()
                    else:
                        self.put("New menu '%s' not found" % ret_choice.lower())
        except Exception as e:
            # Surface command errors to the user instead of crashing the shell.
            self.put(e)
    return self
def get_menu(self):
    """
    Return the currently active Menu, or None.

    The active menu is the registered menu whose name equals self.menu.
    Returns None when no menus are registered or no name matches.
    """
    if not self.menus:
        return None
    # next() with a default replaces the original bare 'except:' around
    # an IndexError on a missing menu name.
    return next((m for m in self.menus if m.name == self.menu), None)
def defer(self, func, args=(), kwargs=None, timeout_duration=10, default=None):
    """
    Create a new thread, run func in the thread for a max of
    timeout_duration seconds.

    This is useful for blocking operations that must be performed
    after the next window refresh.
    For example, if a command should set a sticker when it starts executing
    and then clear that sticker when it's done, simply adding and removing
    the sticker in the same handler will not work: both happen within one
    refresh loop of the window, so the sticker is added and removed before
    the window gets redrawn. defer() gets around this by scheduling the
    work shortly after the next window refresh:

        def _run(*args, **kwargs):
            self.sticker("Hello!")
            # do things...
            def clear_sticker():
                time.sleep(.1)
                self.remove_sticker("Hello!")
            self.defer(clear_sticker)

    Args:
        func - The callback function to run in the new thread
    Kwargs:
        args - The arguments to pass to the threaded function
        kwargs - The keyword arguments to pass to the threaded function
            (None means no keyword arguments)
        timeout_duration - the amount of time in seconds to wait before
            giving up on the thread
        default - The value to return in case of a timeout
    """
    # Avoid the original mutable-default-argument pitfall.
    kwargs = {} if kwargs is None else kwargs

    class InterruptableThread(threading.Thread):
        def __init__(self):
            threading.Thread.__init__(self)
            self.result = default

        def run(self):
            self.result = func(*args, **kwargs)

    it = InterruptableThread()
    it.start()
    it.join(timeout_duration)
    # The original checked it.isAlive() (removed in Python 3.9) and then
    # returned it.result from BOTH branches, so the liveness check was
    # dead code. On timeout, result still holds the supplied default.
    return it.result
def end(self):
    """
    End the current Candela shell and safely shut down the curses session
    """
    # Restores the terminal to its pre-curses state.
    curses.endwin()
def _register_sigint_handler(self):
    """
    Properly handle ^C and any other method of sending SIGINT.
    This avoids leaving the user with a borked up terminal.
    """
    def signal_handler(signal, frame):
        # Tear down curses before exiting so the terminal is restored.
        self.end()
        sys.exit(0)
    signal.signal(signal.SIGINT, signal_handler)
def _update_screen(self):
    """
    Refresh the screen and redraw all elements in their appropriate positions
    """
    # Re-read the window size so redraws adapt to terminal resizes.
    self.height,self.width = self.stdscr.getmaxyx()
    self.stdscr.clear()
    self._print_backbuffer()
    # Only draw the header when the window is large enough to fit it.
    if self.width < self._header_right + 80 or self.height < self._header_bottom + 37:
        pass
    else:
        self._print_header()
    if self.should_show_help:
        self._print_help()
    self._print_stickers()
    self.stdscr.refresh()
def _get_platform(self):
"""
Return the platform name. This is fine, but it's used in a hacky way to
get around a backspace-cooking behavior in Linux (at least Ubuntu)
"""
return platform.uname()[0]
|
emmettbutler/candela
|
candela/shell.py
|
shell.py
|
py
| 21,960 |
python
|
en
|
code
| 71 |
github-code
|
6
|
44782282173
|
import os
import LuckyDraw
import Quiz
import hangman
import time
def main():
    """
    Entry point: show the Game Vault menu in a loop and launch the chosen game.

    Displays the list of available games, dispatches to the selected game's
    main() function, and repeats until the user declines another round.
    """
    def title():
        clear()
        print("\t\t_______Game Vault______\n\n")

    def clear():
        # NOTE(review): 'cls' is Windows-only; this is a no-op clear on
        # other platforms — confirm target OS.
        os.system('cls')

    def delay():
        time.sleep(1)

    status = True
    while status:
        title()
        print("The available games are:")
        print("\n1.Hangman\n2.Luckydraw\n3.Quiz")
        try:
            ch = int(input("\n\nEnter your selection..\n\t\t:"))
        except ValueError:
            # Non-numeric input: re-show the menu instead of crashing.
            continue
        delay()
        if ch == 1:
            hangman.main()
        elif ch == 2:
            LuckyDraw.main()
        else:
            # Any other number falls through to the quiz (original behavior).
            Quiz.main()
        title()
        c = input("Do you want to play another game?(y/n)\n\t\t:")
        status = c.lower() == 'y'
    clear()
    print("\t\tThank you for visiting!!!!")
    delay()
# Standard entry-point guard: only run the menu loop when executed directly.
if __name__ == "__main__":
    main()
|
aswinachu02/Python-Projects
|
GameVault.py
|
GameVault.py
|
py
| 961 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20723844837
|
# Exercise 1 : Family
# 1. Create a class called Family and implement the following attributes:
# - members: list of dictionaries with the following keys : name, age, gender and is_child (boolean).
# - last_name : (string)
# Initial members data:
# [
# {'name':'Michael','age':35,'gender':'Male','is_child':False},
# {'name':'Sarah','age':32,'gender':'Female','is_child':False}
# ]
class Family:
    """A simple family model: a shared last name plus a list of member dicts.

    Each member is a dict with at least some of the keys:
    name, age, gender, is_child.
    """

    def __init__(self, last_name):
        self.members = []
        self.last_name = last_name

    def member(self, name, age, gender, is_child):
        """Add a fully-specified member to the family."""
        self.members.append({
            'name': name,
            'age': age,
            'gender': gender,
            'is_child': is_child,
        })

    def born(self, **kwargs):
        """Add a child described by arbitrary keyword fields and print congratulations.

        The message uses the child's name when given, otherwise falls back
        to gender-specific wording, otherwise a generic message.
        """
        # Kept as an attribute (not just a local) for backward compatibility
        # with any code inspecting family.child after a birth.
        self.child = dict(kwargs)
        self.members.append(self.child)
        if 'name' in self.child:
            print(f"Congratulations to the {self.last_name} family on the birth of {self.child['name']}!")
        elif self.child.get('gender') == 'Male':
            print(f"Congratulations to the {self.last_name} family on the birth of their babyboy!")
        elif self.child.get('gender') == 'Female':
            print(f"Congratulations to the {self.last_name} family on the birth of their babygirl!")
        else:
            print(f"Congratulations to the {self.last_name} family on the birth of their child!")

    def is_18(self, name):
        """Return True if the named member exists and is 18 or older, else False."""
        for member in self.members:
            if member.get('name') == name:
                return member['age'] >= 18
        return False

    def family_presentation(self):
        """Print the family's last name and each member's first name.

        Unnamed babies get a gender-based placeholder ('babyboy',
        'babygirl', or 'baby').
        """
        print(f"The {self.last_name} family:")
        name_list = []
        for member in self.members:
            if 'name' in member:
                name_list.append(member['name'])
            elif member.get('gender') == 'Male':
                name_list.append('babyboy')
            elif member.get('gender') == 'Female':
                name_list.append('babygirl')
            else:
                name_list.append('baby')
        names = ', '.join(name_list)
        print(names)
# Demo: build the Smith family, add births via **kwargs (with and without
# names/genders), then exercise is_18 and family_presentation.
smiths = Family("Smith")
smiths.member('Michael', 35, 'Male', False)
smiths.member('Sarah', 32, 'Female', False)
for member in smiths.members:
    print(member)
smiths.born(name='Emily', age=0, gender='Female')
smiths.born(age=0, gender='Female')
smiths.born(age=0, gender='Male')
smiths.born(age=0)
for member in smiths.members:
    print(member)
print(smiths.is_18('Michael')) # Output: True
print(smiths.is_18('Sarah')) # Output: True
print(smiths.is_18('Emily')) # Output: False
smiths.family_presentation()
# print(smiths.members)
# Exercise 2 : TheIncredibles Family
# 1. Create a class called TheIncredibles. This class should inherit from the Family class:
# This is no random family they are an incredible family, therefore we need to add the following
# keys to our dictionaries: power and incredible_name.
# Initial members data:
# [
# {'name':'Michael','age':35,'gender':'Male','is_child':False,'power': 'fly','incredible_name':'MikeFly'},
# {'name':'Sarah','age':32,'gender':'Female','is_child':False,'power': 'read minds','incredible_name':'SuperWoman'}
# ]
class TheIncredibles(Family):
    """A Family whose members additionally carry 'power' and 'incredible_name' keys."""

    def __init__(self, last_name):
        super().__init__(last_name)

    def incredible_member(self, name, age, gender, is_child, power, incredible_name):
        """Add a member with superhero fields on top of the base member keys."""
        self.members.append({
            'name': name,
            'age': age,
            'gender': gender,
            'is_child': is_child,
            'power': power,
            'incredible_name': incredible_name,
        })

    def use_power(self, name):
        """Print the named member's power if they are an adult.

        Raises:
            Exception: if the member is found but under 18.

        NOTE: silently does nothing when no member matches `name`
        (original behavior, preserved).
        """
        for member in self.members:
            if member['name'] == name:
                if member['age'] >= 18:
                    print(f"{member['name']} can use their power: {member['power']}")
                else:
                    raise Exception(f"{member['name']} is not over 18 years old and cannot use their power.")

    def incredible_presentation(self):
        """Print the base family presentation plus each member's incredible name and power."""
        super().family_presentation()
        for member in self.members:
            print(f"{member['incredible_name']} - Power: {member['power']}")
# Demo: build the Incredible family and exercise its methods.
incredible_family = TheIncredibles("Incredible")
incredible_family.incredible_member('Michael', 35, 'Male', False, 'fly', 'MikeFly')
incredible_family.incredible_member('Sarah', 32, 'Female', False, 'read minds','SuperWoman')
# 4. Call the incredible_presentation method.
incredible_family.incredible_presentation()
# 5. Use the born method inherited from the Family class to add Baby Jack
#    with the power "Unknown Power".
incredible_family.born(name="Baby Jack", age=0, gender="Male", is_child=True, power="Unknown Power", incredible_name="BabyJack")
# 6. Call the incredible_presentation method again.
incredible_family.incredible_presentation()
incredible_family.use_power("Michael")
incredible_family.use_power("Sarah")
# NOTE: this last call raises an Exception because Baby Jack is under 18.
incredible_family.use_power("Baby Jack")
|
Alex-Rabaev/DI-Bootcamp
|
week 3/Day 2/ExercisesXP/W3D2_ExerciseXP_plus.py
|
W3D2_ExerciseXP_plus.py
|
py
| 6,662 |
python
|
en
|
code
| 1 |
github-code
|
6
|
34836695873
|
#!/usr/bin/env python3
"""Tools to define Templates.
Templates are very similar to plugins, but use jinja to transform `.enbt` template files upon installation.
"""
__author__ = "Miguel Hernández-Cabronero"
__since__ = "2021/08/01"
import sys
import argparse
import inspect
import os
import glob
import shutil
import tempfile
import jinja2
import stat
from .installable import Installable, InstallableMeta
import enb.config
from enb.config import options
class MetaTemplate(InstallableMeta):
    """Metaclass that tags every Template subclass (but not Template itself) with 'template'."""

    def __init__(cls, *args, **kwargs):
        is_base_template = cls.__name__ == "Template"
        if not is_base_template:
            cls.tags.add("template")
        super().__init__(*args, **kwargs)
class Template(Installable, metaclass=MetaTemplate):
    """
    Base class to define templates. Subclasses must be defined in the __plugin__.py file of the template's
    source dir.
    - Templates copy the source dir's contents (except for __plugin__.py) and then transforms
      any `*.enbt` file applying jinja and removing that extension.
    - Templates may require so-called fields in order to produce output.
      These fields can be automatically taken from enb.config.ini (e.g., file-based configuration),
      passed as arguments to the template installation CLI, and programmatically.
    - One or more templates can be installed into an existing directory, the __plugin__.py file is not written
      by default to the installation dir.
    """
    # Map of required field names to their corresponding help
    required_fields_to_help = dict()

    # Files in the template's source dir ending with templatable_extension
    # are subject to jinja templating upon installation.
    templatable_extension = ".enbt"

    @classmethod
    def get_fields(cls, original_fields=None):
        """Resolve and memoize the template's field values.

        Resolution order: explicitly passed fields, then CLI flags, then
        values read from *.ini configuration. Raises SyntaxError when a
        required field cannot be satisfied.
        """
        try:
            # Fields are computed once per class and cached in cls._fields.
            return cls._fields
        except AttributeError:
            # If there are required fields, satisfy them or fail
            fields = dict(original_fields) if original_fields is not None else dict()
            if cls.required_fields_to_help:
                ini_cli_fields, unused_options = cls.get_field_parser().parse_known_args()
                # Syntax is "plugin install <template> <installation>, so
                # four non-parsed options are expected
                assert len(unused_options) >= 4, (sys.argv, ini_cli_fields, unused_options)
                unused_options = unused_options[4:]
                for field_name in cls.required_fields_to_help:
                    if field_name not in fields:
                        try:
                            fields[field_name] = getattr(ini_cli_fields, field_name)
                            assert fields[field_name] is not None
                        except (KeyError, AssertionError) as ex:
                            raise SyntaxError(
                                f"Missing field {repr(field_name)}. Help for {field_name}:\n"
                                f"{cls.required_fields_to_help[field_name]}\n\n"
                                f"Invoke again with --{field_name}=\"your value\" or with -h for additional help.\n") from ex
                if unused_options:
                    print(f"Warning: unused option{'s' if len(unused_options) > 1 else ''}. \n - ", end="")
                    print('\n - '.join(repr(o) for o in unused_options))
                    print(f"NOTE: You can use '' or \"\" to define fields with spaces in them.")
                    print()
            cls._fields = fields
            return fields

    @classmethod
    def install(cls, installation_dir, overwrite_destination=False, fields=None):
        """Install a template into the given dir. See super().install for more information.

        :param installation_dir: directory where the contents of the template are placed.
          It will be created if not existing.
        :param overwrite_destination: if False, a SyntaxError is raised if any of the
          destination contents existed prior to this call. Note that installation_dir
          can already exist, it is the files and directories moved into it that can
          trigger this SyntaxError.
        :param fields: if not None, it must be a dict-like object containing a field to field value
          mapping. If None, it is interpreted as an empty dictionary.
          Required template fields not present in fields will be then read from the CLI
          arguments. If those are not provided, then the default values read from `*.ini`
          configuration files. If any required field cannot not satisfied after this,
          a SyntaxError is raised.
        """
        # If there are required fields, satisfy them or fail
        fields = cls.get_fields(original_fields=fields)
        # The template's source dir is wherever the subclass is defined.
        template_src_dir = os.path.dirname(os.path.abspath(inspect.getfile(cls)))
        for input_path in glob.glob(os.path.join(template_src_dir, "**", "*"), recursive=True):
            if "__pycache__" in input_path:
                continue
            if os.path.basename(input_path) == "__plugin__.py":
                continue
            # By default, the original structure and file names are preserved.
            output_path = os.path.abspath(input_path).replace(
                os.path.abspath(template_src_dir),
                os.path.abspath(installation_dir))
            # Directories are created when found
            if os.path.isdir(input_path):
                os.makedirs(output_path, exist_ok=True)
                continue
            # Remember executability so templated outputs keep the +x bit.
            input_is_executable = os.access(input_path, os.X_OK)
            # Files ending in '.enbt' will be identified as templates, processed and stripped of their extension.
            is_templatable = os.path.isfile(input_path) \
                             and os.path.basename(input_path).endswith(cls.templatable_extension)
            os.makedirs(os.path.dirname(output_path), exist_ok=True)
            if is_templatable:
                # Render via jinja into a temp file, then copy the result to
                # the output path with the '.enbt' extension stripped.
                with tempfile.NamedTemporaryFile(mode="w+") as templated_file:
                    jinja_env = jinja2.Environment(
                        loader=jinja2.FileSystemLoader(os.path.dirname(os.path.abspath(input_path))),
                        autoescape=jinja2.select_autoescape())
                    template = jinja_env.get_template(os.path.basename(input_path))
                    templated_file.write(template.render(**fields))
                    templated_file.flush()
                    templated_file.seek(0)
                    if os.path.exists(output_path[:-len(cls.templatable_extension)]) and not options.force:
                        raise ValueError(
                            f"Error installing template {cls.name}: output file {repr(output_path)} already exists "
                            f"and options.force={options.force}. Run with -f to overwrite.")
                    with open(output_path[:-len(cls.templatable_extension)], "w") as output_file:
                        output_file.write(templated_file.read())
                    if input_is_executable:
                        os.chmod(output_path[:-len(cls.templatable_extension)],
                                 os.stat(output_path[:-len(cls.templatable_extension)]).st_mode | stat.S_IEXEC)
            else:
                # Non-template files are copied verbatim.
                if os.path.exists(output_path) and not options.force:
                    raise ValueError(
                        f"Error installing template {cls.name}: output file {repr(output_path)} already exists "
                        f"and options.force={options.force}. Run with -f to overwrite.")
                shutil.copy(input_path, output_path)
        cls.build(installation_dir=installation_dir)
        print(f"Template {repr(cls.name)} successfully installed into {repr(installation_dir)}.")

    @classmethod
    def get_field_parser(cls):
        """Build the argparse parser used to read required fields from the CLI."""
        description = f"Template {repr(cls.name)} installation help."
        if cls.required_fields_to_help:
            description += f"\n\nFields are automatically read from the following paths (in this order):\n"
            description += "\n".join(enb.config.ini.used_config_paths)
            # defined_description = f"\n\nAlready refined fields:"
            defined_field_lines = []
            for field_name in sorted(cls.required_fields_to_help.keys()):
                try:
                    defined_field_lines.append(f"  {field_name} = {enb.config.ini.get_key('template', field_name)}")
                except KeyError:
                    pass
            if defined_field_lines:
                description += f"\n\nFile-defined fields:\n"
                description += "\n".join(defined_field_lines)
        parser = argparse.ArgumentParser(
            prog=f"enb plugin install {cls.name}",
            description=description,
            formatter_class=argparse.RawTextHelpFormatter)
        required_flags_group = parser.add_argument_group(
            "Required flags (use '' or \"\" quoting for fields with spaces)")
        for field_name, field_help in cls.required_fields_to_help.items():
            try:
                default_field_value = enb.config.ini.get_key("template", field_name)
            except KeyError:
                default_field_value = None
            # Normalize help strings so they all end with a period.
            if field_help[-1] != ".":
                field_help += "."
            required_flags_group.add_argument(
                f"--{field_name}",
                default=default_field_value,
                help=field_help,
                metavar=field_name)
        # This argument is for showing help to the user only, since it will have already been parsed
        # by enb.config.ini by the time this is called.
        parser.add_argument(f"--ini", nargs="*", required=False, type=str,
                            help="Additional .ini paths with a [field] section containing field = value lines")
        return parser
|
miguelinux314/experiment-notebook
|
enb/plugins/template.py
|
template.py
|
py
| 9,816 |
python
|
en
|
code
| 3 |
github-code
|
6
|
71484280508
|
# Read N, A, B, C, D, then pad the board string with '#' sentinels on both
# ends so index arithmetic below never runs off the edges.
N, A, B, C, D = map(int, input().split())
S = "#{}#".format(input())
def reachable(start, end):
    """Return True iff S[start..end] contains no run of two or more '#'.

    Scans the interval run by run: any maximal block of consecutive '#'
    cells of length >= 2 makes the interval impassable.
    """
    pos = start
    while pos <= end:
        run_end = pos
        # Advance run_end to the end of the current run of equal chars.
        while run_end <= end and S[run_end] == S[pos]:
            run_end += 1
        if S[pos] == '#' and run_end - pos >= 2:
            return False
        pos = run_end
    return True
# Both pieces must be able to travel their own intervals without crossing
# a run of two or more rocks.
if not reachable(A, C) or not reachable(B, D):
    print("No")
    quit()
# If the first piece must pass the second (C > D), the second piece needs
# a 3-wide empty stretch somewhere around [B, D] to step aside into.
if C > D:
    can_over = False
    for i in range(B, D+1):
        if S[i-1] == S[i] == S[i+1] == ".":
            can_over = True
    if not can_over:
        print("No")
        quit()
print("Yes")
|
knuu/competitive-programming
|
atcoder/agc/agc034_a.py
|
agc034_a.py
|
py
| 604 |
python
|
en
|
code
| 1 |
github-code
|
6
|
1090949893
|
from keras.applications import resnet50
from keras.applications import mobilenetv2
from keras.applications import mobilenet
from keras.applications import vgg19
# from keras_squeezenet import SqueezeNet
import conv.networks.get_vgg16_cifar10 as gvc
import conv.networks.gen_conv_net as gcn
# import conv.networks.MobileNet as mobilenet
import conv.networks.MobileNet_for_mobile as mobilenet_for_mobile
import conv.networks.VGG19_for_mobile as vgg19_for_mobile
import conv.networks.SqueezeNet as sqn
import conv.networks.DenseNet as dn
import conv.networks.ResNet50 as rn50
from keras_applications.imagenet_utils import decode_predictions
from keras.preprocessing.image import img_to_array
from keras.applications import imagenet_utils
from keras.engine.input_layer import Input
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization
from keras import optimizers
from keras.layers.core import Lambda
from keras import backend as K
from keras import regularizers
from keras.models import Model
from keras import optimizers
import keras
import numpy as np
from os import listdir
from os.path import isfile, join
import os
import matplotlib.image as mpimg
import time
# SqueezeNet: https://github.com/rcmalli/keras-squeezenet/blob/master/examples/example_keras_squeezenet.ipynb
# https://keras.io/applications/
def get_all_nets(network_name, include_top=True, num_filter=4):
    """
    Build a pretrained (ImageNet) Keras model by name.

    Args:
        network_name: one of "ResNet50", "MobileNetV2", "MobileNet",
            "MobileNet_for_mobile", "VGG19", "VGG19_for_mobile",
            "SqueezeNet". Any other value raises UnboundLocalError
            (no default branch — original behavior, preserved).
        include_top: whether to keep the classification head; when True
            the model is also compiled for training.
        num_filter: filter count forwarded to the "*_for_mobile" variants.

    Returns:
        The constructed keras Model (compiled when include_top is True).
    """
    if(network_name=="ResNet50"):
        # Fixed: the original called resnet50.ResNet101, which does not
        # exist in keras.applications.resnet50 and contradicts this
        # branch's name.
        model = resnet50.ResNet50(weights='imagenet',
            include_top=include_top, input_shape=(224, 224, 3))
    elif(network_name=="MobileNetV2"):
        model = mobilenetv2.MobileNetV2(weights='imagenet',
            include_top=include_top, input_shape=(224, 224, 3))
    elif(network_name=="MobileNet"):
        model = mobilenet.MobileNet(weights='imagenet',
            include_top=include_top,# pooling='avg',
            input_shape=(224, 224, 3))
    elif(network_name=="MobileNet_for_mobile"):
        model = mobilenet_for_mobile.MobileNet(
            include_top=include_top, weights='imagenet',
            input_shape=(224, 224, 3), num_filter=num_filter)
    elif(network_name=="VGG19"):
        model = vgg19.VGG19(weights='imagenet',
            include_top=include_top, input_shape=(224, 224, 3))
    elif(network_name=="VGG19_for_mobile"):
        model = vgg19_for_mobile.VGG19(
            include_top=include_top, weights='imagenet',
            input_shape=(224, 224, 3), num_filter=num_filter)
    elif(network_name=="SqueezeNet"):
        # NOTE(review): the keras_squeezenet import is commented out at the
        # top of the file, so this branch raises NameError until it is
        # restored — confirm before using.
        model = SqueezeNet(weights='imagenet',
            include_top=include_top, input_shape=(224, 224, 3))
    if(include_top):
        opt = optimizers.rmsprop(lr=0.0001, decay=1e-6)
        # Let's train the model using RMSprop
        model.compile(loss='categorical_crossentropy',
                      optimizer=opt,
                      metrics=['accuracy'])
    return model
def get_nets_wo_weights(network_name, num_classes, include_top=False,
    input_shape=(32, 32, 3), num_filter=4, use_bias=False):
    """Build a randomly initialised (weights=None) network and compile it.

    For include_top=False, a small classifier head (Flatten -> Dense(256,
    relu) -> Dropout -> softmax) is appended over the backbone output.
    The modules rn50, dn, sqn, gvc, gcn, mobilenet_for_mobile are project
    modules imported elsewhere -- not visible in this chunk.

    Returns: a compiled Keras Model with num_classes softmax outputs.
    """
    if(network_name=="ResNet50"):
        model = rn50.ResNet50(include_top=include_top,
            input_shape=input_shape, weights=None,
            classes=num_classes, num_vert_filters=num_filter)
    elif(network_name=="DenseNet121"):
        model = dn.DenseNet121(include_top=include_top,
            input_shape=input_shape, weights=None,
            classes=num_classes, num_filter=num_filter)
    elif(network_name=="MobileNetV2"):
        model = mobilenetv2.MobileNetV2(include_top=include_top,
            input_shape=input_shape, weights=None,
            classes=num_classes)
    elif(network_name=="MobileNet"):
        model = mobilenet.MobileNet(
            include_top=include_top, input_shape=input_shape, weights=None,
            classes=num_classes, num_filter=num_filter)
    elif(network_name=="MobileNet_for_mobile"):
        model = mobilenet_for_mobile.MobileNet(
            include_top=include_top, input_shape=input_shape, weights=None,
            classes=num_classes, num_filter=num_filter)
    elif(network_name=="VGG19"):
        model = vgg19.VGG19(input_shape=input_shape,
            include_top=include_top, weights=None,
            classes=num_classes)
    elif(network_name=="SqueezeNet"):
        model = sqn.SqueezeNet(input_shape=input_shape,
            include_top=include_top, weights=None, num_filter=num_filter,
            use_bias=use_bias, classes=num_classes)
    elif(network_name=="vgg"):
        model = gvc.get_conv_vert_net(x_shape=input_shape,
            num_classes=num_classes, num_vert_filters=num_filter,
            use_bias=use_bias)
    elif(network_name=="conv"):
        model = gcn.get_conv_vert_net(input_shape=input_shape,
            num_classes=num_classes,
            num_extra_conv_layers=2, num_ver_filter=num_filter,
            use_bias=use_bias)
    # Headless backbone: bolt on a fresh classifier for num_classes.
    if(include_top == False):
        x = model.output
        # x = keras.layers.GlobalAveragePooling2D()(x)
        x = Flatten()(x)
        x = Dense(256, activation='relu')(x)
        # x = Activation('relu')(x)
        x = Dropout(0.5)(x)
        # x = Dense(num_output)(x)
        # x = Activation('softmax')(x)
        x = keras.layers.Dense(num_classes, activation='softmax',
            use_bias=True, name='Logits')(x)
        full_model = Model(inputs = model.input,outputs = x)
    else:
        full_model = model
    opt = optimizers.rmsprop(lr=0.0001, decay=1e-6)
    # Let's train the model using RMSprop
    full_model.compile(loss='categorical_crossentropy',
        optimizer=opt,
        metrics=['accuracy'])
    return full_model
def get_box_nets(network_name, num_classes, include_top=False,
    input_shape=(32, 32, 3), num_filter=4, num_layer=4, use_bias=False):
    """Build a randomly initialised network variant that also accepts a
    layer-count knob, and compile it with RMSprop + categorical
    cross-entropy.

    Parameters mirror get_nets_wo_weights, plus:
        num_layer: number of layers forwarded to architectures that accept
            it (DenseNet121, MobileNet, SqueezeNet, "conv").

    The modules resnet50, dn, sqn, gvc, gcn, mobilenet_for_mobile are
    project modules imported elsewhere -- not visible in this chunk.

    Returns: the compiled Keras model.
    """
    if(network_name=="ResNet50"):
        model = resnet50.ResNet50(include_top=include_top,
            input_shape=input_shape, weights=None)
        # if(include_top==False):
        # 	model.pop()
    elif(network_name=="DenseNet121"):
        model = dn.DenseNet121(include_top=include_top,
            input_shape=input_shape, weights=None,
            classes=num_classes, num_filter=num_filter,
            num_layer=num_layer)
    elif(network_name=="MobileNetV2"):
        model = mobilenetv2.MobileNetV2(include_top=include_top,
            input_shape=input_shape, weights=None,
            classes=num_classes)
    elif(network_name=="MobileNet"):
        model = mobilenet.MobileNet(
            include_top=include_top, input_shape=input_shape, weights=None,
            classes=num_classes, num_filter=num_filter, num_layers=num_layer)
    elif(network_name=="MobileNet_for_mobile"):
        model = mobilenet_for_mobile.MobileNet(
            include_top=include_top, input_shape=input_shape, weights=None,
            classes=num_classes, num_filter=num_filter)
    elif(network_name=="VGG19"):
        model = vgg19.VGG19(input_shape=input_shape,
            include_top=include_top, weights=None,
            classes=num_classes)
    elif(network_name=="SqueezeNet"):
        model = sqn.SqueezeNet(input_shape=input_shape,
            include_top=include_top, weights=None, num_filter=num_filter,
            use_bias=use_bias, classes=num_classes,
            num_layers=num_layer)
    elif(network_name=="vgg"):
        model = gvc.get_conv_vert_net(x_shape=input_shape,
            num_classes=num_classes, num_vert_filters=num_filter,
            use_bias=use_bias)
    elif(network_name=="conv"):
        model = gcn.get_conv_vert_net(input_shape=input_shape,
            num_classes=num_classes,
            # Bug fix: this argument previously read ``num_layers``, an
            # undefined name, so the "conv" branch raised NameError.
            num_extra_conv_layers=num_layer, num_ver_filter=num_filter,
            use_bias=use_bias)
    opt = optimizers.rmsprop(lr=0.0001, decay=1e-6)
    # Let's train the model using RMSprop
    model.compile(loss='categorical_crossentropy',
        optimizer=opt,
        metrics=['accuracy'])
    return model
def preprocess_image(network_name, x):
    """Apply the architecture-specific ImageNet preprocessing to *x*.

    Unrecognised network names leave *x* untouched, matching the original
    if/elif chain's fall-through behaviour.
    """
    if network_name == "ResNet50":
        return resnet50.preprocess_input(x)
    if network_name == "MobileNetV2":
        return mobilenetv2.preprocess_input(x)
    if network_name == "MobileNet":
        return mobilenet.preprocess_input(x)
    if network_name == "VGG19":
        return vgg19.preprocess_input(x)
    if network_name == "SqueezeNet":
        return imagenet_utils.preprocess_input(x)
    return x
def preprocess_image_fn(network_name):
    """Return the preprocessing callable for the named architecture.

    Raises:
        ValueError: if ``network_name`` is not one of the supported
            architectures.  (Previously an unknown name crashed with
            UnboundLocalError on the ``return x`` line instead.)
    """
    if network_name == "ResNet50":
        fn = resnet50.preprocess_input
    elif network_name == "MobileNetV2":
        fn = mobilenetv2.preprocess_input
    elif network_name == "MobileNet":
        fn = mobilenet.preprocess_input
    elif network_name == "VGG19":
        fn = vgg19.preprocess_input
    elif network_name == "SqueezeNet":
        fn = imagenet_utils.preprocess_input
    else:
        raise ValueError("Unknown network_name: %r" % (network_name,))
    return fn
def decodepred(network_name, preds):
    """Decode raw predictions into top-3 (class, description, prob) triples.

    For known architectures, returns the decoded top-3 list for the first
    sample; for unrecognised names, returns *preds* unchanged.

    Bug fix: the function previously ended with ``return x`` -- a name that
    does not exist in this scope -- so every call raised NameError (or
    leaked an unrelated global).  It now returns the decoded predictions.
    """
    if(network_name=="ResNet50"):
        preds = resnet50.decode_predictions(preds, top=3)[0]
    elif(network_name=="MobileNetV2"):
        preds = mobilenetv2.decode_predictions(preds, top=3)[0]
    elif(network_name=="MobileNet"):
        preds = mobilenet.decode_predictions(preds, top=3)[0]
    elif(network_name=="VGG19"):
        preds = vgg19.decode_predictions(preds, top=3)[0]
    elif(network_name=="SqueezeNet"):
        preds = imagenet_utils.decode_predictions(preds, top=3)[0]
    return preds
def analyse_model(model):
    """Print a structural dump of *model*: its attribute names, the summary,
    details of the third layer, and every layer's shapes plus the shapes of
    its weight tensors.  Returns None.
    """
    print("All functions ", dir(model))
    print("Summary model ", model.summary())
    print("Layer details ", dir(model.layers[2]))
    for layer_idx, current_layer in enumerate(model.layers):
        weight_list = current_layer.weights
        print("Length in each layer ", layer_idx, current_layer.name,
              current_layer.input_shape, current_layer.output_shape,
              len(weight_list))
        if len(weight_list):
            for weight_idx, weight_tensor in enumerate(weight_list):
                print("Weights ", weight_idx, weight_tensor.shape)
    return
def add_classifier(base_model, num_output):
    """Freeze *base_model* and append a global-average-pool + softmax head
    with *num_output* units, then compile the result.

    NOTE(review): the model is compiled with 'binary_crossentropy' while
    the head is a softmax over num_output classes -- confirm whether
    'categorical_crossentropy' was intended here.
    """
    # Freeze the backbone so only the new head trains.
    for layer in base_model.layers:
        layer.trainable = False
    x = base_model.output
    x = keras.layers.GlobalAveragePooling2D()(x)
    # x = Dense(16, kernel_regularizer=regularizers.l2(0.01))(x)
    # x = Activation('relu')(x)
    # x = Dropout(0.5)(x)
    # x = Dense(num_output)(x)
    # x = Activation('softmax')(x)
    x = keras.layers.Dense(num_output, activation='softmax',
        use_bias=True, name='Logits')(x)
    model = Model(inputs = base_model.input,outputs = x)
    # initiate RMSprop optimizer
    opt = optimizers.rmsprop(lr=0.0001, decay=1e-6)
    # Let's train the model using RMSprop
    model.compile(loss='binary_crossentropy',
        optimizer=opt,
        metrics=['accuracy'])
    return model
def get_all_prediction(image_filelist):
    """Run the module-level ``model`` over each file in ``imagenet_path``.

    Relies on the globals ``model`` and ``imagenet_path`` assigned in the
    __main__ block below, and on ``image`` / ``imagenet_utils`` helpers
    imported elsewhere in the project (not visible in this chunk).

    Returns: a list with one raw prediction array per input image.
    """
    prediction_list = []
    for filename in image_filelist:
        # img = image.load_img(os.path.join(imagenet_path, filename), target_size=(224, 224))
        img = image.load_img(os.path.join(imagenet_path, filename), target_size=(227, 227)) # Squeezenet
        # img1 = mpimg.imread(os.path.join(imagenet_path, filename))
        # print(img1.shape)
        # Model expects a batch axis, hence the expand_dims before predict.
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = imagenet_utils.preprocess_input(x)
        preds = model.predict(x)
        # decode the results into a list of tuples (class, description, probability)
        # (one such list for each sample in the batch)
        print('Predicted:', filename, imagenet_utils.decode_predictions(preds, top=3)[0])
        print("Pred values ", np.argmax(preds))
        # Predicted: [(u'n02504013', u'Indian_elephant', 0.82658225), (u'n01871265', u'tusker', 0.1122357), (u'n02504458', u'African_elephant', 0.061040461)]
        prediction_list.append(preds)
    return prediction_list
if __name__ == '__main__':
    # For each selected architecture: build it, dump its structure, then
    # time predictions over the first 10 ImageNet validation images.
    network_types_list = ["MobileNetV2"]#, "ResNet50", "MobileNetV2", "VGG19"] # , "SqueezeNet"
    for network_type in network_types_list:
        print("Network Type ", network_type)
        model = get_all_nets(network_type, include_top=True)
        analyse_model(model)
        # model = get_all_nets(network_type, include_top=False)
        # model = add_classifier(model)
    imagenet_path = "/mnt/additional/aryan/imagenet_validation_data/ILSVRC2012_img_val/"
    # http://www.image-net.org/challenges/LSVRC/2012/
    # https://cv-tricks.com/tensorflow-tutorial/keras/
    # Finding actual predictions
    # http://machinelearninguru.com/deep_learning/data_preparation/hdf5/hdf5.html
    image_filelist = [f for f in listdir(imagenet_path) if isfile(join(imagenet_path, f))]
    print("Number of files ", len(image_filelist))
    start_time = time.time()
    get_all_prediction(image_filelist[:10])
    total_time = time.time() - start_time
    print("Total prediction time ", total_time)
    print("File list ", image_filelist[:10])
|
nitthilan/ml_tutorials
|
conv/networks/get_all_imagenet.py
|
get_all_imagenet.py
|
py
| 11,760 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22618188640
|
# encoding: utf-8
# pylint: disable=redefined-outer-name,missing-docstring
import pytest
from tests import utils
from app import create_app
@pytest.yield_fixture(scope='session')
def flask_app():
    # Session-wide Flask app in 'testing' config: creates the full schema
    # once up front and drops it again when the test session ends.
    app = create_app(flask_config='testing')
    from app.extensions import db
    with app.app_context():
        db.create_all()
        yield app
        db.drop_all()
@pytest.yield_fixture()
def db(flask_app):
    # pylint: disable=unused-argument,invalid-name
    # Function-scoped handle to the shared SQLAlchemy instance; rolls back
    # any uncommitted changes after each test to keep tests isolated.
    from app.extensions import db as db_instance
    yield db_instance
    db_instance.session.rollback()
@pytest.fixture(scope='session')
def flask_app_client(flask_app):
    # Test client with auto-auth support and JSON-aware responses,
    # both provided by helpers in tests.utils.
    flask_app.test_client_class = utils.AutoAuthFlaskClient
    flask_app.response_class = utils.JSONResponse
    return flask_app.test_client()
@pytest.yield_fixture(scope='session')
def regular_user(flask_app):
    # pylint: disable=invalid-name,unused-argument
    # Session-wide default-permission user; persisted on setup and removed
    # from the database on teardown.
    from app.extensions import db
    regular_user_instance = utils.generate_user_instance(
        username='regular_user'
    )
    db.session.add(regular_user_instance)
    db.session.commit()
    yield regular_user_instance
    db.session.delete(regular_user_instance)
    db.session.commit()
@pytest.yield_fixture(scope='session')
def readonly_user(flask_app):
    # pylint: disable=invalid-name,unused-argument
    # Session-wide read-only user; persisted on setup, deleted on teardown.
    from app.extensions import db
    readonly_user_instance = utils.generate_user_instance(
        username='readonly_user',
        is_readonly=True
    )
    db.session.add(readonly_user_instance)
    db.session.commit()
    yield readonly_user_instance
    db.session.delete(readonly_user_instance)
    db.session.commit()
@pytest.yield_fixture(scope='session')
def admin_user(flask_app):
    # pylint: disable=invalid-name,unused-argument
    # Session-wide admin user; persisted on setup, deleted on teardown.
    from app.extensions import db
    admin_user_instance = utils.generate_user_instance(
        username='admin_user',
        is_admin=True
    )
    db.session.add(admin_user_instance)
    db.session.commit()
    yield admin_user_instance
    db.session.delete(admin_user_instance)
    db.session.commit()
|
DurandA/pokemon-battle-api
|
tests/conftest.py
|
conftest.py
|
py
| 2,085 |
python
|
en
|
code
| 3 |
github-code
|
6
|
30793951295
|
'''Instead of giving some hard coded values and changing it later in the entire code which will be very time consuming and troublesome
we are going to create a class which will manage all the settings parameter so even if we have to change later we only need to make
changes in this file
'''
class settings:
    """Central store for all Alien Invasion tunables.

    Static values are set once in __init__; values that scale as the game
    speeds up live in initialize_dynamic_settings() so a new game can
    reset them.
    """

    def __init__(self) -> None:
        #screen
        self.width = 1200
        self.height = 800
        self.bg_color = ("cyan")
        #ship
        self.ship_speed_factor= 2.0
        self.ship_limit = 3
        #Bullets
        self.bullet_speed_factor = 3
        self.bullet_width = 5
        self.bullet_height = 15
        self.bullet_color = 25,25,112
        self.bullets_allowed = 5
        #alien
        self.alien_speed = 0.5
        self.fleet_drop_speed = 10
        self.fleet_direction = 1
        #Amount by which difficulty of game should be increased
        self.speedup = 1.2
        self.initialize_dynamic_settings()

    #These are the initial settings of game
    def initialize_dynamic_settings(self):
        """Reset every value that increase_speed() scales during play.

        Bug fix: alien_points was previously set once in __init__ and never
        restored here, so starting a new game kept the inflated point
        value; it is now reset together with the other dynamic settings.
        """
        self.ship_speed_factor = 2.0
        self.bullet_speed_factor = 2
        self.alien_speed = 0.5
        self.fleet_direction = 1
        self.alien_points = 50

    #This function is called when player completes certain level . It increases the difficulty of the game
    def increase_speed(self):
        """Scale the dynamic settings (and the per-kill score) by speedup."""
        self.ship_speed_factor *= self.speedup
        self.bullet_speed_factor *= self.speedup
        self.alien_speed *= self.speedup
        self.alien_points *= self.speedup
        print(self.alien_points)
|
shreyashkhurud123/Alien_Invasion_Python
|
Alien_Invasion/Alien_Invasion/settings.py
|
settings.py
|
py
| 1,628 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29534323943
|
from scipy.interpolate import Rbf # radial basis functions
import matplotlib.pyplot as plt
import numpy as np
# NOTE(review): the pixel-coordinate sample lists below are dead stores --
# they are immediately overwritten by the normalised [0, 1] coordinates.
x = [1555,1203,568,1098,397,564,1445,337,1658,1517,948]
y = [860,206,1097,425,594,614,553,917,693,469,306]
x = [0.9, 0.6, 0.1, 0.5, 0.04, 0.1, 0.82, 0.0, 1.0, 0.89, 0.46]
y = [0.73, 0.0, 1.0, 0.24, 0.43, 0.45, 0.38, 0.7, 0.54, 0.29, 0.11]
# Every sample carries intensity 1, so the interpolated surface reflects
# proximity to the sample points rather than varying magnitudes.
z = [1]*len(x)
rbf_adj = Rbf(x, y, z, function='gaussian')
# Evaluate the interpolant on an 81 x 82 grid spanning the unit square.
x_fine = np.linspace(0, 1, 81)
y_fine = np.linspace(0, 1, 82)
x_grid, y_grid = np.meshgrid(x_fine, y_fine)
z_grid = rbf_adj(x_grid.ravel(), y_grid.ravel()).reshape(x_grid.shape)
plt.gca().invert_yaxis()
#plt.gca().invert_xaxis()
plt.pcolor(x_fine, y_fine, z_grid);
plt.plot(x, y, 'ok');
plt.xlabel('x'); plt.ylabel('y'); plt.colorbar();
plt.title('Heat Intensity Map');
plt.show()
|
twilly27/DatacomProject
|
Project/HeatMapping.py
|
HeatMapping.py
|
py
| 795 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12611135709
|
import pytest
from utils import *
from fireplace.exceptions import GameOver
# Card and token IDs used throughout the Lord Jaraxxus tests below.
LORD_JARAXXUS = "EX1_323"
LORD_JARAXXUS_HERO = "EX1_323h"
LORD_JARAXXUS_WEAPON = "EX1_323w"
INFERNO = "EX1_tk33"
INFERNO_TOKEN = "EX1_tk34"
def test_jaraxxus():
    """Playing Jaraxxus replaces hero (15 hp, no armor), weapon and power;
    the INFERNO power then summons an Infernal token."""
    game = prepare_game(CardClass.WARRIOR, CardClass.WARRIOR)
    game.player1.hero.power.use()
    game.player1.give(LIGHTS_JUSTICE).play()
    assert game.player1.weapon.id == LIGHTS_JUSTICE
    game.end_turn()
    game.end_turn()
    assert game.player1.hero.health == 30
    assert game.player1.hero.armor == 2
    game.player1.give(LORD_JARAXXUS).play()
    assert game.player1.hero.id == LORD_JARAXXUS_HERO
    assert game.player1.weapon.id == LORD_JARAXXUS_WEAPON
    assert game.player1.hero.health == 15
    assert game.player1.hero.armor == 0
    assert game.player1.hero.power.id == INFERNO
    assert len(game.player1.field) == 0
    game.end_turn()
    game.end_turn()
    game.player1.hero.power.use()
    assert len(game.player1.field) == 1
    assert game.player1.field[0].id == INFERNO_TOKEN
def test_jaraxxus_cult_master():
    """Cult Master does not draw a card when Jaraxxus is played."""
    game = prepare_game()
    game.player1.discard_hand()
    game.player1.summon("EX1_595")
    game.player1.give(LORD_JARAXXUS).play()
    assert len(game.player1.field) == 1
    assert not game.player1.hand
def test_jaraxxus_knife_juggler():
    """Knife Juggler does not throw a knife when Jaraxxus is played."""
    game = prepare_game()
    juggler = game.player1.summon("NEW1_019")
    game.player1.give(LORD_JARAXXUS).play()
    assert game.player2.hero.health == 30
    assert juggler.health == 2
def test_jaraxxus_molten_giant():
    """Molten Giant costs 20 after Jaraxxus drops the hero to 15 health."""
    game = prepare_game()
    jaraxxus = game.player1.give("EX1_323")
    molten = game.player1.give("EX1_620")
    jaraxxus.play()
    assert game.player1.hero.health == 15
    assert molten.cost == 20
def test_jaraxxus_mirror_entity():
    """Mirror Entity copies Jaraxxus as a minion while the opponent still
    completes the hero replacement."""
    game = prepare_game()
    mirror = game.player1.give("EX1_294")
    mirror.play()
    game.end_turn()
    jaraxxus = game.player2.give(LORD_JARAXXUS)
    jaraxxus.play()
    assert not game.player1.secrets
    assert game.player2.hero.id == LORD_JARAXXUS_HERO
    assert len(game.player1.field) == 1
    assert game.player1.field[0].id == LORD_JARAXXUS
def test_jaraxxus_repentance():
    """Repentance triggers on Jaraxxus and leaves the new hero at 1 health."""
    game = prepare_game()
    repentance = game.player1.give("EX1_379")
    repentance.play()
    game.end_turn()
    jaraxxus = game.player2.give(LORD_JARAXXUS)
    jaraxxus.play()
    assert not game.player1.secrets
    assert game.player2.hero.id == LORD_JARAXXUS_HERO
    assert game.player2.hero.health == game.player2.hero.max_health == 1
def test_jaraxxus_snipe():
    """Snipe does not fire on Jaraxxus: the secret stays up, hero is 15 hp."""
    game = prepare_game()
    snipe = game.player1.give("EX1_609")
    snipe.play()
    game.end_turn()
    jaraxxus = game.player2.give(LORD_JARAXXUS)
    jaraxxus.play()
    assert len(game.player1.secrets) == 1
    assert game.player2.hero.health == 15
def test_jaraxxus_sacred_trial():
    """Sacred Trial timing: playing Jaraxxus as the 4th minion does not pop
    the secret, but once it does trigger, the replaced hero dies and the
    game ends."""
    game = prepare_game()
    trial = game.player1.give("LOE_027")
    trial.play()
    game.end_turn()
    game.player2.give(WISP).play()
    game.player2.give(WISP).play()
    game.player2.give(WISP).play()
    jaraxxus = game.player2.give(LORD_JARAXXUS)
    jaraxxus.play()
    # Will not trigger as 4th minion due to timing
    assert trial in game.player1.secrets
    assert not game.player2.hero.dead
    game.end_turn()
    game.end_turn()
    wisp4 = game.player2.summon(WISP)
    assert not wisp4.dead
    jaraxxus = game.player2.give(LORD_JARAXXUS)
    with pytest.raises(GameOver):
        jaraxxus.play()
    assert trial not in game.player1.secrets
    assert game.player2.hero.dead
|
jleclanche/fireplace
|
tests/test_jaraxxus.py
|
test_jaraxxus.py
|
py
| 3,302 |
python
|
en
|
code
| 645 |
github-code
|
6
|
44855982856
|
def stringToInt(s):
    """Parse a signed decimal string into an int without using int().

    Generalised to accept an optional leading '+' as well as '-', and to
    return 0 for an empty string.  Leading zeros are handled.  The
    remaining characters are assumed to be ASCII digits.
    """
    sign = 1
    if s and s[0] in '+-':
        if s[0] == '-':
            sign = -1
        s = s[1:]
    num = 0
    for ch in s:
        # Horner's rule: shift the accumulator one decimal place, add digit.
        num = num * 10 + (ord(ch) - 48)
    return sign * num
# Demo: parse a negative decimal string that carries leading zeros.
print(stringToInt("-0000045637560003330003"))
|
sandeepjoshi1910/Algorithms-and-Data-Structures
|
stoi.py
|
stoi.py
|
py
| 319 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10862974654
|
"""
Run the model end to end
"""
import argparse
import sys
import torch
from pathlib import Path
import pytorch_lightning as pl
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
from smallteacher.data import DataModule, train_augmentations
from smallteacher.models import FullySupervised, SemiSupervised
from smallteacher.constants import Metrics
from smallteacher.config import BEST_MODEL_NAME
from smallssd.data import LabelledData, UnlabelledData
from smallssd.config import DATAFOLDER_PATH
from smallssd.keys import CLASSNAME_TO_IDX
import mlflow
import mlflow.pytorch
def parse_args(args):
    """Parse the arguments.

    Parameters:
        args: list of argument strings, e.g. ``sys.argv[1:]``.

    Returns: an argparse.Namespace with ``model``, ``workers``,
    ``mlflow_experiment`` and ``seed`` attributes.
    """
    parser = argparse.ArgumentParser(
        description="Simple training script for training a pytorch lightning model."
    )
    parser.add_argument(
        "--model",
        help="Chooses model architecture",
        type=str,
        default="FRCNN",
        choices=["FRCNN", "RetinaNet", "SSD"],
    )
    # Defaults are now proper ints instead of strings; argparse applied
    # type=int to string defaults anyway, so behaviour is unchanged but
    # the intent is explicit.
    parser.add_argument(
        "--workers", help="Number of dataloader workers", type=int, default=1
    )
    parser.add_argument(
        "--mlflow_experiment", type=str, default="pytorch_lightning_experiment"
    )
    parser.add_argument("--seed", type=int, default=42)
    return parser.parse_args(args)
def get_checkpoint(version: int) -> Path:
    """Locate the 'best_model*' checkpoint saved by lightning run *version*."""
    ckpt_dir = Path(f"lightning_logs/version_{version}/checkpoints")
    matches = list(ckpt_dir.glob("best_model*.ckpt"))
    # Exactly as before: take the first (and expected only) match.
    return matches[0]
def train_fully_supervised(datamodule, model_name) -> int:
    """Train *model_name* on the labelled data only.

    Early-stops on the mAP metric, checkpoints the best epoch, then reloads
    that checkpoint and runs the test split through it.

    Returns: the lightning logger version of this run, used afterwards to
    locate the checkpoint directory via get_checkpoint().
    """
    model = FullySupervised(
        model_base=model_name,
        num_classes=len(CLASSNAME_TO_IDX),
    )
    fully_supervised_trainer = pl.Trainer(
        callbacks=[
            EarlyStopping(monitor=Metrics.MAP, mode="max", patience=10),
            ModelCheckpoint(filename=BEST_MODEL_NAME, monitor=Metrics.MAP, mode="max"),
        ],
        gpus=torch.cuda.device_count(),
    )
    fully_supervised_trainer.fit(model, datamodule=datamodule)
    # Reload the best (not the last) weights before evaluating.
    best_model = FullySupervised.load_from_checkpoint(
        get_checkpoint(fully_supervised_trainer.logger.version),
        model_base=model_name,
        num_classes=len(CLASSNAME_TO_IDX),
    )
    fully_supervised_trainer.test(best_model, datamodule=datamodule)
    return fully_supervised_trainer.logger.version
def train_teacher_student(datamodule, model_name, model_checkpoint) -> "SemiSupervised":
    """Distil the fully-supervised checkpoint into a semi-supervised model.

    Adds the unlabelled dataset to *datamodule*, wraps the trained teacher
    in a SemiSupervised module, trains with early stopping on mAP, then
    reloads and tests the best checkpoint.

    Returns: the best SemiSupervised model.  (The annotation previously
    said ``-> int`` but the function returns the model, not a version id.)
    """
    unlabelled_ds = UnlabelledData(root=DATAFOLDER_PATH)
    datamodule.add_unlabelled_training_dataset(unlabelled_ds)
    org_model = FullySupervised.load_from_checkpoint(
        model_checkpoint,
        model_base=model_name,
        num_classes=len(CLASSNAME_TO_IDX),
    )
    model = SemiSupervised(
        trained_model=org_model.model,
        model_base=model_name,
        num_classes=len(CLASSNAME_TO_IDX),
    )
    trainer = pl.Trainer(
        gpus=torch.cuda.device_count(),
        callbacks=[
            EarlyStopping(monitor=Metrics.MAP, mode="max", patience=10),
            ModelCheckpoint(filename=BEST_MODEL_NAME, monitor=Metrics.MAP, mode="max"),
        ],
    )
    trainer.fit(model, datamodule=datamodule)
    # Evaluate the best (checkpointed) student, not the last epoch.
    best_model = SemiSupervised.load_from_checkpoint(
        get_checkpoint(trainer.logger.version),
        model_base=model_name,
        num_classes=len(CLASSNAME_TO_IDX),
    )
    trainer.test(best_model, datamodule=datamodule)
    return best_model
def main(args=None):
    """CLI entry point: run the fully-supervised phase, then the
    teacher-student phase, logging both runs to MLflow and logging the
    final semi-supervised model as an MLflow artifact.
    """
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)
    mlflow.set_experiment(experiment_name=args.mlflow_experiment)
    pl.seed_everything(args.seed)
    # Train/val come from splitting the non-eval labelled data; the eval
    # split serves as the held-out test set.
    datamodule = DataModule(
        *LabelledData(root=DATAFOLDER_PATH, eval=False).split(
            transforms=[train_augmentations, None]
        ),
        test_dataset=LabelledData(root=DATAFOLDER_PATH, eval=True),
        num_workers=args.workers,
    )
    mlflow.pytorch.autolog()
    with mlflow.start_run(run_name=f"{args.model}_fully_supervised"):
        version_id = train_fully_supervised(datamodule, args.model)
    best_model_checkpoint = get_checkpoint(version_id)
    with mlflow.start_run(run_name=f"{args.model}_teacher_student"):
        best_model = train_teacher_student(
            datamodule, args.model, best_model_checkpoint
        )
        mlflow.pytorch.log_model(best_model.model, artifact_path="model")
# Script entry point.
if __name__ == "__main__":
    main()
|
SmallRobotCompany/smallteacher
|
smallssd/end_to_end.py
|
end_to_end.py
|
py
| 4,407 |
python
|
en
|
code
| 5 |
github-code
|
6
|
71060757628
|
import turtle
from math import sin, cos, pi
# Chord-drawing pattern: connect pairs of points on a circle of radius r
# that sit n radians apart, advancing the start angle by inc each step.
r = 200
inc = 2*pi/100
t = 0
n = 1.5
for i in range (100):
    x1 = r * sin(t)
    y1 = r * cos(t)
    x2 = r * sin(t+n)
    y2 = r * cos(t+n)
    # Jump to the chord start without drawing, then draw to the end point.
    turtle.penup()
    turtle.goto(x1, y1)
    turtle.pendown()
    turtle.goto(x2, y2)
    t += inc
|
Camilotk/python-pooii
|
tutoriais/desenho.py
|
desenho.py
|
py
| 290 |
python
|
en
|
code
| 0 |
github-code
|
6
|
69809912829
|
import threading
from datetime import datetime
from time import sleep
from random import randint
from queue import Queue
# loops = [4,2]
# def loop(nloop,nsec):
# print('start loop',nloop,'at:',datetime.now())
# sleep(nsec)
# print('loop',nloop,'done at:',datetime.now())
# def main():
# print('starting at:',datetime.now())
# threads = []
# nloops = range(len(loops))
# for i in nloops:
# t = threading.Thread(target=loop,args=(i,loops[i]))
# threads.append(t)
# for i in nloops:
# threads[i].start()
# for i in nloops:
# threads[i].join()
# print('all DONE at:',datetime.now())
# class ThreadFunc(object):
# def __init__(self,func,args,name=''):
# self.name = name
# self.func = func
# self.args = args
# def __call__(self): # 可执行函数
# print(self.name)
# self.func(*self.args)
# def main():
# print('starting at:',datetime.now())
# threads = []
# nloops = range(len(loops))
# for i in nloops:
# t = threading.Thread(target=ThreadFunc(loop,(i,loops[i]),loop.__name__))
# threads.append(t)
# for i in nloops:
# threads[i].start()
# for i in nloops:
# threads[i].join()
# print('all DONE at:',datetime.now())
# class Student(object):
# def __init__(self,name,age):
# self.name = name
# self.age = age
# def __call__(self):
# print(self.name,self.age)
class MyThread(threading.Thread):
    """A Thread that remembers a callable plus its positional arguments
    and invokes them when the thread runs."""

    def __init__(self, func, args, name=''):
        threading.Thread.__init__(self)
        self.func = func
        self.args = args
        self.name = name

    def run(self):
        # Executed by start(); simply forwards to the stored callable.
        self.func(*self.args)
def writeQ(queue):
    """Produce one 'xxx' token onto *queue* (blocking) and report its size.

    Idiom fix: the blocking flag is now passed as ``block=True`` instead of
    the bare positional ``1`` of the original -- same behaviour, clearer.
    """
    print('producing object for Q...')
    queue.put('xxx', block=True)
    print('size now', queue.qsize())
def readQ(queue):
    """Consume one item from *queue* (blocking) and report the new size.

    Idiom fix: the blocking flag is now passed as ``block=True`` instead of
    the bare positional ``1`` of the original -- same behaviour, clearer.
    """
    val = queue.get(block=True)
    print(val,'consumed object from Q ... size now',queue.qsize())
def writer(queue, loops):
    """Producer: push *loops* items onto *queue*, pausing 1-3 s after each."""
    for _ in range(loops):
        writeQ(queue)
        sleep(randint(1, 3))
def reader(queue, loops):
    """Consumer: pop *loops* items from *queue*, pausing 2-5 s after each."""
    for _ in range(loops):
        readQ(queue)
        sleep(randint(2, 5))
# Producer/consumer pair launched symmetrically by main().
funcs = [writer,reader]
nfuncs = range(len(funcs))
def main():
    # Run writer and reader threads over one shared bounded queue for a
    # random number of iterations, then wait for both to finish.
    nloops = randint(2,5)
    print('nloops',nloops)
    q = Queue(32)
    threads = []
    for i in nfuncs:
        t = MyThread(funcs[i],(q,nloops),funcs[i].__name__)
        threads.append(t)
    for i in nfuncs:
        threads[i].start()
    for i in nfuncs:
        threads[i].join()
    print('all DONE')
if __name__ == '__main__':
    main()
    # Student('wc',23)()
|
algebrachan/pythonStudy
|
py_by_myself/study_test/thread_test.py
|
thread_test.py
|
py
| 2,433 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2254678822
|
import urllib
from xml.dom import minidom
import re
def buildResponse(node_list):
    """Join definition strings with newlines, trimming surrounding space.

    Equivalent to concatenating each entry followed by '\\n' and stripping
    the result, but O(n) via str.join instead of the original quadratic
    ``+`` concatenation loop.
    """
    return "\n".join(node_list).strip()
def buildURL(key, word):
    """Build the Merriam-Webster collegiate-dictionary lookup URL for *word*."""
    base = "http://www.dictionaryapi.com/api/v1/references/collegiate/xml/"
    return base + word + "?key=" + key
def getXML(word):
    # Fetch the raw dictionary XML for *word* and parse it into a DOM tree.
    # NOTE: urllib.urlopen is Python 2 API; Python 3 would need
    # urllib.request.urlopen.
    # NOTE(review): the API key is hard-coded here -- consider moving it to
    # configuration.
    url = buildURL("1a276aec-1aa8-42d4-9575-d29c2d4fb105", word)
    response = urllib.urlopen(url).read()
    data = minidom.parseString(str(response))
    return data
def getDefinition(word):
    """Fetch up to three dictionary senses for *word*, newline-joined."""
    document = getXML(word)
    senses = []
    for entry in document.getElementsByTagName('def'):
        dt_nodes = entry.getElementsByTagName('dt')
        senses.append(str(dt_nodes[0].childNodes[0].nodeValue))
    # Cap the response at the first three senses (a no-op for shorter
    # lists, which is exactly what the original two-branch return did).
    return buildResponse(senses[:3])
|
sarthfrey/Texty
|
dictionaryDef.py
|
dictionaryDef.py
|
py
| 819 |
python
|
en
|
code
| 9 |
github-code
|
6
|
35031034974
|
from app.models.player import *
import random
# Module-level game state shared by every helper below.
player1 = Player("PLayer 1")
player2 = Player("Player 2")
players = [player1, player2]
def one_player(name1):
    """Single-player setup: the human is *name1*, the opponent the computer."""
    player1.name, player2.name = name1, "Computer"
def add_players(name1, name2):
    """Two-player setup: assign both display names at once."""
    player1.name, player2.name = name1, name2
def random_move(self):
    """Pick a move uniformly at random.  The *self* parameter is unused but
    kept for signature compatibility with existing callers."""
    return random.choice(("rock", "paper", "scissors"))
def set_moves(move1, move2):
    """Record both players' chosen moves for this round."""
    player1.move, player2.move = move1, move2
def result():
    """Score the round: a draw when moves match, otherwise standard
    rock-paper-scissors rules.  Any unrecognised pairing falls through to
    a player-2 win, exactly like the original if/elif chain."""
    if player1.move == player2.move:
        return "Oooh it's a draw"
    beats = {"rock": "scissors", "scissors": "paper", "paper": "rock"}
    if beats.get(player1.move) == player2.move:
        return players[0].name + " wins"
    return players[1].name + " wins"
|
linseycurrie/Wk2-HW-RockPaperScissors-Flask
|
app/models/play_game.py
|
play_game.py
|
py
| 953 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6397362139
|
import sys
from math import sqrt
from itertools import compress
# 利用byte求质数
def get_primes_3(n):
    """ Returns a list of primes < n for n > 2 """
    # Odds-only sieve: slot k stands for the odd number 2*k + 1.
    odd_flags = bytearray([True]) * (n // 2)
    for candidate in range(3, int(n ** 0.5) + 1, 2):
        if odd_flags[candidate // 2]:
            # Knock out odd multiples, starting at candidate**2.
            start = candidate * candidate // 2
            count = (n - candidate * candidate - 1) // (2 * candidate) + 1
            odd_flags[start::candidate] = bytearray(count)
    return [2, *compress(range(3, n, 2), odd_flags[1:])]
def is_prime(n):
    """Trial-division primality test, valid for any n >= 1.

    Bug fix: the original only checked odd divisors ("only used to test
    odd numbers") and therefore wrongly reported 1 -- and even composites
    such as 4 -- as prime.  Guard n < 2 and even n explicitly.
    """
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    return all(n % d for d in range(3, round(sqrt(n)) + 1, 2))
def f(a, b):
    '''
    Won't be tested for b greater than 10_000_000
    >>> f(3, 3)
    The number of prime numbers between 3 and 3 included is 1
    >>> f(4, 4)
    The number of prime numbers between 4 and 4 included is 0
    >>> f(2, 5)
    The number of prime numbers between 2 and 5 included is 3
    >>> f(2, 10)
    The number of prime numbers between 2 and 10 included is 4
    >>> f(2, 11)
    The number of prime numbers between 2 and 11 included is 5
    >>> f(1234, 567890)
    The number of prime numbers between 1234 and 567890 included is 46457
    >>> f(89, 5678901)
    The number of prime numbers between 89 and 5678901 included is 392201
    >>> f(89, 5678901)
    The number of prime numbers between 89 and 5678901 included is 392201
    '''
    # Performance fix: the original first counted primes in [a, b] by
    # per-number trial division -- an O((b-a) * sqrt(b)) loop whose result
    # was immediately overwritten below -- so that dead loop is removed.
    # get_primes_3(x + 1) yields primes < x + 1, i.e. primes <= x.
    less_a_primes = get_primes_3(a + 1)
    less_b_primes = get_primes_3(b + 1)
    # Drop every prime smaller than a, leaving exactly the primes in [a, b].
    for item in less_a_primes:
        if item < a:
            less_b_primes.remove(item)
    count = len(less_b_primes)
    print(f'The number of prime numbers between {a} and {b} included is {count}')
if __name__ == '__main__':
    # Run the doctests embedded in f()'s docstring.
    import doctest
    doctest.testmod()
|
YuanG1944/COMP9021_19T3_ALL
|
9021 Python/review/mid-examples/2017S1_Sol/5.py
|
5.py
|
py
| 1,735 |
python
|
en
|
code
| 1 |
github-code
|
6
|
20495057760
|
import sys, iptc, re, socket
# True once a standalone option (e.g. -l) has been handled.
single_options = False
# Names accepted for the ready-made rules implemented by the block_* helpers.
predesigned_rules = ['BlockIncomingSSH', 'BlockOutgoingSSH', 'BlockAllSSH', 'BlockIncomingHTTP', 'BlockIncomingHTTPS',\
    'BlockIncomingPing', 'BlockInvalidPackets', 'BlockSYNFlooding', 'BlockXMASAttack', 'ForceSYNPackets']
# Protocol keywords accepted on the command line.
accepted_protocols = ['ah','egp','esp','gre','icmp','idp','igmp','ip','pim','pum','pup','raw','rsvp','sctp','tcp','tp','udp']
# Parsed command-line state; populated by the argument-handling code at the
# bottom of the file.  None/0/False mean "not supplied".
ipsrc = None
ipsrc_range = None
ipdst = None
ipdst_range = None
portsrc = None
portsrc_range = None
portdst = None
portdst_range = None
protocol = None
interfacein = None
interfaceout = None
target = None
custom_position = 0
direction = None
checker = False
############################### List of Predefined Rules #############################
def block_incoming_ssh():
    """Drop NEW inbound TCP connections to port 22 (SSH) on the INPUT chain."""
    input_chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "INPUT")
    ssh_rule = iptc.Rule()
    ssh_rule.protocol = "tcp"
    port_match = ssh_rule.create_match("tcp")
    port_match.dport = "22"
    state_match = ssh_rule.create_match("state")
    state_match.state = "NEW"
    ssh_rule.target = iptc.Target(ssh_rule, "DROP")
    input_chain.insert_rule(ssh_rule)
    print("Successfully Created")
def block_outgoing_ssh():
    """Drop NEW outbound TCP connections to port 22 (SSH) on the OUTPUT chain."""
    output_chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "OUTPUT")
    ssh_rule = iptc.Rule()
    ssh_rule.protocol = "tcp"
    port_match = ssh_rule.create_match("tcp")
    port_match.dport = "22"
    state_match = ssh_rule.create_match("state")
    state_match.state = "NEW"
    ssh_rule.target = iptc.Target(ssh_rule, "DROP")
    output_chain.insert_rule(ssh_rule)
    print("Successfully Created")
def block_all_ssh():
    """Drop NEW SSH (TCP/22) connections in both directions.

    NOTE(review): the same Rule object is inserted into both chains, which
    mirrors the original code -- confirm python-iptables tolerates sharing
    one Rule instance across chains.
    """
    input_chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "INPUT")
    output_chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "OUTPUT")
    ssh_rule = iptc.Rule()
    ssh_rule.protocol = "tcp"
    port_match = ssh_rule.create_match("tcp")
    port_match.dport = "22"
    state_match = ssh_rule.create_match("state")
    state_match.state = "NEW"
    ssh_rule.target = iptc.Target(ssh_rule, "DROP")
    input_chain.insert_rule(ssh_rule)
    output_chain.insert_rule(ssh_rule)
    print("Successfully Created")
def block_incoming_http():
    """Drop NEW inbound TCP connections to port 80 (HTTP) on the INPUT chain."""
    input_chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "INPUT")
    http_rule = iptc.Rule()
    http_rule.protocol = "tcp"
    port_match = http_rule.create_match("tcp")
    port_match.dport = "80"
    state_match = http_rule.create_match("state")
    state_match.state = "NEW"
    http_rule.target = iptc.Target(http_rule, "DROP")
    input_chain.insert_rule(http_rule)
    print("Successfully Created")
def block_incoming_https():
    """Drop NEW inbound TCP connections to port 443 (HTTPS) on the INPUT chain."""
    input_chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "INPUT")
    https_rule = iptc.Rule()
    https_rule.protocol = "tcp"
    port_match = https_rule.create_match("tcp")
    port_match.dport = "443"
    state_match = https_rule.create_match("state")
    state_match.state = "NEW"
    https_rule.target = iptc.Target(https_rule, "DROP")
    input_chain.insert_rule(https_rule)
    print("Successfully Created")
def block_incoming_ping():
    """Drop inbound ICMP echo-reply packets on the INPUT chain.

    NOTE(review): the match is 'echo-reply', not 'echo-request'; blocking
    inbound pings usually targets echo-request -- confirm the intent.
    """
    input_chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "INPUT")
    ping_rule = iptc.Rule()
    ping_rule.protocol = "icmp"
    icmp_match = ping_rule.create_match("icmp")
    icmp_match.icmp_type = "echo-reply"
    ping_rule.target = iptc.Target(ping_rule, "DROP")
    input_chain.insert_rule(ping_rule)
    print("Successfully Created")
def block_invalid_packets():
    """Drop packets whose connection-tracking state is INVALID.

    Bug fix: the state string was previously "iNVALID"; iptables state
    tokens are the upper-case names NEW/ESTABLISHED/RELATED/INVALID, so
    the misspelled value could not match as intended.
    """
    chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "INPUT")
    rule = iptc.Rule()
    match = rule.create_match("state")
    match.state = "INVALID"
    target = iptc.Target(rule, "DROP")
    rule.target = target
    chain.insert_rule(rule)
    print("Successfully Created")
def syn_flooding():
    """ACCEPT inbound TCP SYNs (SYN set; FIN/RST/ACK clear) at up to
    10/second.

    NOTE(review): an ACCEPT-with-limit rule only throttles if a later rule
    drops the excess SYNs -- confirm the surrounding policy provides that.
    """
    input_chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "INPUT")
    syn_rule = iptc.Rule()
    syn_rule.protocol = "tcp"
    flags_match = syn_rule.create_match("tcp")
    flags_match.tcp_flags = [ 'FIN,SYN,RST,ACK', 'SYN' ]
    rate_match = syn_rule.create_match("limit")
    rate_match.limit = "10/second"
    syn_rule.target = iptc.Target(syn_rule, "ACCEPT")
    input_chain.insert_rule(syn_rule)
    print("Successfully Created")
def block_xmas_attack():
    """Drop TCP packets with every flag set (the XMAS-scan signature)."""
    input_chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "INPUT")
    xmas_rule = iptc.Rule()
    xmas_rule.protocol = "tcp"
    flags_match = xmas_rule.create_match("tcp")
    flags_match.tcp_flags = [ 'ALL', 'ALL' ]
    xmas_rule.target = iptc.Target(xmas_rule, "DROP")
    input_chain.insert_rule(xmas_rule)
    print("Successfully Created")
def force_syn_packets():
    """Drop NEW TCP connections whose first packet is not a SYN."""
    input_chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "INPUT")
    nosyn_rule = iptc.Rule()
    nosyn_rule.protocol = "tcp"
    syn_match = nosyn_rule.create_match("tcp")
    # "!1" negates the syn match: packets WITHOUT the SYN flag.
    syn_match.syn = "!1"
    state_match = nosyn_rule.create_match("state")
    state_match.state = "NEW"
    nosyn_rule.target = iptc.Target(nosyn_rule, "DROP")
    input_chain.insert_rule(nosyn_rule)
    print("Successfully Created")
# Function to delete rules
all_rules_deleted = True
def delete_rules(table):
    """Delete every rule in every chain of *table*, re-sweeping on failure.

    Deletions are attempted in place; any failure (presumably because the
    table mutates underneath the iteration) clears the module-level flag
    and the function recurses to sweep again until a pass completes with
    no errors.  NOTE(review): the bare ``except`` hides the real failure
    cause -- consider narrowing it.
    """
    global all_rules_deleted
    all_rules_deleted = True
    for chain in table.chains:
        #print(chain.name)
        for rule in chain.rules:
            try:
                chain.delete_rule(rule)
                print(rule.protocol, rule.src, rule.dst, rule.target.name, "is DELETED")
            except:
                all_rules_deleted = False
    if(all_rules_deleted==False):
        #print("First Iteration Failed")
        delete_rules(table)
# Function to delete a single rule
def delete_rule(rule, table, direction = None):
    """Delete the rule at global index ``rule_number`` from the INPUT or
    OUTPUT chain of *table*, depending on *direction*.

    NOTE(review): the *rule* parameter is never used -- the loop variable
    below shadows it, and the match is made against the global
    ``rule_number``, which is defined by the argument-parsing code later
    in this file (not visible in this chunk).  Confirm the intended
    semantics.  Also note the 'output' error message says "for Input".
    """
    if(direction == 'input'):
        chain = iptc.Chain(table, "INPUT")
        deleted1 = False
        for index, rule in enumerate(chain.rules):
            if(int(rule_number) == index):
                try:
                    chain.delete_rule(rule)
                    print("Rule Successfully Deleted for Input")
                    deleted1 = True
                except:
                    sys.exit("The rule could not be deleted for Input. Please, try again.")
        if(deleted1 == False):
            print("The Rule Could Not Be Found for Input")
    elif (direction == 'output'):
        chain = iptc.Chain(table, "OUTPUT")
        deleted1 = False
        for index, rule in enumerate(chain.rules):
            if(int(rule_number) == index):
                try:
                    chain.delete_rule(rule)
                    print("Rule Successfully Deleted for Output")
                    deleted1 = True
                except:
                    sys.exit("The rule could not be deleted for Input. Please, try again.")
        if(deleted1 == False):
            print("The Rule Could Not Be Found for Output")
    else:
        sys.exit("Delete rule function error. Incorrect parameter")
# First check, for options that should be used alone
# Scan argv for the "standalone" options (-l, -r, -d, -all, -rule).  These
# execute immediately and set single_options so the fine-grained
# rule-builder below is skipped.
for index, value in enumerate(sys.argv):
    if(value == '-l' ):
        # -l: list every rule in every chain of the filter table.
        if (len(sys.argv)) != 2:
            sys.exit("The option -l does not accept additional options. Please, type: myFirewall -l")
        single_options = True
        table = iptc.Table(iptc.Table.FILTER)
        for chain in table.chains:
            #print ("Chain ",chain.name)
            rule_type = chain.name[:3]
            # NOTE(review): this inner loop shadows the outer `index`/`rule`
            # loop variables of the argv scan.
            for index, rule in enumerate(chain.rules):
                dport = None
                sport = None
                ip_src_range = None
                ip_dst_range = None
                match_state = None
                match_tcp_flags = None
                # Collect printable attributes from every match attached to
                # the rule (the last match providing a field wins).
                for match in rule.matches:
                    if (match.dport != None):
                        dport = match.dport
                    if (match.sport != None):
                        sport = match.sport
                    if (match.src_range != None):
                        ip_src_range = match.src_range
                    if (match.dst_range != None):
                        ip_dst_range = match.dst_range
                    if (match.state != None):
                        match_state = match.state
                    if (match.tcp_flags != None):
                        match_tcp_flags = match.tcp_flags[match.tcp_flags.find(' ')+1:]
                # Prefer an iprange match over the rule's plain src/dst.
                if(ip_src_range != None):
                    source_ip = ip_src_range
                else:
                    source_ip = rule.src
                if(ip_dst_range != None):
                    destination_ip = ip_dst_range
                else:
                    destination_ip = rule.dst
                print ("==========================================")
                print ("RULE("+ rule_type+")", index, "||", "proto:", rule.protocol + " ||", "sport:", str(sport) + " ||",
                       "dport:", str(dport) + " ||", "src:", source_ip + " ||", "dst:", destination_ip + " ||\n", "|| inInt:",
                       str(rule.in_interface) + " ||", "outInt:", str(rule.out_interface) + " ||",
                       "tcpflags:", str(match_tcp_flags) + " ||", "state:", str(match_state) + " ||", "Target:", rule.target.name)
                print ("==========================================")
    elif(value == '-r'):
        # -r: flush every rule from all five iptables tables.
        if (len(sys.argv)) != 2:
            sys.exit("The option -r does not accept additional options. Please, type: myFirewall -r")
        single_options = True
        table1 = iptc.Table(iptc.Table.FILTER)
        delete_rules(table1)
        table2 = iptc.Table(iptc.Table.MANGLE)
        delete_rules(table2)
        table3 = iptc.Table(iptc.Table.NAT)
        delete_rules(table3)
        table4 = iptc.Table(iptc.Table.RAW)
        delete_rules(table4)
        table5 = iptc.Table(iptc.Table.SECURITY)
        delete_rules(table5)
    elif(value == '-d'):
        # -d N [-in|-out]: delete rule number N from INPUT and/or OUTPUT.
        if (len(sys.argv) != 3 and len(sys.argv) != 4):
            sys.exit("The option -d does not accept these options. Please, type: myFirewall -d RuleNumer [-in|-out]")
        single_options = True
        table = iptc.Table(iptc.Table.FILTER)
        rule_number = sys.argv[2]
        if(len(sys.argv) == 4):
            if (sys.argv[3] == '-in'):
                delete_rule(rule_number, table, direction = 'input')
            elif (sys.argv[3] == '-out'):
                delete_rule(rule_number, table, direction = 'output')
            else:
                sys.exit("Incorrect parameter. Please, type: myFirewall -d RuleNumer [-in|-out]")
        else:
            # No direction supplied: try both chains.
            delete_rule(rule_number, table, direction = 'input')
            delete_rule(rule_number, table, direction = 'output')
        #for chain in table.chains:
            #for rule in chain.rules:
            #    chain.delete_rule(rule)
    elif(value == '-all'):
        # -all ACCEPT|DROP: insert a catch-all rule in INPUT and OUTPUT.
        if ((len(sys.argv) != 3) and (sys.argv[index+1]!='ACCEPT') and (sys.argv[index+1]!='DROP')):
            sys.exit("The -all option lets the user to ACCEPT or DROP all packets, independently of ports,"+\
                     " protocols or IPs. Please, specify a ACCEPT or DROP argument")
        else:
            single_options = True
            rule = iptc.Rule()
            rule.target = rule.create_target(sys.argv[index+1])
            chain1 = iptc.Chain(iptc.Table(iptc.Table.FILTER), "INPUT")
            chain2 = iptc.Chain(iptc.Table(iptc.Table.FILTER), "OUTPUT")
            chain1.insert_rule(rule)
            chain2.insert_rule(rule)
    elif(value == '-rule'):
        # -rule NAME: install one of the predesigned rules; with no NAME,
        # print the available rule names instead.
        single_options = True
        if (len(sys.argv)) != 3:
            if (len(sys.argv) == 2):
                print("The list of rules available is:\n")
                for i in predesigned_rules:
                    print(i)
            else:
                sys.exit("The option -r does not accept additional options. Please, type: -rule RULE")
        elif(sys.argv[index+1] == 'BlockIncomingSSH'):
            block_incoming_ssh()
        elif(sys.argv[index+1] == 'BlockOutgoingSSH'):
            block_outgoing_ssh()
        elif(sys.argv[index+1] == 'BlockAllSSH'):
            block_all_ssh()
        elif(sys.argv[index+1] == 'BlockIncomingHTTP'):
            block_incoming_http()
        elif(sys.argv[index+1] == 'BlockIncomingHTTPS'):
            block_incoming_https()
        elif(sys.argv[index+1] == 'BlockIncomingPing'):
            block_incoming_ping()
        elif(sys.argv[index+1] == 'BlockInvalidPackets'):
            block_invalid_packets()
        elif(sys.argv[index+1] == 'BlockSYNFlooding'):
            syn_flooding()
        elif(sys.argv[index+1] == 'BlockXMASAttack'):
            block_xmas_attack()
        elif(sys.argv[index+1] == 'ForceSYNPackets'):
            force_syn_packets()
        else:
            print("Rule not available. The list of available rules is:\n")
            for i in predesigned_rules:
                print(i)
# Blank separator line after the standalone-option phase.
print("")
# Second phase: no standalone option was used, so parse the fine-grained
# flags and assemble a single iptables rule from them.
# NOTE(review): ipsrc/ipdst/port*/protocol/interface*/target/direction and
# `checker`/`custom_position` are presumably initialised earlier in the
# file (outside this view) — verify their defaults.
if(not single_options):
    # Iterator to retrieve all information and create a Rule
    for index, value in enumerate(sys.argv):
        if(value == '-ipsrc'):
            # Accept one IPv4 address or a dotted range "a.b.c.d-e.f.g.h".
            match_single = re.search('^(([0-9]?[0-9]\.)|(1[0-9][0-9]\.)|(2[0-5][0-5]\.)){3}(([0-9]?[0-9])|(1[0-9][0-9])|(2[0-5][0-5]))$', sys.argv[index+1])
            match_range = re.search('^(([0-9]?[0-9]\.)|(1[0-9][0-9]\.)|(2[0-5][0-5]\.)){3}(([0-9]?[0-9])|(1[0-9][0-9])|(2[0-5][0-5]))-(([0-9]?[0-9]\.)|(1[0-9][0-9]\.)|(2[0-5][0-5]\.)){3}(([0-9]?[0-9])|(1[0-9][0-9])|(2[0-5][0-5]))$', sys.argv[index+1])
            if((match_single==None) and (match_range==None)):
                sys.exit("The IP address format is incorrect")
            else:
                checker = True
                if(match_single!=None):
                    ipsrc = sys.argv[index+1]
                if(match_range!=None):
                    ipsrc_range = sys.argv[index+1]
        elif(value == '-ipdst'):
            match_single = re.search('^(([0-9]?[0-9]\.)|(1[0-9][0-9]\.)|(2[0-5][0-5]\.)){3}(([0-9]?[0-9])|(1[0-9][0-9])|(2[0-5][0-5]))$', sys.argv[index+1])
            match_range = re.search('^(([0-9]?[0-9]\.)|(1[0-9][0-9]\.)|(2[0-5][0-5]\.)){3}(([0-9]?[0-9])|(1[0-9][0-9])|(2[0-5][0-5]))-(([0-9]?[0-9]\.)|(1[0-9][0-9]\.)|(2[0-5][0-5]\.)){3}(([0-9]?[0-9])|(1[0-9][0-9])|(2[0-5][0-5]))$', sys.argv[index+1])
            if(match_single==None and match_range==None):
                sys.exit("The IP address format is incorrect")
            else:
                checker = True
                if(match_single!=None):
                    ipdst = sys.argv[index+1]
                if(match_range!=None):
                    ipdst_range = sys.argv[index+1]
        elif(value == '-portsrc'):
            # Accept one port or a "low:high" range, both in 1..65535.
            match_single = re.search('^[0-9]+$', sys.argv[index+1])
            match_range = re.search('^[0-9]+:[0-9]+$', sys.argv[index+1])
            if(match_single==None and match_range==None):
                sys.exit("The Port/Port range format is incorrect")
            checker = True
            if(match_single != None):
                if(int(sys.argv[index+1])<65536 and int(sys.argv[index+1])>0):
                    portsrc = sys.argv[index+1]
                else:
                    sys.exit("The specified port is out of the boundaries. Please, type a value between 1 and 65535")
            elif(match_range != None):
                first_port_group = int(sys.argv[index+1][:sys.argv[index+1].find(':')])
                second_port_group = int(sys.argv[index+1][sys.argv[index+1].find(':')+1:])
                if(((first_port_group<65536) and (first_port_group>0) and (second_port_group<65536) and (second_port_group>0))):
                    portsrc_range = sys.argv[index+1]
                else:
                    sys.exit("The specified port range is out of the boundaries. Please, type values between 1 and 65535")
            else:
                sys.exit("Port incorrectly parsed")
        elif(value == '-portdst'):
            match_single = re.search('^[0-9]+$', sys.argv[index+1])
            match_range = re.search('^[0-9]+:[0-9]+$', sys.argv[index+1])
            if(match_single==None and match_range==None):
                sys.exit("The Port/Port range format is incorrect")
            checker = True
            if(match_single != None):
                if(int(sys.argv[index+1])<65536 and int(sys.argv[index+1])>0):
                    portdst = sys.argv[index+1]
                else:
                    sys.exit("The specified port is out of the boundaries. Please, type a value between 1 and 65535")
            elif(match_range != None):
                first_port_group = int(sys.argv[index+1][:sys.argv[index+1].find(':')])
                second_port_group = int(sys.argv[index+1][sys.argv[index+1].find(':')+1:])
                if(((first_port_group<65536) and (first_port_group>0) and (second_port_group<65536) and (second_port_group>0))):
                    portdst_range = sys.argv[index+1]
                else:
                    sys.exit("The specified port range is out of the boundaries. Please, type values between 1 and 65535")
            else:
                sys.exit("Port incorrectly parsed")
        elif(value == '-proto'):
            # Validate the protocol against the accepted_protocols list.
            accepted = False
            for i in accepted_protocols:
                if(i == sys.argv[index+1]):
                    accepted = True
                else:
                    protocol = sys.argv[index+1]
            if(not accepted):
                # NOTE(review): sys.exit() takes a single argument; these
                # extra arguments make the exit status a tuple rather than
                # printing the protocol list.
                sys.exit("The protocol provided is not accepted. The list of accepted protocols is:",'ah',
                         'egp','esp','gre','icmp','idp','igmp','ip','pim','pum','pup','raw','rsvp','sctp','tcp','tp','udp')
            checker = True
        elif(value == '-intin'):
            # Input interface must exist on this host.
            available_interface = False
            for i in socket.if_nameindex():
                if(i[1] == sys.argv[index+1]):
                    available_interface = True
            if(available_interface == False):
                sys.exit("The selected interface is not available on this system")
            else:
                interfacein = sys.argv[index+1]
                checker = True
        elif(value == '-intout'):
            available_interface = False
            for i in socket.if_nameindex():
                if(i[1] == sys.argv[index+1]):
                    available_interface = True
            if(available_interface == False):
                sys.exit("The selected interface is not available on this system")
            else:
                interfaceout = sys.argv[index+1]
                checker = True
        elif(value == '-pos'):
            # Insertion position within the chain (integer >= 0).
            match = re.search('^[0-9]*$', sys.argv[index+1])
            if(match==None):
                sys.exit("Incorrect position format. Please, type an integer >= 0")
            else:
                custom_position = sys.argv[index+1]
                checker = True
        elif(value == '-t'):
            if(sys.argv[index+1] == "ACCEPT"):
                target = "ACCEPT"
            elif(sys.argv[index+1] == "DROP"):
                target = "DROP"
            else:
                sys.exit('Incorrect target option. Please, choose between "ACCEPT" and "DROP"')
            checker = True
        elif(value == '-in'):
            direction = 'incoming'
        elif(value == '-out'):
            direction = 'outgoing'
        else:
            # Non-flag token: legal only if it is the program name (index 0)
            # or the argument consumed by the previous flag (checker True).
            if(checker == True or index==0):
                checker = False
            else:
                sys.exit("Incorrect option: " + value)
    # Assemble the iptc rule from whatever options were parsed above.
    rule = iptc.Rule()
    if(ipsrc != None):
        rule.src = ipsrc
    if(ipsrc_range != None or ipdst_range != None):
        match = rule.create_match("iprange")
        if(ipsrc_range != None):
            match.src_range = ipsrc_range
        else:
            # NOTE(review): if BOTH a source and a destination range were
            # given, only the source range is applied — confirm intent.
            match.dst_range = ipdst_range
    if(ipdst != None):
        rule.dst = ipdst
    if(protocol != None):
        rule.protocol = protocol
        if(protocol == "tcp" or protocol == "udp"):
            match = rule.create_match(protocol)
    if(portsrc != None or portdst != None):
        # Ports require a protocol match; default to tcp if none was given.
        if(protocol == None):
            protocol = "tcp"
            rule.protocol = protocol
            match = rule.create_match(protocol)
        if(portsrc != None):
            match.sport = portsrc
        if(portdst != None):
            match.dport = portdst
    if(portsrc_range != None or portdst_range != None):
        if(protocol == None):
            protocol = "tcp"
            rule.protocol = protocol
            match = rule.create_match(protocol)
        if(portsrc_range != None):
            match.sport = portsrc_range
        if(portdst_range != None):
            match.dport = portdst_range
    if(interfacein != None):
        rule.in_interface = interfacein
    if(interfaceout != None):
        rule.out_interface = interfaceout
    if(target != None):
        rule.target = rule.create_target(target)
    else:
        sys.exit('You must specify a target: -t "ACCEPT" or -t "DROP"')
    if(direction == None):
        # No -in/-out flag: install the rule in both INPUT and OUTPUT.
        chain1 = iptc.Chain(iptc.Table(iptc.Table.FILTER), "INPUT")
        chain2 = iptc.Chain(iptc.Table(iptc.Table.FILTER), "OUTPUT")
        try:
            chain1.insert_rule(rule, position=int(custom_position))
        except:
            sys.exit("Index of insertion out of boundaries for existing Input table. Please, choose a value between 0 and (Max.AmountOfRules-1)")
        try:
            chain2.insert_rule(rule, position=int(custom_position))
        except:
            sys.exit("Index of insertion out of boundaries for Output table. Please, choose a value between 0 and (Max.AmountOfRules-1)")
    elif(direction == "incoming"):
        chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "INPUT")
        try:
            chain.insert_rule(rule, position=int(custom_position))
        except:
            sys.exit("Index of insertion out of boundaries. Please, choose a value between 0 and (Max.AmountOfRules-1)")
    elif(direction == "outgoing"):
        chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "OUTPUT")
        try:
            chain.insert_rule(rule, position=int(custom_position))
        except:
            sys.exit("Index of insertion out of boundaries. Please, choose a value between 0 and (Max.AmountOfRules-1)")
|
syerbes/myFirewall
|
myFirewall.py
|
myFirewall.py
|
py
| 21,668 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28153479584
|
import src.fileIO as io
import src.chris as chris
import src.filepaths as fp
import src.analysis as anal
import src.plotting as plot
from pathlib import Path
def batch_calculate_peak_wavelength(parent_directory,
                                    batch_name,
                                    file_paths,
                                    directory_paths,
                                    plot_files):
    '''
    Calculate sample batch peak wavelength and error, from individual files
    within batch.
    Args:
        parent_directory: <string> parent directory identifier
        batch_name: <string> batch name string
        file_paths: <array> array of target file paths
        directory_paths: <dict> dictionary containing required paths
        plot_files: <string> "True" or "False" for plotting output
    Returns:
        results_dictionary: <dict>
            Batch Name
            File Names
            File Paths
            Secondary Strings
            Individual file values for:
                Background Files
                Region Trim Index: <array> min, max indices
                popt: <array> fano fit parameters:
                    peak, gamma, q, amplitude, damping
                pcov: <array> fano fit errors
                    peak, gamma, q, amplitude, damping
    '''
    batch_dictionary = fp.update_batch_dictionary(
        parent=parent_directory,
        batch_name=batch_name,
        file_paths=file_paths)
    for file in file_paths:
        wavelength, raw_intensity = io.read_GMR_file(file_path=file)
        sample_parameters = fp.sample_information(file_path=file)
        background_file, background_parameters = fp.find_background(
            background_path=directory_paths['Background Path'],
            sample_details=sample_parameters,
            file_string='.txt')
        print(background_file)  # progress/debug output
        if len(background_file) == 0:
            # No matching background spectrum: time-correct the raw
            # intensity and normalise it on its own.
            normalised_intensity = anal.normalise_intensity(
                raw_intensity=anal.timecorrected_intensity(
                    raw_intensity=raw_intensity,
                    integration_time=sample_parameters[
                        f'{parent_directory} Integration Time']))
        else:
            # Background found: normalise against it, correcting both
            # spectra for their respective integration times.
            _, background_raw_intensity = io.read_GMR_file(
                file_path=background_file[0])
            background_parent = background_parameters['Parent Directory']
            normalised_intensity = anal.bg_normal_intensity(
                intensity=raw_intensity,
                background_intensity=background_raw_intensity,
                integration_time=sample_parameters[
                    f'{parent_directory} Integration Time'],
                background_integration_time=background_parameters[
                    f'{background_parent} Integration Time'])
        # Secondary string uniquely identifies this file within the batch.
        out_string = sample_parameters[f'{parent_directory} Secondary String']
        plot.spectrumplt(
            wavelength=wavelength,
            intensity=normalised_intensity,
            out_path=Path(f'{directory_paths["Results Path"]}/{batch_name}_{out_string}'))
        peak_results = chris.calc_peakwavelength(
            wavelength=wavelength,
            normalised_intensity=normalised_intensity,
            sample_details=sample_parameters,
            plot_figure=plot_files,
            out_path=Path(
                f'{directory_paths["Results Path"]}'
                f'/{batch_name}_{out_string}_Peak.png'))
        batch_dictionary.update(
            {f'{out_string} File': sample_parameters})
        batch_dictionary.update(
            {f'{out_string} Background': background_parameters})
        batch_dictionary.update(peak_results)
    return batch_dictionary
if __name__ == '__main__':
    ''' Organisation '''
    # Discover working directories and all spectrum files, grouped by batch.
    root = Path().absolute()
    info, directory_paths = fp.get_directory_paths(root_path=root)
    file_paths = fp.get_files_paths(
        directory_path=directory_paths['Spectrum Path'],
        file_string='.txt')
    parent, batches = fp.get_all_batches(file_paths=file_paths)
    ''' Batch Processing '''
    for batch, filepaths in batches.items():
        out_file = Path(
            f'{directory_paths["Results Path"]}'
            f'/{batch}_Peak.json')
        if out_file.is_file():
            # Results already exist for this batch: skip re-processing.
            pass
        else:
            results_dictionary = batch_calculate_peak_wavelength(
                parent_directory=parent,
                batch_name=batch,
                file_paths=filepaths,
                directory_paths=directory_paths,
                plot_files=info['Plot Files'])
            io.save_json_dicts(
                out_path=out_file,
                dictionary=results_dictionary)
|
jm1261/PeakFinder
|
batch_peakfinder.py
|
batch_peakfinder.py
|
py
| 4,669 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15206966945
|
# -*- coding: utf-8 -*-
"""
Ventricular tachycardia, ventricular bigeminy, Atrial fibrillation,
Atrial fibrillation, Ventricular trigeminy, Ventricular escape ,
Normal sinus rhythm, Sinus arrhythmia, Ventricular couplet
"""
import tkinter as tk
import scipy.io as sio
from PIL import Image, ImageTk
class App():
    """Tkinter GUI that streams pre-recorded cardiac-arrhythmia signals,
    selected by radio button, sample by sample to stdout."""
    # Window width/height in pixels.
    ancho=760
    alto=760
    # True while streaming is active (toggled by the Iniciar/Pausar buttons).
    estado=False
    contadores=[0,0,0,0,0,0,0,0,0]#per-signal counters of the next sample index to emit
    # NOTE(review): contadores is a class-level mutable list — shared by all
    # App instances.
    # Currently selected signal array (set by selec()).
    Signal=0
    def __init__(self):
        #load the .mat variables
        self.raiz=tk.Tk()
        self.importData()
        self.frame=tk.Frame(self.raiz,bg="white")
        self.frame.config(width=self.ancho,height=self.alto)
        self.frame.pack()
        self.titulo=tk.Label(self.frame,bg="white",text="Dispositivo Generador de Arritmias Cardiacas")
        self.titulo.config(font=("Grotesque",24))
        self.titulo.place(x=0,y=0,width=self.ancho,height=self.alto//16)
        self.opcion = tk.IntVar()
        names=["Taquicardia ventricualar","Bigeminismo Ventricular","Fibrilacion atrial","Flutter atrial","Trigeminismo Ventricular",
               "Escape Ventricular","Ritmo Sinusal","Arritmia Sinusal","Couplet Ventricular"]
        # One radio button per arrhythmia type (values 1..9).
        for i in range(1,10):
            tk.Radiobutton(self.frame, text=names[i-1],font=("Grotesque",16) ,variable=self.opcion,bg="white",anchor="w",
                           value=i, command=self.selec).place(x=50,y=self.alto//8+(i-1)*self.alto//20,
                           width=self.ancho//2.5,height=self.alto//32)
        temp=Image.open('LOGO_UMNG.png')
        temp=temp.resize((200, 250), Image.ANTIALIAS)
        self.imagen = ImageTk.PhotoImage(temp)
        tk.Label(self.raiz, image=self.imagen,bg="white").place(x=450,y=140)
        self.nombres=tk.Label(self.frame,bg="white",text="Juan Camilo Sandoval Cabrera\nNohora Camila Sarmiento Palma",anchor="e")
        self.nombres.config(font=("Grotesque",12))
        self.nombres.place(x=420,y=420,width=self.ancho//3,height=self.alto//16)
        tk.Button(self.frame, text="Iniciar",font=("Grotesque",16),command=self.Estado_DataON).place(x=270,y=600)
        tk.Button(self.frame, text="Pausar",font=("Grotesque",16),command=self.Estado_DataOFF).place(x=400,y=600)
        # Kick off the periodic streaming callback.
        self.titulo.after(700,self.Enviar_Data)
    def Estado_DataON(self):
        # "Iniciar" button: resume/start streaming.
        self.estado=True
    def Estado_DataOFF(self):
        # "Pausar" button: pause streaming (the callback keeps rescheduling).
        self.estado=False
    def Enviar_Data(self):
        # Periodic callback: emit the next sample of the selected signal.
        delay=3
        op=self.opcion.get()
        c=op-1
        if self.estado:
            print(self.Signal[0,self.contadores[c]])
            self.contadores[c]+=1
        if c==7:
            # Option 8 ("Arritmia Sinusal") streams with a longer period.
            delay=4
        self.titulo.after(delay,self.Enviar_Data)
    def selec(self):
        # Radio-button callback: pick which signal array to stream.
        op=self.opcion.get()#TODO (translated): finish the selector
        if op==1:
            self.Signal=self.VT #signal arrays
        elif op==2:
            self.Signal=self.VB #signal arrays
        elif op==3:
            self.Signal=self.AFIB #signal arrays
        elif op==4:
            self.Signal=self.AFL #signal arrays
        elif op==5:
            self.Signal=self.VTRI #signal arrays
        elif op==6:
            self.Signal=self.VES #signal arrays
        elif op==7:
            self.Signal=self.S #signal arrays
        elif op==8:
            self.Signal=self.SARR #signal arrays
        elif op==9:
            self.Signal=self.VCOUP #signal arrays
    def iniciar(self):
        # Enter the Tk main loop (blocks until the window closes).
        self.raiz.mainloop()
    def importData(self):
        # Load each arrhythmia recording from its .mat file; every file
        # stores the samples under the 'SignalNorm' key.
        AFIB=sio.loadmat('AFIB.mat')
        self.AFIB=AFIB['SignalNorm']
        AFL=sio.loadmat('AFL.mat')
        self.AFL=AFL['SignalNorm']
        S=sio.loadmat('S.mat')
        self.S=S['SignalNorm']
        VES=sio.loadmat('VS.mat')
        self.VES=VES['SignalNorm']
        VCOUP=sio.loadmat('VCop.mat')
        self.VCOUP=VCOUP['SignalNorm']
        VT=sio.loadmat('TV.mat')
        self.VT=VT['SignalNorm']
        SARR=sio.loadmat('SARR.mat')
        self.SARR=SARR['SignalNorm']
        VB=sio.loadmat('VB.mat')
        self.VB=VB['SignalNorm']
        #VT=sio.loadmat('VT.mat')#IT WAS LOST (translated)
        #self.VT=VT['SignalNorm']
        VTRI=sio.loadmat('VTRI.mat')
        self.VTRI=VTRI['SignalNorm']
def main():
    """Build the arrhythmia-streamer GUI and enter the Tk main loop."""
    gui = App()
    gui.iniciar()

if __name__ == '__main__':
    main()
|
Sandovaljuan99/INMEDUMG
|
Cardiac arrhythmia simulator/IGPY.py
|
IGPY.py
|
py
| 4,934 |
python
|
es
|
code
| 1 |
github-code
|
6
|
73652351869
|
# 给你一个字符串 s 和一个整数 k 。你可以选择字符串中的任一字符,并将其更改为任何其他大写英文字符。该操作最多可执行 k 次。
# 在执行上述操作后,返回包含相同字母的最长子字符串的长度。
class Solution(object):
    def characterReplacement(self, s, k):
        """Length of the longest substring of uppercase string *s* made of a
        single repeated letter after changing at most *k* characters.

        Sliding window: the window is valid while (window size - count of
        its most frequent letter) <= k.  The window never shrinks, so the
        final window size is the answer.

        :type s: str
        :type k: int
        :rtype: int
        """
        counts = [0] * 26
        window_start = 0
        best_freq = 0  # highest per-letter count seen in any valid window
        for window_end, ch in enumerate(s):
            idx = ord(ch) - ord('A')
            counts[idx] += 1
            best_freq = max(best_freq, counts[idx])
            # Too many replacements needed: slide the left edge by one.
            if window_end - window_start + 1 - best_freq > k:
                counts[ord(s[window_start]) - ord('A')] -= 1
                window_start += 1
        return len(s) - window_start
# Example: "ABAB" with 2 replacements can become "AAAA"/"BBBB" -> prints 4.
s = "ABAB"
k = 2
a = Solution()
print(a.characterReplacement(s, k))
|
xxxxlc/leetcode
|
array/characterReplacement.py
|
characterReplacement.py
|
py
| 876 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
7946323385
|
from flaskr.db import get_db
# Number of accounts seeded into the test database by the app fixture.
no_of_existing_accounts = 3
def test_create_account(client, app):
    # POSTing a valid payload should create account number 4 with zero
    # balances and echo the stored representation back.
    expected_account = {
        "account_number": "4",
        "account_name": "Brukskonto",
        "account_nickname": "Min Brukskonto",
        "account_owner_name": "Ola Nordmann",
        "account_type": "DEPOSIT",
        "currency": "NOK",
        "available_balance": "0",
        "booked_balance": "0",
        "status": "open"
    }
    input_data = {
        "account_name": "Brukskonto",
        "account_nickname": "Min Brukskonto",
        "account_owner_name": "Ola Nordmann",
        "account_type": "DEPOSIT",
        "currency": "NOK"
    }
    url = '/v1/accounts'
    assert client.get(url).status_code == 200
    response = client.post(url, json=input_data)
    assert response.status_code == 201  # Created
    # Check that another account has been added to the database
    with app.app_context():
        db = get_db()
        count = db.execute('SELECT COUNT(id) FROM account').fetchone()[0]
        assert count == no_of_existing_accounts + 1
    assert response.json == expected_account
def test_get_accounts(client):
    # GET must return exactly the three seeded accounts, in seeded order.
    expected_result = {
        "accounts": [
            {
                "account_number": "1",
                "account_name": "Brukskonto",
                "account_nickname": "Min Brukskonto",
                "account_owner_name": "Ola Nordmann",
                "account_type": "DEPOSIT",
                "currency": "NOK",
                "available_balance": "10000",
                "booked_balance": "8000",
                "status": "open"
            },
            {
                "account_number": "2",
                "account_name": "Sparekonto",
                "account_nickname": "Min Sparekonto",
                "account_owner_name": "Ola Nordmann",
                "account_type": "SAVING",
                "currency": "NOK",
                "available_balance": "50000",
                "booked_balance": "50000",
                "status": "open"
            },
            {
                "account_number": "3",
                "account_name": "Valutakonto",
                "account_nickname": "Min Valutakonto",
                "account_owner_name": "Ola Nordmann",
                "account_type": "CURRENCY",
                "currency": "USD",
                "available_balance": "5000",
                "booked_balance": "5000",
                "status": "open"
            }
        ]
    }
    url = '/v1/accounts'
    response = client.get(url)
    assert response.status_code == 200  # OK
    assert response.json == expected_result
def test_get_wrong_url(client):
    """GET on an unknown route must return 404."""
    missing_route = '/not_exists'
    response = client.get(missing_route)
    assert response.status_code == 404  # Not found
def test_post_wrong_url(client):
    """POST on an unknown route must return 404 even with a valid payload."""
    missing_route = '/not_exists'
    payload = {
        "account_name": "Brukskonto",
        "account_nickname": "Min Brukskonto",
        "account_owner_name": "Ola Nordmann",
        "account_type": "DEPOSIT",
        "currency": "NOK"
    }
    response = client.post(missing_route, json=payload)
    assert response.status_code == 404  # Not found
def test_create_account_missing_data(client, app):
    # A payload without the required fields must be rejected with 400 and
    # must not create any account row.
    input_data = {
        "nothing_useful": "blah"
    }
    url = '/v1/accounts'
    assert client.get(url).status_code == 200
    response = client.post(url, json=input_data)
    assert response.status_code == 400  # Bad request
    # Check no incomplete accounts have been added to the db
    with app.app_context():
        db = get_db()
        count = db.execute('SELECT COUNT(id) FROM account').fetchone()[0]
        assert count == no_of_existing_accounts
    # Check attempting to insert bad data doesn't break get
    assert client.get(url).status_code == 200
def test_create_account_invalid_account_type(client, app):
    # An unknown account_type must be rejected with 400 and a descriptive
    # body, and must not create any account row.
    input_data = {
        "account_name": "Brukskonto",
        "account_nickname": "Min Brukskonto",
        "account_owner_name": "Ola Nordmann",
        "account_type": "NOT_EXISTS",
        "currency": "NOK"
    }
    url = '/v1/accounts'
    assert client.get(url).status_code == 200
    response = client.post(url, json=input_data)
    assert response.status_code == 400  # Bad request
    assert response.data == b'Account type NOT_EXISTS is not valid'
    # Check no incomplete accounts have been added to the db
    with app.app_context():
        db = get_db()
        count = db.execute('SELECT COUNT(id) FROM account').fetchone()[0]
        assert count == no_of_existing_accounts
    # Check attempting to insert bad data doesn't break get
    assert client.get(url).status_code == 200
|
eilidht/Accounts
|
tests/test_account.py
|
test_account.py
|
py
| 4,629 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8764441086
|
class animal:
    """Small demo class showing @staticmethod and @classmethod usage."""
    # Number of legs every animal is assumed to have.
    leg=4
    @staticmethod
    def sum(x,y):
        """Print the sum of x and y."""
        total = x + y  # renamed: the old local `sum` shadowed the builtin
        print(total)
    @staticmethod
    def mul(x,y):
        """Print the product of x and y."""
        product = x * y  # renamed: old local `mul` shadowed the method name
        print(product)
    @classmethod
    def walk(cls,name):
        """Print how many legs *name* walks on (uses cls.leg so subclasses
        with a different leg count print correctly)."""
        print(f"{name} has {cls.leg} leg")
    @classmethod
    def evenodd(cls,num):
        """Print whether *num* is even or odd."""
        if num%2==0:
            print(f"{num} is even number")
        else:
            # BUG FIX: the original printed the literal text
            # "f{num} is odd number" — the f prefix was inside the quotes.
            print(f"{num} is odd number")
# Demonstrate static- and class-method access via instance and class.
t1=animal()
t1.sum(10,30)       # static method through an instance
animal.mul(10,50)   # static method through the class
t1.walk("dog")      # class method through an instance
animal.evenodd(10)  # class method through the class
|
divyansh251/basic-oops-concepts
|
oops4.py
|
oops4.py
|
py
| 409 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1040065850
|
import numpy as np
import pandas as pd
import operator
from sklearn import preprocessing
# Training data: feature columns followed by a trailing class-label column.
data = pd.read_csv("data.csv",header=None)
# Scaler used to normalise every feature vector into the [0, 1] range.
min_max_scaler = preprocessing.MinMaxScaler(feature_range=(0,1))
def classify(v, k, distance):
    """Classify vector *v* by majority vote among its k neighbours under the
    given *distance* function, using the module-level `data` table."""
    target_values = data.iloc[:, -1]
    neighbour_indexes = knn(data, k, v, distance)
    vote_counts = {}
    for neighbour_index in neighbour_indexes:
        label = target_values[neighbour_index]
        vote_counts[label] = vote_counts.get(label, 0) + 1
    # Most common label among the neighbours (first one wins ties).
    return max(vote_counts.items(), key=lambda item: item[1])[0]
def knn(vectors, k, vector_to_classify, distance):
    """Return the row indexes of the k nearest rows of *vectors* to
    *vector_to_classify* under the given *distance* function.

    Each row's feature columns (all but the trailing label column) are
    min-max scaled to [0, 1] before measuring, matching the scaling the
    caller applies to *vector_to_classify*.
    """
    distances = []
    for i in range(0, len(vectors)):
        x = vectors.loc[i, :]
        x = x[0:len(x) - 1]  # drop the class-label column
        x = min_max_scaler.fit_transform(x.values.astype(float).reshape(-1, 1))[:, 0]
        distances.append({"index": i,
                          "value": distance(x, vector_to_classify)})
    # BUG FIX: nearest neighbours have the SMALLEST distances, so sort
    # ascending; the previous reverse=True sort returned the k FARTHEST rows.
    distances = sorted(distances, key=lambda d: d['value'])
    indexes = list(map(lambda d: d['index'], distances[0:k]))
    return indexes
def euclidean_distance(x, y):
    """Euclidean (L2) distance between equal-length numeric vectors."""
    squared_diffs = ((x[i] - y[i]) ** 2 for i in range(x.size))
    return sum(squared_diffs) ** 0.5
def manhattan_distance(x, y):
    """Manhattan (L1) distance between equal-length numeric vectors."""
    return sum(abs(x[i] - y[i]) for i in range(x.size))
def maximum_metric(x, y):
    """Chebyshev (L-infinity) distance: the largest coordinate-wise
    absolute difference between equal-length numeric vectors."""
    return max((abs(x[i] - y[i]) for i in range(x.size)), default=0)
# Feature vectors to classify; columns follow data.csv's feature order.
vectors_to_classify = [np.array([1100000,60,1,2,1,500]),
                       np.array([1100000,60,1,2,1,500]),
                       np.array([1800000,65,1,2,1,1000]),
                       np.array([2300000,72,1,3,1,1400]),
                       np.array([3900000,110,2,3,1,1800])]
# The three metrics to evaluate the classifier under.
distances = [{'name':'Euclidean Distance','function':euclidean_distance},
             {'name':'Manhattan Distance','function':manhattan_distance},
             {'name':'Maximum Metric','function':maximum_metric}]
for distance in distances:
    print("Distance " + str(distance['name']))
    for k in [1,3,5]:
        print("K = " + str(k))
        for v in vectors_to_classify:
            # Scale the query vector the same way knn() scales each row.
            v = min_max_scaler.fit_transform(v.astype(float).reshape(-1,1))[:,0]
            print(classify(v,k,distance['function']))
|
egjimenezg/DataAnalysis
|
knn/knn.py
|
knn.py
|
py
| 2,364 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16404587226
|
from ksz.src import plot
import matplotlib.pyplot as plt
# Galaxy/group catalogue files to histogram (commented entries disabled).
data_path_list = [
    '/data/ycli/dr12/galaxy_DR12v5_LOWZ_North_TOT_wMASS.dat',
    '/data/ycli/dr12/galaxy_DR12v5_LOWZ_South_TOT_wMASS.dat',
    '/data/ycli/dr12/galaxy_DR12v5_CMASS_North_TOT_wMASS.dat',
    '/data/ycli/dr12/galaxy_DR12v5_CMASS_South_TOT_wMASS.dat',
    #'/data/ycli/6df/6dFGS_2MASS_RA_DEC_Z_J_K_bJ_rF_GOOD.cat',
    #'/data/ycli/group_catalog/6dFGS_M_group.dat',
    #'/data/ycli/group_catalog/6dFGS_L_group.dat',
    '/data/ycli/group_catalog/SDSS_M_group.dat',
    #'/data/ycli/group_catalog/SDSS_L_group.dat',
    '/data/ycli/cgc/CGC_wMASS.dat',
    ]
# Legend label for each active catalogue, same order as data_path_list.
label_list = [
    'LOWZ North CGC',
    'LOWZ South CGC',
    'CMASS North',
    'CMASS South',
    #'6dF',
    #'6dF mass-weighted halo center',
    #'6dF luminosity-weighted halo center',
    'DR13 Group',
    #'dr13 luminosity-weighted halo center',
    'DR7 CGC',
    ]
# Aperture values, one per catalogue (used by the r_vir plot when enabled).
ap_list = [
    7.,
    7.,
    #0.,
    #0.,
    8.,
    #11.,
    11.,
    #11.,
    7.,
    #7.,
    ]
# Only the halo-mass histogram is currently produced.
#plot.plot_stellarmass_hist(data_path_list, label_list)
plot.plot_halomass_hist(data_path_list, label_list)
#plot.plot_rvir_hist(data_path_list, label_list, rho_crit = 2.775e11, ap_list=ap_list)
#plot.plot_z_hist(data_path_list, label_list)
plt.show()
|
YichaoLi/pksz
|
plot_pipe/plot_stellar_mass.py
|
plot_stellar_mass.py
|
py
| 1,401 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28610424615
|
from __future__ import annotations
import json
import subprocess
import collections
import concurrent.futures
from os import path, system
from datetime import datetime
# Root folder holding the UI test suites referenced by the config file.
root_path = path.abspath("src/test_cases/UI")
# Folder where each suite's captured pytest output is written.
report_path = path.abspath("src/reports/concurrent_test_logs")
def generate_pytest_commands():
    """Build one (test node-id, browser) tuple per suite listed in
    config_run_multiple_test.json (located next to this module).

    Returns:
        list of OptionCmd namedtuples with fields (test_name, browser),
        where test_name is "suite_file::Class::test" in pytest node-id form.
    """
    config_run_test_dir = path.dirname(__file__)
    with open(path.join(config_run_test_dir, "config_run_multiple_test.json")) as f:
        config_data = json.load(f)
    list_test_suite = config_data['test_suite']
    # Hoisted out of the loop: the original re-declared this namedtuple
    # class on every iteration.
    options_cmd = collections.namedtuple('OptionCmd', ['test_name', 'browser'])
    pytest_run_cmds = []
    for suite in list_test_suite:
        # "pkg.Class.test" -> "pkg::Class::test" node-id notation.
        test_name = suite['test']['name'].replace(".", "::")
        browser_name = suite['test']['browser']
        test_suite_option = f"{suite['name']}::{test_name}"
        pytest_run_cmds.append(options_cmd(test_suite_option, browser_name))
    return pytest_run_cmds
def execute_pytest_cmd(option_cmd):
    """Run one pytest suite in a subprocess and return its captured stdout
    bytes.  *option_cmd* is an OptionCmd(test_name, browser) tuple."""
    pytest_process = subprocess.run(
        ["pytest", f"{root_path}\\{option_cmd.test_name}", f"--browser={option_cmd.browser}"],
        capture_output=True)
    return pytest_process.stdout
list_options_cmd = generate_pytest_commands()
# Run every configured suite concurrently, one worker thread per suite, and
# write each suite's captured output to its own timestamped log file.
with concurrent.futures.ThreadPoolExecutor(max_workers=len(list_options_cmd)) as executor:
    running_cmd = {executor.submit(execute_pytest_cmd, options): options for options in list_options_cmd}
    for completed_cmd in concurrent.futures.as_completed(running_cmd):
        test_ran = running_cmd[completed_cmd].test_name.split("::")[-1]
        # NOTE(review): browser_ran is currently unused.
        browser_ran = running_cmd[completed_cmd].browser
        try:
            # Timestamp (with microseconds) so reruns never overwrite logs.
            time_logging = datetime.now().strftime("%Y.%m.%d_(%H-%M-%S.%f)")
            with open(f"{report_path}\\Result_{test_ran}_{time_logging}.log", "wb") as f:
                f.write(completed_cmd.result())
        except Exception as exc:
            print(f"Pytest ran with error {exc}.")
|
huymapmap40/pytest_automation
|
src/config/parallel_test/run_parallel_test.py
|
run_parallel_test.py
|
py
| 2,068 |
python
|
en
|
code
| 1 |
github-code
|
6
|
13749339342
|
import ROOT
#from root_numpy import root2array, root2rec, tree2rec
import pylab,numpy,pickle
import matplotlib
# Global matplotlib styling for the effective-area plots.
pylab.rcParams['font.size'] = 14.0
pylab.rcParams['axes.labelsize']=18.0
pylab.rcParams['axes.titlesize']=20.0
pylab.rcParams['ytick.labelsize']='large'
pylab.rcParams['xtick.labelsize']='large'
pylab.rcParams['lines.markeredgewidth']=1.0
# Render text with LaTeX using a Computer Modern serif font.
pylab.rc ('text', usetex=True)
pylab.rc ('font', family='serif')
pylab.rc ('font', serif='Computer Modern Roman')
# Log10 of the time-window lengths (days) the event tables below use.
log_sigma_days = numpy.array([-5,-4,-3,-2,-1,-0.52287874528033762,0,1])
### NEW GENIE 1460 Included ###
# (value, error) pairs: E^-3-folded effective-area integrals per declination
# band, plus the solid-angle average and a systematics scale factor.
dec0_e3_foldedspectrum = (1072.916206382002,0)
dec16_e3_foldedspectrum = (1545.0315486757047,0)
dec30_e3_foldedspectrum = (1803.4879220886971,0)
dec45_e3_foldedspectrum = (1955.9670994116407,0)
dec60_e3_foldedspectrum = (2117.1599069802728,0)
dec75_e3_foldedspectrum = (2228.3197855702933,0)
sa_avg_foldedspectrum = (1654.0807981564465,0)
sys_adjustment = 0.89559693491089454
### Int(EffaE-3) (JF,RH)###
#samp2_e3_foldedspectrum_sum = (1759.219287256351,0) ## 100 GeV flux equal to 1.0 GeV^-1 cm^-2 s^-1
#samp2_e35_foldedspectrum_sum = (2925.5560058208703,0) ##
#samp2_e25_foldedspectrum_sum = (1320.5883336274608,0) ##
# Mean source-event counts per window length (one array entry per
# log_sigma_days value): sens_* for sensitivity, ul_* for upper limits,
# tabulated per declination band.
sens_e3_dec0_meansrc_events = numpy.array([6.4656,6.70643,6.7344,7.38432,10.4106,13.2816,16.2928,28.1549])
sens_e3_dec16_meansrc_events = numpy.array([6.4384,6.62176,6.79315,7.4096,10.5558,13.0896,16.5709,30.3184])
sens_e3_dec30_meansrc_events = numpy.array([7.632,7.32,7.54048,8.00864,10.68,12.6272,16.0406,27.1056])
sens_e3_dec45_meansrc_events = numpy.array([6.86976,6.87104,7.09792,8.60768,11.3456,12.983,16.1408,27.0288])
sens_e3_dec60_meansrc_events = numpy.array([6.77216,6.54144,7.29088,8.584,11.0262,13.2019,15.5658,24.368])
sens_e3_dec75_meansrc_events = numpy.array([5.6608,5.64512,5.95296,7.37824,10.8947,12.7984,15.9766,28.8221])
ul_e3_dec0_meansrc_events = numpy.array([7.5456,8.09952,9.06432,11.376,17.5674,22.2304,29.9581,60.232])
ul_e3_dec16_meansrc_events = numpy.array([7.77754,8.51104,9.67872,11.8336,18.1984,23.208,30.528,64.568])
ul_e3_dec30_meansrc_events = numpy.array([8.95392,9.34349,10.2138,12.5501,18.1462,22.568,29.6342,59.744])
ul_e3_dec45_meansrc_events = numpy.array([8.45888,8.73325,9.74496,12.8112,19.0477,22.5107,29.5024,59.3357])
ul_e3_dec60_meansrc_events = numpy.array([8.17261,8.74912,10.1846,13.3968,19.3747,23.0784,30.0032,57.7504])
ul_e3_dec75_meansrc_events = numpy.array([7.30272,7.66144,8.52512,11.688,19.0272,24.0032,31.9216,64.608])
ilow_en_bins = pickle.load(open("./pickles/effarea_low_energy_bins.pkl",'r'))
high_en_bins = pickle.load(open("./pickles/effarea_high_energy_bins.pkl",'r'))
genie_avg_area = pickle.load(open("./pickles/g1460_numu_effarea_avg.pkl",'r'))
genie_dec0_area = pickle.load(open("./pickles/g1460_numu_effarea_dec0.pkl",'r'))
genie_dec16_area = pickle.load(open("./pickles/g1460_numu_effarea_dec16.pkl",'r'))
genie_dec30_area = pickle.load(open("./pickles/g1460_numu_effarea_dec30.pkl",'r'))
genie_dec45_area = pickle.load(open("./pickles/g1460_numu_effarea_dec45.pkl",'r'))
genie_dec60_area = pickle.load(open("./pickles/g1460_numu_effarea_dec60.pkl",'r'))
genie_dec75_area = pickle.load(open("./pickles/g1460_numu_effarea_dec75.pkl",'r'))
nugen_avg_area = pickle.load(open("./pickles/g1460_nugmu_effarea_avg.pkl",'r'))
nugen_dec0_area = pickle.load(open("./pickles/g1460_nugmu_effarea_dec0.pkl",'r'))
nugen_dec16_area = pickle.load(open("./pickles/g1460_nugmu_effarea_dec16.pkl",'r'))
nugen_dec30_area = pickle.load(open("./pickles/g1460_nugmu_effarea_dec30.pkl",'r'))
nugen_dec45_area = pickle.load(open("./pickles/g1460_nugmu_effarea_dec45.pkl",'r'))
nugen_dec60_area = pickle.load(open("./pickles/g1460_nugmu_effarea_dec60.pkl",'r'))
nugen_dec75_area = pickle.load(open("./pickles/g1460_nugmu_effarea_dec75.pkl",'r'))
sa0 = 2*numpy.pi*((1-numpy.cos(numpy.deg2rad(95.))) - (1-numpy.cos(numpy.deg2rad(80.))))
sa16 = 2*numpy.pi*((1-numpy.cos(numpy.deg2rad(80.))) - (1-numpy.cos(numpy.deg2rad(65.))))
sa30 = 2*numpy.pi*((1-numpy.cos(numpy.deg2rad(65.))) - (1-numpy.cos(numpy.deg2rad(50.))))
sa45 = 2*numpy.pi*((1-numpy.cos(numpy.deg2rad(50.))) - (1-numpy.cos(numpy.deg2rad(35.))))
sa60 = 2*numpy.pi*((1-numpy.cos(numpy.deg2rad(35.))) - (1-numpy.cos(numpy.deg2rad(20.))))
sa75 = 2*numpy.pi*(1-numpy.cos(numpy.deg2rad(20.)))
saTotal = 2*numpy.pi*(1-numpy.cos(numpy.deg2rad(95.)))
sky_frac = [0.23989563791056959, 0.22901050354066707, 0.20251868181221927, 0.16222554659621455, 0.11087700847006936, 0.055472621670260208]
fluxnorm_dec16_e3 = ul_e3_dec16_meansrc_events/dec16_e3_foldedspectrum[0]
fluxnorm_dec0_e3 = ul_e3_dec0_meansrc_events/dec0_e3_foldedspectrum[0]
fluxnorm_dec30_e3 = ul_e3_dec30_meansrc_events/dec30_e3_foldedspectrum[0]
fluxnorm_dec45_e3 = ul_e3_dec45_meansrc_events/dec45_e3_foldedspectrum[0]
fluxnorm_dec60_e3 = ul_e3_dec60_meansrc_events/dec60_e3_foldedspectrum[0]
fluxnorm_dec75_e3 = ul_e3_dec75_meansrc_events/dec75_e3_foldedspectrum[0]
uls = [ul_e3_dec0_meansrc_events,ul_e3_dec16_meansrc_events,ul_e3_dec30_meansrc_events,ul_e3_dec45_meansrc_events,ul_e3_dec60_meansrc_events,ul_e3_dec75_meansrc_events]
event_ul_avg_list = [uls[i]*sky_frac[i] for i in range(len(sky_frac))]
event_ul_avg = numpy.array([0.,0.,0.,0.,0.,0.,0.,0.])
for listy in event_ul_avg_list:
event_ul_avg+=listy
fluxnorm_sa_avg_e3 = event_ul_avg / sa_avg_foldedspectrum[0]
#fluxnorm_0 = sens_bdt0_e3_meansrc_events/samp2_e3_foldedspectrum_sum[0]
#fluxnorm_0_disco = disco_bdt0_e3_meansrc_events/samp2_e3_foldedspectrum_sum[0]
#fluxnorm_0_25 = sens_bdt0_e25_meansrc_events/samp2_e25_foldedspectrum_sum[0]
#fluxnorm_0_35 = sens_bdt0_e35_meansrc_events/samp2_e35_foldedspectrum_sum[0]
pylab.figure()
pylab.plot(log_sigma_days,event_ul_avg,'k-',lw=2,label="Averaged")
pylab.plot(log_sigma_days,ul_e3_dec0_meansrc_events,'k--',lw=2,label=r"$\delta=0^{\circ}$")
pylab.plot(log_sigma_days,ul_e3_dec30_meansrc_events,'k-.',lw=2,label=r"$\delta=30^{\circ}$")
pylab.plot(log_sigma_days,ul_e3_dec60_meansrc_events,'k:',lw=2,label=r"$\delta=60^{\circ}$")
pylab.xlabel(r'$Log_{10}(\sigma_{\omega})\quad (Days)$')
pylab.ylabel("NSrc Events")
pylab.axis([-5,1,3,60])
pylab.grid()
pylab.legend(loc="upper left")
matplotlib.pyplot.gcf().subplots_adjust(right=.85)
pylab.title(r"Upper Limit $E^{-3}$ 90% C.L.")
pylab.savefig("LowEnTransient_NEventUpperLimit_E3_G1460_MultiDec")
fig1=pylab.figure()
pylab.plot(log_sigma_days,event_ul_avg,'k-',lw=2)
#pylab.plot(0.77011529478710161,13.5279,"w*",ms=20.0,label="Most Significant Flare")
#pylab.plot(log_sigma_days,disco_bdt0_e3_meansrc_events,'k-',lw=2,label="Discovery Potential")
pylab.xlabel(r'$Log_{10}(\sigma_{\omega})\quad$ (Days)')
pylab.ylabel("NSrc Events")
pylab.axis([-5,1,0,62])
pylab.grid()
pylab.legend(loc="upper left")
matplotlib.pyplot.gcf().subplots_adjust(right=.85)
pylab.title(r"Upper Limit $E^{-3}$ 90$\%$ C.L.")
pylab.savefig("LowEnTransient_NEventUpperLimit_E3_G1460_Avg.pdf")
figgy=pylab.figure()
ax = figgy.add_subplot(111)
pylab.plot(log_sigma_days,fluxnorm_sa_avg_e3,'k-',lw=2,label=r"$E^{-3.0}$")
pylab.xlabel(r'$Log_{10}(\sigma_{\omega})\quad$ (Days)')
pylab.ylabel(r"$\frac{dN}{dE}$ @ 100 GeV ($10^{-2}$GeV$^{-1}$ cm$^{-2}$)")
pylab.axis([-5,1,0.00,0.037483054073961818])
pylab.yticks([0.0060456538828970677,0.012091307765794135,0.018136961648691202,0.024182615531588271,0.030228269414485337,0.036273923297382403],["0.6","1.21","1.81","2.42","3.02","3.63"])
ax.yaxis.tick_right()
ax.yaxis.set_label_position("right")
matplotlib.pyplot.gcf().subplots_adjust(right=.85)
pylab.grid()
#pylab.legend(loc="upper left")
pylab.title(r"Time-Integrated Flux Upper Limit $E^{-3}$")
pylab.savefig("LowEnTransient_FluxUpperLimit_E3_G1460_Avg.pdf")
figgy=pylab.figure()
ax = figgy.add_subplot(111)
pylab.plot(log_sigma_days,event_ul_avg,'k-',lw=2,label=r"$E^{-3.0}$")
pylab.xlabel(r'$Log_{10}(\sigma_{\omega})\quad$ (Days)')
pylab.ylabel("NSrc Events")
pylab.axis([-5,1,0.00,62])
pylab.yticks([ 0., 10., 20., 30., 40., 50., 60.])
pylab.grid()
ax2 = ax.twinx()
ax2.set_ylim(0,0.037483054073961818)
ax2.set_xlim(-5,1)
ax2.set_yticks([0.0060456538828970677,0.012091307765794135,0.018136961648691202,0.024182615531588271,0.030228269414485337,0.036273923297382403])
ax2.set_yticklabels(["0.6","1.21","1.81","2.42","3.02","3.63"])
ax2.set_ylabel(r"$\frac{dN}{dE}$ @ 100 GeV ($10^{-2}$GeV$^{-1}$ cm$^{-2}$)")
matplotlib.pyplot.gcf().subplots_adjust(right=.85)
#pylab.legend(loc="upper left")
pylab.title(r"Time-Integrated Flux Upper Limit $E^{-3}$")
pylab.savefig("LowEnTransient_FluxUpperLimit_E3_G1460_Avg_DoubleY.pdf")
figgy=pylab.figure()
ax = figgy.add_subplot(111)
pylab.plot(log_sigma_days,fluxnorm_dec0_e3,'k--',lw=2,label=r"$\delta = 0^{\circ}$")
pylab.plot(log_sigma_days,fluxnorm_dec16_e3,'k-',lw=2,label=r"$\delta = 16^{\circ}$")
pylab.plot(log_sigma_days,fluxnorm_dec30_e3,'k-.',lw=2,label=r"$\delta = 30^{\circ}$")
pylab.plot(log_sigma_days,fluxnorm_dec60_e3,'k:',lw=2,label=r"$\delta = 60^{\circ}$")
pylab.xlabel(r'$Log_{10}(\sigma_{\omega})\quad$ (Days)')
pylab.ylabel(r"$\frac{dN}{dE}$ @ 100 GeV ($10^{-2}$GeV$^{-1}$ cm$^{-2}$)")
pylab.axis([-5,1,0.00,0.058])
pylab.yticks([0.00 , 0.00828571, 0.01657143, 0.02485714, 0.03314286, 0.04142857, 0.04971429, 0.058],["0.0","0.83","1.7","2.5","3.3","4.1","5.0","5.8"])
ax.yaxis.tick_right()
ax.yaxis.set_label_position("right")
matplotlib.pyplot.gcf().subplots_adjust(right=.85)
pylab.grid()
pylab.legend(loc="upper left")
pylab.title(r"Time-Integrated Flux Upper Limit $E^{-3}$")
pylab.savefig("LowEnTransient_FluxUpperLimit_E3_G1460_MultiDec.pdf")
'''
pylab.figure(figsize=(10,8))
pylab.plot(log_sigma_days,fluxnorm_0,'b-',lw=2,label='Sensitivity (90% C.L.)')
pylab.xlabel(r'$Log_{10}(\sigma_{\omega})\quad (Days)$')
pylab.ylabel(r"$\frac{dN}{dE}$ [$GeV^{-1} cm^{-2} s^{-1}$] @ 100 GeV Pivot Energy")
#pylab.axis([-5,1,5e3,5e4])
pylab.yticks([0.001,0.005,0.01,0.015,0.02,0.025],["$1e-3$","$5.0e-3$","$1.0e-2$","1.5e-2","2.0e-2","2.5e-2"])
pylab.grid()
pylab.legend(loc="upper left")
pylab.title(r"Flux Sensitivity (MergedSim) $E^{-3}$")
pylab.savefig("LowEnTransient_FluenceSensitivity_E3_MergedSim_FinalCut")
pylab.figure(figsize=(10,8))
pylab.plot(log_sigma_days,fluxnorm_0,'b-',lw=2,label='Sensitivity (90% C.L.)')
pylab.plot(log_sigma_days,fluxnorm_0_disco,'k-',lw=2,label='Discovery Potential')
pylab.xlabel(r'$Log_{10}(\sigma_{\omega})\quad (Days)$')
pylab.ylabel(r"$\frac{dN}{dE}$ [$GeV^{-1} cm^{-2} s^{-1}$] @ 100 GeV Pivot Energy")
#pylab.axis([-5,1,5e3,5e4])
pylab.yticks([0.001,0.005,0.01,0.015,0.02,0.025],["$1e-3$","$5.0e-3$","$1.0e-2$","1.5e-2","2.0e-2","2.5e-2"])
pylab.grid()
pylab.legend(loc="upper left")
pylab.title(r"Flux Sensitivity (MergedSim) $E^{-3}$")
pylab.savefig("LowEnTransient_FluenceSensitivityAndDisco_E3_MergedSim_FinalCut")
pylab.figure(figsize=(10,8))
pylab.plot(log_sigma_days,merged_samp1_e2_meansrc_events,'g-',lw=2,label='Sample 1')
pylab.plot(log_sigma_days,merged_samp2_e2_meansrc_events,'b-',lw=2,label='Sample 2')
pylab.xlabel(r'$Log_{10}(\sigma_{\omega})\quad (Days)$')
pylab.ylabel("NSrc Events")
pylab.axis([-6,1,3,15])
pylab.grid()
pylab.title("Sensitivity (MergedSim)")
pylab.legend(loc='upper left')
pylab.savefig("LowEnTransient_DiscoPotential_E2_MergedSim_SampleComparison")
pylab.figure(figsize=(10,8))
pylab.plot(log_sigma_days,nugen_samp1_e2_meansrc_events,'g--',lw=2,label='Sample 1 (Nugen)')
pylab.plot(log_sigma_days,nugen_samp2_e2_meansrc_events,'b--',lw=2,label='Sample 2 (Nugen)')
pylab.plot(log_sigma_days,merged_samp1_e2_meansrc_events,'g-',lw=2,label='Sample 1 (MergedSim)')
pylab.plot(log_sigma_days,merged_samp2_e2_meansrc_events,'b-',lw=2,label='Sample 2 (MergedSim)')
pylab.xlabel(r'$Log_{10}(\sigma_{\omega})\quad (Days)$')
pylab.ylabel("NSrc Events")
pylab.axis([-6,1,3,15])
pylab.grid()
pylab.title("Sensitivity")
pylab.legend(loc='upper left')
pylab.savefig("LowEnTransient_DiscoPotential_E2_NugenANDMerged_SampleComparison")
'''
|
daughjd/bashscripts
|
PaperPlotter.py
|
PaperPlotter.py
|
py
| 11,938 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6671408695
|
from netpyne import specs
def set_netParams(Nin, Pops, Exc_ThtoAll, Exc_AlltoAll, Inh_AlltoAll):
    """Build and return the NetPyNE network parameters.

    Parameters
    ----------
    Nin : int
        Number of thalamic (VecStim) input cells.
    Pops : dict
        Cortical populations keyed by '1'..'N'; each entry carries
        'Label', 'Layer' and a 'list' of individual cell specs.
    Exc_ThtoAll, Exc_AlltoAll, Inh_AlltoAll : dict
        Connection tables with 'connList_gID', 'weightList' and 'delayList'.

    Returns
    -------
    specs.NetParams
        Fully populated network-parameter object.
    """
    netParams = specs.NetParams()
    netParams.defaultThreshold = 0.0

    # Generic Izhikevich point-neuron cell rule.  Soma area of 100 um^2 so
    # point-process currents are expressed in [mA/cm2].
    soma = {
        'geom': {'diam': 6.366, 'L': 5.0, 'cm': 1.0},
        'pointps': {
            'Izhi': {
                'mod': 'Izhi2007b_dyn_thr',
                'C': 1,
                'k': 0.04,
                'vpeak': 10.0,
                'celltype': 1,
            }
        },
    }
    netParams.cellParams['IzhiCell'] = {'secs': {'soma': soma}}

    # Thalamic input population is defined first so the global cell IDs read
    # from the MATLAB connection tables line up with the thalamic cells.
    netParams.popParams['artificial'] = {
        'cellModel': 'VecStim', 'numCells': Nin, 'spkTimes': [1] * Nin,
        'xRange': [-0.01, 0.01], 'yRange': [0, 0.01], 'zRange': [-0.01, 0.01]}

    # Cortical populations follow, in numeric key order '1'..'N'.
    for idx in range(1, len(Pops) + 1):
        pop = Pops[str(idx)]
        name = pop['Label'] + '-' + pop['Layer']
        netParams.popParams[name] = {'cellType': 'IzhiCell', 'cellsList': pop['list']}

    # One generic excitatory and one inhibitory synapse model.
    for label, sign in (('exc', 1), ('inh', -1)):
        netParams.synMechParams[label] = {
            'mod': 'FluctExp2Syn', 'tau_rise': 1.0, 'tau_fall': 2.0,
            'cn': 4.0, 'type': sign}

    # Explicit connection lists ("connList" entries use IDs relative to the
    # matching preConds/postConds populations).
    connection_specs = (
        ('Thalamus->All_exc', {'pop': 'artificial'}, Exc_ThtoAll, 'exc'),
        ('All->All_exc', {'cellType': 'IzhiCell'}, Exc_AlltoAll, 'exc'),
        ('All->All_inh', {'cellType': 'IzhiCell'}, Inh_AlltoAll, 'inh'),
    )
    for label, pre_conds, table, mech in connection_specs:
        netParams.connParams[label] = {
            'preConds': pre_conds,           # conditions of presyn cells
            'postConds': {'cellType': 'IzhiCell'},  # conditions of postsyn cells
            'connList': table['connList_gID'],      # list of conns
            'weight': table['weightList'],          # synaptic weight
            'delay': table['delayList'],            # transmission delay (ms)
            'synMech': mech}

    return netParams
|
DepartmentofNeurophysiology/Cortical-representation-of-touch-in-silico-NetPyne
|
netParams.py
|
netParams.py
|
py
| 3,439 |
python
|
en
|
code
| 1 |
github-code
|
6
|
10528777232
|
import pygame
from pygame.sprite import Sprite
class Tiro(Sprite):
    """Sprite representing a shot fired by the ship."""

    def __init__(self, ik_game):
        """Create a shot at the ship's current position."""
        super().__init__()
        self.screen = ik_game.screen
        self.configuracoes = ik_game.configuracoes
        self.cor = self.configuracoes.tiro_cor

        # Build the shot rect at the origin, then snap it to the ship's nose.
        largura = self.configuracoes.tiro_width
        altura = self.configuracoes.tiro_height
        self.rect = pygame.Rect(0, 0, largura, altura)
        self.rect.midtop = ik_game.nave.rect.midtop

        # Track the vertical position as a float for smooth sub-pixel motion.
        self.y = float(self.rect.y)

    def update(self):
        """Move the shot upwards on the screen."""
        self.y -= self.configuracoes.tiro_vel
        self.rect.y = self.y

    def draw_tiro(self):
        """Draw the shot on the screen."""
        pygame.draw.rect(self.screen, self.cor, self.rect)
|
ruansmachado/Invasao_Klingon
|
tiro.py
|
tiro.py
|
py
| 1,130 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
26345275028
|
# Collect posts and their reactions from console commands until the sentinel
# line "drop the media" is entered, then print a summary per post.
posts = {}
while True:
    line = input()
    if line == "drop the media":
        break
    tokens = line.split(" ")
    command, post_name = tokens[0], tokens[1]
    if command == "post":
        posts[post_name] = {"Likes": 0, "Dislikes": 0, "Comments": {}}
    elif command == "like":
        posts[post_name]["Likes"] += 1
    elif command == "dislike":
        posts[post_name]["Dislikes"] += 1
    elif command == "comment":
        # tokens[2] is the commenter; the rest of the line is the comment text.
        posts[post_name]["Comments"][tokens[2]] = tokens[3:]

for post_name, data in posts.items():
    print("Post: {} | Likes: {} | Dislikes: {}\nComments:". format(
        post_name, data['Likes'], data['Dislikes'])
    )
    if data["Comments"] == {}:
        print("None")
    else:
        for author, words in data["Comments"].items():
            print("* {}: {}".format(author, " ".join(words)))
|
YovchoGandjurov/Python-Fundamentals
|
02. Lists and Dictionaries/Dictionaries/05.Social_Media_Posts.py
|
05.Social_Media_Posts.py
|
py
| 856 |
python
|
en
|
code
| 1 |
github-code
|
6
|
71548544188
|
import math
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
from collections import OrderedDict
from modules import CompactBasicBlock, BasicBlock, Bottleneck, DAPPM, segmenthead, GhostBottleneck
bn_mom = 0.1
BatchNorm2d = nn.BatchNorm2d
class CompactDualResNet(nn.Module):
    """Dual-resolution segmentation backbone.

    Maintains two parallel branches after stage 2: a low-resolution context
    branch (layer3/layer4/layer5 + DAPPM) and a high-resolution branch
    (layer3_/layer4_/layer5_) kept at 1/8 of the input size, with bilateral
    fusion (compression*/down*) between them.  Output logits are produced at
    1/8 of the input spatial resolution.

    Args:
        block: residual block class for the standard stages (e.g. BasicBlock).
        layers: number of blocks per stage, length 4.
        num_classes: number of segmentation classes.
        planes: base channel width.
        spp_planes: internal width of the DAPPM module.
        head_planes: hidden width of the segmentation head(s).
        augment: if True, also return an auxiliary prediction from the
            high-resolution branch (training-time deep supervision).
    """
    def __init__(self, block, layers, num_classes=19, planes=64, spp_planes=128, head_planes=128, augment=True):
        super(CompactDualResNet, self).__init__()
        highres_planes = planes * 2
        self.augment = augment
        # Stem: two stride-2 convs -> feature map at 1/4 input resolution.
        # NOTE(review): the BatchNorm layers here are commented out in the
        # original — presumably intentional for this "compact" variant; confirm.
        self.conv1 =  nn.Sequential(
                          nn.Conv2d(3,planes,kernel_size=3, stride=2, padding=1),
                          #BatchNorm2d(planes, momentum=bn_mom),
                          nn.ReLU(inplace=True),
                          nn.Conv2d(planes,planes,kernel_size=3, stride=2, padding=1),
                          #BatchNorm2d(planes, momentum=bn_mom),
                          nn.ReLU(inplace=True),
                      )
        # Shared non-inplace ReLU (safe to reuse on tensors needed later).
        self.relu = nn.ReLU(inplace=False)
        # Low-resolution (context) branch; layer4 uses the compact block.
        self.layer1 = self._make_layer(block, planes, planes, layers[0])
        self.layer2 = self._make_layer(block, planes, planes * 2, layers[1], stride=2)
        self.layer3 = self._make_layer(block, planes * 2, planes * 4, layers[2], stride=2)
        self.layer4 = self._make_layer(CompactBasicBlock, planes * 4, planes * 8, layers[3], stride=2)
        # 1x1 convs that compress context features for fusion into the
        # high-resolution branch.
        self.compression3 = nn.Sequential(
                                          nn.Conv2d(planes * 4, highres_planes, kernel_size=1, bias=False),
                                          BatchNorm2d(highres_planes, momentum=bn_mom),
                                          )
        self.compression4 = nn.Sequential(
                                          nn.Conv2d(planes * 8, highres_planes, kernel_size=1, bias=False),
                                          BatchNorm2d(highres_planes, momentum=bn_mom),
                                          )
        # Strided convs that downsample high-resolution features for fusion
        # into the context branch (down4 downsamples twice).
        self.down3 = nn.Sequential(
                                   nn.Conv2d(highres_planes, planes * 4, kernel_size=3, stride=2, padding=1, bias=False),
                                   BatchNorm2d(planes * 4, momentum=bn_mom),
                                   )
        self.down4 = nn.Sequential(
                                   nn.Conv2d(highres_planes, planes * 4, kernel_size=3, stride=2, padding=1, bias=False),
                                   BatchNorm2d(planes * 4, momentum=bn_mom),
                                   nn.ReLU(inplace=True),
                                   nn.Conv2d(planes * 4, planes * 8, kernel_size=3, stride=2, padding=1, bias=False),
                                   BatchNorm2d(planes * 8, momentum=bn_mom),
                                   )
        # High-resolution branch (kept at 1/8 scale).
        self.layer3_ = self._make_layer(block, planes * 2, highres_planes, 2)
        self.layer4_ = self._make_layer(block, highres_planes, highres_planes, 2)
        # Final stages use Ghost bottlenecks for efficiency.
        self.layer5_ = self._make_ghost_bottleneck(GhostBottleneck, highres_planes , highres_planes, 1)
        self.layer5 =  self._make_ghost_bottleneck(GhostBottleneck, planes * 8, planes * 8, 1, stride=2)
        # Deep Aggregation Pyramid Pooling Module on the context branch.
        self.spp = DAPPM(planes * 16, spp_planes, planes * 4)
        if self.augment:
            # Auxiliary head on the high-resolution branch (deep supervision).
            self.seghead_extra = segmenthead(highres_planes, head_planes, num_classes)
        self.final_layer = segmenthead(planes * 4, head_planes, num_classes)
        # Standard He init for convs; BN scale=1, shift=0.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def _make_layer(self, block, inplanes, planes, blocks, stride=1):
        """Stack `blocks` residual blocks; 1x1-conv downsample shortcut when
        the stride or channel count changes.  The last block is built with
        no_relu=True so the caller controls the final activation."""
        downsample = None
        if stride != 1 or inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion, momentum=bn_mom),
            )
        layers = []
        layers.append(block(inplanes, planes, stride, downsample))
        inplanes = planes * block.expansion
        for i in range(1, blocks):
            if i == (blocks-1):
                layers.append(block(inplanes, planes, stride=1, no_relu=True))
            else:
                layers.append(block(inplanes, planes, stride=1, no_relu=False))
        return nn.Sequential(*layers)

    def _make_divisible(self, v, divisor, min_value=None):
        """Round `v` to the nearest multiple of `divisor`, never going below
        min_value and never dropping more than 10% below `v`."""
        if min_value is None:
            min_value = divisor
        new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
        if new_v < 0.9 * v:
            new_v += divisor
        return new_v

    def _make_ghost_bottleneck(self, block, inplanes, planes, blocks, stride=1):
        """Build a (single-entry) GhostBottleneck stage.

        Output channels double when the stride/width change, mirroring the
        channel progression of the residual stages.  `blocks` is currently
        unused — the cfg table has exactly one row.
        """
        if stride != 1 or inplanes != planes * 2:
            out_channel = planes * 2
        else:
            out_channel = planes
        cfg = [[3, 96, out_channel, 0, 1]] # k, t, c, SE, s
        input_channel = inplanes
        layers = []
        for k, exp_size, c, se_ratio, s in cfg:
            output_channel = c
            hidden_channel = self._make_divisible(exp_size, 4)
            layers.append(block(input_channel, hidden_channel, output_channel, k, s, se_ratio=se_ratio))
            input_channel = output_channel
        return nn.Sequential(*layers)

    def forward(self, x):
        """Run the dual-branch forward pass.

        Returns [aux_logits, logits] when self.augment is True, otherwise
        just the logits; both are at 1/8 of the input spatial size.
        """
        # Target spatial size of every fusion/interpolation step: 1/8 input.
        width_output = x.shape[-1] // 8
        height_output = x.shape[-2] // 8
        layers = []
        x = self.conv1(x)
        x = self.layer1(x)
        layers.append(x)
        x = self.layer2(self.relu(x))
        layers.append(x)
        x = self.layer3(self.relu(x))
        layers.append(x)
        # High-res branch starts from the stage-2 output (layers[1]).
        x_ = self.layer3_(self.relu(layers[1]))
        # Bilateral fusion #1: high-res -> context (down3) and
        # context -> high-res (compression3 + upsampling).
        x = x + self.down3(self.relu(x_))
        x_ = x_ + F.interpolate(
                        self.compression3(self.relu(layers[2])),
                        size=[height_output, width_output],
                        mode='bilinear')
        if self.augment:
            # Keep a tap for the auxiliary head before further fusion.
            temp = x_
        x = self.layer4(self.relu(x))
        layers.append(x)
        x_ = self.layer4_(self.relu(x_))
        # Bilateral fusion #2.
        x = x + self.down4(self.relu(x_))
        x_ = x_ + F.interpolate(
                        self.compression4(self.relu(layers[3])),
                        size=[height_output, width_output],
                        mode='bilinear')
        x_ = self.layer5_(self.relu(x_))
        # Context branch: final stage + pyramid pooling, upsampled to 1/8.
        x = F.interpolate(
                        self.spp(self.layer5(self.relu(x))),
                        size=[height_output, width_output],
                        mode='bilinear')
        # Merge both branches and predict.
        x_ = self.final_layer(x + x_)
        if self.augment:
            x_extra = self.seghead_extra(temp)
            return [x_extra, x_]
        else:
            return x_
def get_seg_model(cfg, **kwargs):
    """Factory returning the 19-class CompactDualResNet configuration.

    ``cfg`` and ``kwargs`` are accepted for interface compatibility with the
    caller but are not consulted; the architecture is fixed here.
    """
    return CompactDualResNet(
        BasicBlock,
        [2, 2, 2, 2],
        num_classes=19,
        planes=32,
        spp_planes=128,
        head_planes=64,
        augment=True,
    )
if __name__ == '__main__':
    # Standalone inference-speed benchmark; requires a CUDA device.
    import time
    device = torch.device('cuda')
    #torch.backends.cudnn.enabled = True
    #torch.backends.cudnn.benchmark = True
    model = CompactDualResNet(BasicBlock, [2, 2, 2, 2], num_classes=11, planes=32, spp_planes=128, head_planes=64)
    model.eval()
    model.to(device)
    iterations = None
    #input = torch.randn(1, 3, 1024, 2048).cuda()
    # NOTE(review): `input` shadows the builtin of the same name — harmless
    # here but worth renaming.
    input = torch.randn(1, 3, 720, 960).cuda()
    with torch.no_grad():
        # Warm-up passes (kernel compilation, allocator warm-up).
        for _ in range(10):
            model(input)
        if iterations is None:
            # Calibration: double the iteration count until one timed batch
            # takes at least 1 s, then size the real run to roughly 6 s.
            elapsed_time = 0
            iterations = 100
            while elapsed_time < 1:
                # synchronize() ensures all queued GPU work is done before and
                # after timing.  NOTE(review): the doubled calls appear
                # redundant — one before t_start and one after the loop suffice.
                torch.cuda.synchronize()
                torch.cuda.synchronize()
                t_start = time.time()
                for _ in range(iterations):
                    model(input)
                torch.cuda.synchronize()
                torch.cuda.synchronize()
                elapsed_time = time.time() - t_start
                iterations *= 2
            FPS = iterations / elapsed_time
            iterations = int(FPS * 6)
        print('=========Speed Testing=========')
        # Measured run: report per-inference latency in milliseconds.
        torch.cuda.synchronize()
        torch.cuda.synchronize()
        t_start = time.time()
        for _ in range(iterations):
            model(input)
        torch.cuda.synchronize()
        torch.cuda.synchronize()
        elapsed_time = time.time() - t_start
        latency = elapsed_time / iterations * 1000
    torch.cuda.empty_cache()
    FPS = 1000 / latency
    print(FPS)
|
himlen1990/cddrnet
|
utils/speed_test/cddrnet_eval_speed.py
|
cddrnet_eval_speed.py
|
py
| 8,667 |
python
|
en
|
code
| 1 |
github-code
|
6
|
9637017975
|
from selenium import webdriver
from selenium.webdriver.edge.service import Service
from selenium.webdriver.common.by import By
from time import sleep
class InternetSpeed:
def __init__(self, edge_driver_path):
self.driver = webdriver.Edge(service=Service(edge_driver_path))
self.down = 0
self.up = 0
self.get_internet_speed()
def get_internet_speed(self):
speedtest_url = "https://www.speedtest.net/"
self.driver.get(speedtest_url)
sleep(10)
start_test = self.driver.find_element(by=By.XPATH,
value='//*[@id="container"]/div/div[3]/div/div/div/div[2]/div[3]/div[1]/a')
start_test.click()
sleep(60)
self.down = self.driver.find_element(by=By.XPATH,
value='//*[@id="container"]/div/div[3]/div/div/div/div[2]/div[3]/div[3]/div/div[3]/div/div/div[2]/div[1]/div[2]/div/div[2]/span').text
self.up = self.driver.find_element(by=By.XPATH,
value='//*[@id="container"]/div/div[3]/div/div/div/div[2]/div[3]/div[3]/div/div[3]/div/div/div[2]/div[1]/div[3]/div/div[2]/span').text
print(self.down)
print(self.up)
self.driver.quit()
|
na-lin/100-days-of-Python
|
Day51_Internet-Speed-Twitter-Complaint-Bot/internet_speed.py
|
internet_speed.py
|
py
| 1,309 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74658795066
|
from lindertree.lsystem import *
from lindertree.turtle_interprate import *
axiom = string_to_symbols('!(1)F(5)X')
constants = {'w':1.4, 'e':1.6, 'a':1.1}
width_rule = Rule.from_string('!(x)', '!(x*w)', constants)
elongation_rule = Rule.from_string('F(x)', 'F(x*e)', constants)
angle_rule1 = Rule.from_string('+(x)', '+(x*a)', constants)
angle_rule2 = Rule.from_string('-(x)', '-(x*a)', constants)
branching_rule = Rule.from_string('X', '!(1)[+(25)F(2)X]F(2)[-(25)F(2)X]!(1)F(5)X', constants)
rules = [width_rule, elongation_rule, branching_rule, angle_rule1, angle_rule2]
print('Axiom : ' + symbols_to_string(axiom))
print('Rules : ')
for rule in rules:
print('- ' + str(rule))
symbols = generate_lsystem(8, axiom, rules)
print(symbols_to_string(symbols))
turtle_interprate(symbols, init_pos=(0,-400))
|
valentinlageard/lindertree
|
example_parametric.py
|
example_parametric.py
|
py
| 807 |
python
|
en
|
code
| 1 |
github-code
|
6
|
28039146623
|
#! /usr/bin/env python3
__author__ = 'Amirhossein Kargaran 9429523 '
import os
import sys
import socket
import pickle
import select
import signal
import threading
import time
from threading import Thread
from datetime import datetime
# Local modules
from APIs.logging import Log
from APIs.logging import Color
from APIs.security import *
from Crypto.Random import random
from filelock import FileLock
# Shared chat-log file; concurrent writers are serialized by a file lock.
file_path = "result.txt"
lock_path = "result.txt.lock"
lock = FileLock(lock_path, timeout=1)
# Declare Global variables
PORT = 5558           # next per-client listening port; incremented in Client.__init__
TERMINATE = False     # set True to stop the accept loop in Server.init_clients
CLI_HASH = {}         # maps an accepted socket -> its Client handler object
KEY = ''              # module-level key slot; per-client keys live on Client.KEY
ll = list()           # in-memory chat history lines, flushed to file_path on "bye"
class Server():
    """Interactive chat-room server.

    Owns the listening socket, spawns a Client handler per accepted
    connection (see init_clients) and runs an admin prompt on the local
    console (see srv_prompt).
    """
    def __init__(self):
        # Default bind address / capacity; may be overridden from the command
        # line or interactively via set_usr_config().
        self.HOST_IP = '0.0.0.0'
        self.HOST_PORT = '8081'
        self.MAX_USR_ACCPT = '100'

    def show_help(self):
        """Print the admin-prompt command summary."""
        # Raw string: the command mnemonics contain backslashes (\h, \sd, ...)
        # which are not valid escape sequences in a normal string literal.
        msg = r'''
        AVAILABLE COMMANDS:
            \h Print these information
            \d Set default configuration
            \sd Show default configuration
            \sc Show current configuration
            \sau Show active users
            \sac Show active chat rooms
            \sf Shutdown server forcefully
            \monitor Enables monitor mode'''
        print(msg)

    def show_config(self, type_='default'):
        """Print either the currently active or the default configuration."""
        if type_ in ('active', 'ACTIVE'):
            msg = '''
        Active configuration of the server :
            HOST IP = ''' + self.HOST_IP + '''
            HOST PORT = ''' + self.HOST_PORT + '''
            MAX USER ALLOWED = ''' + self.MAX_USR_ACCPT
            logging.log('Showing Active server configuration')
            print(msg)
        else:
            msg = '''
        Default configuration of the server:
            HOST IP = 0.0.0.0
            HOST PORT = 8081
            MAX USER ALLOWED = 100'''
            print(msg)

    def set_usr_config(self, parameters):
        """Populate HOST_IP/HOST_PORT/MAX_USR_ACCPT from argv (parameters=True)
        or interactively (parameters=False)."""
        if parameters:
            if sys.argv[1] in ('-h', '--help'):
                self.show_help()
            try:
                self.HOST_IP = sys.argv[1]
                self.HOST_PORT = sys.argv[2]
                self.MAX_USR_ACCPT = sys.argv[3]
            except:
                print('USAGE:\nscript ip_address port_number max_usr_accpt')
                sys.exit(0)
        else:
            self.HOST_IP = input('Enter host IP : ')
            self.HOST_PORT = input('Enter host PORT : ')
            self.MAX_USR_ACCPT = input('Enter max number of users server would accept : ')

    def update_active_users(self):
        """Rebuild self.user_list from the currently connected Client objects."""
        self.user_list = []
        for cli_obj in CLI_HASH.values():
            self.user_list.append(cli_obj.userName)

    def signal_handler(self, signal, frame):
        # Ctrl-C handler used while monitor mode is active.
        print(' has been pressed.\n')

    def srv_prompt(self):
        """Admin REPL: dispatch backslash-prefixed console commands.

        Raw strings are used for the command literals so the backslashes are
        taken verbatim (matching what a user actually types).
        """
        # TODO: Add feature to view server socket status
        # NOTE(review): '\sac' is advertised in show_help but not implemented.
        global TERMINATE
        while True:
            opt = input(Color.PURPLE + '\nenter command $ ' + Color.ENDC)
            if opt == r'\h':
                self.show_help()
            elif opt == r'\monitor':
                # Monitor mode: un-silence logging until Ctrl-C is pressed.
                print('Monitoring mode ENABLED!')
                logging.silent_flag = False
                signal.signal(signal.SIGINT, self.signal_handler)
                signal.pause()
                print('Monitoring mode DISABLED')
                logging.silent_flag = True
            elif opt == r'\sd':
                self.show_config(type_='default')
            elif opt == r'\sc':
                self.show_config(type_='active')
            elif opt == r'\sau':
                self.update_active_users()
                logging.log(self.user_list)
                print(self.user_list)
            elif opt == r'\sf':
                print(Color.WARNING +
                    'WARNING: All users will be disconnected with out any notification!!' +
                    Color.ENDC)
                opt = input('Do you really want to close server?[Y/N] ')
                if opt == 'Y':
                    logging.log('Shuting down server...')
                    print('Shuting down server...')
                    TERMINATE = True
                    sys.exit(0)
                else:
                    logging.log('Aborted.')
                    print('Aborted.')
                    pass
            elif opt == '':
                pass
            else:
                print('COMMAND NOT FOUND!!')

    def init_clients(self):
        """Accept loop: spawn a Client handler thread per connection until
        TERMINATE is set, then notify/close all remaining sockets."""
        global CLI_HASH
        while not TERMINATE:
            try:
                # Short timeout so the TERMINATE flag is re-checked regularly.
                self.server.settimeout(1)
                conn, addr = self.server.accept()
            except socket.timeout:
                pass
            except Exception as e:
                raise e
            else:
                logging.log(
                    'A connection from [{}.{}] has been received.'.format(
                    addr[0], addr[1]))
                cli_obj = Client(conn, addr, self)
                CLI_HASH[conn] = cli_obj
                threading._start_new_thread(cli_obj.run, ('',))
        try:
            print('Server has stopped listening on opened socket.')
            print('Broadcasting connection termination signal..')
            msg = "Sorry! We are unable to serve at this moment."
            # Iterate over a snapshot: pop() while iterating the live keys
            # view would raise RuntimeError.
            for cli_socket in list(CLI_HASH.keys()):
                try:
                    cli_socket.send(msg.encode())
                except:
                    cli_socket.close()
                    CLI_HASH.pop(cli_socket)
        except:
            pass

    def init(self):
        """Configure, bind and serve; runs the admin prompt in this thread."""
        logging.log('Initializing server')
        if len(sys.argv) == 1:
            self.show_config(type_='default')
            opt = input('Set these default config?[Y/n] ')
            if opt == '':
                opt = 'Y'
            if opt in ('Y', 'y', 'yes', 'Yes', 'YES'):
                print("Setting up default configurations...")
            else:
                self.set_usr_config(parameters=False)
        else:
            self.set_usr_config(parameters=True)
        self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        try:
            self.server.bind((self.HOST_IP, int(self.HOST_PORT)))
            self.server.listen(int(self.MAX_USR_ACCPT))
        except:
            print('Unable to bind HOST IP and PORT.\nPlease check your configuration')
            sys.exit('EMERGENCY')
        print('\nServer is listening at {}:{}'.format(self.HOST_IP, self.HOST_PORT))
        print('Server is configured to accept %s clients.' %(str(self.MAX_USR_ACCPT)))
        #thread_srv = threading.Thread(target=self.srv_prompt, args=())
        thread_cli = threading.Thread(target=self.init_clients, args=())
        thread_cli.start()
        self.srv_prompt()
        # BUGFIX: the original joined (thread_srv, thread_cli), but thread_srv
        # is never created (its construction is commented out above), which
        # raised NameError whenever this line was reached.
        thread_cli.join()
        print('Server and Client threads are exited.')
class Client():
def __init__(self, conn, addr, srv_obj):
global PORT
self.srv_obj = srv_obj
self.conn = conn
self.addr = addr
self.userName = '-N/A-'
self.PUBLIC_KEY = None
self.KEY = ''
self.items_file='result.txt'
self.port = PORT
PORT = PORT +1
self.EnSharedKey =""
def validate_user(self):
pass
def features(self, msg):
if msg == '@getonline':
self._loop_break_flag = True
self.conn.send(
AES_.encrypt(self.KEY, str(self.srv_obj.user_list)))
if msg.split()[0][1:] in self.srv_obj.user_list:
self._loop_break_flag = True
for _conn in CLI_HASH:
if CLI_HASH[_conn].userName == msg.split()[0][1:]:
try:
self.IND_SOCK = _conn
msg_send = "<" + self.userName + "@" + self.addr[0] +\
"> [IND] " + ' '.join(msg.split()[1:])
self.broadcast(msg_send, IND_FLAG=True)
except Exception as e:
logging.log(msg_type='EXCEPTION', msg=e)
def getSharedKey(self):
TOKEN_CHAR_LIST = "abcdefghij!@#$%"
# Generate unique symmetric 10bit key for each client
passphrase = ''.join(random.sample(TOKEN_CHAR_LIST, 10))
shared_key = hasher(passphrase)
EnSharedKey = RSA_.encrypt(self.PUBLIC_KEY, shared_key)
if EnSharedKey:
return (shared_key, EnSharedKey)
else:
logging.log("Unable to encrypt shared key with RSA.", msg_type='ERROR')
def result(self , *args):
file = open(self.items_file,"r")
fileList = file.readlines()
file.close()
self.broadcast(fileList)
def time1 (self):
self.sock.listen(1)
flag = 1
try :
while True:
print('waiting for a connection')
connection, client_address = self.sock.accept()
try:
print('connection from', client_address)
while True:
data = connection.recv(64)
if flag == 1 :
self.Token, self.STRTOKEN = pickle.loads(data)
if data:
if (self.Token == self.KEY and self.STRTOKEN=="TOKEN") :
print("This user is Valid")
flag = 0
else:
print("This user is not Valid")
connection.close()
return
else :
if data.decode()=="bye" :
try:
with lock.acquire(timeout=10):
wfile = open(self.items_file, 'w+')
for ilist in ll:
wfile.write(str(ilist) + "\n")
wfile.close()
lock.release()
except :
print("Another instance of this application currently holds the lock.")
if data :
print(str(self.userName)+ " : " + str(data.decode()))
ll.append(str(self.userName)+ " : " + str(data.decode()))
else:
return
finally:
connection.close()
except :
"what the fuck ?"
def time2 (self):
while True:
try:
self._loop_break_flag = False
msg = self.conn.recv(20000)
if msg:
if msg.split()[0][0] == '@':
self.srv_obj.update_active_users()
self.features(msg)
if not self._loop_break_flag:
self.result()
else:
self.remove()
pass
except Exception as e:
logging.log(msg_type='EXCEPTION', msg='[{}] {}'.format(self.userName, e))
def run(self, *args):
data = self.conn.recv(4000)
if data:
self.userName, self.PUBLIC_KEY = pickle.loads(data)
if self.PUBLIC_KEY:
self.KEY, self.EnSharedKey = self.getSharedKey()
else:
tmp_conn = "{}:{}".format(self.addr[0], self.addr[1])
logging.log(
"Public key has not been received from [{}@{}]".format(
self.userName, tmp_conn))
logging.log(
"[0.0.0.0:8081 --> {}] Socket has been terminated ".format(tmp_conn))
self.remove()
if self.KEY == '':
logging.log("Symmetric key generation failed")
tmp_msg = "symmetric key {} has been sent to {}".format(self.KEY, self.userName)
logging.log(tmp_msg)
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
local_hostname = socket.gethostname()
local_fqdn = socket.getfqdn()
ip_address = socket.gethostbyname(local_hostname)
print("working on %s (%s) with %s" % (local_hostname, local_fqdn, ip_address))
server_address = (ip_address, self.port)
print('starting up on %s port %s' % server_address)
self.sock.bind(server_address)
EnSharedKey = (self.port , self.EnSharedKey)
EnSharedKey = pickle.dumps(EnSharedKey)
self.conn.send(EnSharedKey)
Thread(target=self.time1()).start()
Thread(target=self.time2()).start()
def broadcast(self, msg, IND_FLAG=False):
msg = pickle.dumps(msg)
if IND_FLAG:
self.IND_SOCK.send(msg)
return
for cli_socket in CLI_HASH.keys():
if 1==1 :
try:
cli_socket.send(msg)
except:
raise Exception
cli_socket.close()
self.remove()
    def remove(self):
        """Tear down this client's connection and terminate its handler thread."""
        if self.conn in CLI_HASH.keys():
            self.conn.close()
            CLI_HASH.pop(self.conn)
        # Refresh the server-side view of who is online, then end this handler:
        # sys.exit() raises SystemExit in the current thread only.
        self.srv_obj.update_active_users()
        print(self.srv_obj.user_list)
        sys.exit()
if __name__ == "__main__":
    try:
        # One timestamped log file per server run.
        logging = Log(f_name='server_chatroom_' + datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
        logging.logging_flag = True
        logging.silent_flag = True
        logging.validate_file()
        server = Server()
        server.init()
    except SystemExit as e:
        # 'EMERGENCY' is the sentinel exit code for unrecoverable states;
        # any other SystemExit propagates normally.
        if e.code != 'EMERGENCY':
            raise
        else:
            print(sys.exc_info())
            print('Something went wrong!!\nPlease contact developers.')
            os._exit(1)
    except Exception:
        # BUG FIX: the original 'except: raise Exception' discarded the real
        # error and made the messages below unreachable. Report the failure,
        # then force the process (and all client threads) down.
        print(sys.exc_info())
        print('Something went wrong!!\nPlease contact developers\nTerminating the process forcefully..')
        time.sleep(1)
        os._exit(1)
|
kargaranamir/Operating-Systems
|
Project II/Code/chatServer.py
|
chatServer.py
|
py
| 14,141 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9754918030
|
import click
import unittest
from click.testing import CliRunner
from doodledashboard.notifications import TextNotification
from parameterized import parameterized
from sketchingdev.console import ConsoleDisplay
from tests.sketchingdev.terminal.ascii_terminal import AsciiTerminal
class TestConsoleDisplayWithText(unittest.TestCase):
    """Checks that ConsoleDisplay centres a TextNotification inside a terminal
    of a given size; each case pairs a (width, height) with the expected
    ASCII-art terminal frame."""
    @parameterized.expand([
        ((1, 1), "",
         """
         +-+
         ||
         +-+
         """),
        ((10, 3), "a",
         """
         +----------+
         ||
         | a|
         ||
         +----------+
         """),
        ((10, 3), "centred",
         """
         +----------+
         ||
         | centred|
         ||
         +----------+
         """),
        ((10, 3), "I'm centred",
         """
         +----------+
         | I'm|
         | centred|
         ||
         +----------+
         """),
        ((10, 3), "Hello World! This is too long",
         """
         +----------+
         | Hello|
         | World!|
         | This is|
         +----------+
         """),
    ])
    def test_text_centred_in_console(self, console_size, input_text, expected_ascii_terminal):
        """Render *input_text* on a console of *console_size* and compare the
        captured stdout with the frame extracted from the ASCII fixture."""
        expected_terminal = AsciiTerminal.extract_text(expected_ascii_terminal)
        text_notification = TextNotification()
        text_notification.set_text(input_text)
        # Run the draw call through click's CliRunner to capture its output.
        cmd = create_cmd(lambda: ConsoleDisplay(console_size).draw(text_notification))
        result = CliRunner().invoke(cmd, catch_exceptions=False)
        self.assertEqual(expected_terminal, result.output)
def create_cmd(func):
    """Wrap *func* in a zero-argument click command so CliRunner can invoke it."""
    @click.command()
    def c(f=func):
        # Default-argument binding captures *func* at definition time.
        f()
    return c
if __name__ == "__main__":
    unittest.main()  # allow running this test module directly
|
SketchingDev/Doodle-Dashboard-Display-Console
|
tests/sketchingdev/test_text_notification.py
|
test_text_notification.py
|
py
| 1,699 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29050546230
|
from etl import ETL
import os
# Split a fixed-size validation subset out of the CIMA dataset by moving its
# CSV files from data/ into validation/.
DATASET_PATH = "/home/login/datasets"
DATASET_NAME = "CIMA"
DATASET_SIZE = 377

# Fraction of the dataset reserved for validation, converted to a row count.
VALIDATION_FRACTION = 0.2
validation_size = int(DATASET_SIZE * VALIDATION_FRACTION)

# Use DATASET_PATH here instead of repeating the literal path -- the original
# duplicated "/home/login/datasets", so editing the constant had no effect
# on the ETL loader.
validation_etl = ETL(DATASET_PATH, [], size=validation_size)
validation_etl.load(DATASET_NAME)

validation_path = os.path.join(DATASET_PATH, DATASET_NAME, "validation")
# makedirs(exist_ok=True) is race-free compared to the exists()/mkdir() pair.
os.makedirs(validation_path, exist_ok=True)

data_path = os.path.join(DATASET_PATH, DATASET_NAME, "data")

# Each key in the loaded subset names one per-sample CSV file to relocate.
validation_set = validation_etl.cima
for key, item in validation_set.items():
    print(f"Moving {key} to validation.")
    old_path = os.path.join(data_path, key + ".csv")
    new_path = os.path.join(validation_path, key + ".csv")
    os.rename(old_path, new_path)
|
eskarpnes/anomove
|
etl/validation_split.py
|
validation_split.py
|
py
| 765 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28558999835
|
from helper import is_prime, find_prime_factors, int_list_product
def smallest_multiple(n):
    """Return the smallest positive integer evenly divisible by every
    integer from 1 to n (inclusive) -- i.e. lcm(1, 2, ..., n).

    The original accumulated prime factors over range(2, n), which skipped
    n itself (wrong whenever n contributes a new prime power, e.g. any
    prime n) and depended on project-local helpers. Folding with
    lcm(a, b) = a * b // gcd(a, b) is exact and needs only the stdlib.
    """
    from math import gcd  # local import keeps the module's import line untouched
    result = 1
    for i in range(2, n + 1):
        result = result * i // gcd(result, i)
    return result
print(str(smallest_multiple(20)))
|
thejefftrent/ProjectEuler.py
|
5.py
|
5.py
|
py
| 436 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27070910668
|
import datetime as dt
import random
import pytest
from scheduler import Scheduler, SchedulerError
from scheduler.base.definition import JobType
from scheduler.threading.job import Job
from ...helpers import foo
@pytest.mark.parametrize(
    "empty_set",
    [
        False,
        True,
    ],
)
@pytest.mark.parametrize(
    "any_tag",
    [
        None,
        False,
        True,
    ],
)
@pytest.mark.parametrize(
    "n_jobs",
    [
        0,
        1,
        2,
        3,
        10,
    ],
)
def test_get_all_jobs(n_jobs, any_tag, empty_set):
    """Selecting with no tags (omitted or an explicit empty set) must return
    every scheduled job, regardless of the any_tag flag."""
    sch = Scheduler()
    assert len(sch.jobs) == 0
    # Schedule n_jobs one-shot jobs so there is something to select.
    for _ in range(n_jobs):
        sch.once(dt.datetime.now(), foo)
    assert len(sch.jobs) == n_jobs
    # Exercise all four call shapes: tags omitted vs tags={}, each with and
    # without an explicit any_tag argument.
    if empty_set:
        if any_tag is None:
            jobs = sch.get_jobs()
        else:
            jobs = sch.get_jobs(any_tag=any_tag)
    else:
        if any_tag is None:
            jobs = sch.get_jobs(tags={})
        else:
            jobs = sch.get_jobs(tags={}, any_tag=any_tag)
    assert len(jobs) == n_jobs
@pytest.mark.parametrize(
    "job_tags, select_tags, any_tag, returned",
    [
        [
            [{"a", "b"}, {"1", "2", "3"}, {"a", "1"}],
            {"a", "1"},
            True,
            [True, True, True],
        ],
        [
            [{"a", "b"}, {"1", "2", "3"}, {"a", "2"}],
            {"b", "1"},
            True,
            [True, True, False],
        ],
        [
            [{"a", "b"}, {"1", "2", "3"}, {"b", "1"}],
            {"3"},
            True,
            [False, True, False],
        ],
        [
            [{"a", "b"}, {"1", "2", "3"}, {"b", "2"}],
            {"2", "3"},
            True,
            [False, True, True],
        ],
        [
            [{"a", "b"}, {"1", "2", "3"}, {"a", "1"}],
            {"a", "1"},
            False,
            [False, False, True],
        ],
        [
            [{"a", "b"}, {"1", "2", "3"}, {"a", "2"}],
            {"b", "1"},
            False,
            [False, False, False],
        ],
        [
            [{"a", "b"}, {"1", "2", "3"}, {"b", "1"}],
            {"1", "3"},
            False,
            [False, True, False],
        ],
        [
            [{"a", "b"}, {"1", "2", "3"}, {"b", "2"}],
            {"2", "3"},
            False,
            [False, True, False],
        ],
    ],
)
def test_get_tagged_jobs(job_tags, select_tags, any_tag, returned):
    """Tag-based selection: with any_tag=True a job matches if it shares at
    least one tag with select_tags; with any_tag=False it must carry all of
    them. Each case lists per-job tag sets and the expected membership."""
    sch = Scheduler()
    # One zero-delay job per tag set, in table order.
    jobs = [sch.once(dt.timedelta(), lambda: None, tags=tags) for tags in job_tags]
    res = sch.get_jobs(tags=select_tags, any_tag=any_tag)
    for job, ret in zip(jobs, returned):
        if ret:
            assert job in res
        else:
            assert job not in res
|
DigonIO/scheduler
|
tests/threading/scheduler/test_sch_get_jobs.py
|
test_sch_get_jobs.py
|
py
| 2,720 |
python
|
en
|
code
| 51 |
github-code
|
6
|
25476219530
|
import random
suits = ("Hearts", "Spades", "Diamonds", "Clubs")
tarotSuits = ("Swords", "Cups", "Wands", "Coins")
names = ("Ace", "Two", "Three", "Four", "Five", "Six", "Seven", "Eight", "Nine", "Ten", "Jack", "Queen", "King")
tarotNames = ("Ace", "Two", "Three", "Four", "Five", "Six", "Seven", "Eight", "Nine", "Ten",
"Page", "Knight", "Queen", "King")
arcana = ('The Fool', 'The Magician', 'The High Priestess', 'The Empress', 'The Emperor',
'The Hierophant', 'The Lovers', 'The Chariot', 'Strength', 'The Hermit', 'Wheel of Fortune',
'Justice', 'The Hanged Man', 'Death', 'Temperance', 'The Devil', 'The Tower', 'The Star',
'The Moon', 'The Sun', 'Judgement', 'The World')
unoColors = ('Red', 'Green', 'Blue', 'Yellow')
unoSpecials = ('Skip', 'Reverse', 'Draw')
# TODO:
# Give cards emoji representations
# Implement into bot
class Card:
    """A single playing card: standard, tarot, or Uno flavoured."""

    def get_value(self):
        """Numeric value of the card (court cards cap at 10).

        Major-arcana tarot cards use their index in the arcana list;
        Uno cards have no numeric value (None).
        """
        if self.tarot:
            if self.suit == "Major":
                return arcana.index(self.name)
            return min(tarotNames.index(self.name) + 1, 10)
        if self.uno:
            return None
        return min(names.index(self.name) + 1, 10)

    def __init__(self, suit, name, hidden=False, tarot=False, uno=False):
        self.suit = str(suit)
        self.name = str(name)
        self.hidden = hidden
        self.tarot = tarot
        self.uno = uno
        # Cache the value once; it never changes after construction.
        self.value = self.get_value()

    def __str__(self):
        if self.suit == "Major":
            return self.name
        if self.uno:
            return f'{self.suit} {self.name}'
        return f'{self.name} of {self.suit}'
class Deck:
    """An ordered stack of Card objects for standard, tarot, or Uno play."""

    def __init__(self, hand=False, tarot=False, uno=False):
        self.hand = hand
        self.tarot = tarot
        self.uno = uno
        self.stack = []
        if self.hand:
            # A "hand" is just an empty deck to be filled by draws.
            return
        if self.tarot:
            # 56 minor-arcana cards followed by the 22 major arcana.
            self.stack = [Card(s, r, tarot=True) for s in tarotSuits for r in tarotNames]
            self.stack.extend(Card("Major", r, tarot=True) for r in arcana)
            return
        if self.uno:  # Does not work
            for color in unoColors:
                # Two copies of each numbered and special card per color.
                for rank in range(1, 10):
                    self.stack.append(Card(color, rank, uno=True))
                    self.stack.append(Card(color, rank, uno=True))
                for special in unoSpecials:
                    self.stack.append(Card(color, special, uno=True))
                    self.stack.append(Card(color, special, uno=True))
            for _ in range(0, 4):
                self.stack.append(Card('Wild', '', uno=True))
                self.stack.append(Card('Wild', 'Draw', uno=True))
            return
        # Default: a standard 52-card deck.
        self.stack = [Card(s, r) for s in suits for r in names]
        return

    def list(self, hidden=True):
        """Comma-separated listing; hidden cards get a mask prefix when *hidden*."""
        parts = []
        for card in self.stack:
            if card.hidden and hidden:
                parts.append("*Hidden card,* ")
            parts.append(f"{card}, ")
        return "".join(parts)

    def shuffle(self):
        """Randomise the order of the stack in place."""
        random.shuffle(self.stack)

    def draw(self, pos=0, hidden=False):
        """Remove and return the card at *pos* (top of the deck by default)."""
        dealt = self.stack.pop(pos)
        if hidden:
            dealt.hidden = True
        return dealt

    def insert(self, card, pos=0, bottom=False):
        """Place *card* at *pos*, or at the bottom of the stack when *bottom*."""
        if bottom:
            self.stack.append(card)
        else:
            self.stack.insert(pos, card)
|
Malbrett/Nettlebot
|
cards.py
|
cards.py
|
py
| 3,490 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35035790893
|
import csv
import json
import numpy as np
from tabulate import tabulate
import matplotlib.pyplot as plt
from math import ceil
from wand.image import Image as WImage
from subprocess import Popen
def make_json(csvFilePath, keyName, alldata):
    """Read a tab-separated file into *alldata* under *keyName* and return
    the whole mapping serialised as a JSON string.

    The first line of the file is discarded; the next line is treated as the
    header row. Each remaining row is keyed by its 'CATEGORY' column (later
    duplicates overwrite earlier ones). Note: *alldata* is mutated in place
    as well as serialised.
    """
    with open(csvFilePath, encoding='utf-8') as csvf:
        next(csvf)  # skip the leading line before the real header
        rows_by_category = {
            row['CATEGORY']: row
            for row in csv.DictReader(csvf, delimiter='\t')
        }
    alldata[keyName] = rows_by_category
    return json.dumps(alldata)
def plots(Sample,file,normal,listSample):
    """Display PNG report image(s) for one sample, or a grid of all samples.

    Sample:     sample name, or 'ALL' for grid mode (only when normal is False).
    file:       filename suffix appended to the sample directory name.
    normal:     when True, *Sample* is used directly as the path component
                instead of being looked up in *listSample*.
    listSample: list of sample names.
    """
    #listSample = [row[1] for row in batch]
    rows = []
    # Image root on the shared storage mount.
    path = "/storage/gluster/vol1/data/PUBLIC/SCAMBIO/ABT414_WES_Analysis/ABT414_Flank/ABT414_Flank/"
    if Sample == 'ALL' and not(normal):
        # Fixed 3-row grid; column count grows with the number of samples.
        ROWS = 3
        COLS = ceil(np.size(listSample)/ROWS)
        fig = plt.figure(figsize = (20, 15))
        for row in range(ROWS):
            cols = []
            for col in range(COLS):
                index = row * COLS + col
                if index<np.size(listSample):
                    img = WImage(filename=path+listSample[index]+file)
                    # NOTE(review): subplot grid is created as (COLS, ROWS),
                    # transposed relative to the loop above -- confirm this
                    # layout is intended.
                    a = fig.add_subplot(COLS, ROWS, index+1)
                    plt.axis('off')
                    plt.grid(b=None)
                    imgplot = plt.imshow(img)
                    a.set_title(listSample[index])
    else:
        # Single-image mode.
        fig = plt.figure(figsize = (15, 10))
        a = fig.add_subplot(1, 1, 1)
        if not(normal):
            index = listSample.index(Sample)
            img = WImage(filename=path+listSample[index]+file)
            a.set_title(listSample[index])
        else:
            img = WImage(filename=path+Sample+file)
            imgplot = plt.imshow(img)
        plt.axis('off')
        plt.grid(b=None)
        imgplot = plt.imshow(img)
def multiPage(Sample,file,page,normal,listSample):
    """Display one page of a multi-page image using wand's '[n]' page syntax.

    *page* is 1-based for the caller and converted to wand's 0-based index.
    When *normal* is False the sample name is resolved via *listSample* and
    used as the plot title.
    """
    page = page-1
    #listSample = [row[1] for row in batch]
    path = "/storage/gluster/vol1/data/PUBLIC/SCAMBIO/ABT414_WES_Analysis/ABT414_Flank/ABT414_Flank/"
    fig = plt.figure(figsize = (20, 15))
    a = fig.add_subplot(1, 1, 1)
    if not(normal):
        index = listSample.index(Sample)
        img = WImage(filename=path+listSample[index]+file+"["+str(page)+"]")
        a.set_title(listSample[index])
    else:
        img = WImage(filename=path+Sample+file+"["+str(page)+"]")
    imgplot = plt.imshow(img)
    plt.axis('off')
    plt.grid(b=None)
    imgplot = plt.imshow(img)
def _print_sample_table(path, sample, file, cols, leading_newline=False):
    """Print the selected columns of one sample's TSV report as a table."""
    print(('\n' if leading_newline else '') + sample + '\n')
    table = []
    file_path = path + sample + file
    with open(file_path, 'r') as f:
        for row in csv.reader(f, delimiter='\t'):
            # Skip stray single-field lines (blank/separator rows).
            if np.size(row) > 1:
                table.append([row[i] for i in cols])
    print(tabulate(table, headers="firstrow"))

def tableShow(Sample, file, cols, listSample):
    """Show a tabulated report for one sample, or for every sample when
    Sample == 'ALL'.

    cols: indices of the TSV columns to display.

    The original duplicated the whole read/format loop in both branches;
    the shared logic now lives in _print_sample_table.
    """
    path = "/storage/gluster/vol1/data/PUBLIC/SCAMBIO/ABT414_WES_Analysis/ABT414_Flank/ABT414_Flank/"
    if Sample == 'ALL':
        for sample in listSample:
            # 'ALL' mode originally printed a leading newline before each name.
            _print_sample_table(path, sample, file, cols, leading_newline=True)
    else:
        _print_sample_table(path, Sample, file, cols)
def commandsParallel(commands, commdsSize, commdsParallel):
    """Run shell commands in batches of *commdsParallel* concurrent processes.

    commands:       list of shell command strings (executed with shell=True --
                    only ever pass trusted, internally-built commands here).
    commdsSize:     number of entries in *commands*.
    commdsParallel: requested concurrency, clamped to *commdsSize*.
    """
    if commdsSize == 0:
        # Nothing to run; the original divided by zero here.
        return
    if commdsParallel > commdsSize:
        commdsParallel = commdsSize
    print("Numbers of samples in parallel: " + str(commdsParallel))
    itersPar = ceil(commdsSize / commdsParallel)
    print("Numbers of iterations: " + str(itersPar))
    for i in range(itersPar):
        # BUG FIX: slicing never raises, whereas the original's try/except
        # IndexError could leave 'processes' undefined (NameError on the
        # first iteration) or silently reuse the previous batch when the
        # last slice was short.
        batch = commands[i * commdsParallel:(i + 1) * commdsParallel]
        processes = [Popen(cmd, shell=True) for cmd in batch]
        exitcodes = [p.wait() for p in processes]
|
miccec/ExomePipeline
|
interactPlots.py
|
interactPlots.py
|
py
| 4,422 |
python
|
en
|
code
| 0 |
github-code
|
6
|
968977222
|
import pyodbc
import pandas as pd
# Connection steps to the server
from OnlineBankingPortalCSV2_code import Accounts, Customer
# --- Database connection ---------------------------------------------------
# NOTE(review): credentials are hard-coded in source; move them to environment
# variables or a config file before sharing/deploying.
server = 'LAPTOP-SELQSNPH'
database = 'sai'
username = 'maram'
password = 'dima2k21'
cnxn = pyodbc.connect('DRIVER={ODBC Driver 17 for SQL Server};SERVER='+server+';DATABASE='+database+';UID='+username+';PWD='+ password)
cursor = cnxn.cursor()
# import data from csv
data = pd.read_csv (r'C:\Users\maram\PycharmProjects\pythonProject\OnlineBankingPortal_data_file3.csv')
# Transactions table: one synthetic surrogate id per (date, card) group,
# numbered from 300 upward.
Transactions = pd.DataFrame(data, columns = ['Transaction_id','Acc_number','Transaction_type_code','Transaction_type_desc','Transaction_date','Card_number'])
Transactions = Transactions.astype('str')
Transactions['Transaction_id']=Transactions.groupby(['Transaction_date','Card_number'],sort=False).ngroup()+300
# Merge data inorder to get the required Id's
Merge_Transactions_Accounts=pd.merge(Transactions,Accounts,on='Acc_number')
Transactions['Account_id']=Merge_Transactions_Accounts.Account_id
Transactions['Customer_id']=Merge_Transactions_Accounts.Customer_id
print(Transactions)
Transactions['Transaction_date'] = Transactions['Transaction_date'].astype('datetime64[ns]')
# Cards table: surrogate Card_id per distinct card number from 400 upward.
Cards = pd.DataFrame(data, columns = ['Acc_number','Card_id','Card_number','Maximum_limit','Expiry_Date','Credit_score'])
Cards = Cards.astype('str')
Cards['Expiry_Date']= Cards['Expiry_Date'].astype('datetime64[ns]')
# Merge data inorder to get the required Id's
Merge_Cards_Accounts=pd.merge(Cards,Accounts,on='Acc_number')
Cards['Customer_id']=Merge_Cards_Accounts.Customer_id
# After astype('str'), missing card numbers became the literal string 'nan'.
Cards = Cards[Cards.Card_number != 'nan']
Cards['Card_id'] = Cards.groupby(['Card_number'],sort=False).ngroup()+400
Cards = Cards.drop_duplicates(subset=None, keep="first", inplace=False)
# Convert Credit score and Maximum limit from string->float->int
Cards['Credit_score']=Cards['Credit_score'].astype(float)
Cards['Credit_score']=Cards['Credit_score'].astype(int)
Cards['Maximum_limit']=Cards['Maximum_limit'].astype(float)
Cards['Maximum_limit']=Cards['Maximum_limit'].astype(int)
print(Cards)
# Transaction_details Table
Transaction_details = pd.DataFrame(data, columns = ['Transaction_Amount','Merchant_details','Acc_number','Transaction_date'])
Transaction_details = Transaction_details.astype('str')
# Merge data inorder to get the required Id's
# NOTE(review): pd.concat stacks the two frames row-wise; Transaction_id is
# then taken positionally from the first len(Transaction_details) rows --
# verify this alignment assumption against the source data.
Merge_Transaction_details_Transactions=pd.concat([Transactions,Transaction_details], ignore_index=True)
Transaction_details['Transaction_id']=Merge_Transaction_details_Transactions.Transaction_id
# Convert Transaction_id from string->float->int
Transaction_details['Transaction_id']=Transaction_details['Transaction_id'].astype(float)
Transaction_details['Transaction_id']=Transaction_details['Transaction_id'].astype(int)
print(Transaction_details)
# inserting data into tables (parameterized queries -- no SQL injection risk)
for row in Transactions.itertuples():
    cursor.execute('''
                INSERT INTO Transactions (Customer_id,Account_id,Acc_number,Transaction_type_code,Transaction_type_desc,Transaction_date)
                VALUES (?,?,?,?,?,?)
                ''',
                row.Customer_id,
                row.Account_id,
                row.Acc_number,
                row.Transaction_type_code,
                row.Transaction_type_desc,
                row.Transaction_date,
                )
for row in Cards.itertuples():
    cursor.execute('''
                INSERT INTO Cards (Customer_id,Acc_number,Card_number,Maximum_limit,Expiry_Date,Credit_score)
                VALUES (?,?,?,?,?,?)
                ''',
                row.Customer_id,
                row.Acc_number,
                row.Card_number,
                row.Maximum_limit,
                row.Expiry_Date,
                row.Credit_score
                )
for row in Transaction_details.itertuples():
    cursor.execute('''
                INSERT INTO Transaction_details (Transaction_id,Transaction_Amount,Merchant_details,Acc_number)
                VALUES (?,?,?,?)
                ''',
                row.Transaction_id,
                row.Transaction_Amount,
                row.Merchant_details,
                row.Acc_number
                )
# Commit once after all inserts so the load is all-or-nothing.
cnxn.commit()
|
divyamaram/Database-Managment-systems
|
OnlineBankingPortalCSV3_code.py
|
OnlineBankingPortalCSV3_code.py
|
py
| 4,255 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10480951725
|
# Per-account wealth: sum each sub-list of balances, then report the richest.
accounts = [[1, 2, 3], [2, 3, 4], [10, 12]]
c = [sum(account) for account in accounts]
print(c)
# Sort descending so the richest account comes first.
c.sort(reverse=True)
a = c
print(a)
print(a[0])
|
SmolinIvan/Ivan_Project
|
Training/leetcode/sample2.py
|
sample2.py
|
py
| 202 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31932908131
|
from pyspark.ml.classification import NaiveBayes
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
from pyspark.sql import SparkSession
# Train and evaluate a multinomial Naive Bayes classifier on Spark's bundled
# sample libsvm dataset.
spark = SparkSession.builder.getOrCreate()
spark.sparkContext.setLogLevel("ERROR")
data = spark.read.format("libsvm").load("file:///usr/lib/spark/data/mllib/sample_libsvm_data.txt")
# 60/40 train/test split with a fixed seed for reproducibility.
splits = data.randomSplit([0.6, 0.4], 1234)
train = splits[0]
test = splits[1]
nb = NaiveBayes(smoothing=1.0, modelType="multinomial")
model = nb.fit(train)
predictions = model.transform(test)
predictions.show()
evaluator = MulticlassClassificationEvaluator(labelCol="label", predictionCol="prediction",metricName="accuracy")
accuracy = evaluator.evaluate(predictions)
print("Test set accuracy = " + str(accuracy))
spark.stop()
|
geoffreylink/Projects
|
07 Machine Learning/SparkML/sparkML_CL_naivebayes.py
|
sparkML_CL_naivebayes.py
|
py
| 789 |
python
|
en
|
code
| 9 |
github-code
|
6
|
41466182049
|
def is_armstrong(num):
    """Return True if *num* equals the sum of its digits, each raised to the
    power of the digit count (an Armstrong / narcissistic number)."""
    digits = str(num)
    return num == sum(int(d) ** len(digits) for d in digits)


if __name__ == '__main__':
    # Guarding the interactive part keeps the module importable (and
    # testable) without blocking on stdin; run directly for the original
    # prompt/print behaviour.
    num = int(input('Enter a Number : '))
    if is_armstrong(num):
        print('Armstrong number')
    else:
        print('Not armstrong number')
|
Kanchana5/armstrong-number
|
Armstrong1.py
|
Armstrong1.py
|
py
| 219 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11948273979
|
#!/usr/bin/python3.8
# -*- coding: utf-8 -*-
#
# SuperDrive
# a live processing capable, clean(-ish) implementation of lane &
# path detection based on comma.ai's SuperCombo neural network model
#
# @NamoDev
#
# ============================================================================ #
# Parse arguments
import os
import warnings
import argparse
# Command-line interface: input source plus optional GPU/warning/visualization
# toggles. Parsed before the heavy imports so CUDA/TF env vars take effect.
apr = argparse.ArgumentParser(description = "Predicts lane line and vehicle path using the SuperCombo neural network!")
apr.add_argument("--input", type=str, dest="inputFile", help="Input capture device or video file", required=True)
apr.add_argument("--disable-gpu", dest="disableGPU", action="store_true", help="Disables the use of GPU for inferencing")
apr.add_argument("--disable-warnings", dest="disableWarnings", action="store_true", help="Disables console warning messages")
apr.add_argument("--show-opencv-window", dest="showOpenCVVisualization", action="store_true", help="Shows OpenCV frame visualization")
args = apr.parse_args()
# Where are we reading from?
CAMERA_DEVICE = str(args.inputFile)
# Do we want to disable GPU? Hiding CUDA devices must happen before
# TensorFlow is imported further down.
if args.disableGPU == True:
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
# Do we want to disable warning messages?
if args.disableWarnings == True:
    os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
    warnings.filterwarnings("ignore")
# ============================================================================ #
import cv2
import sys
import time
import pathlib
import numpy as np
import tensorflow as tf
from parser import parser
import savitzkygolay as sg
from undistort.undistort import undistort
from timeit import default_timer as timer
# OpenPilot transformations (needed to get the model to output correct results)
from common.transformations.model import medmodel_intrinsics
from common.transformations.camera import transform_img, eon_intrinsics
# Are we running TF on GPU? Recorded only for display/telemetry.
if tf.test.is_gpu_available() == True:
    isGPU = True
    tfDevice = "GPU"
else:
    isGPU = False
    tfDevice = "CPU"
# Initialize undistort (calibration matrices for the capture camera).
undist = undistort(frame_width=560, frame_height=315)
# Initialize OpenCV capture and set basic parameters
# (property ids 3/4 are frame width/height).
cap = cv2.VideoCapture(CAMERA_DEVICE)
cap.set(3, 1280)
cap.set(4, 720)
cap.set(cv2.CAP_PROP_AUTOFOCUS, 0)
# Load Keras model for lane detection
#
# path = [y_pos of path plan along x=range(0,192) |
#         std of y_pos of path plan along x=range(0,192) |
#         how many meters it can see]
# 12 * 128 * 256 is 2 consecutive imgs in YUV space of size 256 * 512
lanedetector = tf.keras.models.load_model(str(pathlib.Path(__file__).parent.absolute()) + "/supercombo.keras")
# We need a place to keep two separate consecutive image frames
# since that's what SuperCombo uses
fr0 = np.zeros((384, 512), dtype=np.uint8)
fr1 = np.zeros((384, 512), dtype=np.uint8)
# SuperCombo requires a feedback of state after each prediction
# (to improve accuracy?) so we'll allocate space for that
state = np.zeros((1, 512))
# Additional inputs to the steering model
#
# "Those actions are already there, we call it desire.
# It's how the lane changes work" - @Willem from Comma
#
# Note: not implemented in SuperDrive (yet)
desire = np.zeros((1, 8))
# We want to keep track of our FPS rate, so here's
# some variables to do that
fpsActual = 0;
fpsCounter = 0;
fpsTimestamp = 0;
# OpenCV named windows for visualization (if requested)
cv2.namedWindow("SuperDrive", cv2.WINDOW_AUTOSIZE)
cv2.namedWindow("Vision path", cv2.WINDOW_KEEPRATIO)
cv2.resizeWindow("Vision path", 200, 500)
# Main loop here: capture -> undistort -> crop -> YUV -> predict -> visualize.
while True:
    # Get frame start time
    t_frameStart = timer()
    # FPS counter logic: latch the per-second count each wall-clock second.
    fpsCounter += 1
    if int(time.time()) > fpsTimestamp:
        fpsActual = fpsCounter
        fpsTimestamp = int(time.time())
        fpsCounter = 0
    # Read frame
    (ret, frame) = cap.read()
    # Resize incoming frame to smaller size (to save resource in undistortion)
    frame = cv2.resize(frame, (560, 315))
    # Undistort incoming frame
    # This is standard OpenCV undistortion using a calibration matrix.
    # In this case, a Logitech C920 is used (default for undistortion helper).
    # Just perform chessboard calibration to get the matrices!
    frame = undist.frame(frame)
    # Crop the edges out and try to get to (512,256), since that's what
    # the SuperCombo model uses. Note that this is skewed a bit more
    # to the sky, since my camera can "see" the hood and that probably won't
    # help us in the task of lane detection, so we crop that out
    frame = frame[14:270, 24:536]
    # Then we want to convert this to YUV
    frameYUV = cv2.cvtColor(frame, cv2.COLOR_BGR2YUV_I420)
    # Use Comma's transformation to get our frame into a format that
    # SuperCombo likes, normalized to the [-1, 1) float range.
    frameYUV = transform_img(frameYUV, from_intr=eon_intrinsics,
                             to_intr=medmodel_intrinsics, yuv=True,
                             output_size=(512, 256)).astype(np.float32) \
                             / 128.0 - 1.0
    # We want to push our image in fr1 to fr0, and replace fr1 with
    # the current frame (to feed into the network)
    fr0 = fr1
    fr1 = frameYUV
    # SuperCombo input shape is (12, 128, 256): two consecutive images
    # in YUV space. We concatenate fr0 and fr1 together to get to that
    networkInput = np.concatenate((fr0, fr1))
    # We then want to reshape this into the shape the network requires
    networkInput = networkInput.reshape((1, 12, 128, 256))
    # Build actual input combination
    input = [networkInput, desire, state]
    # Then, we can run the prediction!
    # TODO: this is somehow very slow(?)
    networkOutput = lanedetector.predict(input)
    # Parse output and refeed state (last output element is the RNN state).
    parsed = parser(networkOutput)
    state = networkOutput[-1]
    # Now we have all the points!
    # These correspond to points with x = <data in here>, y = range from
    # 0 to 192 (output of model)
    leftLanePoints = parsed["lll"][0]
    rightLanePoints = parsed["rll"][0]
    pathPoints = parsed["path"][0]
    # We may also want to smooth this out (Savitzky-Golay, window 51, order 3).
    leftLanePoints = sg.savitzky_golay(leftLanePoints, 51, 3)
    rightLanePoints = sg.savitzky_golay(rightLanePoints, 51, 3)
    pathPoints = sg.savitzky_golay(pathPoints, 51, 3)
    # Compute position on current lane (sign flipped so + is right of center).
    currentPredictedPos = (-1) * pathPoints[0]
    # Compute running time
    p_totalFrameTime = round((timer() - t_frameStart) * 1000, 2)
    print("Frame processed on " + tfDevice + " \t" + str(p_totalFrameTime) + " ms\t" + str(fpsActual) + " fps")
    # Output (enlarged) frame with text overlay
    if args.showOpenCVVisualization == True:
        canvas = frame.copy()
        canvas = cv2.resize(canvas, ((700, 350)))
        cv2.putText(canvas, "Vision processing time: " + str(p_totalFrameTime) + " ms (" + str(fpsActual) + " fps)", (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255), 2)
        cv2.putText(canvas, "Device: " + tfDevice, (10, 40), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255), 2)
        cv2.putText(canvas, "Position: " + str(round(currentPredictedPos, 3)) + " m off centerline", (10, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255), 2)
        # Create canvas for graph plotting
        plotCanvas = np.zeros((500, 200, 3), dtype=np.uint8)
        # Plot points! (pixels-per-meter along each axis)
        ppmY = 10
        ppmX = 20
        # We know we can only display 500 / ppmY = 50 meters ahead
        # so limiting our loop will allow for a faster processing time
        for i in range(51):
            cv2.circle(plotCanvas, (int(100 - abs(leftLanePoints[i] * ppmX)), int(i * ppmY)), 2, (160, 160, 160), -1)
            cv2.circle(plotCanvas, (int(100 + abs(rightLanePoints[i] * ppmX)), int(i * ppmY)), 2, (160, 160, 160), -1)
            cv2.circle(plotCanvas, (int(100 - (pathPoints[i] * ppmX)), int(i * ppmY)), 4, (10, 255, 10), -1)
        # Flip plot path for display (so distance increases upward).
        plotCanvas = cv2.flip(plotCanvas, 0)
        # Add some texts for distance
        cv2.putText(plotCanvas, "0 m", (10, 490), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (200,200,200), 1)
        cv2.putText(plotCanvas, "10 m", (10, 400), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (200,200,200), 1)
        cv2.putText(plotCanvas, "20 m", (10, 300), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (200,200,200), 1)
        cv2.putText(plotCanvas, "30 m", (10, 200), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (200,200,200), 1)
        cv2.putText(plotCanvas, "40 m", (10, 100), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (200,200,200), 1)
        cv2.putText(plotCanvas, "50 m", (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (200,200,200), 1)
        cv2.imshow("SuperDrive", canvas)
        cv2.imshow("Vision path", plotCanvas)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
|
kaishijeng/SuperDrive
|
drive.py
|
drive.py
|
py
| 8,715 |
python
|
en
|
code
| 3 |
github-code
|
6
|
71361629947
|
from turtle import Turtle
FONT = ("Courier", 24, "normal")
class Scoreboard(Turtle):
    """Invisible turtle that renders the current level and the persisted
    high score at the top-left of the screen."""

    def __init__(self):
        super().__init__()
        self.hideturtle()
        self.color('black')
        self.penup()
        self.level = 0
        # The high score persists between runs in a plain text file.
        with open('data.txt') as high_score:
            self.high_level = int(high_score.read())
        self.goto(-250, 250)
        self.update_scoreboard()

    def update_scoreboard(self):
        """Redraw the scoreboard text from the current counters."""
        self.clear()
        self.write(arg=f'Level: {self.level} High score: {self.high_level}', align='left', font=FONT)

    def add_point(self):
        """Advance one level and refresh the display."""
        self.level += 1
        self.update_scoreboard()

    def reset(self):
        """Persist a new high score if one was reached, then restart at level 0."""
        if self.level > self.high_level:
            self.high_level = self.level
            with open('data.txt', mode='w') as high_score:
                high_score.write(str(self.level))
        self.level = 0
        self.update_scoreboard()
|
Benji918/turtle-crossing-game
|
scoreboard.py
|
scoreboard.py
|
py
| 923 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4583110582
|
from __future__ import division
from copy import deepcopy
import torch
from torch.autograd import Variable
import torch.nn.functional as F
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
import numpy as np
import torch
def average_rule(keys, Temp_state_dict, neighbors):
    """Element-wise mean of each parameter tensor across *neighbors*.

    Temp_state_dict maps parameter name -> per-agent tensors; only the
    entries indexed by *neighbors* participate. Inputs are deep-copied so
    the callers' tensors are never aliased by the result.
    """
    return {
        key: torch.mean(
            torch.stack([deepcopy(Temp_state_dict[key][n]) for n in neighbors]), 0
        )
        for key in keys
    }
def median_rule(keys, Temp_state_dict, neighbors):
    """Element-wise (lower) median of each parameter tensor across *neighbors*.

    torch.median returns the lower of the two middle values for an even
    neighbor count; the accompanying index tensor is discarded.
    """
    aggregated = {}
    for key in keys:
        stacked = torch.stack([Temp_state_dict[key][n] for n in neighbors])
        aggregated[key], _ = torch.median(stacked, 0)
    return aggregated
def actor_rule(agent_id, policy, Model_actor, Model_critic, Model_critic_2, ram, keys, ActorDict, neighbors, alpha, Accumu_Q_actor, filter, normalize=False, softmax=False):
    """Aggregate neighbor actor parameters, weighted by how well each
    neighbor's policy scores under *this* agent's critic.

    Scores each neighbor's actor on a replay-buffer batch (per-policy
    objective), turns the scores into normalized weights (optionally
    exp-normalized / softmaxed / filtered to neighbors at least as good
    as self, with self's weight scaled by alpha), and returns the weighted
    sum of ActorDict entries per parameter key.
    """
    random_batch_size = 256
    # gamma = 1
    s1, a1, s2, _, _ = ram.sample(random_batch_size)
    # s1 = Variable(torch.from_numpy(np.float32(s1))).to(device)
    # Score each neighbor's actor under this agent's critic(s).
    for neigh in neighbors:
        if policy == "TD3":
            pred_a1 = Model_actor[neigh](s1)
            Q_actor = Model_critic[agent_id].Q1(s1, pred_a1).mean()
            # Accumu_loss_actor[agent_id, neigh] = (1 - gamma) * Accumu_loss_actor[agent_id, neigh] + gamma * loss_actor
            Accumu_Q_actor[agent_id, neigh] = Q_actor
        elif policy == "DDPG":
            pred_a1 = Model_actor[neigh](s1)
            Q_actor = Model_critic[agent_id].forward(s1, pred_a1).mean()
            # Accumu_loss_actor[agent_id, neigh] = (1 - gamma) * Accumu_loss_actor[agent_id, neigh] + gamma * loss_actor
            Accumu_Q_actor[agent_id, neigh] = Q_actor
        elif policy == "PPO":
            pass
        elif policy == "SAC":
            # Prediction pi(a|s) and log pi(a|s) from the neighbor's actor.
            _, pi, log_pi = Model_actor[neigh](s1)
            # Min Double-Q: min(Q1(s,pi(a|s)), Q2(s,pi(a|s))).
            min_q_pi = torch.min(Model_critic[agent_id](s1, pi), Model_critic_2[agent_id](s1, pi)).squeeze(1)
            # SAC losses; negated so higher is better, like the Q scores above.
            para = 0.2  # entropy temperature
            policy_loss = (para * log_pi - min_q_pi).mean()
            Accumu_Q_actor[agent_id, neigh] = -policy_loss
        else:
            raise NameError("Policy name is not defined!")
    Q = deepcopy(Accumu_Q_actor[agent_id, :])
    min_Q = np.min(Accumu_Q_actor[agent_id, neighbors])
    max_Q = np.max(Accumu_Q_actor[agent_id, neighbors])
    # NOTE(review): the list comprehensions below turn Q from a numpy row
    # into a Python list; 'Weight = Q / sum_Q' later relies on numpy
    # broadcasting and will fail on a plain list -- verify the
    # normalize/softmax paths are actually exercised/working.
    if normalize:
        # Q = np.array([Q[neigh] - min_Q if neigh in neighbors else 0 for neigh in range(len(Q))])
        # Q = Q / (max_Q - min_Q)
        Q = [Q[neigh] - max_Q if neigh in neighbors else 0 for neigh in range(len(Q))]
        Q = [np.exp(Q[neigh]) if neigh in neighbors else 0 for neigh in range(len(Q))]
    if softmax:
        if not normalize:
            # Subtract the max before exp for numerical stability.
            Q = [Q[neigh] - max_Q if neigh in neighbors else 0 for neigh in range(len(Q))]
            Q = [np.exp(Q[neigh]) if neigh in neighbors else 0 for neigh in range(len(Q))]
    if filter:
        # Drop neighbors scoring worse than this agent itself.
        Q = [Q[neigh] if Q[neigh] >= Q[agent_id] else 0 for neigh in range(len(Q))]
    Q[agent_id] *= alpha[agent_id]
    sum_Q = sum(Q)
    Weight = Q / sum_Q
    # in case sum is not 1
    Weight[agent_id] = 1 - sum(Weight[:agent_id]) - sum(Weight[agent_id + 1:])
    print("agent %d, actor weight, loss" % agent_id, Weight, Accumu_Q_actor[agent_id, :])
    # Weighted sum of neighbor parameters per key.
    aggr_state_dict = {}
    for key in keys:
        # temp_state_dict = [ActorDict[key][i] * Weight[i] * len(neighbors) for i in neighbors]
        # aggr_state_dict[key] = torch.mean(torch.stack(temp_state_dict), 0)
        temp_state_dict = [ActorDict[key][i] * Weight[i] for i in neighbors]
        aggr_state_dict[key] = torch.sum(torch.stack(temp_state_dict), 0)
    # filtering
    # aggr_actor = deepcopy(Model_actor[agent_id])
    # aggr_actor.load_state_dict(aggr_state_dict)
    # pred_a1 = aggr_actor(s1)
    # Q_actor = Model_critic[agent_id].Q1(s1, pred_a1).mean()
    # if Q_actor > Accumu_Q_actor[agent_id, agent_id]:
    #     print("agent %d, return aggregate model" % agent_id)
    #     return aggr_state_dict
    # else:
    #     return Model_actor[agent_id].state_dict()
    return aggr_state_dict
def critic_rule(agent_id, policy, Model_actor, Model_critic, Model_critic_2, Model_target_critic, Model_target_critic_2, ram, keys, CriticDict, Critic2Dict, neighbors, alpha, Accumu_loss_critic, filter, softmax=False):
    """Consensus rule for aggregating neighbors' critic parameters.

    Each neighbor's critic is scored by its TD loss on a batch sampled from
    this agent's replay buffer; lower loss yields a larger aggregation weight
    (weights are inverse losses, normalized to sum to 1).  With ``filter``
    set, only neighbors whose loss is no worse than the agent's own are kept.
    Returns the weighted average of the neighbors' critic state dicts — a
    pair of state dicts for SAC, which maintains two critics.

    NOTE(review): ``filter`` shadows the builtin of the same name.
    NOTE(review): the "PPO" branch assigns no ``loss_critic`` and would raise
    NameError at the accumulation line — confirm PPO never reaches this rule.
    """
    random_batch_size = 256   # transitions used to evaluate each neighbor's critic
    GAMMA = 0.99              # discount factor of the TD target
    gamma = 1                 # accumulation rate; with gamma == 1 the running
                              # average below reduces to plain replacement
    s1, a1, s2, r1, not_done = ram.sample(random_batch_size)
    if policy == "SAC":
        # SAC critics below work with 1-D reward/mask tensors
        r1, not_done = r1.squeeze(1), not_done.squeeze(1)
    for neigh in neighbors:
        # Use target actor exploitation policy here for loss evaluation
        if policy == "TD3":
            a2_k = Model_actor[agent_id](s2).detach()
            target_Q1, target_Q2 = Model_target_critic[agent_id].forward(s2, a2_k)
            # clipped double-Q target
            target_Q = torch.min(target_Q1, target_Q2)
            # y_exp = r + gamma*Q'( s2, pi'(s2))
            y_expected = r1 + not_done * GAMMA * target_Q
            # y_pred = Q( s1, a1)
            y_predicted_1, y_predicted_2 = Model_critic[neigh].forward(s1, a1)
            # compute critic loss, and update the critic
            loss_critic = F.mse_loss(y_predicted_1, y_expected) + F.mse_loss(y_predicted_2, y_expected)
        elif policy == "DDPG":
            a2_k = Model_actor[agent_id](s2).detach()
            target_Q = Model_target_critic[agent_id].forward(s2, a2_k)
            # y_exp = r + gamma*Q'( s2, pi'(s2))
            y_expected = r1 + not_done * GAMMA * target_Q
            # y_pred = Q( s1, a1)
            y_predicted = Model_critic[neigh].forward(s1, a1)
            # compute critic loss, and update the critic
            loss_critic = F.mse_loss(y_predicted, y_expected)
        elif policy == "PPO":
            pass
        elif policy == "SAC":
            para = 0.2  # entropy temperature used in the soft value backup
            # Prediction π(a|s), logπ(a|s), π(a'|s'), logπ(a'|s'), Q1(s,a), Q2(s,a)
            _, next_pi, next_log_pi = Model_actor[agent_id](s2)
            q1 = Model_critic[neigh](s1, a1).squeeze(1)
            q2 = Model_critic_2[neigh](s1, a1).squeeze(1)
            min_q_next_pi = torch.min(Model_target_critic[agent_id](s2, next_pi),
                                      Model_target_critic_2[agent_id](s2, next_pi)).squeeze(1)
            # soft state value: min target Q minus entropy term
            v_backup = min_q_next_pi - para * next_log_pi
            q_backup = r1 + GAMMA * not_done * v_backup
            qf1_loss = F.mse_loss(q1, q_backup.detach())
            qf2_loss = F.mse_loss(q2, q_backup.detach())
            loss_critic = qf1_loss + qf2_loss
        else:
            raise NameError("Policy name is not defined!")
        # running average of the neighbor's loss (plain replacement at gamma == 1)
        Accumu_loss_critic[agent_id, neigh] = (1 - gamma) * Accumu_loss_critic[agent_id, neigh] + gamma * loss_critic
    loss = deepcopy(Accumu_loss_critic[agent_id, :])
    # if normalize:
    #     min_Q = np.min(loss)
    #     max_Q = np.max(loss)
    #     loss = (loss - min_Q) / (max_Q - min_Q)
    reversed_Loss = np.zeros(len(Model_actor))
    for neigh in neighbors:
        if filter:
            # keep only neighbors at least as good as the agent itself
            if Accumu_loss_critic[agent_id, neigh] <= Accumu_loss_critic[agent_id, agent_id]:
                reversed_Loss[neigh] = 1 / loss[neigh]
        else:
            # if softmax:
            #     reversed_Loss[neigh] = np.exp(-loss[neigh]) # 1 / np.exp(loss[neigh])
            # else:
            reversed_Loss[neigh] = 1 / loss[neigh]
    # extra self-weight (trust in the agent's own critic)
    reversed_Loss[agent_id] *= alpha[agent_id]
    sum_reversedLoss = sum(reversed_Loss)
    # Weight = np.zeros(numAgent)
    # for neigh in range(0, numAgent):
    Weight = reversed_Loss / sum_reversedLoss
    # in case sum is not 1
    Weight[agent_id] = 1 - sum(Weight[:agent_id]) - sum(Weight[agent_id + 1:])
    print("agent %d, critic weight, loss, reversedloss" % agent_id, Weight, loss, reversed_Loss)
    # weight = torch.from_numpy(weight)
    # weighted average of the neighbors' critic parameters, key by key
    aggr_state_dict = {}
    for key in keys:
        # temp_state_dict = [ActorDict[key][i] * Weight[i] * len(neighbors) for i in neighbors]
        # aggr_state_dict[key] = torch.mean(torch.stack(temp_state_dict), 0)
        temp_state_dict = [CriticDict[key][i] * Weight[i] for i in neighbors]
        aggr_state_dict[key] = torch.sum(torch.stack(temp_state_dict), 0)
    if policy == "SAC":
        # SAC keeps a second critic; aggregate it with the same weights
        aggr_state_dict_2 = {}
        for key in keys:
            # temp_state_dict = [ActorDict[key][i] * Weight[i] * len(neighbors) for i in neighbors]
            # aggr_state_dict[key] = torch.mean(torch.stack(temp_state_dict), 0)
            temp_state_dict_2 = [Critic2Dict[key][i] * Weight[i] for i in neighbors]
            aggr_state_dict_2[key] = torch.sum(torch.stack(temp_state_dict_2), 0)
        return aggr_state_dict, aggr_state_dict_2
    return aggr_state_dict
|
cbhowmic/resilient-adaptive-RL
|
aggregateMethods.py
|
aggregateMethods.py
|
py
| 9,022 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28160427846
|
import asyncio
from time import time
from httpx import RequestError
from loguru import logger
from src.client import IteriosApiClient
from src.exceptions import FailedResponseError
from src.helpers import (
get_random_country, get_random_dep_city, get_search_start_payload, get_timing_results,
setup_logger,
)
from src.settings import settings
async def start_search(index: int):
    """Run one search request against the API and time it.

    Returns an ``(index, seconds)`` pair; ``seconds`` is ``None`` when the
    request failed with a known API or transport error.
    """
    logger.info(f'Start search #{index}')
    started_at = time()
    try:
        async with IteriosApiClient() as api:
            target_country = get_random_country()
            departure = get_random_dep_city()
            reference = await api.get_main_reference(
                country_iso=target_country['iso_code'],
                dep_city_id=departure['id'],
            )
            body = get_search_start_payload(
                country_id=target_country['id'],
                dep_city_id=departure['id'],
                main_reference=reference,
            )
            await api.start_search(body)
    except (FailedResponseError, RequestError) as error:
        logger.error(f'Fail search #{index} ({repr(error)})')
        return index, None
    duration = round(time() - started_at, 2)
    logger.info(f'Finish search #{index} in {duration}s')
    return index, duration
async def main():
    """Fire all search requests concurrently and log per-request timings."""
    logger.info(f'Test with {settings.request_count} requests')
    pending = [
        start_search(number)
        for number in range(1, settings.request_count + 1)
    ]
    timings = await asyncio.gather(*pending)
    previous = None
    for index, elapsed in timings:
        if not elapsed:
            logger.info(f'#{index} - fail')
            continue
        if previous:
            # delta against the previous successful request
            delta = round(elapsed - previous, 2)
            logger.info(f'#{index} - {elapsed}s ({delta:+}s)')
        else:
            logger.info(f'#{index} - {elapsed}s')
        previous = elapsed
    results = get_timing_results([pair[1] for pair in timings])
    logger.info(f"Results: min({results['min']}s), max({results['max']}s), average({results['average']}s), fails({results['failed']}/{results['total']})")  # noqa: E501


if __name__ == '__main__':
    setup_logger()
    asyncio.run(main())
|
qwanysh/iterios-stress
|
start_search.py
|
start_search.py
|
py
| 2,281 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27537219474
|
import objreader
def hexDig2hexStr(hexDig, length):
    """Convert a hex literal string such as ``'0xFFFFF6'`` to an upper-case
    hex string zero-padded to ``length`` characters (e.g. ``'FFFFF6'``).

    The two-character ``0x``/``0X`` prefix is dropped after upper-casing.
    """
    hexStr = hexDig.upper()[2:]  # '0xFFFFF6' => 'FFFFF6'
    # zfill replaces the original manual left-padding loop; a no-op when the
    # string is already `length` characters or longer, exactly as before.
    return hexStr.zfill(length)
# Hex String => Dec Int Digit
def hexStr2decDig(hexStr, bits):
    """Interpret ``hexStr`` as a ``bits``-wide two's-complement value and
    return the signed integer it encodes (e.g. ``'0xFFFFF6'``, 24 -> -10)."""
    value = int(hexStr, 16)
    sign_bit = 1 << (bits - 1)
    if value & sign_bit:
        # Sign bit set: wrap down into the negative range by subtracting 2**bits.
        value -= sign_bit << 1
    return value
# Dec Int Digit => Hex Int Digit
def decDig2hexDig(decDig, bits):
    """Encode a signed integer as its ``bits``-wide two's-complement value
    and return Python's ``hex()`` form.

    e.g. hex[(-10 + 256) % 256] = 0xF6
    e.g. hex[( 10 + 256) % 256] = 0x0A
    """
    modulus = 1 << bits
    return hex((decDig + modulus) % modulus)
# Text Record
# Col. 2-7: Starting address for object code in this record
# Col. 8-9: Length of object code in this record in bytes
# e.g. 0A: 10 bytes (20 half-bytes)
# Col.10-69: Object code
def processTRecord(Tline, CSADDR, PROGADDR, MemoryContent):
    """Load the object code of one Text record into ``MemoryContent``.

    Record layout: col 2-7 starting address, col 8-9 length in bytes,
    col 10-69 object code.  ``MemoryContent`` holds one hex digit
    (half-byte) per slot, hence addresses are doubled before indexing.
    """
    # int(..., 16) parses the field directly; the original wrapped it in a
    # redundant f'0x{...}' prefix first.
    # Relocate: record address + control-section base - program load address.
    TADDR = int(Tline[1:7], 16) + CSADDR - PROGADDR
    TADDR *= 2  # one byte occupies two hex-digit slots, e.g. 1011 0110 => 'B', '6'
    length = int(Tline[7:9], 16)  # record length in bytes, e.g. 0A = 10 bytes
    for i in range(0, length * 2):  # copy half-bytes into memory in order
        MemoryContent[TADDR] = Tline[9 + i]
        TADDR += 1
# Modification Record
# Col. 2-7: Starting location of the address field to be modified, relative to the beginning of the program
# Col. 8-9: Length of the address field to be modified (half-bytes)
# Col. 10: Modification flag (+ or -)
# Col. 11-16: External symbol whose value is to be added to or subtracted from the indicated field
def processMRecord(Mline, CSADDR, PROGADDR, MemoryContent, ESTAB):
    """Apply one Modification record: add or subtract an external symbol's
    value (looked up in ``ESTAB``) to the address field stored in
    ``MemoryContent``.

    Record layout: col 2-7 field start address, col 8-9 field length in
    half-bytes, col 10 flag (+/-), col 11-16 external symbol name.
    """
    MADDR = int(f'0x{Mline[1:7]}', 16) # parse the start address from string to int
    MADDR += CSADDR                    # relocate by the control-section base...
    MADDR -= PROGADDR                  # ...relative to the program load address
    MADDR *= 2 # one byte is stored as two hex digits, so double the address
               # e.g. 1011 0110 => B6
    length = int(f'0x{Mline[7:9]}', 16) # field length in half-bytes
    if (length == 5): # "05" means that besides the first byte (OPCODE + n,i)
        MADDR += 1    # we must also skip the second half-byte (x,b,p,e)
                      # e.g. "77100004": skip "77" and "1"; the address field is "00004"
    # FFFFF6 = ['F', 'F', 'F', 'F', 'F', '6']
    current = "".join(MemoryContent)[MADDR:MADDR + length]
    # -10 = hexStr2decDig(0xFFFFF6, 24)
    decDig = hexStr2decDig(f'0x{current}', length * 4)
    # Mline ends with '\n', so the symbol token spans columns 10 .. len(Mline)-1
    key = Mline[10:len(Mline)-1]
    if Mline[9] == '+':
        decDig += ESTAB[key]
    else:
        decDig -= ESTAB[key]
    modifiedHexStr = hexDig2hexStr(decDig2hexDig(decDig, length * 4), length)
    for i in range(0, length): # write the modified field back to MemoryContent, digit by digit
        MemoryContent[MADDR] = modifiedHexStr[i]
        MADDR += 1
def execute(ESTAB, PROGADDR, PROG, MemoryContent):
    """Pass 2 of the linking loader: walk every object program in ``PROG``,
    load its Text records into ``MemoryContent`` and apply its Modification
    records using the external symbol table ``ESTAB``."""
    CSADDR = PROGADDR  # base address of the current control section
    for prog_name in PROG:
        records = objreader.readOBJFiles(prog_name)
        # Header record layout:
        #   col 2-7  program name
        #   col 8-13 starting address (hexadecimal)
        #   col 14-19 length of the object program in bytes
        header = objreader.readRecordWithoutSpace(records[0])
        section_length = int(header[12:18], 16)
        for record in records[1:]:
            kind = record[0]
            if kind == 'T':
                processTRecord(record, CSADDR, PROGADDR, MemoryContent)
            elif kind == 'M':
                processMRecord(record, CSADDR, PROGADDR, MemoryContent, ESTAB)
        # next control section starts right after this one
        CSADDR += section_length
|
Yellow-Shadow/SICXE
|
LinkingLoader2021/LinkingLoader/pass2.py
|
pass2.py
|
py
| 4,795 |
python
|
en
|
code
| 0 |
github-code
|
6
|
53879462
|
from zohocrmsdk.src.com.zoho.api.authenticator import OAuthToken
from zohocrmsdk.src.com.zoho.crm.api import Initializer
from zohocrmsdk.src.com.zoho.crm.api.business_hours import BusinessHoursOperations, BodyWrapper, BusinessHours, \
BreakHoursCustomTiming, ActionWrapper, BusinessHoursCreated, APIException
from zohocrmsdk.src.com.zoho.crm.api.dc import USDataCenter
from zohocrmsdk.src.com.zoho.crm.api.util import Choice
class UpdateBusinessHours(object):
    """Zoho CRM sample: update an existing Business Hours record and print
    the API outcome."""

    @staticmethod
    def initialize():
        """Initialise the Zoho SDK against the US production data centre."""
        environment = USDataCenter.PRODUCTION()
        token = OAuthToken(client_id="clientID", client_secret="clientSecret", grant_token="grantToken")
        Initializer.initialize(environment, token)

    @staticmethod
    def _print_outcome(outcome):
        """Print status/code/details/message of one response object.

        ``BusinessHoursCreated`` and ``APIException`` expose identical
        accessors, so this single helper replaces the three duplicated
        print blocks of the original sample.
        """
        print("Status: " + outcome.get_status().get_value())
        print("Code: " + outcome.get_code().get_value())
        print("Details")
        details = outcome.get_details()
        for key, value in details.items():
            print(key + ' : ' + str(value))
        print("Message: " + outcome.get_message().get_value())

    @staticmethod
    def update_business_hours():
        """Build the business-hours payload, send the update and print the result."""
        business_hours_operations = BusinessHoursOperations()
        request = BodyWrapper()
        business_hours = BusinessHours()
        business_days = [Choice("Monday")]
        business_hours.set_business_days(business_days)
        business_hours.set_week_starts_on(Choice("Monday"))
        business_hours.set_same_as_everyday(False)
        business_hours.set_id(440248001017425)
        # Per-day custom timings (used because the record type is "custom").
        bhct = BreakHoursCustomTiming()
        bhct.set_days(Choice("Monday"))
        business_timings = ["09:00", "17:00"]
        bhct.set_business_timing(business_timings)
        #
        bhct1 = BreakHoursCustomTiming()
        bhct1.set_days(Choice("Tuesday"))
        business_timing1 = ["10:30", "17:00"]
        bhct1.set_business_timing(business_timing1)
        #
        bhct2 = BreakHoursCustomTiming()
        bhct2.set_days(Choice("Wednesday"))
        business_timing2 = ["10:30", "17:00"]
        bhct2.set_business_timing(business_timing2)
        #
        custom_timing = [bhct, bhct1, bhct2]
        business_hours.set_custom_timing(custom_timing)
        # when same_as_everyday is true
        daily_timing = [Choice("10:00"), Choice("11:00")]
        business_hours.set_daily_timing(daily_timing)
        #
        business_hours.set_type(Choice("custom"))
        request.set_business_hours(business_hours)
        response = business_hours_operations.update_business_hours(request)
        if response is not None:
            print('Status Code: ' + str(response.get_status_code()))
            response_object = response.get_object()
            if response_object is not None:
                if isinstance(response_object, ActionWrapper):
                    action_response = response_object.get_business_hours()
                    # Both success and failure wrappers print the same fields.
                    if isinstance(action_response, (BusinessHoursCreated, APIException)):
                        UpdateBusinessHours._print_outcome(action_response)
                elif isinstance(response_object, APIException):
                    UpdateBusinessHours._print_outcome(response_object)
# Run the sample at import/script time: authenticate, then push the update.
UpdateBusinessHours.initialize()
UpdateBusinessHours.update_business_hours()
|
zoho/zohocrm-python-sdk-5.0
|
versions/1.0.0/samples/business_hours/UpdateBusinessHours.py
|
UpdateBusinessHours.py
|
py
| 4,189 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25538067967
|
import streamlit as st
import pandas as pd
import plotly.express as px
import matplotlib.pyplot as plt
import seaborn as sns

sns.set_style("darkgrid")

# Inline CSS that hides Streamlit's default footer, menu and header chrome.
hide_st_style = """
<style>
footer {visibility: hidden;}
#MainMenu {visibility: hidden;}
header {visibility: hidden;}
#stException {visibility: hidden;}
</style>
"""
st.markdown(hide_st_style, unsafe_allow_html=True)

# Project-local modules with the data-prep and query helpers.
import preprocessor, helper

#df2 = pd.read_csv("athlete_events.csv")
df = pd.read_csv('athlete_events.csv')
region_df = pd.read_csv('noc_regions.csv')
process_data = preprocessor.preprocess(df, region_df)

# Sidebar: logo, title, page selector and credit line.
st.sidebar.image("https://i.ibb.co/mDH38WV/olympics-logo.png")
st.sidebar.title("Olympics Analysis")
user_menu = st.sidebar.radio(
    'Select an option ',
    ('Overall Analysis','Medal Tally','country-wise-analysis','athlete-wise-analysis' )
)
st.sidebar.write(' ##### Developed by Somnath Paul')

# default home page display
# if user_menu radio button is
if user_menu == 'Medal Tally':
    # year & country
    year, country = helper.country_year_list(df,region_df)
    # check box for year selection
    selected_year = st.sidebar.selectbox("select year", year)
    selected_country = st.sidebar.selectbox("select country", country)
    # fetch dataframe for selected options
    medal_df, title = helper.fetch_medal_tally(selected_year, selected_country, df, region_df,)
    # display dataframe
    st.title(title)
    st.dataframe(medal_df)
elif user_menu == 'Overall Analysis':
    # summary counts computed once by the helper module
    cities, len_cities, country, len_countries, events, len_of_events, sports, len_of_sports, year, len_of_year, athletes, len_of_athletes = helper.overall_analysis(df, region_df)
    st.title("STATISTICS :")
    # first column
    col1, col2= st.columns(2)
    with col1:
        st.write(""" ### Hosted Counties""")
        st.title(len_cities)
    with col2:
        st.write(""" ### Counties Participated """)
        st.title(len_countries)
    # second columns
    col1, col2, col3, col4 = st.columns(4)
    with col1:
        st.write("""### Sports""")
        st.title(len_of_sports)
    with col2:
        st.write(""" ### Events""")
        st.title(len_of_events)
    with col3:
        st.write(""" ### Editions""")
        st.title(len_of_year)
    with col4:
        st.write(""" ### Athletes""")
        st.title(len_of_athletes)
    # graph 1
    # number of countries participated
    df_10 = helper.graph_1(df, region_df)
    fig = px.line(df_10, x="Year", y="Count")
    st.title("Countries participated in each year")
    st.plotly_chart(fig)
    # graph 2
    # number of sports played in each year
    df_11 = helper.graph_2(df, region_df)
    fig = px.line(df_11, x="Year", y="Count")
    st.title("Sports played in each year")
    st.plotly_chart(fig)
    # graph 3
    # number of events played in each year
    # events has many under one sport
    df_12 = helper.graph_3(df, region_df)
    fig = px.line(df_12, x="Year", y="Count")
    st.title("Events played in each year")
    st.plotly_chart(fig)
    # graph 4 : heatmap
    x_1 = helper.graph_4(df, region_df)
    fig = px.imshow(x_1)
    st.title("Over the year how many events played / sports")
    st.plotly_chart(fig)
    # table 2:
    top_players = helper.table_2(df, region_df)
    st.title("Top 10 player won medals")
    st.dataframe(top_players.head(10))
elif user_menu == 'country-wise-analysis':
    countries = helper.countries(df, region_df)
    countries.insert(0, 'Not Selected')
    options = st.selectbox("Select country",countries)
    if options == 'Not Selected':
        st.error('Please select country')
    else:
        df_13= helper.country_wise_analysis(df, region_df, options)
        # line chart
        fig = px.line(df_13, x='Year', y='Medal')
        st.subheader(f'Number of medals won by {options} over the year')
        st.plotly_chart(fig)
        df_20 = helper.countries_good_at(df, region_df, options)
        st.subheader(f'Medals won by {options} under different sports')
        st.dataframe(df_20)
        df_30 = helper.player_good_at_by_countries(df, region_df, options)
        st.subheader(f'Medals won by players for {options}')
        st.dataframe(df_30)
else:
    # athletics wise analysis
    x1, x2, x3, x4 = helper.pdf_histogram(process_data)
    # histogram (PDF) of age in plotly
    # NOTE(review): mid-script import; only loaded when this branch runs
    import plotly.figure_factory as ff
    gl=['Gold player age', 'Silver player age', 'Bronze player age', 'Overall player age']
    fig = ff.create_distplot([x1, x2, x3, x4], show_hist=False, show_rug=False, group_labels=gl)
    st.title("Athlete Wise Analysis")
    st.write(""" #### Age - Medals wise analysis :""")
    st.plotly_chart(fig)
    st.write(""" #### Player who won gold [ weight - height ]:""")
    height_gold, weight_gold, height_silver,weight_silver, height_bronze,weight_bronze = helper.Player_who_won_gold(process_data)
    plt.scatter(height_gold,weight_gold,color='gold')
    plt.scatter(height_silver,weight_silver ,color='lightsteelblue')
    plt.scatter(height_bronze,weight_bronze ,color='lavender')
    plt.legend(["Gold" , "Silver", "Bronze"], bbox_to_anchor = (1 , 1))
    st.pyplot(plt)
    # Men vs Women participation over the years plot
    df_73, df_74 = helper.Men_Women_participation(process_data)
    st.write("### Men vs Women participation over the years")
    plt.figure(figsize=(8,5))
    plt.plot( df_73['Year'], df_73['Sex'], color='olive')
    plt.plot( df_74['Year'], df_74['Sex'])
    plt.legend(["Male" , "Female"], bbox_to_anchor = (1 , 1))
    st.pyplot(plt)
    # athletics age sport wise analysis
    sports = process_data['Sport'].unique().tolist()
    sports.insert(0, 'Not Selected')
    sport = st.selectbox("Select a sport",sports)
    if sport == 'Not Selected':
        st.error('Please select sport')
    else:
        y1 = helper.age_histogram_sports(process_data, sport)
        # labels
        gl=[sport]
        st.write(""" #### Age - sport wise analysis :""")
        fig = ff.create_distplot([y1], show_hist=False, show_rug=False, group_labels=gl)
        st.plotly_chart(fig)
|
Somnathpaul/Olympic-data-analysis
|
main.py
|
main.py
|
py
| 6,253 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31238312514
|
# List comprehension with a condition.
# Declare the source list of fruits.
array = ["사과", "자두", "초콜릿", "바나나", "체리"]
# Keep every element of `array` that is not "초콜릿" (chocolate).
# A comprehension with an if-clause has the general form:
#     result = [expression for item in iterable if condition]
output = [item for item in array if item != "초콜릿"]
# Show the filtered list.
print(output)
|
DreamisSleep/pySelf
|
chap4/array_comprehensions.py
|
array_comprehensions.py
|
py
| 555 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
37213848810
|
from collections import Counter, defaultdict
import pandas as pd
import os
import csv
import json
# get phoneme features from PHOIBLE
# note the path is resolved-phoible.csv that is corrected for mismatches between phonemes in PHOIBLE and the XPF Corpus
phoneme_features = pd.read_csv("Data/resolved-phoible.csv")
# drop identifying/bookkeeping columns, keeping only the phoneme and its features
phoneme_features.drop(["InventoryID", "Glottocode","ISO6393","LanguageName","SpecificDialect","GlyphID","Allophones","Marginal","Source"], axis="columns", inplace=True)
phoneme_features = phoneme_features.rename(columns={'periodicGlottalSource':'voice'})
# list of all feature names in PHOIBLE table
features = phoneme_features.copy()
features.drop(["Phoneme","voice"],axis="columns", inplace=True)
features = features.columns.values.tolist()
# global variables (reset per language in main())
to_feat = {} # dictionary of phoneme: feature representation
phon_model = {} # dictionary of feature representation: {possible phonemes: # of occurrences}
def change_to_feat(phoneme, previous):
    '''
    Takes in a character string representing the IPA form of the phoneme and returns a feature representation of the phoneme based on PHOIBLE features.
    Only obstruents (sonorant == '-') are converted; other phonemes are returned unchanged.
    Input: phoneme - character string representing the current phoneme
           previous - feature representation (or raw IPA) of the phoneme that precedes it; '' at word start
                      (the original docstring called this parameter "next", which did not match the signature)
    Output: feature representation of the phoneme - character string ('value/feature1|value/feature2|etc...')
            each value/feature pair is joined with '/' while separate pairs are joined with '|'
            can split the string representation using these characters
    Side effect: counts the (previous, phoneme) occurrence in the module-level phon_model.
    '''
    global to_feat
    global phon_model
    # create and add feature representation to to_feat dictionary if not already in it
    if to_feat.get(phoneme) is None:
        row = phoneme_features[phoneme_features["Phoneme"] == phoneme]
        feat = []
        # creates feature representations for only obstruents
        if not row.empty:
            if row["sonorant"].values.tolist()[0] == '-':
                for f in features:
                    t = row[f].values.tolist()[0]
                    feat.append(t+'/'+f)
                feat = '|'.join(feat)
                to_feat[phoneme] = feat
            else:
                # sonorants keep their raw IPA form
                to_feat[phoneme] = phoneme
        else:
            # phoneme not found in PHOIBLE: fall back to the raw IPA form
            to_feat[phoneme] = phoneme
    # get feature (cached)
    feat = to_feat.get(phoneme)
    if previous != '':
        # context
        con = " ".join([previous, feat])
        # add feature to phoneme model if it doesn't already exist
        if phon_model.get(con) is None:
            phon_model[con] = defaultdict(int)
        # increment occurrence in phoneme model
        phon_model[con][phoneme] += 1
    return feat
def nphone_model(wordseglist, n=4, wordlen=8):
    '''
    Create an n-gram model over feature-based contexts for the given word list.
    Params:
        - wordseglist: a list of words, each a space-separated string of IPA segments,
          such as ["b a", "d o"]
        - n: Number of preceding segments in context
        - wordlen: Maximum length of words to use, including the word-initial and word-final tokens
    Returns:
        - model: A dictionary mapping a context string (the feature representations of
          the n preceding segments, joined by spaces) to a dictionary whose keys are
          the next phoneme (or the word-final token ']_w') and whose values are counts.
          NOTE(review): the original docstring described three separate CV / consonant /
          vowel models; only this single model is actually built and returned.
    '''
    model = {}
    prev_context = []
    for word in wordseglist: # each word is a space-separated string of segments
        prev_context = ['[_w'] # start of word
        prev_phon = {}  # maps a context window (joined string) to the segment before it
        # don't use words that aren't perfectly translated to IPA
        if '@' in word.split(" "):
            continue
        # don't use words that aren't the same length as generated words
        # n - 1 because [_w is included in generated words
        # wordlen - 2 because both [_w and ]_w are included in generated words
        if len(word.split(" ")) < (n - 1) or len(word.split(" ")) > (wordlen - 2):
            continue
        # attach length marks to their base segment
        word = word.replace(" ː", "ː")
        prev_p = ''
        str_context = ''
        for phoneme in word.split(" "):
            if len(prev_context) == n:
                # full window: prepend the segment before the window so each
                # element can be featurized relative to its predecessor
                prev_context.insert(0,prev_p)
                f = []
                for i in range(len(prev_context)-1):
                    f.append(change_to_feat(prev_context[i+1],prev_context[i]))
                #con.extend(prev_context)
                # if prev_context[0] == "[_w":
                #     f = ['[_w']
                #     for i in range(len(prev_context)-1):
                #         f.append(change_to_feat(prev_context[i+1],prev_context[i]))
                # else:
                #     con = [prev_phon[" ".join(prev_context)]]
                #     con.extend(prev_context)
                #     f = []
                #     for i in range(len(prev_context)-1):
                #         f.append(change_to_feat(prev_context[i+1],prev_context[i]))
                str_context = " ".join(f)
                if model.get(str_context) is None:
                    model[str_context] = defaultdict(int)
                model[str_context][phoneme] += 1
                # slide the window: drop the prepended predecessor and the
                # earliest segment, remembering the new predecessor
                prev_context.pop(0)
                prev_p = prev_context[0]
                prev_context.pop(0) # remove earliest segment from context
            # update context
            prev_context.append(phoneme)
            if len(prev_context) == n:
                prev_phon[" ".join(prev_context)] = prev_p
        # add word-final context once you've reached the end of the word
        # remove voicing information at end of the word
        if len(prev_context) >= n:
            f = []
            for i in range(len(prev_context)):
                if i==0:
                    f.append(change_to_feat(prev_context[i],prev_phon[" ".join(prev_context)]))
                else:
                    f.append(change_to_feat(prev_context[i],prev_context[i-1]))
            str_context = " ".join(f)
            if model.get(str_context) is None:
                model[str_context] = defaultdict(int)
            model[str_context][']_w'] += 1
    return model
def main():
    """Build and save per-language n-gram and phoneme models.

    Reads the word list produced by translate04.py, groups words by language
    code, fits the feature-based n-gram model for each language, writes three
    JSON artifacts per language, and finally writes a TSV listing every
    language code processed (one code per row).
    """
    global to_feat
    global phon_model
    word_lists = []
    lang_codes = []
    identity = '5000_3'  ##TODO: change this depending on inputs to translate04.py
    f_name = "Data/word_list" + identity + ".tsv"
    # READ IN THE WORD LIST
    # (with-block closes the file; the original opened it and never closed it)
    with open(f_name) as tsv_file:
        read_tsv = csv.reader(tsv_file, delimiter="\t")
        for line in read_tsv:
            line[1] = line[1].strip('\n')
            word_lists.append(line)
    # SPLIT LIST PER LANGUAGE (skip the header row)
    word_lists = word_lists[1:]
    split_list = defaultdict(list)
    for row in word_lists:
        # row[0] is the language code, row[1] the segmented word
        split_list[row[0]].append(row[1])
    # output directory for all per-language artifacts
    outfile = "./Data/utf8_ngram_models/"
    if not os.path.exists(outfile):
        os.mkdir(outfile)
    # GO THROUGH EACH LANGUAGE (can adjust the word length here)
    for lang in split_list:
        print(lang)
        lang_codes.append(lang)
        model = nphone_model(split_list[lang], wordlen=10)
        # sanity check: every context should consist of exactly n (= 4) segments
        for key in model:
            if len(key.split(" ")) != 4:
                print('oh no :(')
        # save output model
        with open(outfile + lang + "_model.json", 'w+', encoding='utf8') as fout:
            json.dump(model, fout, ensure_ascii=False)
        # CHANGE phon_model from # occurrences to probabilities
        for feat in phon_model:
            total = sum(phon_model[feat].values(), 0.0)
            phon_model[feat] = {k: v / total for k, v in phon_model[feat].items()}
        # save phon_model
        with open(outfile + lang + "_phon_model.json", 'w+', encoding='utf8') as fout:
            json.dump(phon_model, fout, ensure_ascii=False)
        # save feature conversion dict
        with open(outfile + lang + "_to_feat.json", 'w+', encoding='utf8') as fout:
            json.dump(to_feat, fout, ensure_ascii=False)
        # reset to_feat and phon_model after each language
        to_feat = {}
        phon_model = {}
    # save a list of all language codes used in this analysis
    # (one code per row; the original passed bare strings to writerows, which
    # made csv split every language code into one character per column)
    o_name = "Data/lang_codes" + identity + ".tsv"
    with open(o_name, 'w+', newline='') as f:
        write = csv.writer(f, delimiter="\t")
        write.writerows([code] for code in lang_codes)
    return None


if __name__ == "__main__":
    main()
|
daniela-wiepert/XPF-soft-constraints
|
FD/Code/ngram_model_fd.py
|
ngram_model_fd.py
|
py
| 9,512 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5355823795
|
# Row/column offsets of the four orthogonal neighbours (up, down, left, right).
R=[-1,1,0,0]
C=[0,0,-1,1]
from heapq import heappush,heappop
def dijkstra():
    """Cheapest weighted path cost from (0, 0) to (n-1, n-1) on an n x n grid.

    Relies on the module globals ``n``, ``mat`` (cell weights) and ``dis``
    (distances, pre-filled with a large sentinel) set up by the driver loop
    below.  NOTE(review): despite the name this is not classic Dijkstra —
    each sweep re-expands every discovered cell and repeats until no distance
    improves, i.e. Bellman-Ford-style repeated relaxation.
    """
    x=[(0,0)]                    # all cells discovered so far (may hold duplicates)
    dis[0][0]=mat[0][0]          # the path cost includes the starting cell itself
    while(True):
        boo=False                # did any distance improve during this sweep?
        h=[]
        # push every neighbour of every discovered cell with its tentative cost
        for i in x:
            a=i[0];b=i[1]
            for j in range(4):
                r=a+R[j]
                c=b+C[j]
                if(0<=r<n and 0<=c<n):
                    # tuple: (tentative cost, neighbour cell, via-cell)
                    heappush(h,(dis[a][b]+mat[r][c],(r,c),i))
        while(h):
            v=heappop(h)
            # recompute with the via-cell's *current* distance, which may have
            # improved since the tuple was pushed
            dis_2=dis[v[2][0]][v[2][1]]+mat[v[1][0]][v[1][1]]
            if(dis[v[1][0]][v[1][1]]>dis_2):
                boo=True
                dis[v[1][0]][v[1][1]]=dis_2
                x.append(v[1])
        # stop once a full sweep produced no improvement
        if(not boo):
            break
    return dis[-1][-1]
# Per test case: read n, then the n*n grid weights in row-major order,
# reshape them into an n x n matrix and run the shortest-path search.
for _ in range(int(input())):
    n=int(input())
    temp=[int(i) for i in input().split()]
    mat=[];dis=[]
    for i in range(0,n*n,n):
        mat.append(temp[i:i+n])
        dis.append([1e9]*n)   # distances start at "infinity"
    visited=[[False]*n for i in range(n)]   # NOTE(review): never used by dijkstra()
    ans=dijkstra()
    print(ans)
|
avikram553/Basics-of-Python
|
Graph Algo/Dijkstra_on_matrix.py
|
Dijkstra_on_matrix.py
|
py
| 1,018 |
python
|
en
|
code
| 0 |
github-code
|
6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.