seq_id (string, 7–11 chars) | text (string, 156–1.7M chars) | repo_name (string, 7–125 chars) | sub_path (string, 4–132 chars) | file_name (string, 4–77 chars) | file_ext (string, 6 classes) | file_size_in_byte (int64, 156–1.7M) | program_lang (string, 1 class) | lang (string, 38 classes) | doc_type (string, 1 class) | stars (int64, 0–24.2k, nulls allowed) | dataset (string, 1 class) | pt (string, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|---|---|
38431802866
|
import os
import random
train_percent=0.8
#base_root = r"C:\Users\29533\Desktop\szs_xc_0406-0408\rain_day_aug"
base_root = os.path.dirname(os.path.abspath(__file__))
print(base_root)
xml= base_root + "/Annotations/"# path to the Annotations folder
total_xml=os.listdir(xml)
num=len(total_xml)
tr=int(num*train_percent)
train=range(0,tr)
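# note: the file list is not shuffled, so the first 80% of the os.listdir order becomes the train split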
train_txt_path = base_root + "/train.txt"
val_txt_path = base_root + "/val.txt"
ftrain=open(train_txt_path,"w")# path where train.txt is written
ftest=open(val_txt_path,"w")# path to val.txt, same format as above
for i in range(num):
name=total_xml[i][:-4]+"\n"
if i in train:
ftrain.write(name)
else:
ftest.write(name)
ftrain.close()
ftest.close()
|
HelloSZS/Common-tools_FOR_Object-detection
|
1VOC划分训练集测试集.py
|
1VOC划分训练集测试集.py
|
py
| 728 |
python
|
en
|
code
| 1 |
github-code
|
6
|
11004600528
|
from typing import List
class Solution:
def largestSumOfAverages(self, A: List[int], K: int) -> float:
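# prefix sums p[i] = sum(A[:i]); dp[i] starts as the average of A[i:], and each of the
# K-1 extra passes below lets the suffix A[i:] take one more split at the best j > i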
n = len(A)
p = [0.0] * (n + 1)
for i in range(n):
p[i+1] = p[i]+A[i]
dp = [0.0] * n
for i in range(n):
dp[i] = (p[n] - p[i])/(n-i)
for k in range(K-1):
for i in range(n):
for j in range(i+1,n):
dp[i] = max(dp[i], dp[j] + (p[j] - p[i])/(j-i))
return dp[0]
print(Solution().largestSumOfAverages(
[9,1,2,3,9], 3
))
|
xixihaha1995/CS61B_SP19_SP20
|
temp/toy/python/813. Largest Sum of Averages.py
|
813. Largest Sum of Averages.py
|
py
| 555 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9910655539
|
from flask import Flask, render_template
import requests, json
NYTimes_API_KEY = 'ca470e1e91b15a82cc0d4350b08a3c0b:14:70189328'
app = Flask(__name__, static_folder='static', static_url_path='/static')
NYTimes_Search_URL = 'http://api.nytimes.com/svc/search/v2/articlesearch.json?q={0}+&api-key=' + NYTimes_API_KEY
def searchArticle(topic):
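# the Article Search API returns the matching articles under response.docs in the JSON payload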
r = requests.get(NYTimes_Search_URL.format(topic))
data = json.loads(r.text)
return data['response']['docs']
@app.route("/")
def urlRoute():
return render_template('index.html', article=searchArticle('Artificial Intelligence'))
if __name__ == "__main__":
app.run()
|
NYUHackDays/NYTimes-Python-Done
|
nytimes.py
|
nytimes.py
|
py
| 614 |
python
|
en
|
code
| 0 |
github-code
|
6
|
354470885
|
from math import log
import numpy as np
from util.PreprocessUtil import PreprocessUtil
from algos.BaseMM import BaseMM
class HMM(BaseMM):
prior_prob = None # dict, [n_state,]
transition_prob = None # dict of dict, [n_state, n_state]
emission_prob = None # dict of dict, [n_state, n_obs]
default_emission_prob = None
default_prior_prob = None
default_transition_prob = None
states = None
obs_vocab = None
def train(self, file_train):
"""
:param file_train: path file
"""
self.prior_prob = {}
self.transition_prob = {}
self.emission_prob = {}
self.obs_vocab = set([])
self.states = set([])
iter = PreprocessUtil.file_iter(file_train)
sent = iter.__next__()
while sent:
prev_state = None
for i, tokconll in enumerate(sent):
obs, _, state = tokconll.strip().split("\t")
self.obs_vocab.add(obs)
self.states.add(state)
# update prior_prob2
# if i == 0:
# if state not in self.prior_prob:
# self.prior_prob[state] = 0
# self.prior_prob[state] += 1
# update prior_prob
if state not in self.prior_prob:
self.prior_prob[state] = 0
self.prior_prob[state] += 1
# update transition_prob
if prev_state:
if prev_state not in self.transition_prob:
self.transition_prob[prev_state] = {}
if state not in self.transition_prob[prev_state]:
self.transition_prob[prev_state][state] = 0
self.transition_prob[prev_state][state] += 1
# update emission_prob
if state not in self.emission_prob:
self.emission_prob[state] = {}
if obs not in self.emission_prob[state]:
self.emission_prob[state][obs] = 0
self.emission_prob[state][obs] += 1
prev_state = state
sent = iter.__next__()
# finalize prior_prob
sum_ = sum(self.prior_prob.values())
for s in self.prior_prob:
self.prior_prob[s] /= sum_
self.default_prior_prob = 1/len(self.prior_prob)
# finalize transition_prob
for s1, d in self.transition_prob.items():
sum_s1 = sum(d.values())
for s2 in d:
d[s2] /= sum_s1
self.default_transition_prob = 1/len(self.prior_prob)
# finalize emission_prob
for s, d in self.emission_prob.items():
sum_s = sum(d.values())
for o in d:
d[o] /= sum_s
self.default_emission_prob = 1/len(self.obs_vocab)
def viterbi(self, token_list):
"""
:param token_list:
:return:
"""
# step 0
prev_step = {}
for state, prior in self.prior_prob.items():
emission_prob = self.default_emission_prob
if state in self.emission_prob and token_list[0] in self.emission_prob[state]:
emission_prob = self.emission_prob[state][token_list[0]]
prev_step[state] = log(prior) + log(emission_prob)
# iteration
for tok in token_list[1:]:
current_step = {}
for current_state in self.states:
paths2current_state = {}
for prev_path in prev_step:
prev_state = prev_path[-1] if isinstance(prev_path, tuple) else prev_path
newpath= tuple(list(prev_path) + [current_state]) if isinstance(prev_path, tuple) else (prev_path, current_state)
paths2current_state[newpath] = prev_step[prev_path] \
+ log(PreprocessUtil.get_prob_from_2ddict(self.transition_prob, prev_state, current_state, self.default_transition_prob)) \
+ log(PreprocessUtil.get_prob_from_2ddict(self.emission_prob, current_state, tok, self.default_emission_prob))
maxpath, maxval = PreprocessUtil.get_max_path_val(paths2current_state)
current_step[maxpath] = maxval
prev_step = current_step
max_final_path, max_final_val = PreprocessUtil.get_max_path_val(prev_step)
return list(max_final_path)
def generate_seq(self, seq_length, initial_state=None):
"""
:param seq_length:
:param initial_state:
:return:
"""
assert self.emission_prob is not None
assert self.transition_prob is not None
assert self.prior_prob is not None
observations = []
states = []
transitions = []
emissions = []
if initial_state:
prev_state = initial_state
else:
prior_prob_array = PreprocessUtil.prob_dict2array(self.states, self.prior_prob, self.default_prior_prob)
prev_state = np.random.choice(list(self.states), 1, p=prior_prob_array)[0] # sample the initial state
obs = np.random.choice(list(self.emission_prob[prev_state].keys()), 1,
p=PreprocessUtil.prob_dict2array(list(self.emission_prob[prev_state].keys()),
self.emission_prob[prev_state],
self.default_emission_prob))[0] # sample the first observation of the sequence
states.append(prev_state)
observations.append(obs)
transitions.append(self.prior_prob[prev_state])
emissions.append(self.emission_prob[prev_state][obs])
for i in range(1, seq_length):
# P(Zn+1)=P(Zn+1|Zn)P(Zn)
current_state = np.random.choice(list(self.transition_prob[prev_state].keys()), 1, p=PreprocessUtil.prob_dict2array(list(self.transition_prob[prev_state].keys()),
self.transition_prob[prev_state],
self.default_transition_prob))[0]
current_obs = np.random.choice(list(self.emission_prob[current_state].keys()), 1, p=PreprocessUtil.prob_dict2array(list(self.emission_prob[current_state].keys()),
self.emission_prob[current_state],
self.default_emission_prob))[0]
# P(Xn+1|Zn+1)
observations.append(current_obs)
states.append(current_state)
transitions.append(self.transition_prob[prev_state][current_state])
emissions.append(self.emission_prob[current_state][current_obs])
prev_state = current_state
return observations, states, transitions, emissions
|
hedwigi/statisticalSeqModels
|
algos/HMM.py
|
HMM.py
|
py
| 7,148 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73928041148
|
from pyvi import window
from pyvi.modes import normal
class Editor(object):
_command = None
active_tab = None
def __init__(self, tabs=None, config=None, normal=normal):
self.config = config
self.mode = self.normal = normal
self.count = None
if tabs is None:
tabs = self.tabs = [window.Tab(self)]
else:
tabs = self.tabs = list(tabs)
if tabs:
self.active_tab = tabs[0]
@property
def active_window(self):
return self.active_tab.active_window
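# key handling is delegated to the current mode object (normal mode by default)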
def keypress(self, keys):
return self.mode.keypress(self, keys)
|
Julian/PyVi
|
pyvi/editor.py
|
editor.py
|
py
| 635 |
python
|
en
|
code
| 11 |
github-code
|
6
|
730586622
|
from selenium import webdriver
from selenium.webdriver.common.by import By
chrome_driver_path = r"C:\Users\Tobiloba\development\chromedriver.exe"
driver = webdriver.Chrome(executable_path=chrome_driver_path)
#driver.get('https://www.amazon.com/dp/B0963P9QTM/ref=sbl_dpx_kitchen-electric-cookware_B08GC6PL3D_0')
#price = driver.find_element(By.CLASS_NAME, "a-price")
#print(price.text)
driver.get('https://www.python.org/')
#
# search = driver.find_element(By.NAME, 'q')
# bug_link = driver.find_element(By.XPATH, '//*[@id="site-map"]/div[2]/div/ul/li[3]/a')
# print(bug_link.text, bug_link.get_attribute('a'))
# print(search.tag_name)
#
# driver.find_elements(By.XPATH, '')
event_times = driver.find_elements(By.CSS_SELECTOR, '.event-widget time')
events = driver.find_elements(By.CSS_SELECTOR, '.event-widget a')
event_dict = {}
# for i in range(len(event_times)):
# for time in event_times:
# for event in events:
# event_dict[i] = f'{time.text}, {event.text}'
for n in range(len(event_times)):
event_dict[n] = {
'name': events[n].text,
'time': event_times[n].text
}
print(event_dict)
#driver.close()
driver.quit()
|
adecool/python100days
|
day-48/main.py
|
main.py
|
py
| 1,180 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21705466300
|
from os.path import basename
from glob import glob
from tqdm import tqdm
def main():
"""
Embed the singer name into the p16 field of the full-context label files.
"""
# specify the folder that contains the full-context label files
label_dir = input('label_dir: ').strip('"')
# collect every full-context label file (recursive search)
l = glob(f'{label_dir}/**/*.lab', recursive=True)
# embed the singer name into the p16 part of each label file
for path_label in tqdm(l):
singer = basename(path_label).split('__')[0]
with open(path_label, 'r') as fl:
s = fl.read()
s = s.replace(']xx/A:', f']{singer}/A:')
with open(path_label, 'w') as fl:
fl.write(s)
if __name__ == '__main__':
main()
|
oatsu-gh/nnsvs_mixed_db
|
recipe/00-svs-world/utils/set_singername_p16.py
|
set_singername_p16.py
|
py
| 763 |
python
|
ja
|
code
| 0 |
github-code
|
6
|
44248037473
|
import cv2
import numpy as np
import glob
import uuid
import caffe
import skimage.io
from util import histogram_equalization
from scipy.ndimage import zoom
from skimage.transform import resize
import random
#from project_face import project_face
from matplotlib import pyplot as plt
import dlib
from project_face import frontalizer
IMAGE_WIDTH = 32
IMAGE_HEIGHT = 32
class mouth_detector():
def __init__(self):
self.PATH_face_model = '../lib/shape_predictor_68_face_landmarks.dat'
self.md_face = dlib.shape_predictor(self.PATH_face_model)
self.fronter = frontalizer('../lib/ref3d.pkl')
self.face_det = dlib.get_frontal_face_detector() #HOG
def mouth_detect_single(self,image,isPath):
if isPath == True:
img = cv2.imread(image, cv2.IMREAD_UNCHANGED)
else:
img = image
img = cv2.resize(img, (300, 300), interpolation = cv2.INTER_CUBIC) #experimental
img = histogram_equalization(img)
facedets = self.face_det(img,1)
if len(facedets) > 0:
facedet_obj= facedets[0]
#cv2.rectangle(img, (facedet_obj.left(),facedet_obj.top()),(facedet_obj.right(),facedet_obj.bottom()),(0,255,0),4,0)
shape = self.md_face(img,facedet_obj)
p2d = np.asarray([(shape.part(n).x, shape.part(n).y,) for n in range(shape.num_parts)], np.float32)
#for n in range(shape.num_parts):
# cv2.circle(img, (shape.part(n).x,shape.part(n).y), 2, (0,0,255), thickness=4, lineType=8, shift=0)
rawfront, symfront = self.fronter.frontalization(img,facedet_obj,p2d)
symfront_bgr = cv2.cvtColor(symfront, cv2.COLOR_RGB2BGR)
face_hog_mouth = symfront_bgr[165:220, 130:190] #get half-bottom part
#face_hog = symfront_bgr[100:200, 110:205] #get face region for display
if(face_hog_mouth is not None):
gray_img = cv2.cvtColor(face_hog_mouth, cv2.COLOR_BGR2GRAY)
crop_img_resized = cv2.resize(gray_img, (IMAGE_WIDTH, IMAGE_HEIGHT), interpolation = cv2.INTER_CUBIC)
#crop_img_resized_full = cv2.resize(symfront_bgr, (IMAGE_WIDTH, IMAGE_HEIGHT), interpolation = cv2.INTER_CUBIC)
#cv2.imwrite("../img/output_test_img/mouthdetectsingle_crop_rezized.jpg",crop_img_resized)
#cv2.imwrite("../img/output_test_img/mouthdetectsingle_face.jpg",img)
#cv2.imwrite("../img/output_test_img/mouthdetectsingle_face_front.jpg",symfront_bgr)
#cv2.imwrite("../img/output_test_img/mouthdetectsingle_face_mouth.jpg",face_hog_mouth)
#cv2.imwrite("../img/output_test_img/mouthdetectsingle_face_front_.jpg",face_hog)
return crop_img_resized,facedet_obj.left(),facedet_obj.top(),facedet_obj.right(),facedet_obj.bottom()
else:
return None,-1,-1,-1,-1
else:
return None,-1,-1,-1,-1
def mouth_detect_bulk(self,input_folder,output_folder):
transformed_data_set = [img for img in glob.glob(input_folder+"/*jpg")]
for in_idx, img_path in enumerate(transformed_data_set):
mouth = self.mouth_detect_single(img_path,True)[0] # keep only the cropped mouth image
if mouth is None: # detection failed, skip this image
continue
if 'showingteeth' in img_path:
guid = uuid.uuid4()
uid_str = guid.urn
str_guid = uid_str[9:]
path = output_folder+"/"+str_guid+"_showingteeth.jpg"
cv2.imwrite(path,mouth)
else:
guid = uuid.uuid4()
uid_str = guid.urn
str_guid = uid_str[9:]
path = output_folder+"/"+str_guid+".jpg"
cv2.imwrite(path,mouth)
def negative_image(self,imagem):
imagem = (255-imagem)
return imagem
def adaptative_threashold(self,input_img_path):
img = cv2.imread(input_img_path,0)
img = cv2.medianBlur(img,3)
ret,th1 = cv2.threshold(img,127,255,cv2.THRESH_BINARY)
th2 = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_MEAN_C,\
cv2.THRESH_BINARY,11,2)
th3 = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
cv2.THRESH_BINARY,11,2)
#cv2.imwrite("../img/output_test_img/hmouthdetectsingle_adaptative.jpg",th3)
return th3
|
juanzdev/TeethClassifierCNN
|
src/mouth_detector_dlib.py
|
mouth_detector_dlib.py
|
py
| 4,369 |
python
|
en
|
code
| 3 |
github-code
|
6
|
36917846701
|
import logging
import os
import sys
from queue import Empty
from threading import Thread
import argparse
import jsonpickle
from polarity_server import globals
from polarity_server.rest import RestApi
class App:
thread_run = True
@classmethod
def run(cls):
parser = argparse.ArgumentParser()
parser.add_argument("--port", "-p", required=False, type=int,
default=5000, help="Port for REST API to listen on")
parser.add_argument("--input", "-i", required=False, type=str,
help="File to preload sessions from")
if len(sys.argv) == 1:
parser.print_help()
sys.exit(os.EX_SOFTWARE)
args = parser.parse_args()
logging.basicConfig(level=logging.INFO)
if args.input:
if os.path.isfile(args.input):
with open(args.input) as file:
data = file.read()
globals.sessions = jsonpickle.decode(data)
else:
logging.error("Invalid input file specified")
sys.exit(os.EX_SOFTWARE)
RestApi.start_server(args.port)
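# background worker: pulls tasks off the queue, runs them, and merges any sessions they discover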
thread = Thread(target=cls.runner)
thread.start()
command = ""
while command != "quit":
command = input("Command (\"help\" for options): ")
if command == "help":
cls.print_usage()
elif "sessions" in command:
print("")
if not globals.sessions:
print("No active sessions")
else:
if len(command.split()) == 1:
for i, ip_address in enumerate(globals.sessions):
print("{} - {}".format(str(i), ip_address))
else:
session_idx = command.split()[1].strip()
for i, ip_address in enumerate(globals.sessions):
if str(i) == session_idx:
for session in globals.sessions[ip_address]:
print("{} - {}".
format(session.username, ip_address))
print("")
elif "interact" in command:
if len(command.split()) > 2:
session_idx = command.split()[1].strip()
username = command.split()[2].strip()
session = cls.find_session(session_idx, username)
if session:
if not session.shell.is_alive():
session.shell.create_connection()
session.shell.interact()
else:
print("\nNo session found for specified id and/or username\n")
else:
print("\nSession id and username not specified\n")
elif "save" in command:
if len(command.split()) > 1:
filename = command.split()[1].strip()
with open(filename, "w") as file:
file.write(jsonpickle.encode(globals.sessions))
else:
print("\nFilename not specified\n")
elif command != "quit":
print("\nInvalid command\n")
RestApi.stop_server()
cls.thread_run = False
thread.join()
if globals.sessions:
for ip_address in globals.sessions:
for session in globals.sessions[ip_address]:
session.shell.close_connection()
sys.exit(os.EX_OK)
@staticmethod
def print_usage():
print("""
Usage:
help: print this message
quit: exit the program
sessions: print the active session hosts
sessions <id>: print the active session host username's
interact <session id> <username>: interact with host session
save <filename>: save current state to file
""")
@staticmethod
def find_session(idx, username):
for i, ip_address in enumerate(globals.sessions):
if str(i) == idx:
for session in globals.sessions[ip_address]:
if session.username == username:
return session
return None
@classmethod
def runner(cls):
while cls.thread_run:
task = cls.get_task()
if task:
sessions = task.execute()
if sessions:
globals.sessions.update(sessions)
@staticmethod
def get_task():
try:
return globals.task_queue.get(timeout=1.0)
except Empty:
return None
|
willmfftt/polarityserver
|
polarity_server/app/app.py
|
app.py
|
py
| 4,759 |
python
|
en
|
code
| 0 |
github-code
|
6
|
44055444234
|
print("Hello Adafruit!!!")
import sys
import random
import time
from Adafruit_IO import MQTTClient
import cv2
from read_serial import *
from simple_ai import *
AIO_FEED_ID = ["iot-hk222.light", "iot-hk222.pump"]
AIO_USERNAME = "vynguyen08122002"
AIO_KEY = "aio_jTpa00iRWo7ACInoo8sMTJ1I7Pr8"
def connected(client):
print("Ket noi thanh cong...")
for feed in AIO_FEED_ID:
client.subscribe(feed)
def subscribe(client, userdata, mid, granted_qos):
print("Subscribe thanh cong...")
def disconnected(client):
print("Ngat ket noi...")
sys.exit(1)
def message(client, feed_id, payload):
print("Nhan du lieu tu " + feed_id + " :" + payload)
if feed_id == "iot-hk222.light":
if payload == "1":
writeData("!BON#")
elif payload == "0":
writeData("!BOFF#")
elif feed_id == "iot-hk222.pump":
if payload == "1":
writeData("!PON#")
elif payload == "0":
writeData("!POFF#")
client = MQTTClient(AIO_USERNAME, AIO_KEY)
# call back with function pointer
client.on_connect = connected
client.on_disconnect = disconnected
client.on_message = message
client.on_subscribe = subscribe
client.connect()
client.loop_background()
counter = 5
sensor_type = 0
prev_image = ""
while True:
counter = counter - 1
if counter <= 0:
counter = 5
readSerial(client)
# if sensor_type == 0:
# temp = random.randint(0, 50)
# print("Cap nhat nhiet do: ", temp)
# client.publish("iot-hk222.temperature", temp)
# sensor_type = 1
# elif sensor_type == 1:
# humi = random.randint(0, 100)
# print("Cap nhat do am: ", humi)
# client.publish("iot-hk222.humidity", humi)
# sensor_type = 2
# elif sensor_type == 2:
# brightness = random.randint(0, 500)
# print("Cap nhat anh sang: ", brightness)
# client.publish("iot-hk222.brightness", brightness)
# sensor_type = 0
ai_image = image_detection()
if prev_image != ai_image:
print("AI Detection result: ", ai_image)
client.publish("iot-hk222.ai", ai_image)
prev_image = ai_image
time.sleep(1)
|
vynguyenkn0812/HK222_IoT
|
Gateway/main.py
|
main.py
|
py
| 2,285 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43691629803
|
#!/usr/bin/python3
""" Method that determines if all the boxes can be opened. """
def canUnlockAll(boxes):
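# breadth-first walk over keys: box 0 starts open, and every key found in an opened box opens that box in turn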
if not boxes:
return False
boxLen = len(boxes)
boxOpen = [0]
for k in boxOpen:
for box in boxes[k]:
if box not in boxOpen and box < boxLen:
boxOpen.append(box)
if len(boxOpen) == boxLen:
return True
return False
|
vagava/holbertonschool-interview
|
0x00-lockboxes/0-lockboxes.py
|
0-lockboxes.py
|
py
| 400 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42367773251
|
# -*- coding: utf-8 -*-
from tornado.web import RequestHandler
from ..Apps import Apps
from ..Exceptions import AsyncyError
from ..Sentry import Sentry
class BaseHandler(RequestHandler):
logger = None
# noinspection PyMethodOverriding
def initialize(self, logger):
self.logger = logger
def handle_story_exc(self, app_id, story_name, e):
# Always prefer the app logger if the app is available.
try:
logger = Apps.get(app_id).logger
except BaseException:
logger = self.logger
logger.error(f'Story execution failed; cause={str(e)}', exc=e)
self.set_status(500, 'Story execution failed')
self.finish()
if isinstance(e, AsyncyError):
Sentry.capture_exc(e, e.story, e.line)
else:
if story_name is None:
Sentry.capture_exc(e)
else:
Sentry.capture_exc(e, extra={
'story_name': story_name
})
def is_finished(self):
return self._finished
def is_not_finished(self):
return self.is_finished() is False
|
rashmi43/platform-engine
|
asyncy/http_handlers/BaseHandler.py
|
BaseHandler.py
|
py
| 1,138 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29703199407
|
import bpy
import types
import sys
from select import select
import socket
import errno
import mathutils
import traceback
from math import radians
from bpy.props import *
from ast import literal_eval as make_tuple
from .callbacks import *
from ..nodes.nodes import *
def make_osc_messages(myOscKeys, myOscMsg):
envars = bpy.context.scene.nodeosc_envars
for item in myOscKeys:
if item.dp_format_enable == False:
# we cannot deal with a datapath string that has format syntax
#print( "sending :{}".format(item) )
prop = None
if item.node_type == 1:
prop = eval(item.data_path + ".getValue()")
else:
prop = eval(item.data_path)
# now make the values to be sent a tuple (unless its a string or None)
if isinstance(prop, (bool, int, float)):
prop = (prop,)
elif prop is None:
prop = 'None'
elif isinstance(prop, (mathutils.Vector, mathutils.Quaternion, mathutils.Euler, mathutils.Matrix)):
prop = tuple(prop)
stringProp = str(prop)
if not (item.filter_repetition and envars.repeat_argument_filter_OUT) and stringProp != item.value:
item.value = stringProp
# make sure the osc indices are a tuple
indices = make_tuple(item.osc_index)
if isinstance(indices, int):
indices = (indices,)
# sort the properties according to the osc_indices
if prop is not None and not isinstance(prop, str) and len(indices) > 0:
prop = tuple(prop[i] for i in indices)
myOscMsg[item.osc_address] = prop
return myOscMsg
#######################################
# PythonOSC Server BASE CLASS #
#######################################
class OSC_OT_OSCServer(bpy.types.Operator):
_timer = None
count = 0
#####################################
# CUSTOMIZABLE FUNCTIONS:
#inputServer = "" #for the receiving socket
#outputServer = "" #for the sending socket
#dispatcher = "" #dispatcher function
def sendingOSC(self, context, event):
pass
# setup the receiving (input) server
def setupInputServer(self, context, envars):
pass
# setup the sending (output) server
def setupOutputServer(self, context, envars):
pass
# add method
def addMethod(self, address, data):
pass
# add default method
def addDefaultMethod(self):
pass
# start receiving
def startupInputServer(self, context, envars):
pass
# stop receiving
def shutDownInputServer(self, context, envars):
pass
#
#
#####################################
#######################################
# MODAL Function #
#######################################
def modal(self, context, event):
envars = bpy.context.scene.nodeosc_envars
if envars.isServerRunning == False:
return self.cancel(context)
if envars.message_monitor:
if len(envars.error) > 0:
for myError in envars.error:
self.report({myError.type}, myError.name + myError.value)
print(myError.name + myError.value)
envars.error.clear()
if event.type == 'TIMER':
#hack to refresh the GUI
self.count = self.count + envars.output_rate
if envars.message_monitor == True:
if self.count >= 100:
self.count = 0
for area in context.screen.areas:
if area.type == 'VIEW_3D':
area.tag_redraw()
# only available spot where updating the sorcar tree doesn't throw errors...
executeSorcarNodeTrees(context)
try:
start = time.perf_counter()
self.sendingOSC(context, event)
# calculate the execution time
end = time.perf_counter()
bpy.context.scene.nodeosc_envars.executionTimeOutput = end - start
except Exception as err:
self.report({'WARNING'}, "Output error: {0}".format(err))
return self.cancel(context)
return {'PASS_THROUGH'}
#######################################
# Setup OSC Receiver and Sender #
#######################################
def execute(self, context):
envars = bpy.context.scene.nodeosc_envars
if envars.port_in == envars.port_out:
self.report({'WARNING'}, "Ports must be different.")
return{'FINISHED'}
if envars.isServerRunning == False:
#Setting up the dispatcher for receiving
try:
self.setupInputServer(context, envars)
self.setupOutputServer(context, envars)
# all the osc messages handlers ready for registering to the server
oscHandlerDict = {}
oscHandleList = []
# register a message for executing
if envars.node_update == "MESSAGE" and hasAnimationNodes():
# oscHandleList content:
# callback type
# blender datapath (i.e. bpy.data.objects['Cube'])
# blender property (i.e. location)
# blender property index (i.e. location[index])
# osc argument index to use (should be a tuplet, like (1,2,3))
# node type
# datapath format string
# loop range string
# filter eval string
oscHandleList = (-1, None, None, None, None, 0, '', '', True)
self.addOscHandler(oscHandlerDict, envars.node_frameMessage, oscHandleList)
for item in bpy.context.scene.NodeOSC_keys:
filter_eval = True
if item.filter_enable:
filter_eval = item.filter_eval
if item.osc_direction != "OUTPUT" and item.enabled:
if item.dp_format_enable == False:
# make osc index into a tuple ..
oscIndex = make_tuple(item.osc_index)
# ... and don't forget the corner case
if isinstance(oscIndex, int):
oscIndex = (oscIndex,)
try:
oscHandleList = None
if item.data_path.find('script(') == 0:
raise Exception("using script() with format disabled is not allowed!")
elif item.data_path.find('][') != -1 and (item.data_path[-2:] == '"]' or item.data_path[-2:] == '\']'):
#For custom properties
# like bpy.data.objects['Cube']['customProp']
prop = item.data_path[item.data_path.rindex('['):]
prop = prop[2:-2] # get rid of [' ']
datapath = item.data_path[0:item.data_path.rindex('[')]
oscHandleList = [1, eval(datapath), prop, item.idx, oscIndex, item.node_type, '', '', filter_eval]
elif item.data_path[-1] == ']':
#For normal properties with index in brackets
# like bpy.data.objects['Cube'].location[0]
datapath = item.data_path[0:item.data_path.rindex('.')]
prop = item.data_path[item.data_path.rindex('.') + 1:item.data_path.rindex('[')]
prop_index = item.data_path[item.data_path.rindex('[') + 1:item.data_path.rindex(']')]
oscHandleList = [3, eval(datapath), prop, int(prop_index), oscIndex, item.node_type, '', '', filter_eval]
elif item.data_path[-1] == ')':
# its a function call
oscHandleList = [7, item.data_path, '', item.idx, oscIndex, item.node_type, '', '', filter_eval]
else:
#without index in brackets
datapath = item.data_path[0:item.data_path.rindex('.')]
prop = item.data_path[item.data_path.rindex('.') + 1:]
if isinstance(getattr(eval(datapath), prop), (int, float, str)):
# property is single value
oscHandleList = [2, eval(datapath), prop, item.idx, oscIndex, item.node_type, '', '', filter_eval]
else:
# property is array
oscHandleList = [4, eval(datapath), prop, item.idx, oscIndex, item.node_type, '', '', filter_eval]
if oscHandleList != None:
self.addOscHandler(oscHandlerDict, item.osc_address.strip(), oscHandleList)
else:
self.report({'WARNING'}, "Unable to create listener for: object '"+item.data_path+"' with id '"+item.props+"' : {0}".format(err))
except Exception as err:
self.report({'WARNING'}, "Register custom handle: object '"+item.data_path+"' with id '"+item.props+"' : {0}".format(err))
else:
oscIndex = item.osc_index
try:
oscHandleList = None
if item.data_path.find('script(') == 0:
if item.data_path.find(').'):
scriptName = item.data_path[7:item.data_path.find(').')]
functionName = item.data_path[item.data_path.find(').')+2:]
asModule = bpy.data.texts[scriptName].as_module()
asFunction = getattr(asModule, functionName)
oscHandleList = [11, scriptName + "." + functionName, asFunction, 0, item.osc_index, item.node_type, item.dp_format, '', filter_eval]
else:
if item.loop_enable:
oscHandleList = [10, item.data_path, '', 0, item.osc_index, item.node_type, item.dp_format, item.loop_range, filter_eval]
else:
oscHandleList = [10, item.data_path, '', 0, item.osc_index, item.node_type, item.dp_format, '', filter_eval]
if oscHandleList != None:
self.addOscHandler(oscHandlerDict, item.osc_address.strip(), oscHandleList)
else:
self.report({'WARNING'}, "Unable to create listener for: object '"+item.data_path+"' with id '"+item.props+"' : {0}".format(err))
except Exception as err:
self.report({'WARNING'}, "Register custom handle: object '"+item.data_path+"' with id '"+item.props+"' : {0}".format(err))
# lets go and find all nodes in all nodetrees that are relevant for us
nodes_createCollections()
for item in bpy.context.scene.NodeOSC_nodes:
filter_eval = True
if item.osc_direction != "OUTPUT":
# make osc index into a tuple ..
oscIndex = make_tuple(item.osc_index)
# ... and don't forget the corner case
if isinstance(oscIndex, int):
oscIndex = (oscIndex,)
try:
if item.node_data_type == "SINGLE":
oscHandleList = [5, eval(item.data_path), item.props, item.idx, oscIndex, item.node_type, '', '', filter_eval]
elif item.node_data_type == "LIST":
oscHandleList = [6, eval(item.data_path), item.props, item.idx, oscIndex, item.node_type, '', '', filter_eval]
self.addOscHandler(oscHandlerDict, item.osc_address.strip(), oscHandleList)
except Exception as err:
self.report({'WARNING'}, "Register node handle: object '"+item.data_path+"' with id '"+item.props+"' : {0}".format(err))
# register all oscHandles on the server
for address, oscHandles in oscHandlerDict.items():
self.addMethod(address, oscHandles)
# this provides the callback functions with the oscHandles
setOscHandlers(oscHandlerDict)
# register the default method for unregistered addresses
self.addDefaultMethod()
# startup the receiving server
self.startupInputServer(context, envars)
# register the execute queue method
bpy.app.timers.register(execute_queued_OSC_callbacks)
#inititate the modal timer thread
context.window_manager.modal_handler_add(self)
self._timer = context.window_manager.event_timer_add(envars.output_rate/1000, window = context.window)
except Exception as err:
self.report({'WARNING'}, "Server startup: {0}".format(err))
return {'CANCELLED'}
envars.isServerRunning = True
self.report({'INFO'}, "Server successfully started!")
return {'RUNNING_MODAL'}
else:
self.report({'INFO'}, "Server stopped!")
envars.isServerRunning = False
return{'FINISHED'}
def cancel(self, context):
envars = bpy.context.scene.nodeosc_envars
self.shutDownInputServer(context, envars)
context.window_manager.event_timer_remove(self._timer)
# hack to check who is calling the cancel method.
# see https://blender.stackexchange.com/questions/23126/is-there-a-way-to-execute-code-before-blender-is-closing
traceback_elements = traceback.format_stack()
# if the stack has 2 elements, it is because the server stop has been pushed.
# otherwise it might be loading a new project which would cause an exception
# and stop the proper shutdown of the server..
if len(traceback_elements) == 2:
bpy.app.timers.unregister(execute_queued_OSC_callbacks)
return {'CANCELLED'}
# will take an address and a oscHandle data packet.
# if the address has already been used, the package will be added to the packagelist
def addOscHandler(self, handleDict, address, oscHandlePackage):
oldpackage = handleDict.get(address)
if oldpackage == None:
oldpackage = [oscHandlePackage]
else:
oldpackage += [oscHandlePackage]
handleDict[address] = oldpackage
|
maybites/blender.NodeOSC
|
server/_base.py
|
_base.py
|
py
| 16,277 |
python
|
en
|
code
| 100 |
github-code
|
6
|
825675496
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 10 04:27:29 2022
@author: ThinkPad
"""
from __future__ import print_function
import argparse
import os
import numpy as np
import random
import torch
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data
from PartialScan import PartialScans,unpickle,inferencePartialScans
from model import feature_transform_regularizer
from pointnetCls import PointNetCls
import torch.nn.functional as F
from tqdm import tqdm
import random
from random import sample
# import open3d as o3d
from normalizeData import normalizePoints
def add_shape_arguments(parser):
parser.add_argument(
'--batchSize', type=int, default=3, help='input batch size')
parser.add_argument(
'--num_points', type=int, default=2500, help='input batch size')
parser.add_argument(
'--workers', type=int, help='number of data loading workers', default=2)
parser.add_argument(
'--nepoch', type=int, default=250, help='number of epochs to train for')
parser.add_argument('--outf', type=str, default='cls', help='output folder')
parser.add_argument('--model', type=str, default='', help='model path')
parser.add_argument('--checkpoint', type=str,
default='/gpfs/data/ssrinath/ychen485/TextCondRobotFetch/pointnet/cls/cls_model_10.pth',
help="checkpoint dir")
parser.add_argument('--feature_transform', action='store_true', help="use feature transform")
def inference(scanpoints, latentcode, classifier, opt, ref_paths):
opt.manualSeed = random.randint(1, 10000) # fix seed
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
points_r = normalizePoints(scanpoints)
points = np.random.rand(3, 1024, 3)
if points_r.shape[0] < 1024:
return False
points[0] = points_r[0:1024, :]
haveTarget = False
classifier = classifier.eval()
latent_dim = 512
for j in range(5):
ischair = 0
for i in range(10):
latents = np.zeros((1, latent_dim))
latents[0] = latentcode[j]
for k, path in enumerate(sample(ref_paths, 2), 1):
data = np.load(path)
scanpoints = data['points_r']
# points_r = normalizePoints(scanpoints)
points_r = scanpoints
points[k] = points_r[0:1024, :]
points_torch = torch.from_numpy(points[:, 0:1024, :]).to(torch.float32)
points_torch = points_torch.transpose(2, 1)
z = torch.from_numpy(latents).to(torch.float32)
points_cuda, z = points_torch.cuda(), z.cuda()
with torch.no_grad():
pred, trans, trans_feat = classifier(points_cuda, z)
pred = pred[0]
pred = torch.nn.functional.softmax(pred, dim=1)
ischair = int((pred.data.max(0)[1][1] == 0).cpu()) + ischair
print(ischair)
if ischair - 4 > 0:
haveTarget = True
break
return haveTarget
def get_text_model(opt):
classifier = PointNetCls(k=2, feature_transform=opt.feature_transform)
checkpoint = torch.load(opt.checkpoint)
classifier.load_state_dict(checkpoint)
if torch.cuda.is_available():
classifier.cuda()
return classifier
if __name__ == "__main__":
parser = argparse.ArgumentParser()
add_shape_arguments(parser)
opt = parser.parse_args()
print(opt)
blue = lambda x: '\033[94m' + x + '\033[0m'
opt.manualSeed = random.randint(1, 10000) # fix seed
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
latent_code = "/gpfs/data/ssrinath/ychen485/hyperPointnet/pointnet/03001627/ocnet_shapefeature_pc/embed_feats_train.pickle"
latent_code_test = "/gpfs/data/ssrinath/ychen485/hyperPointnet/pointnet/03001627/ocnet_shapefeature_pc/embed_feats_test.pickle"
latent_code_val = "/gpfs/data/ssrinath/ychen485/hyperPointnet/pointnet/03001627/ocnet_shapefeature_pc/embed_feats_val.pickle"
shape_folder = "/gpfs/data/ssrinath/ychen485/partialPointCloud/03001627"
latent_dim = 512
dataset = PartialScans(latentcode_dir = latent_code, shapes_dir = shape_folder)
test_dataset = PartialScans(latentcode_dir = latent_code_test, shapes_dir = shape_folder)
val_dataset = PartialScans(latentcode_dir = latent_code_val, shapes_dir = shape_folder)
inference_loader = inferencePartialScans(shapes_dir = "")
inferdataloader = torch.utils.data.DataLoader(
inference_loader,
batch_size=opt.batchSize,
shuffle=False,
num_workers=int(opt.workers))
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=opt.batchSize,
shuffle=True,
num_workers=int(opt.workers))
testdataloader = torch.utils.data.DataLoader(
test_dataset,
batch_size=opt.batchSize,
shuffle=True,
num_workers=int(opt.workers))
valdataloader = torch.utils.data.DataLoader(
val_dataset,
batch_size=opt.batchSize,
shuffle=True,
num_workers=int(opt.workers))
latent_dict = unpickle(latent_code)
keylist = list(latent_dict.keys())
latent_dict_test = unpickle(latent_code_test)
keylist_test = list(latent_dict_test.keys())
latent_dict_val = unpickle(latent_code_val)
keylist_val = list(latent_dict_val.keys())
print("train set lenth: "+ str(len(dataset)) +", test set length: "+ str(len(test_dataset)))
try:
os.makedirs(opt.outf)
except OSError:
pass
classifier = PointNetCls(k=2, feature_transform=opt.feature_transform)
if opt.checkpoint != " ":
checkpoint = torch.load(opt.checkpoint)
classifier.load_state_dict(checkpoint)
pass
classifier.cuda()
# # idx = random.randint(0, len(label) - 1)
# i = random.randint(0, 2)
# j = random.randint(0, 7)
# path = shape_folder + "/" + label[t_idx] + "/pointcloud" + str(j) + str(i) + "_partial.npz"
# data = np.load(path)
# scanpoints = data['points_r']
# # points_r = normalizePoints(scanpoints)
# points_r = scanpoints
# points[1] = points_r[0:1024, :]
#
# # idx = random.randint(0, len(label) - 1)
# i = random.randint(0, 2)
# j = random.randint(0, 7)
# path = shape_folder + "/" + label[t_idx] + "/pointcloud" + str(j) + str(i) + "_partial.npz"
# data = np.load(path)
# scanpoints = data['points_r']
# # points_r = normalizePoints(scanpoints)
# points_r = scanpoints
# points[2] = points_r[0:1024, :]
num_batch = len(dataset) / opt.batchSize
total_correct = 0
for epoch in range(1):
for i, data in enumerate(valdataloader, 0):
points_o, label = data
points = points_o[:,0:1024,:].to(torch.float32)
# print(points.shape)
points.to(torch.float32)
points = points.transpose(2, 1)
target_np = np.zeros((len(label),))
t_idx = random.randint(0,len(label)-1)
target_np[t_idx] = 1
target = torch.from_numpy(target_np).to(torch.int64)
latents = np.zeros((1, latent_dim))
latents[0] = latent_dict_val[label[t_idx]]
# for j in range(opt.batchSize):
# if target[j] == 1:
# latents[j] = latent_dict[label[j]]
# else:
# idx = random.randint(0,len(keylist))
# name = keylist[idx]
# while(name == label[j]):
# idx = random.randint(0,len(keylist))
# name = keylist[idx]
# latents[j] = latent_dict[name]
z = torch.from_numpy(latents).to(torch.float32)
points, target, z = points.cuda(), target.cuda(), z.cuda()
# optimizer.zero_grad()
classifier = classifier.train()
pred, trans, trans_feat = classifier(points, z)
# print(pred.shape)
pred = pred[0]
# loss = F.nll_loss(pred, target)
# if opt.feature_transform:
# loss += feature_transform_regularizer(trans_feat) * 0.001
# loss.backward()
# optimizer.step()
pred_choice = pred.data.max(1)[1]
correct = pred_choice.eq(target.data).cpu().sum()
total_correct = total_correct + correct.item()
if i%100 == 0:
print('[%d: %d/%d] accuracy: %f' % (epoch, i, num_batch, total_correct / (100* opt.batchSize)))
total_correct = 0
print(pred,pred_choice)
# print(points)
# print("inferencing:" )
path = "testpoints.npz"
# path = "/gpfs/data/ssrinath/ychen485/partialPointCloud/03001627/ff9915c51ece4848cfc689934e433906/pointcloud70_partial.npz"
data = np.load(path)
# lst = data.files
scanpoints = data['points_r']
# pcd1 = o3d.io.read_point_cloud(path)
# scanpoints = np.asarray(pcd1.points)
# print(scanpoints.shape)
points_r = normalizePoints(scanpoints)
# points_r = scanpoints
# points_o[2] = points[0:1024,:]
points = np.random.rand(3,1024,3)
# points[0] = points_r[0:1024,:]
points[0] = points_r[0:1024,:]
idx = random.randint(0,len(label)-1)
i = random.randint(0,2)
j = random.randint(0,7)
path = shape_folder +"/" + label[t_idx] + "/pointcloud"+str(j)+str(i)+"_partial.npz"
# path = "/gpfs/data/ssrinath/ychen485/partialPointCloud/03001627/ff9915c51ece4848cfc689934e433906/pointcloud41_partial.npz"
data = np.load(path)
# lst = data.files
scanpoints = data['points_r']
# pcd1 = o3d.io.read_point_cloud(path)
# scanpoints = np.asarray(pcd1.points)
# print(scanpoints.shape)
points_r = normalizePoints(scanpoints)
points_r = scanpoints
# points_o[2] = points[0:1024,:]
# points = np.zeros((3,1024,3))
# points[0] = points_r[0:1024,:]
points[1] = points_r[0:1024,:]
idx = random.randint(0,len(label)-1)
i = random.randint(0,2)
j = random.randint(0,7)
path = shape_folder +"/" + label[t_idx] + "/pointcloud"+str(j)+str(i)+"_partial.npz"
# path = "/gpfs/data/ssrinath/ychen485/partialPointCloud/03001627/589e717feb809e7c1c5a16cc04345597/pointcloud62_partial.npz"
data = np.load(path)
# lst = data.files
scanpoints = data['points_r']
# pcd1 = o3d.io.read_point_cloud(path)
# scanpoints = np.asarray(pcd1.points)
# print(scanpoints.shape)
points_r = normalizePoints(scanpoints)
points_r = scanpoints
# points_o[2] = points[0:1024,:]
# points = np.zeros((3,1024,3))
# points[0] = points_r[0:1024,:]
points[2] = points_r[0:1024,:]
# from torch.autograd import Variable
# sim_data = Variable(torch.rand(32,3,1024))
# print(points)
# print(points_o)
points = torch.from_numpy(points[:,0:1024,:]).to(torch.float32)
points.to(torch.float32)
# print(points)
points = points.transpose(2, 1)
# print(points)
latents = np.zeros((1, latent_dim))
latents[0] = latent_dict['46323c7986200588492d9da2668ec34c']
z = torch.from_numpy(latents).to(torch.float32)
# print(z)
points, target, z = points.cuda(), target.cuda(), z.cuda()
classifier = classifier.eval()
pred, trans, trans_feat = classifier(points, z)
pred = pred[0]
pred_choice = pred.data.max(1)[1]
print(torch.exp(pred),pred_choice)
latents[0] = latent_dict_val['ba673ea75085e46cbfd72d7396bc040a']
z = torch.from_numpy(latents).to(torch.float32)
points, target, z = points.cuda(), target.cuda(), z.cuda()
classifier = classifier.train()
pred, trans, trans_feat = classifier(points, z)
pred = pred[0]
pred_choice = pred.data.max(1)[1]
print(torch.exp(pred),pred_choice)
latents[0] = latent_dict_test['ff9915c51ece4848cfc689934e433906']
z = torch.from_numpy(latents).to(torch.float32)
points, target, z = points.cuda(), target.cuda(), z.cuda()
classifier = classifier.train()
pred, trans, trans_feat = classifier(points, z)
pred = pred[0]
pred_choice = pred.data.max(1)[1]
print(torch.exp(pred),pred_choice)
latents[0] = latent_dict_test['fc07472e4dd1b6698ae97f14e63e7e01']
z = torch.from_numpy(latents).to(torch.float32)
points, target, z = points.cuda(), target.cuda(), z.cuda()
classifier = classifier.train()
pred, trans, trans_feat = classifier(points, z)
pred = pred[0]
pred_choice = pred.data.max(1)[1]
print(torch.exp(pred),pred_choice)
latents[0] = latent_dict['3bd437d38068f4a61f285be552b78f9a']
latents[0] = (np.load('../language2shape/results/shape_0032.npy')[2])
z = torch.from_numpy(latents).to(torch.float32)
# print(z)
points, target, z = points.cuda(), target.cuda(), z.cuda()
classifier = classifier.eval()
pred, trans, trans_feat = classifier(points, z)
pred = pred[0]
pred_choice = pred.data.max(1)[1]
path = "testpoints.npz"
# path = "/gpfs/data/ssrinath/ychen485/partialPointCloud/03001627/ff9915c51ece4848cfc689934e433906/pointcloud70_partial.npz"
data = np.load(path)
# lst = data.files
scanpoints = data['points_r']
# pcd1 = o3d.io.read_point_cloud(path)
# scanpoints = np.asarray(pcd1.points)
# print(scanpoints.shape)
points_r = normalizePoints(scanpoints)
inference(scanpoints, np.load('../language2shape/results/shape_0032.npy'),classifier)
|
FreddieRao/TextCondRobotFetch
|
pointnet/inference.py
|
inference.py
|
py
| 13,605 |
python
|
en
|
code
| 2 |
github-code
|
6
|
23055528773
|
"""
Creation:
Author: Martin Grunnill
Date: 13/09/2022
Description: Classes for multinomial random draw seeding of infections.
Classes
-------
MultnomialSeeder
Makes multinomial draws selecting an infectious hosts branch and then state.
"""
from numbers import Number
import numpy as np
import math
class _InfectionBranch:
"""
Makes multinomial draws for selecting which stage of an infection pathway to place infected hosts.
Calculates normalised weighting of an infection branch's states, based on inverse outflow for states.
Parameters & Attributes
-----------------------
name : string
Name of branch.
outflows: dictionary {str or ints: string}
Keys are name or number given to state. Values are name given to parameter.
Methods
-------
calculate_weighting(parameters)
Calculate normalised weighting for each state.
seed_infections(self, n, parameters)
Make multinomial draw to select infectious stages of this branch to seed infection into.
"""
def __init__(self, name, outflows):
if not isinstance(name, str):
raise TypeError('name argument should be a string.')
self.name = name
outflows_err_msg = ('outflows argument should be a dictionary,'+
' with keys being strings or integers and values being string.')
if not isinstance(outflows,dict):
raise TypeError(outflows_err_msg)
if any(not isinstance(key,(int,str)) for key in outflows.keys()):
raise TypeError(outflows_err_msg)
if any(not isinstance(value,str) for value in outflows.values()):
raise TypeError(outflows_err_msg)
self.outflows = outflows
def calculate_weighting(self, parameters):
"""
Calculate normalised weighting for each state.
Parameters
----------
parameters : dict {str: Number}
Dictionary of parameter values.
Returns
-------
noramlised_weightings : dict {str: float}
Dictionary normalised weighting for each state.
"""
parameters_error = ('parameters argument should be a dictionary,' +
' with keys being strings and values being numbers.')
if not isinstance(parameters, dict):
raise TypeError(parameters_error)
if any(not isinstance(value, Number) for value in parameters.values()):
raise TypeError(parameters_error)
if any(not isinstance(key,str) for key in parameters.keys()):
raise TypeError(parameters_error)
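# weight each state by the inverse of its outflow rate (its expected dwell time), then normalise the weights to sum to 1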
weightings = {}
total = 0
for state, outflow in self.outflows.items():
weighting = parameters[outflow] ** -1
weightings[state] = weighting
total += weighting
noramlised_weightings = {state: weight/total for state, weight in weightings.items()}
return noramlised_weightings
def seed_infections(self, n, parameters, rng=None):
"""
Make multinomial draw to select infectious stages of this branch to seed infection into.
Parameters
----------
n : int
Number of infections to seed.
parameters : dict {str: Number}
Dictionary of parameter values.
rng : numpy random number generator, optional.
Random number generator to use.
Returns
-------
draw_dict : dict {str: int}
Keys are states values are number of infections in state.
"""
if rng is None:
rng = np.random.default_rng()
weighting = self.calculate_weighting(parameters)
pvals = list(weighting.values())
states = list(weighting.keys())
draw = rng.multinomial(n=n, pvals=pvals, size=1)
draw = draw[0]
draw_dict = {state: draw[index] for index, state in enumerate(states)}
return draw_dict
class MultnomialSeeder:
"""
Makes multinomial draws selecting an infectious hosts branch and then state.
Parameters
----------
branch_info : nested dict
First level keys are branches (str).
Second level keys are states (str or ints) and values are names of outflows for states (str).
Attributes
----------
branches : dict {str: InfectionBranch}
Infection branches that a host can be placed upon.
parameters : set of strings
Parameters (outflows) given in branch_info.
Methods
-------
seed_infections(n, branch_probability, parameters)
Draw selection of states to place infected hosts.
"""
def __init__(self, branch_info):
if not isinstance(branch_info,dict):
raise TypeError('branch_info should be a dictionary.')
self.branches = {}
self.parameters = set()
self.rng = None
for branch_name, outflows in branch_info.items():
self.parameters.update(list(outflows.values()))
if not isinstance(branch_info, dict):
raise TypeError('branch_info should be a dictionary of dictionaries.')
self.branches[branch_name] = _InfectionBranch(branch_name, outflows)
def set_seed(self,seed):
"""
Sets random number generator seed.
Parameters
----------
seed : int (>0)
"""
self.rng = np.random.default_rng(seed)
def _seed_branches(self, n, branch_probability):
"""
Make multinomial draw for which infection branch to place a host.
Parameters
----------
n : int
Number of infections to seed.
branch_probability : dict {string, float}
Probability of being on each infection branch.
Returns
-------
draw_dict : dict {str: int}
Keys are branches values are number of infections on branch.
"""
if self.rng is None:
rng = np.random.default_rng()
else:
rng = self.rng
pvals = list(branch_probability.values())
branches = list(branch_probability.keys())
draw = rng.multinomial(n=n, pvals=pvals, size=1)
draw = draw[0]
draw_dict = {branch: draw[index] for index, branch in enumerate(branches)}
return draw_dict
def seed_infections(self, n, branch_probability, parameters):
"""
Draw selection of states to place infected hosts.
Parameters
----------
n : int
Number of infections to seed.
branch_probability : dict {string, float}
Probability of being on each infection branch.
parameters : dict {str: Number}
Dictionary of parameter values.
Returns
-------
infections_draw : dict {str: int}
Keys are infected states values are number of hosts in state.
"""
prob_error = ', all proportion argument should be a number <=1 and >=0.'
for key, value in branch_probability.items():
if not isinstance(value, Number):
raise TypeError(key+' not a Number type'+ prob_error)
if value > 1 or value < 0:
raise ValueError(key+' is of value '+ str(value) + prob_error)
proportions_total = sum(branch_probability.values())
if not math.isclose(1, proportions_total, abs_tol=0.000001):
raise ValueError('The sum of dictionary values in proportions should equal 1, it is equal to ' +
str(proportions_total)+'.')
branch_draw = self._seed_branches(n, branch_probability)
infections_draw = {}
for branch_name, branch_seed in branch_draw.items():
branch = self.branches[branch_name]
branch_infection_draw = branch.seed_infections(branch_seed, parameters, self.rng)
states_already_drawn = set(infections_draw.keys()).union(set(branch_infection_draw.keys()))
updated_infection_draws = {state: branch_infection_draw.get(state, 0) + infections_draw .get(state, 0)
for state in states_already_drawn}
infections_draw = updated_infection_draws
return infections_draw
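# A minimal usage sketch (not part of the original module): the branch, state and
# parameter names below are hypothetical, chosen only to illustrate the input shapes.
if __name__ == '__main__':
    example_branches = {'asymptomatic': {'A': 'recovery_rate_A'},
                        'symptomatic': {'I_mild': 'progression_rate', 'I_severe': 'recovery_rate_I'}}
    seeder = MultnomialSeeder(example_branches)
    seeder.set_seed(42)
    # seed 100 infections, 40% asymptomatic / 60% symptomatic, states weighted by inverse outflow
    draw = seeder.seed_infections(n=100,
                                  branch_probability={'asymptomatic': 0.4, 'symptomatic': 0.6},
                                  parameters={'recovery_rate_A': 0.25,
                                              'progression_rate': 0.5,
                                              'recovery_rate_I': 0.2})
    print(draw)  # dict mapping each seeded state to its count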
|
LIAM-COVID-19-Forecasting/Modelling-Disease-Mitigation-at-Mass-Gatherings-A-Case-Study-of-COVID-19-at-the-2022-FIFA-World-Cup
|
seeding_infections/multinomail_seeding.py
|
multinomail_seeding.py
|
py
| 8,269 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12771403336
|
import tensorflow as tf
from yolo import YOLO, detect_video
from PIL import Image
import os
os.environ['CUDA_VISIBLE_DEVICES'] = "1"
def detect_img(yolo):
img = '10.jpg'
try:
image = Image.open(img)
except Exception as e:
print('Open Error! Try again!')
print(e)
else:
r_image = yolo.detect_image(image)
r_image.show()
# detect_img(YOLO())
path = '3.mp4'
output = './result/333333333.mp4'
detect_video(YOLO(), path, output_path=output)  # pass the input video path defined above
|
Jerry-Z464/yolo
|
keras-yolo3/test.py
|
test.py
|
py
| 490 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7357482434
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This is an example script that uses DIC data from Carrol et al
as input for SIF to find K field and cracktip data
The output is written to a CSV file
@author: Swati Gupta
"""
import SIF_final as SIF
import numpy as np
from os import walk
import pdb
from datetime import datetime
import csv, timeit
from pandas import read_csv
synData = 0
r2 = 100
alpha = 2
N=-1
noise = -1
# provide path to directory containing displacement data files
path = 'DICresults/'
filenames = next(walk(path), (None, None, []))[2]
# exclude temp files
for file in filenames:
if file.startswith('.'):
filenames.remove(file)
filenames.sort() #sort assuming files are named chronologically
# initialize
lenF = len(filenames)
cracktip1 = np.zeros((lenF-1,2)) # Geo
cracktip2 = np.zeros((lenF-1,2)) # Sep
cracktip3 = np.zeros((lenF-1,2)) # DC
K_field1 = np.zeros((lenF-1,6)) # Geo
K_field2 = np.zeros((lenF-1,6)) # Sepp
K_field3 = np.zeros((lenF-1,6)) # DC
discr = 10
geo = 0 # = 1 if use geometrical method too or 0 if only use separability
mat_constants = [0.327,109.1,43] #poisson's ratio, E, shear modulus
prev = [545, 315]
startT = timeit.default_timer()
#loop over the set of files
for i in range(0,lenF-1):
file1 = path+filenames[i]
print('filename \n', file1)
# pdb.set_trace()
data = read_csv(file1)
x = data['x'].values
y = data['y'].values
u = data['u'].values
v = data['v'].values
cracktip1[i],cracktip2[i],cracktip3[i],K_field1[i],K_field2[i], K_field3[i] = \
SIF.SIF_projection(synData, r2 = 50, alpha = 2.5,coords = np.array([x,y]).T,
coords_ref = np.array([x+u, y+v]).T, guess = prev, h=discr,geo = geo, constants = mat_constants)
# prev = cracktip2[i]
endT = timeit.default_timer()
print("Time taken:", round(endT - startT, 2), "seconds to analyze ", lenF, "files")
## write output to file ##
currentDT = datetime.now() # get current date and time
outputFile = "DIC_" + str(currentDT) + ".csv"
with open(outputFile, 'w') as f:
writer = csv.writer(f)
if geo:
writer.writerow(['S.no','filename','x_geo','y_geo','K_I_geo','K_II_geo','T_geo','x_sep','y_sep','K_I_sep','K_II_sep','T_sep'])
writer.writerows(zip(range(1,lenF),filenames,cracktip1[:,0], cracktip1[:,1],K_field1[:,2],K_field1[:,3],K_field1[:,4],
cracktip2[:,0], cracktip2[:,1],K_field2[:,2],K_field2[:,3],K_field2[:,4]))
else:
writer.writerow(['S.no','filename','x','y','K_I','K_II','T'])
writer.writerows(zip(range(1,lenF),filenames,cracktip2[:,0], cracktip2[:,1],K_field2[:,2],K_field2[:,3],K_field2[:,4]))
|
sg759/separability
|
DICexample.py
|
DICexample.py
|
py
| 2,731 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7919794607
|
from random import randint
from time import sleep
def more_five(x):
if x > 5:
return True
new = [2, 5, 10 ,12, 15, 1 ,2]
res_map = map(more_five, new)
print(list(res_map))
list_cmp = [randint(0, 10) for i in range(10) if i % 2 == 0]
#print(list_cmp)
set_cmp = [randint(0, 10) for i in range(10)]
#print(set_cmp)
dict_cmp = {str(i): randint(0, 10) for i in range(10)}
#print(dict_cmp)
x = [1, 2, 3, 24]
#print(id(x))
x.append(17)
#print(id(x))
new_list_cmp = (randint(0, 10) for i in range(10) if i % 2 == 0)
# print(new_list_cmp.__next__())
# print(new_list_cmp.__next__())
# print(new_list_cmp.__next__())
# print(list(new_list_cmp)) # the generator produces values only once
# print(list(new_list_cmp))
#
|
Savitskiyov/Python
|
Seminar 6/Seminar_1.py
|
Seminar_1.py
|
py
| 723 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9633391789
|
# W.A.P in Python to count the occurrence of each character in your name and display each character of the string. #
str = input("Enter the name : ")
L = []
for i in str.lower() :
if i not in L :
L.append(i)
print("The total number of occurances of",i,"is",str.count(i))
L1 = list(str)
print(L1)
|
sunny-ghosh/Python-projects
|
Practical_24.py
|
Practical_24.py
|
py
| 336 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42598863082
|
#Project euler problem 10
#Problem link https://projecteuler.net/problem=10
def sumPrimes(n):
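# sieve of Eratosthenes: each prime p is added to the running sum, then its multiples are marked composite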
sum, sieve = 0, [True] * n
for p in range(2, n):
if sieve[p]:
sum += p
for i in range(p * p, n, p):
sieve[i] = False
return sum
print(sumPrimes(2000000))
|
mahimonga/Project-Euler
|
Problem5_10/summation_of_primes.py
|
summation_of_primes.py
|
py
| 310 |
python
|
en
|
code
| 2 |
github-code
|
6
|
14822509390
|
from sqlalchemy.orm import Session
from database_models import Task, TaskStatuses
from schemas import CreateTaskModel, UpdateTaskModel, DeleteTaskModel
from datetime import datetime
def create_task(db:Session, task: CreateTaskModel):
db_task = Task(
name = task.name,
description = task.description,
status = TaskStatuses.opened,
create_date = datetime.now()
)
db.add(db_task)
db.commit()
db.refresh(db_task)
return db_task
def update_task(db:Session, updated_task: UpdateTaskModel):
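# drop fields the caller left as None so only the supplied attributes are updated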
filtered_task = {k:v for k,v in updated_task.to_dict().items() if v is not None}
db.query(Task).filter(Task.id==updated_task.id).update(filtered_task)
db.commit()
return updated_task
def delete_task(db:Session, delete_task: DeleteTaskModel):
db.query(Task).filter(Task.id==delete_task.id).delete(synchronize_session=False)
db.commit()
return delete_task
def get_all_tasks(db:Session):
return db.query(Task).all()
|
maximzec/ToDoApp
|
crud.py
|
crud.py
|
py
| 993 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32257470125
|
# Ben Readman
# While loop Calculator
x = 0
go = 'y'
num = int(input("Please enter the first number: "))
minnum = num
maxnum = num
avrg = num  # running total of every number entered
while go == 'y':
num2 = int(input("Please enter the next number: "))
avrg = avrg + num2  # accumulate the running total
num = num2
if minnum > num2:
minnum = num2
else:
minnum = minnum
if maxnum < num2:
maxnum = num2
else:
maxnum = maxnum
print()
go = input("Would you like to enter another number (y/n)? ")
x +=1
if go == 'n':
avrg = avrg/(x + 1)  # x numbers were entered after the first one
print()
print("Okay, the minimum is ",minnum,"\nThe maximum is ",maxnum,"\nThe average is ",avrg,sep='')
else:
go = input("Please enter a valid answer (y/n): ")
|
ThatGuyBreadman1567/WhileLoopCalculator
|
WhileLoopCalculator.py
|
WhileLoopCalculator.py
|
py
| 717 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28028911172
|
#!/usr/bin/python3
import random
import sys
import os
#from turtle import clear
#from typing_extensions import TypeVarTuple
#from tkinter import N
from time import sleep
from functions import k_d_function, fselection, current_score, addTo, averageSolution
# what to keep track of? Ability to add more
# Features
# make shortcuts to add amounts to each stat, e.g. kd2 kd3.2 will add a k/d of 2 to player one
# and 3.2 to player 2. Add a character to initialize the insertion of expression-style entries.
# setup variables
name_done = False
start = False
game_stat_active = {}
players = []
num_players = 0
whole_end = False
kd_end = False
skip = False
type_kd = False
selection = 0
repeatStop = True
#game_stat_active = {"kd" : [0(kills),1(deaths),2(kd),3(#ofgames)], "kills" : [1,2,3], "deaths" : [1,2,3,4], "get_kill_cam" : [1,2,3,4],
# "in_kill_cam" : [1,2,3,4], "greatness" : [1,2,3,4]}
game_stat = [False] * 8
# functions ******put function in different file so I can call it anywhere
print("********** Trash talking stat tracker **********")
# main menu
while (start == False): #selection menu, creating selection list
os.system('clear')
print("Select as many as you want. Press 9 to start.")
if game_stat[0] == True:
print("X: 1. Standard tracking two players, k/d, on kill cam, you are the kill cam, *You win and kill other player on kill cam")
else:
print("1. Standard tracking two players, k/d, on kill cam, you are the kill cam, *You win and kill other player on kill cam")
if game_stat[1] == True:
print("X: 2. k/d")
else:
print("2. k/d")
if game_stat[2] == True:
print("X: 3. kills")
else:
print("3. kills")
if game_stat[3] == True:
print("X: 4. deaths")
else:
print("4. deaths")
if game_stat[4] == True:
print("X: 5. you are the kill cam")
else:
print("5. you are the kill cam")
if game_stat[5] == True:
print("X: 6. in the kill cam")
else:
print("6. in the kill cam")
if game_stat[6] == True:
print("X: 7. achieve greatness (win match and kill other player on your kill cam")
else:
print("7. achieve greatness")
if game_stat[7] == True:
print("X: 8. Wins")
else:
print("8. Wins")
print("9. Finished selecting")#make it except letters
try:
selection = int(input("select one that applies: "))
except:
print("Please enter a number from the menu.")
# main menu inputs, change False to True in List
    if (selection == ""): print("Make a selection. Enter 9 if finished.")
if (selection == 1): # game_stat[0] ******************need to check if this is correct code!!!!!!
players = [None] * 2
players[0] = input("first player: ")
players[1] = input("second player: ")
game_stat_active['player0'] = players[0]
game_stat_active['player1'] = players[1]
game_stat_active.update({"player0" : {"name" : players[0]}})
game_stat_active.update({"player1" : {"name" : players[1]}})
game_stat_active["player0"]['kd'] = [None, None, None, None]
game_stat_active["player0"]['get_kill_cam'] = [None, None, None, None]
game_stat_active["player0"]['in_kill_cam'] = [None, None, None, None]
game_stat_active["player0"]['greatness'] = [None, None, None, None]
game_stat_active["player1"]['kd'] = [None, None, None, None]
game_stat_active["player1"]['get_kill_cam'] = [None, None, None, None]
game_stat_active["player1"]['in_kill_cam'] = [None, None, None, None]
game_stat_active["player1"]['greatness'] = [None, None, None, None]
start = True
skip = True
if (selection == 2):
game_stat[1] = fselection(game_stat[1])
if (selection == 3):
game_stat[2] = fselection(game_stat[2])
if (selection == 4):
game_stat[3] = fselection(game_stat[3])
if (selection == 5):
game_stat[4] = fselection(game_stat[4])
if (selection == 6):
game_stat[5] = fselection(game_stat[5])
if (selection == 7):
game_stat[6] = fselection(game_stat[6])
if (selection == 8):
game_stat[7] = fselection(game_stat[7])
if (selection == 9):
start = True
os.system('clear')
i = 0
while (i != 10 and skip != True):#input names, create data dictionary
print("Press Enter if no more players.")
enter_name = input("player {} name: ".format(i))
if (enter_name == ""):
break
else:
players.append(enter_name)
num_players = num_players + 1
game_stat_active.update({"player"+str(i) : {"name" : enter_name}}) #, "kd" : [1,2,3,4], "kills" : [1,2,3,4], "deaths" : [1,2,3,4], "get_kill_cam" : [1,2,3,4],
#"in_kill_cam" : [1,2,3,4], "greatness" : [1,2,3,4]}})
if (game_stat[1] == True):
game_stat_active["player"+str(i)]['kd'] = [None, None, None, None]
if (game_stat[2] == True):
game_stat_active["player"+str(i)]["kills"] = [None, None, None, None]
if (game_stat[3] == True):
game_stat_active["player"+str(i)]["deaths"] = [None, None, None, None]
if (game_stat[4] == True):
game_stat_active["player"+str(i)]["get_kill_cam"] = [None, None, None, None]
if (game_stat[5] == True):
game_stat_active["player"+str(i)]["in_kill_cam"] = [None, None, None, None]
if (game_stat[6] == True):
game_stat_active["player"+str(i)]["greatness"] = [None, None, None, None]
if (game_stat[7] == True):
game_stat_active["player"+str(i)]["wins"] = [None, None, None, None]
if (selection == 8):
start = True
i += 1
print("enter the stats\n") #Main loop to enter all the stats
j = 0 #counts for players in game_stat_active for kd
l = 0
enter_kill = False
enter_death = False
deaths_end = False
get_kill_cam_end = True
in_kill_cam_end = True
get_greatness = True
get_wins = True
repeatStop = False
while (whole_end == False):#receive data from user
print(game_stat_active)
if ("kd" in game_stat_active["player"+str(j)]): #kd[(0)kills,(1)deaths,(3)k/d]
while (kd_end == False):
while (type_kd == False):#input kill and death or k/d ratio
kd_or_kd = int(input("Do you want to enter 1. kills & deaths or 2. k/d ratio\n"))
if (kd_or_kd < 1 or kd_or_kd > 2):
print("please enter (1) or (2)")
else:
type_kd = True
if (kd_or_kd == 1):#input kill and deaths
k = 0 #counts for dictionary player key
for i in players:
while(enter_kill == False):
print("{}'s kills: ".format(i))
try:
kills = int(input(""))
except:
print("Please enter a number.")
if ('kills' in game_stat_active["player"+str(k)]):
game_stat_active["player"+str(k)]['kills'][0] = addTo(game_stat_active["player"+str(k)]['kills'][0], kills)
enter_kill = True
repeatStop = True
game_stat_active["player"+str(k)]['kd'][0] = addTo(game_stat_active["player"+str(k)]['kd'][0], kills)
while (enter_death == False):
print("{}'s deaths: ".format(i))
try:
deaths = int(input(""))
except:
print("Please enter a number.")
if ('deaths' in game_stat_active["player"+str(k)]):
game_stat_active["player"+str(k)]['deaths'][1] = addTo(game_stat_active["player"+str(k)]['deaths'][1], deaths)
enter_death = True
repeatStop = True
temp_k_d = (k_d_function(kills, deaths))
game_stat_active["player"+str(k)]['kd'][0] = addTo(game_stat_active["player"+str(k)]['kd'][0], kills)
game_stat_active["player"+str(k)]['kd'][1] = addTo(game_stat_active["player"+str(k)]['kd'][1], deaths)
game_stat_active["player"+str(k)]['kd'][2] = averageSolution(game_stat_active["player"+str(k)]['kd'][2], temp_k_d)
enter_death = False
enter_kill = False
k += 1
kd_end = True
if (kd_or_kd == 2):#input k/d ratio
k = 0 #counts for dictionary player key
for i in players:
enter_kd = False
while(enter_kd == False):
print("{}'s K/D: ".format(i))
try:
kd = float(input(""))
enter_kd = True
except:
print("Please enter only numbers and a decimal")
game_stat_active["player"+str(k)]['kd'][2] = kd
k += 1
kd_end = True
kills_end = False
enter_kills = True
if ("kills" in game_stat_active["player"+str(j)]):
while (kills_end == False):
k = 0 #counts for dictionary player key
for i in players:
#If deaths is entered in K/D then it is transfered to kills without repeat input
try:
if (game_stat_active["player"+str(k)]['kd'][0] != None and repeatStop == True):
only_kills = game_stat_active["player"+str(k)]['kills'][0]
repeatStop = False
except:
enter_kills = True
while (enter_kills == True):
print("{}'s kills: ".format(i))
try:
only_kills = int(input(""))
enter_kills = False
except:
print("Please enter a number.")
game_stat_active["player"+str(k)]['kills'][0] = only_kills
k += 1
#repeatStop = False
kills_end = True
if ("deaths" in game_stat_active["player"+str(j)]):
while (deaths_end == False):
k = 0 #counts for dictionary player key
for i in players:
if ('kd' in game_stat_active["player"+str(k)]):
deaths = game_stat_active["player"+str(k)]['kd'][1]
else:
deaths_end = True
while (deaths_end == True):
print("{}'s deaths: ".format(i))
try:
deaths = int(input(""))
deaths_end = False
except:
print("Please enter a number.")
game_stat_active["player"+str(k)]['deaths'][0] = deaths
k += 1
deaths_end = True
if ("get_kill_cam" in game_stat_active["player"+str(j)]):
while (get_kill_cam_end == True):
k = 0 #counts for dictionary player key
for i in players:
getKillCam = True
while (getKillCam == True):
print("{}'s kill cam: ".format(i))
try:
get_kill_cam = int(input(""))
getKillCam = False
except:
print("Please enter a number.")
game_stat_active["player"+str(k)]['get_kill_cam'][0] = addTo(game_stat_active["player"+str(k)]['get_kill_cam'][0], get_kill_cam)
k += 1
get_kill_cam_end = False
if ("in_kill_cam" in game_stat_active["player"+str(j)]):
while (in_kill_cam_end == True):
k = 0 #counts for dictionary player key
for i in players:
inKillCam = True
while (inKillCam == True):
print("{}'s in kill cam: ".format(i))
try:
in_kill_cam = int(input(""))
inKillCam = False
except:
print("Please enter a number.")
game_stat_active["player"+str(k)]['in_kill_cam'][0] = in_kill_cam
k += 1
in_kill_cam_end = False
if ("greatness" in game_stat_active["player"+str(j)]):
while (get_greatness == True):
k = 0 #counts for dictionary player key
for i in players:
getGreatness = True
while (getGreatness == True):
print("{}'s achieved GREATNESS: ".format(i))
try:
get_greatnessCount = int(input(""))
getGreatness = False
except:
print("Please enter a number.")
game_stat_active["player"+str(k)]['greatness'][0] = get_greatnessCount
k += 1
get_greatness = False
if ("wins" in game_stat_active["player"+str(j)]):
while (get_wins == True):
k = 0 #counts for dictionary player key
for i in players:
getWin = True
while (getWin == True):
print("{}'s achieved GREATNESS: ".format(i))
try:
getWinCount = int(input(""))
getWin = False
except:
print("Please enter a number.")
game_stat_active["player"+str(k)]['wins'][0] = getWinCount
k += 1
            get_wins = False
    toEnd = input('Press "enter" to continue, "results" or "r" for results, or "stop" or "s" to end tracker')
    toEnd = toEnd.lower()
    if (toEnd == "enter" or toEnd == ""):
        os.system('clear')
        whole_end = False
        kd_end = False
        kills_end = False
        deaths_end = False
        get_kill_cam_end = True
        in_kill_cam_end = True
        get_greatness = True
        get_wins = True
        continue
    elif (toEnd == "results" or toEnd == "r"):
        os.system('clear')
        print(current_score(game_stat_active, players))
        continue
    elif (toEnd == "stop" or toEnd == "s"):
        os.system('clear')
        whole_end = True
        continue
    else:
        print("Please enter one of the options above.")
current_score(game_stat_active, players)
quit()
|
miturn/stat_tracker
|
stat_tracker.py
|
stat_tracker.py
|
py
| 15,046 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6484090494
|
from rest_framework import serializers
from django.contrib.auth import get_user_model
from session.serializers.recent_sessions import RecentSessionSerializer
User = get_user_model()
class ClientListSerializer(serializers.ModelSerializer):
number_of_sessions = serializers.SerializerMethodField()
latest_session = serializers.SerializerMethodField()
def get_number_of_sessions(self, obj):
return obj.client_sessions.count()
def get_latest_session(self, obj):
session = obj.client_sessions.latest('created')
return RecentSessionSerializer(session).data
class Meta:
model = User
fields = [
'id',
'full_name',
'coaches',
'email',
'about',
'location',
'phone_number',
'avatar',
'number_of_sessions',
'latest_session'
]
|
roberttullycarr/cyclingsimulator
|
backend/user/serializers/coach/list_clients.py
|
list_clients.py
|
py
| 909 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11110715644
|
# coding:utf-8
import pygame
class Main(object):
def __init__(self, title, height, width, Fps=60):
self.height = height
self.width = width
self.title = title
self.Fps = Fps
self.main()
self.vars()
self.events()
    def main(self):
        pygame.init()  # initialize pygame
        pygame.mixer.init()  # initialize the mixer for background music
        pygame.display.set_caption(self.title)  # set the window title
        self.screen = pygame.display.set_mode([self.height, self.width])  # keep the screen surface as an attribute so other methods can reuse it
    def events(self):
        pygame.mixer.music.play(-1, 0)  # play background music (-1 loops forever, 0 starts playback at 0 seconds)
        while True:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    pygame.quit()
            # blit the background image at position (0, 0) of the window
            self.screen.blit(self.New_Default_Background_Pic, (0, 0))
            # refresh the display (nothing is shown until the screen is updated)
            pygame.display.update()
    def vars(self):
        # load the background image
        self.Old_Default_Background_Pic = pygame.image.load("bg_page.jpg")
        # scale the image to the window size
        self.New_Default_Background_Pic = pygame.transform.scale(self.Old_Default_Background_Pic, (self.height, self.width))
        self.Old_Default_Background_Music = pygame.mixer.music.load("rainy-season.mp3")
if __name__ == "__main__":
Main("Pixel World", 1280, 768)
|
PatrickShun/pygameDemo
|
pygamedemo_run.py
|
pygamedemo_run.py
|
py
| 1,650 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
25316393069
|
from typing import List, Set, Callable, Optional, Iterator
import math
class Tile:
def __init__(self, tile: List[str], tile_id: int = 0):
self.tile = tile
self.id = tile_id
self.edge_len = len(tile)
def right_edge(self) -> str:
return "".join(t[-1] for t in self.tile)
def left_edge(self) -> str:
return "".join(t[0] for t in self.tile)
def top_edge(self) -> str:
return self.tile[0]
def bottom_edge(self) -> str:
return self.tile[-1]
def rotate_right(self) -> None:
rotated = []
for ix in range(self.edge_len):
rotated.append(
"".join(
[
self.tile[self.edge_len - jx - 1][ix]
for jx in range(self.edge_len)
]
)
)
self.tile = rotated
def flip(self) -> None:
flipped = []
for t in self.tile[::-1]:
flipped.append(t)
self.tile = flipped
def check(order: List[Tile], tile: Tile, edge_size: int) -> bool:
return (
False
if (
(len(order) + 1) % edge_size != 1
and tile.left_edge() != order[len(order) - 1].right_edge()
)
or (
len(order) >= edge_size
and tile.top_edge() != order[len(order) - edge_size].bottom_edge()
)
else True
)
reassemble: List[Callable[[Tile], Optional[Tile]]] = [
lambda tile: tile,
lambda tile: tile.rotate_right(),
lambda tile: tile.rotate_right(),
lambda tile: tile.rotate_right(),
lambda tile: tile.flip(),
lambda tile: tile.rotate_right(),
lambda tile: tile.rotate_right(),
lambda tile: tile.rotate_right(),
]
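# Note: the eight callables above step a tile through its four rotations, a flip,
# and three further rotations, so recursion() below tries each of the eight
# possible orientations before moving on to the next tile.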
def recursion(
order: List[Tile], visited: Set[Tile], tiles: List[Tile], edge_size: int
) -> List[Tile]:
if len(order) == len(tiles):
return order
result = []
for tile in tiles:
if tile not in visited:
for r in reassemble:
r(tile)
if check(order, tile, edge_size):
result = recursion(
order + [tile], visited.union({tile}), tiles, edge_size
)
if result:
return result
return result
def part1(tiles: List[Tile]) -> int:
size = len(tiles)
edge_size = int(math.sqrt(size))
order = recursion([], set(), tiles, edge_size)
upper_left = 0
upper_right = edge_size - 1
bottom_left = size - edge_size
bottom_right = size - 1
return (
order[upper_left].id
* order[upper_right].id
* order[bottom_left].id
* order[bottom_right].id
)
def extract_data(lines: List[str]) -> Iterator[Tile]:
tile: List[str] = []
for line in lines + [""]:
if "Tile" in line:
tile_id = int(line.split()[1].strip(":"))
elif line:
tile += [line]
elif tile:
yield Tile(tile, tile_id)
tile = []
with open("input") as input_file:
lines = [line for line in input_file.read().splitlines()]
print(part1(list(extract_data(lines))))
|
stx73/aoc2020
|
day20/p1.py
|
p1.py
|
py
| 3,211 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40887076205
|
# Tests for the task components
import unittest
from pyodbc import Connection as PyodbcConnection
from connections import Connection1
from task_classes.db.mssqldb import MSSqlTarget
from task_classes.csv_task_classes import PrepareCsvBulkPackages
class TestMSSqlTarget(unittest.TestCase):
"""Класс тестирования MSSqlTarget"""
def setUp(self):
"""Проверка создания объекта"""
self.ms_sql_target = MSSqlTarget(
host=Connection1().host,
user=Connection1().user,
password=Connection1().password,
database=Connection1().database,
table=Connection1().table,
update_id='test1'
)
        self.assertIsInstance(self.ms_sql_target, MSSqlTarget, "The object should be created correctly")
def test_01_connect(self):
"""Провека соединения с базой данных"""
db_conn = self.ms_sql_target.connect()
        self.assertIsInstance(db_conn, PyodbcConnection, "The connection to Connection1 should succeed")
def test_02_touch(self):
"""Проверка записи в таблицу сессий загрузки"""
self.ms_sql_target.create_marker_table()
self.ms_sql_target.touch()
        self.assertTrue(self.ms_sql_target.exists(), "The load should be registered and marked as completed")
class TestCsv(unittest.TestCase):
"""Класс тестирования обработчиков csv-файлов"""
def setUp(self):
self.csv_task = PrepareCsvBulkPackages()
def test_02_package(self):
# print(self.csv_task.bulk_packages_directory)
        self.assertEqual(self.csv_task.bulk_packages_directory, r'D:\temp\data\packages',
                         'The bulk-package directory should be set correctly')
def test_03_filename(self):
print(self.csv_task.filename)
        self.assertEqual(self.csv_task.filename, r'D:\temp\data\packages\package.csv',
                         'The bulk-package file should be set correctly')
if __name__ == '__main__':
unittest.main(failfast=True)
|
Foresco/luigivar
|
tests.py
|
tests.py
|
py
| 2,370 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
39803355853
|
import pickle
from pathlib import Path
script_location = Path(__file__).absolute().parent
data_loc = script_location / "name_gen_model"
from bangla_linga.BN_countvectorizer import CountVectorizer
import bangla_linga.BN_ngram as ng
class BN_gen_pred(object):
def __init__(self,model_name=data_loc):
self.model_name = model_name
with open(model_name, 'rb') as p:
self.ob = pickle.load(p)
def get_name_ara(self, name=None):
gram_2 = ng.n_gram(name, 2)
g2 = ' '.join(gram_2)
gram_3 = ng.n_gram(name, 3)
g3 = ' '.join(gram_3)
name = [name + " " + g2 + " " + g3]
ct = CountVectorizer()
test = ct.transform(name)
return test
def predict_gender(self, name="None"):
pred_gen = self.ob.predict(self.get_name_ara(name))
if pred_gen == 0:
return 'male'
else: return 'female'
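# Usage sketch (illustrative, not from the original repo; assumes the pickled
# "name_gen_model" file sits next to this module, as the constructor default does):
#   predictor = BN_gen_pred()
#   predictor.predict_gender("...")   # returns 'male' or 'female'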
|
Kowsher/Bangla-NLP
|
Bangla Linga/bangla_linga/gender_prediction.py
|
gender_prediction.py
|
py
| 846 |
python
|
en
|
code
| 11 |
github-code
|
6
|
26336217910
|
import streamlit as st
import extra_streamlit_components as stx
from datetime import datetime, timedelta
import Scripts.constants as constants
@st.experimental_singleton(suppress_st_warning=True)
def get_manager():
return stx.CookieManager()
# def get_user_cookies():
# COOKIES = constants.COOKIES.get(constants.COOKIE_ID, None)
# # print("COOKIES", COOKIES)
# if COOKIES != None:
# COOKIES = [x.strip() for x in COOKIES.split(";")]
# constants.CURR_USER = COOKIES[0]
# constants.CURR_USER_IS_DOC = eval(COOKIES[1])
# def set_user_cookies(VALUE):
# # Set final date of expiry
# # set the cookie
# VALUE = ''.join([VALUE[0], ";", str(VALUE[1])])
# constants.COOKIES[constants.COOKIE_ID] = VALUE
# constants.COOKIES.save()
def get_user_cookies():
COOKIES = constants.COOKIE_MANAGER.get_all()
COOKIE = COOKIES.get(constants.COOKIE_ID, None)
if COOKIE != None:
COOKIE = [x.strip() for x in COOKIE.split(";")]
constants.CURR_USER = COOKIE[0]
constants.CURR_USER_IS_DOC = eval(COOKIE[1])
def set_user_cookies(VALUE):
constants.COOKIE_MANAGER = get_manager()
# Set final date of expiry
EXPIRES_AT = datetime.now() + timedelta(days=constants.EXPIRES_IN_DAYS)
# set the cookie
constants.COOKIE_MANAGER.set(
cookie = constants.COOKIE_ID,
val = ''.join([VALUE[0], ";", str(VALUE[1])]),
expires_at = EXPIRES_AT
)
constants.CURR_USER = VALUE[0]
constants.CURR_USER_IS_DOC = VALUE[1]
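# Usage sketch (illustrative; the example value is hypothetical, COOKIE_ID and the
# CURR_* globals come from Scripts/constants.py):
#   set_user_cookies(("some_user", False))  # stores "some_user;False" under COOKIE_ID
#   get_user_cookies()                      # later restores CURR_USER / CURR_USER_IS_DOC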
|
PeaPals/docnets
|
Scripts/cookie_manager.py
|
cookie_manager.py
|
py
| 1,550 |
python
|
en
|
code
| 0 |
github-code
|
6
|
37407227644
|
from matplotlib import pyplot as plt
from findiff import FinDiff
import pandas as pd
import numpy as np
from tqdm import tqdm
id_col = 'ID'
date_col = 'DATE'
px_close = 'px_last'
px_high = 'px_high'
px_low = 'px_low'
px_open = 'px_open'
def find_derivative(series): #1 day interval
'''
Compute the first and second derivatives (1-step interval) of a given series.
Parameters
----------
series: np.array
series of values to find derivatives
Returns
-------
mom: np.array
first derivative
momacc: np.array
second derivative
Examples
--------
>>> series
array([6.85, 7.06, 7.31, 8. , 7.72, 7.27, 6.57, 7.66, 8.27, 8.31])
>>> mom, momacc = find_derivative(series)
>>> mom
array([ 0.19 , 0.23 , 0.47 , 0.205, -0.365, -0.575, 0.195, 0.85 , 0.325, -0.245])
>>> momacc
array([-0.36, 0.04, 0.44, -0.97, -0.17, -0.25, 1.79, -0.48, -0.57, -0.66])
'''
d_dx = FinDiff(0, 1, 1)
d2_dx2 = FinDiff(0, 1, 2)
clarr = np.asarray(series)
mom = d_dx(clarr)
momacc = d2_dx2(clarr)
return mom, momacc
def find_local_min_max(series, mom, momacc):
'''
Find local minimum and maximum points from a series
Parameters
----------
series: np.array
series of values to find derivatives
mom: np.array
first derivative
momacc: np.array
        second derivative
Returns
-------
local_mins: dict
dictionary of index and value of local minimum of the series
local_max: dict
dictionary of index and value of local maximum of the series
Examples
--------
>>> series
array([6.85, 7.06, 7.31, 8. , 7.72, 7.27, 6.57, 7.66, 8.27, 8.31])
>>> local_mins, local_maxs = find_local_min_max(series, mom, momacc)
>>> local_mins
{6: 6.57}
>>> local_maxs
{3: 8.0, 9: 8.31}
'''
local_mins = []
local_maxs = []
for i in range(len(mom)-1):
series_dict = {i: series[i], i+1: series[i+1]}
if mom[i] <0 and mom[i+1]> 0:
local_mins.append(min(series_dict, key=series_dict.get))
elif mom[i] > 0 and mom[i+1]<0:
local_maxs.append(max(series_dict, key=series_dict.get))
elif mom[i] == 0 and momacc[i] >0:
local_mins.append(i)
elif mom[i] == 0 and momacc[i] <0:
local_maxs.append(i)
local_mins = {i : series[i] for i in local_mins}
local_maxs = {j : series[j] for j in local_maxs}
return local_mins, local_maxs
def get_state_local_min_max(dff, col = 'px_high', ma1 = 5, ma2 = 22):
'''
Main function to get trendline. NOTE: shifted one day late to avoid look-ahead bias
Step 1:
Label period as up and down based on the spread between short ma and long ma
i) short ma > long ma: up trend
ii) long ma > short ma: down trend
Label state when there is a change in state up - down / down - up
state 1, 2, 3, ...
Aggregate max or min of state.
Step 2:
Find local min and max points of the col input
Step 3:
Filter rows where local_max == max_in_state or local_min == min_in_state
Transform the rows into wide form, calculate the m, c that connects the two points
Parameters
----------
dff: DataFrame
stock df with DATE and ohlc prices, re-index to start from 0 is necessary
col: str
price high or price low. px_high to get resistance line (down trend), px_low to get support line (up trend)
ma1: int
short moving average period (in days)
ma2: int
long moving average period (in days)
Returns
-------
dff2: DataFrame
dataframe with ma_1st, ma_2nd, state and local_min/max
line_df: DataFrame
dataframe of the y equation, start and end period date of the support/resist line
'''
# dff['ma_1st'] = dff[col].rolling(ma1).mean()
# dff['ma_2nd'] = dff[col].rolling(ma2).mean()
dff['ma_1st'] = dff[col].ewm(span=ma1, min_periods = ma1, adjust=False).mean()
dff['ma_2nd'] = dff[col].ewm(span=ma2, min_periods = ma2, adjust=False).mean()
dff['spread'] = (dff['ma_1st'] - dff['ma_2nd']).shift()
dff.dropna(subset=['spread'], inplace=True)
dff.reset_index(drop=True, inplace=True)
dff['sign'] = dff['spread'].map(lambda x: 'up' if x>0 else 'down')
dff['state'] = (dff['sign']!=dff['sign'].shift()).astype(int).cumsum()
mom, momacc = find_derivative(dff[col].values)
local_mins, local_maxs = find_local_min_max(dff[col].values, mom, momacc)
return dff, local_mins, local_maxs
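# Usage sketch (illustrative toy data; only the module-level column names are
# taken from this file, the numbers are made up):
#   toy = pd.DataFrame({id_col: 'XYZ',
#                       date_col: pd.date_range('2020-01-01', periods=60),
#                       px_high: np.random.rand(60).cumsum() + 10})
#   toy, mins, maxs = get_state_local_min_max(toy, col=px_high, ma1=5, ma2=22)
#   # mins / maxs map row index -> value of the local extreme in px_high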
def refine_end_filter(end_filter_df, local_):
end_of_state=end_filter_df.groupby('state')[date_col].rank(ascending=False) ==1
end_filter_df.loc[end_of_state, local_] = None
end_filter_df[local_] = end_filter_df.groupby('state')[local_].ffill()
return end_filter_df.dropna()
def get_line(df, local_='local_maxs', start_='up', agg = 'max', m_increase = 1):
'''
local_ = 'local_maxs'
start_ = 'up'
agg = 'max'
m_increase = 1
'''
start_rule = df['sign'] == start_
start_filter = df[start_rule].copy()
start_filter = start_filter[start_filter[local_] == start_filter.groupby('state')[local_].transform(agg)]\
.reset_index()[[id_col,'index', date_col,'state',local_]]
start_filter = start_filter.assign(state=start_filter.state+1)
next_start_filter = start_filter.assign(next_start_dt=start_filter[date_col].shift(-1)).fillna(df[date_col].max())
cols = list(start_filter.columns)
start_filter.columns = ['start_'+i if i not in [id_col,'state'] else i for i in start_filter.columns]
end_rule = df['sign'] != start_
end_filter = df[end_rule].dropna(subset=[local_]).reset_index()
# end_filter = refine_end_filter(end_filter, local_)
start_end_filter = start_filter.merge(end_filter[cols], on=[id_col,'state'], how='left').dropna()\
.merge(next_start_filter[[id_col, 'state','next_start_dt']], on=[id_col, 'state'], how='left') #######
start_end_filter['m'] = (start_end_filter[local_] - start_end_filter['start_' + local_]) / \
(start_end_filter['index'] - start_end_filter['start_index'])
start_end_filter['c'] = start_end_filter[local_] - start_end_filter['m']*start_end_filter['index']
gradient_sign = (m_increase*start_end_filter['m'] < m_increase*start_end_filter.groupby('state')['m'].shift()).map(lambda x: 1 if not x else None)
start_end_filter['m'] = (start_end_filter['m'] * gradient_sign).ffill()
start_end_filter['c'] = (start_end_filter['c'] * gradient_sign).ffill()
start_end_filter['line_group'] = gradient_sign.cumsum().ffill()
start_end_filter = start_end_filter[m_increase*start_end_filter['m']<0].drop_duplicates(subset=[date_col], keep='last')
dff2 = df.merge(start_end_filter.drop('index',1),
on=[id_col,date_col,'state', local_], how='left').ffill()
fillins = (dff2[date_col]>dff2['next_start_dt']).map(lambda x: None if x else 1)
dff2['y'] = (dff2['m']*dff2.index + dff2['c'])*fillins
dff2['y2'] = dff2['m']*dff2.index + dff2['c']
dff2['cross'] = m_increase*dff2[px_close] > m_increase*dff2['y']
first_cross = dff2[dff2['cross']==True].reset_index().groupby('line_group')[date_col].first().reset_index().assign(first_cross=1)
dff2 = dff2.merge(first_cross, on=['line_group',date_col], how='left').drop('cross',1)
dff2['first_cross'] = dff2['first_cross'].fillna(0)
start_end_filter = start_end_filter.merge(first_cross.rename(columns={date_col:'cross_'+date_col}), on='line_group', how='left')
return dff2, start_end_filter
def _trendline_doc_string(original):
def wrapper(target):
target.__doc__ = original.__doc__
return target
return wrapper
@_trendline_doc_string(get_state_local_min_max)
def get_down_trendline(dff, col = 'px_high', ma1 = 5, ma2 = 22):
dff = dff.reset_index(drop=True)
dff, _, local_maxs = get_state_local_min_max(dff, col, ma1, ma2)
dff['local_maxs'] = dff.index.map(local_maxs)
dff2, line_df = get_line(dff, local_='local_maxs', start_='up', agg = 'max', m_increase = 1)
return dff2, line_df
@_trendline_doc_string(get_state_local_min_max)
def get_up_trendline(dff, col='px_low', ma1=5, ma2=22):
dff = dff.reset_index(drop=True)
dff, local_mins, _ = get_state_local_min_max(dff, col, ma1, ma2)
dff['local_mins'] = dff.index.map(local_mins)
dff2, line_df = get_line(dff, local_='local_mins', start_='down', agg = 'min', m_increase = -1)
return dff2, line_df
def cal_ret(price_df, col='px_last', ret_days=None, shift_days=0):
'''
Calculate the future return, i.e. forward return from today.
Will return NaN if the days in future not present yet
Parameters
----------
price_df: DataFrame
dataframe with stock prices
Returns
-------
price_df: DataFrame
dataframe with forward returns calculated
'''
if ret_days == None:
ret_days = [10, 30]
for d in ret_days:
price_df['%dD_return'%d] = price_df[col].pct_change(d).shift(-1*(d+shift_days))*100
return price_df #[['DATE',col]+]
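# Example reading (illustrative): cal_ret(price_df, col=px_close, ret_days=[10, 30])
# adds '10D_return' and '30D_return', the % change from today's close to the close
# 10 / 30 rows ahead; the trailing rows are NaN because that future price is unknown.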
def add_features(df):
'''
Add feature to df (on the cross date)
Parameters
----------
df: DataFrame
df with required fields to generate features
Returns
-------
df: DataFrame
df with added features
'''
# cols = df.columns.tolist()
df['price_change_5D'] = df['px_last'].pct_change(5)*100
df['price_change_f0'] = df['px_last'].pct_change()*100
df['price_change_f1'] = df['px_last'].pct_change().shift(-1)*100
df['open-close_f0'] = (df['px_last']/df['px_open']-1)*100
df['open-close_f1'] = (df['px_last']/df['px_open']-1).shift(-1)*100
df['accel'] = df['px_high'].diff().diff()
df['avat'] = df['volume']/df['volume'].rolling(20).mean()
# feature_cols = list(set(df.columns).difference(set(cols)))
return df
def full_ma_line_run(df, col='px_high', ma1=5, ma2=22):
'''
Generate full trendline and crosses
get_down_trendline
Parameters
----------
df: DataFrame
full stock df with prices
col: str
px_high for downtrend, px_low for uptrend
ma1: int
short moving average (days)
ma2: int
long moving average (days)
Returns
-------
trend_line_df: DataFrame
line_df generated from trendline_func
stock_ma_line_df: DataFrame
full_stock_df with merged line_df and its repective crosses after the last_DATE
Examples
--------
>>> stock_ma_line_df, trend_line_df = full_ma_line_run(df, 'px_high', ma1=5, ma2=22, feature_func=add_features)
'''
if 'high' in col:
trendline_func = get_down_trendline
else:
trendline_func = get_up_trendline
stock_ma_line_df = pd.DataFrame()
trend_line_df = pd.DataFrame()
for stock in tqdm((sorted(df[id_col].unique()))):
dff = df[df[id_col]==stock].sort_values(date_col).copy()
try:
dff2, line_df = trendline_func(dff)
stock_ma_line_df = stock_ma_line_df.append(dff2)
trend_line_df = trend_line_df.append(line_df)
except Exception as e:
print(stock, e)
return stock_ma_line_df.reset_index(drop=True), trend_line_df
################################################ Channel Breakout ########################################################
from sklearn.linear_model import LinearRegression
def channel_lr(stock_df, start_date, end_date):
train_df = stock_df[(stock_df[date_col]>=start_date)&(stock_df[date_col]<=end_date)].copy()
y = train_df[px_close]
X = train_df.index.values
lr = LinearRegression()
lr.fit(X.reshape(-1,1), y)
a = lr.coef_[0]
b = lr.intercept_
y_pred = a*X + b
BU = max(train_df[px_high] - y_pred)
BL = min(train_df[px_low] - y_pred)
return dict(a=a, b=b, BU=BU, BL=BL)
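# Note: channel_lr fits a straight line a*x + b to the closes between start_date
# and end_date; BU and BL are the extreme offsets of the highs/lows from that
# line, so a*x + b + BU and a*x + b + BL bound the price channel that
# channel_project() extends forward.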
def channel_project(stock_df, line_df, m_increase):
stock_df = stock_df.reset_index(drop=True)
line_df = line_df.drop_duplicates(subset=['line_group'])
channel_lr_df = []
for lrow in line_df.to_dict(orient='records'):
channel_lr_params = channel_lr(stock_df, lrow['start_' + date_col], lrow[date_col])
channel_lr_df.append({**lrow, **channel_lr_params})
channel_lr_df = pd.DataFrame(channel_lr_df)
stock_df = stock_df.merge(channel_lr_df[[id_col,date_col, 'a','b','BU','BL']], how='left').ffill()
fillins = (stock_df[date_col]>stock_df['next_start_dt']).map(lambda x: None if x else 1)
stock_df['project'] = (stock_df['a']*stock_df.index + stock_df['b'] + stock_df['a'] + m_increase*stock_df['BU'])*fillins
stock_df['cross'] = m_increase*stock_df[px_close] > m_increase*stock_df['project']
first_cross = stock_df[stock_df['cross']==True].reset_index().groupby('line_group')[date_col]\
.first().reset_index().assign(first_channel_cross=1)
stock_df = stock_df.merge(first_cross, on=['line_group',date_col], how='left').drop('cross',1)
stock_df['first_cross'] = stock_df['first_cross'].fillna(0)
channel_lr_df = channel_lr_df.merge(first_cross.rename(columns={date_col:'channel_cross_'+date_col}), on='line_group', how='left')
return stock_df, channel_lr_df
def full_channel_run(stock_ma_line_df, trend_line_df, col='px_high'):
m_increase = 1 if 'high' in col else -1
stock_channel_df = pd.DataFrame()
full_channel_df = pd.DataFrame()
for stock in tqdm((sorted(stock_ma_line_df[id_col].unique()))):
stock_df = stock_ma_line_df[stock_ma_line_df[id_col]==stock]
line_df = trend_line_df[trend_line_df[id_col]==stock]
try:
dff2, channel_df = channel_project(stock_df, line_df, m_increase)
stock_channel_df = stock_channel_df.append(dff2)
full_channel_df = full_channel_df.append(channel_df)
except Exception as e:
print(stock, e)
cross_dates = ['cross_%s'%date_col,'channel_cross_%s'%date_col]
full_channel_df['later_cross_date'] = full_channel_df[cross_dates].max(axis=1)
full_channel_df['both'] = full_channel_df[cross_dates].isnull().sum(axis=1).map(lambda x: 1 if x==0 else 0)
return stock_channel_df, full_channel_df
################################################ Visualization ########################################################
import plotly.graph_objects as go
from ipywidgets import interact, interactive, Dropdown, HTML, VBox, HBox
def plt_trendline(df, line_df, stock, col='px_high'):
'''
Plot price with trendline
Parameters
----------
df: DataFrame
dataframe with dates and stock prices
line_df: DataFrame
dataframe which contains start end index and date of trendline
stock: str
stock name for plot title
col: str
px_high or px_low
'''
if 'high' in col:
local_ = 'local_maxs'
else:
local_ = 'local_mins'
plt.rcParams['figure.figsize'] = (20,8)
fig, ax = plt.subplots()
df = df.set_index(date_col)
df[col].plot(color='black')
df[['ma_1st','ma_2nd']].plot(alpha=0.5, ax=ax) if 'ma_1st' in df.columns else None
plt.scatter(df.query('first_cross==1').index, df.query('first_cross==1')['y'], marker='x', color='red', s=100)
for line_g in df['line_group'].dropna().unique():
df_plot = df[df['line_group']==line_g].dropna(subset=['y']).iloc[[0, -1]].copy()
df_plot['y'].plot(color='red', linewidth=1)
for row in line_df.to_dict(orient='records'):
plt.plot([row['start_' + date_col], row[date_col]],
[row['start_' + local_] , row['m']*row['index'] + row['c']], color='purple', linewidth=1)
plt.title(stock)
return plt
def interactive_plt_trendline(df, ma1=5, ma2=22, direction='down'):
if direction == 'down':
trendline_func = get_down_trendline
col = 'px_high'
else:
trendline_func = get_up_trendline
col = 'px_low'
stock_selec = Dropdown(options = sorted(df.ID.unique()))
@interact()
def plot(stock = stock_selec):
dff = df[df[id_col]==stock].reset_index(drop=True).copy()
dff2, line_df = trendline_func(dff, ma1=ma1, ma2=ma2)
plt_trendline(dff2, line_df, stock, col)
def plt_channel(channel_df, channel_line_df, stock):
fig, ax = plt.subplots()
channel_df = channel_df.set_index(date_col)
channel_df[px_close].plot(color='black')
channel_df[['ma_1st','ma_2nd']].plot(alpha=0.5, ax=ax) if 'ma_1st' in channel_df.columns else None
for crow in channel_line_df.to_dict(orient='records'):
line_g = channel_df[channel_df['line_group']==crow['line_group']]
dff2_plot = line_g.dropna(subset=['project']).iloc[[0,-1]].copy()
dff2_plot['project'].plot(color='red', linewidth=1)
cross = line_g.query('first_channel_cross==1')
if cross.shape[0] :
plt.scatter(cross.index, cross[px_close], marker='x', color='red', s=100)
date_X = [crow['start_'+date_col], crow[date_col]]
X = np.array([crow['start_index'], crow['index']])
plt.plot(date_X, crow['a']*X+crow['b'], color='brown')
plt.plot(date_X, crow['a']*X+crow['b']+crow['BU'], color='cyan')
plt.plot(date_X, crow['a']*X+crow['b']+crow['BL'], color='cyan')
plt.title(stock)
return plt
def interactive_plt_channel(df, ma1=5, ma2=22, direction='down'):
if direction == 'down':
trendline_func = get_down_trendline
col = px_high
m_increase = 1
else:
trendline_func = get_up_trendline
col = px_low
m_increase = -1
stock_selec = Dropdown(options = sorted(df.ID.unique()))
@interact()
def plot(stock = stock_selec):
dff = df[df[id_col]==stock].reset_index(drop=True).copy()
dff2, line_df = trendline_func(dff, ma1=ma1, ma2=ma2)
dff3, channel_df = channel_project(dff2, line_df, m_increase)
plt_channel(dff3, channel_df, stock)
def interactive_plt_channel2(stock_channel_df, channel_line_df):
def _plot_cross(cross):
stock = stock_selec.value
stock_df = stock_channel_df[stock_channel_df[id_col]==stock].reset_index(drop=True).copy()
channel_df = channel_line_df[channel_line_df[id_col]==stock]
if cross == 'All':
plt_channel(stock_df, channel_df, stock)
else:
plt_channel(stock_df, channel_df.iloc[cross:cross+1], stock)
def update_cross_selec(stock):
cross_selec.options = ['All'] + list(range(channel_line_df[channel_line_df[id_col]==stock].shape[0]))
stock_selec = Dropdown(options = sorted(stock_channel_df[id_col].unique()))
init = channel_line_df[channel_line_df['ID']==stock_selec.value].shape[0]
cross_selec = Dropdown(options = range(init))
j = interactive(update_cross_selec, stock=stock_selec)
i = interactive(_plot_cross, cross=cross_selec)
k = VBox()
display(j)
display(i)
display(k)
import plotly.graph_objects as go
def plotly_trendline(df, line_df, stock, fig=None):
if not fig:
fig = go.Figure()
fig.add_trace(go.Candlestick(x=df[date_col],
open=df[px_open],
high=df[px_high],
low=df[px_low],
close=df[px_close], showlegend=False))
local_ = [i for i in line_df.columns if 'start_' in i and date_col not in i and 'index' not in i][0]
for row in line_df.to_dict(orient='records'):
line_g = df[df['line_group']==row['line_group']]
df_plot = line_g.dropna(subset=['y']).iloc[[0, -1]].copy()
fig.add_trace(go.Scatter(x=df_plot[date_col], y=df_plot['y'], mode='lines', showlegend=False,
hoverinfo='skip', line = dict(color = 'purple', width=1)))
cross = line_g.query('first_cross==1')
if cross.shape[0] :
fig.add_trace(go.Scatter(x=cross[date_col], y=cross[px_close], showlegend=False,
mode='markers', marker_symbol='x', marker_color='black'))
fig.add_trace(go.Scatter(x=[row['start_' + date_col], row[date_col]],
y=[row[local_] , row['m']*row['index'] + row['c']],
mode='lines', line_color='black', showlegend=False))
fig.update_layout(title=stock, template='ygridoff', xaxis_rangeslider_visible=False)
return fig
def plotly_channel(channel_df, channel_line_df, stock, fig=None):
if not fig:
fig = go.Figure()
# fig.add_trace(go.Scatter(x=channel_df[date_col], y=channel_df[px_close], line_color='black', showlegend=False))
fig.add_trace(go.Candlestick(x=channel_df[date_col],
open=channel_df[px_open],
high=channel_df[px_high],
low=channel_df[px_low],
close=channel_df[px_close], showlegend=False))
for line_g in channel_line_df['line_group'].dropna().unique():
dff2_plot = channel_df[channel_df['line_group']==line_g].iloc[[0,-1]].copy()
for crow in channel_line_df.to_dict(orient='records'):
date_X = [crow['start_'+date_col], crow[date_col]]
line_g = channel_df[channel_df['line_group']==crow['line_group']]
dff2_plot = line_g.dropna(subset=['project']).iloc[[0,-1]].copy()
fig.add_vline(dff2_plot[date_col].iloc[0] ,line_dash="dot", line=dict(color='black'))
fig.add_trace(go.Scatter(x=dff2_plot[date_col], y=dff2_plot['project'], mode='lines', showlegend=False,
hoverinfo='skip', line = dict(color = 'black', width=1)))
cross = line_g.query('first_channel_cross==1')
if cross.shape[0] :
fig.add_trace(go.Scatter(x=cross[date_col], y=cross[px_close], showlegend=False,
mode='markers', marker_symbol='x', marker_color='black'))
X = np.array([crow['start_index'], crow['index']])
fig.add_trace(go.Scatter(x=date_X, y=crow['a']*X+crow['b'], mode='lines', line_color='black', showlegend=False))
fig.add_trace(go.Scatter(x=date_X, y=crow['a']*X+crow['b']+crow['BU'], mode='lines',
hoverinfo='skip', showlegend=False,line = dict(color = 'blue', width=1)))
fig.add_trace(go.Scatter(x=date_X, y=crow['a']*X+crow['b']+crow['BL'], mode='lines',
hoverinfo='skip', showlegend=False,line = dict(color = 'blue', width=1)))
fig.update_layout(title=stock, template='ygridoff', xaxis_rangeslider_visible=False)
return fig
def interactive_plt_channel3(stock_channel_df, channel_line_df):
def _plot_cross(cross):
stock = stock_selec.value
stock_df = stock_channel_df[stock_channel_df[id_col]==stock].reset_index(drop=True).copy()
channel_df = channel_line_df[channel_line_df[id_col]==stock]
if cross == 'All':
fig = plotly_channel(stock_df, channel_df, stock)
fig2 = plotly_trendline(stock_df, channel_df, stock)
else:
fig = plotly_channel(stock_df, channel_df.iloc[cross:cross+1], stock)
fig2 = plotly_trendline(stock_df, channel_df.iloc[cross:cross+1], stock)
k.children= [go.FigureWidget(fig2), go.FigureWidget(fig)]
def update_cross_selec(stock):
cross_selec.options = ['All'] + list(range(channel_line_df[channel_line_df[id_col]==stock].shape[0]))
_plot_cross('All')
stock_selec = Dropdown(options = sorted(stock_channel_df[id_col].unique()))
init = channel_line_df[channel_line_df['ID']==stock_selec.value].shape[0]
cross_selec = Dropdown(options = range(init))
j = interactive(update_cross_selec, stock=stock_selec)
i = interactive(_plot_cross, cross=cross_selec)
k = VBox()
display(j)
display(i)
display(k)
|
etq-quant/etqbankloan
|
Lib/etiqalib/ta/turning_points.py
|
turning_points.py
|
py
| 24,726 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11896445749
|
from django.http import HttpRequest
from google_optimize.context_processors import google_experiment
def test_experiment_processor():
request = HttpRequest()
request.COOKIES["_gaexp"] = "GAX1.2.utSuKi3PRbmxeG08en8VNw.18147.1"
experiment = google_experiment(request)
assert experiment == dict(google_optimize={"redesign": "new_design"})
def test_context_processor_template(client):
client.cookies["_gaexp"] = "GAX1.2.utSuKi3PRbmxeG08en8VNw.18147.1"
response = client.get("/test")
assert response.context["google_optimize"] == {"redesign": "new_design"}
|
danihodovic/django-google-optimize
|
tests/test_context_processors.py
|
test_context_processors.py
|
py
| 585 |
python
|
en
|
code
| null |
github-code
|
6
|
44617869434
|
import numpy as np
import pandas as pd
class DataPreparationAirQuality(object):
def __init__(self, conf, utils, raw_repo, processed_repo) -> None:
self.target_col = "CO(GT)"
self.study_label = "Air Quality"
self.utils = utils
self.processed_data = pd.DataFrame()
self.is_seasonal = False
self.data_folder = conf.params["air_quality_data_folder"]
def apply(self) -> None:
raw_data = pd.read_csv(f"{self.data_folder}AirQualityUCI.csv", sep=';', decimal=',')
self.processed_data = self.prepare_data(raw_data)
print(
f"The sample time series is seasonal before the deseasonality "
f"{self.utils.is_seasonal(self.processed_data, self.target_col)}"
)
self.processed_data[self.target_col + "_deseasonalize"] = self.utils.deseasonalize_data(
data=self.processed_data, column_name=self.target_col, make_plot=False)
print(
f"The sample time series is seasonal after the deseasonality "
f"{self.utils.is_seasonal(self.processed_data, self.target_col)}"
)
self.utils.seasonal_decomposition(
data=self.processed_data, column_name=f"{self.target_col}_deseasonalize", make_plot=False)
@staticmethod
def prepare_data(raw_data) -> pd.DataFrame:
data = raw_data.copy(deep=True)
data.drop(columns=["Unnamed: 15", "Unnamed: 16"], inplace=True)
data.dropna(axis=0, inplace=True) # Drop rows with missing values
print(data.shape)
target_col = "CO(GT)"
data.dropna(subset=[target_col], inplace=True)
pd.to_datetime(data["Date"] + " " + data["Time"], format="%d/%m/%Y %H.%M.%S")
data["datetime"] = pd.to_datetime(data["Date"] + " " + data["Time"], format="%d/%m/%Y %H.%M.%S")
data.drop(columns=["Date", "Time"], inplace=True)
data.set_index("datetime", inplace=True)
return data
|
luis00rod/capgemini_tecnical_test
|
src/main/interactors/forecast/data_preparation_air_quality.py
|
data_preparation_air_quality.py
|
py
| 1,950 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32676437510
|
qtndPrimos = somaMultiplosTres = 0
qtndParesMaioresVinte = somaParesMaioresVinte = 0
for i in range(10):
div = 0
number = int(input("Número: "))
if number % 3 == 0:
somaMultiplosTres += number
if (number % 2 == 0 ) and (number > 20):
somaParesMaioresVinte += number
qtndParesMaioresVinte += 1
for p in range(1, number+1):
if number % p == 0:
div += 1
if div == 2:
qtndPrimos += 1
if qtndParesMaioresVinte > 0:
mediaParesMaiorVinte = somaParesMaioresVinte / qtndParesMaioresVinte
print("Média pares maiores que 20: {}".format(mediaParesMaiorVinte))
print("Quantidade de números primos: {}".format(qtndPrimos))
print("Soma dos números múltiplos de três: {}".format(somaMultiplosTres))
|
JLramosSoares/linguagem-de-programacao-exercicios
|
Lista_2021_1/exerc_4.py
|
exerc_4.py
|
py
| 737 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
11417571951
|
# -*- coding: utf-8 -*-
"""
(C) 2014-2019 Roman Sirokov and contributors
Licensed under BSD license
http://github.com/r0x0r/pywebview/
"""
import os
import sys
import logging
import json
import shutil
import tempfile
import webbrowser
from threading import Event, Semaphore
from ctypes import windll
from platform import architecture
from webview import WebViewException, _debug, _user_agent
from webview.serving import resolve_url
from webview.util import parse_api_js, interop_dll_path, parse_file_type, inject_base_uri, default_html, js_bridge_call
from webview.js import alert
from webview.js.css import disable_text_select
import clr
clr.AddReference('System.Windows.Forms')
clr.AddReference('System.Collections')
clr.AddReference('System.Threading')
import System.Windows.Forms as WinForms
from System import IntPtr, Int32, String, Action, Func, Type, Environment, Uri
from System.Threading.Tasks import Task, TaskScheduler, TaskContinuationOptions
from System.Drawing import Size, Point, Icon, Color, ColorTranslator, SizeF
archpath = 'x64' if architecture()[0] == '64bit' else 'x86'
os.environ['Path'] = interop_dll_path(archpath) + ';' + os.environ['Path']
clr.AddReference(interop_dll_path('Microsoft.Web.WebView2.Core.dll'))
clr.AddReference(interop_dll_path('Microsoft.Web.WebView2.WinForms.dll'))
from Microsoft.Web.WebView2.WinForms import WebView2, CoreWebView2CreationProperties
from Microsoft.Web.WebView2.Core import CoreWebView2Environment
logger = logging.getLogger('pywebview')
class EdgeChrome:
def __init__(self, form, window):
self.pywebview_window = window
self.web_view = WebView2()
props = CoreWebView2CreationProperties()
#props.UserDataFolder = os.path.join(os.getcwd(), 'profile')
props.UserDataFolder = os.path.join(os.environ['LOCALAPPDATA'], 'pywebview')
self.web_view.CreationProperties = props
form.Controls.Add(self.web_view)
self.js_results = {}
self.js_result_semaphore = Semaphore(0)
self.web_view.Dock = WinForms.DockStyle.Fill
#settings under on_webview_ready
self.web_view.CoreWebView2Ready += self.on_webview_ready
self.web_view.NavigationStarting += self.on_navigation_start
self.web_view.NavigationCompleted += self.on_navigation_completed
self.web_view.WebMessageReceived += self.on_script_notify
self.url = None
self.ishtml = False
self.html = None
if window.real_url:
self.load_url(window.real_url)
elif window.html:
self.html = window.html
self.load_html(window.html, '')
else:
self.html = default_html
self.load_html(default_html, '')
def evaluate_js(self, script, id, callback=None):
def _callback(result):
if callback is None:
self.js_results[id] = None if result is None or result == '' else json.loads(result)
self.js_result_semaphore.release()
else:
# future js callback option to handle async js method
callback(result)
self.js_results[id] = None
self.js_result_semaphore.release()
self.syncContextTaskScheduler = TaskScheduler.FromCurrentSynchronizationContext()
try:
result = self.web_view.ExecuteScriptAsync(script).ContinueWith(
Action[Task[String]](
lambda task: _callback(json.loads(task.Result))
),
self.syncContextTaskScheduler)
except Exception as e:
logger.exception('Error occurred in script')
self.js_results[id] = None
self.js_result_semaphore.release()
def get_current_url(self):
return self.url
def load_html(self, content, base_uri):
self.html = content
self.ishtml = True
self.web_view.EnsureCoreWebView2Async(None)
def load_url(self, url):
self.ishtml = False
self.web_view.Source = Uri(url)
def on_script_notify(self, _, args):
try:
func_name, func_param, value_id = json.loads(args.get_WebMessageAsJson())
if func_name == 'alert':
WinForms.MessageBox.Show(func_param)
elif func_name == 'console':
print(func_param)
else:
js_bridge_call(self.pywebview_window, func_name, func_param, value_id)
except Exception as e:
logger.exception('Exception occured during on_script_notify')
def on_new_window_request(self, _, args):
args.set_Handled(True)
#webbrowser.open(str(args.get_Uri()))
def on_webview_ready(self, sender, args):
sender.CoreWebView2.NewWindowRequested += self.on_new_window_request
settings = sender.CoreWebView2.Settings
settings.AreDefaultContextMenusEnabled = _debug
settings.AreDefaultScriptDialogsEnabled = True
settings.AreDevToolsEnabled = _debug
settings.IsBuiltInErrorPageEnabled = True
settings.IsScriptEnabled = True
settings.IsWebMessageEnabled = True
settings.IsStatusBarEnabled = _debug
settings.IsZoomControlEnabled = True
if self.html: sender.CoreWebView2.NavigateToString(self.html)
def on_navigation_start(self, sender, args):
pass
def on_navigation_completed(self, sender, args):
url = str(sender.Source)
self.url = None if self.ishtml else url
self.web_view.ExecuteScriptAsync('window.alert = (msg) => window.chrome.webview.postMessage(["alert", msg+"", ""])')
if _debug:
self.web_view.ExecuteScriptAsync('window.console = { log: (msg) => window.chrome.webview.postMessage(["console", msg+"", ""])}')
self.web_view.ExecuteScriptAsync(parse_api_js(self.pywebview_window, 'chromium'))
if not self.pywebview_window.text_select:
self.web_view.ExecuteScriptAsync(disable_text_select)
self.pywebview_window.loaded.set()
|
hanzzhu/chadle
|
venv/Lib/site-packages/webview/platforms/edgechromium.py
|
edgechromium.py
|
py
| 6,044 |
python
|
en
|
code
| 1 |
github-code
|
6
|
7986369348
|
import basc_py4chan as chanapi
import requests
import argparse
import sys
import os
class FourchanDownloader:
def __init__(self):
self.boards_list = chanapi.get_all_boards()
def run(self):
self.verify_boards()
if len(self.board) == 0:
print("No existing boards selected, you fucking idiot!")
sys.exit(2)
elif self.board[0] == '*':
self.boards = chanapi.get_all_boards()
else:
self.boards = chanapi.get_boards(self.board)
if self.thread_id != None:
self.download_threads(self.boards[0])
else:
self.download_boards()
def board_exists(self, board_name):
for board in self.boards_list:
if board.name == board_name:
return True
return False
def thread_exists(self, thread_id):
return self.board[0].thread_exists(thread_id)
def verify_boards(self):
if self.board[0] == '*':
return
for f in self.board:
if not self.board_exists(f):
self.board.remove(f)
def download_threads(self, board):
for tid in self.thread_id:
print(" >Thread #{0} at /{1}/:".format(tid, board.name))
if (board.thread_exists(tid)):
t = board.get_thread(tid)
t.expand()
thread_files = t.files()
thread_files_sum = sum(1 for _ in thread_files)
fnum = 1
print(" =>Closed/sticky/archived?: {0}/{1}/{2}\n =>Bumplimit/imagelimit hit: {3}/{4}\n =>Posts: {5}\n =>Files: {6}\n =>Topic: {7}".format(
t.closed, t.sticky, t.archived, t.bumplimit, t.imagelimit, len(t.all_posts), thread_files_sum, t.topic.text_comment[:50].encode('utf-8')
))
for thread_file in t.files():
print("{0}/{1}".format(fnum, thread_files_sum))
self.download_image(thread_file, "{0}/{1}/{2}".format(self.directory, board.name, tid))
fnum += 1
else:
print(" =>Thread is 404 (don't exists or got deleted)")
print("")
def download_boards(self):
for b in self.boards:
self.thread_id = b.get_all_thread_ids()
self.download_threads(b)
def download_image(self, url, path):
file_name = url.split('/')[-1]
imgpath = "{0}/{1}".format(path, file_name)
if not os.path.exists(path):
os.makedirs(path)
print("Downloading image {0}".format(file_name))
response = requests.get(url, stream=True)
size = int(response.headers.get('content-length'))
if os.path.isfile(imgpath) and os.path.getsize(imgpath) == size:
print("File is already downloaded!")
return
f = open(imgpath, "wb")
if (size is None):
f.write(response.content)
else:
dl = 0
for data in response.iter_content(chunk_size=4096):
dl += len(data)
f.write(data)
done = int(50 * dl / size)
sys.stdout.write("\r[{0}{1}]".format('=' * done, ' ' * (50-done)))
sys.stdout.flush()
print("")
def main():
parser = argparse.ArgumentParser(description="Download pics from your favourite fucking boards (or threads). Enter board names, or one board name and threads ID's.", epilog="op is a faggot")
parser.add_argument('-d', '--directory', default="4chan", help="directory or path in which pics will be saved (default: 4chan)")
parser.add_argument('-b', '--board', help="board(s) short name(s) from where pictures will be downloaded (* means all boards, enter multiple with spaces)", nargs='+')
parser.add_argument('-t', '--thread_id', help="thread ID's from where pics will be downloaded (you can enter multiple with spaces)", nargs='+')
dl = FourchanDownloader()
args = parser.parse_args(namespace=dl)
if dl.board == None:
print("You must enter at least one board, faggot!")
sys.exit(1)
dl.run()
if __name__ == "__main__":
main()
|
SteelPh0enix/4chanDownloader
|
4chan.py
|
4chan.py
|
py
| 4,179 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24990513085
|
# -*- coding: utf-8 -*-
import numpy as np
import time
import os
if __name__ == '__main__':
save_dir = '/home/shengby/Datasets/csv_generator/h750_w270_num100000/'
file_nums = 100000
for i in range(file_nums):
file_path = save_dir + str(i) + '.csv'
data = np.random.rand(750, 270)
np.savetxt(file_path, data, delimiter=',')
print(time.process_time())
|
Finallap/WiFi_Sensing_Python
|
data_loader/concurrent_study/csv_generator.py
|
csv_generator.py
|
py
| 391 |
python
|
en
|
code
| 5 |
github-code
|
6
|
10068857131
|
import cv2
import numpy as np
from time import sleep
import os
# global variables
bg = None
def run_avg(image, aWeight):
global bg
# initialize the background
if bg is None:
bg = image.copy().astype("float")
return
# compute weighted average, accumulate it and update the background
cv2.accumulateWeighted(image, bg, aWeight)
def segment(image, threshold=25):
global bg
# find the absolute difference between background and current frame
diff = cv2.absdiff(bg.astype("uint8"), image)
# threshold the diff image so that we get the foreground
thresholded = cv2.threshold(diff,
threshold,
255,
cv2.THRESH_BINARY)[1]
# get the contours in the thresholded image
(_, cnts, _) = cv2.findContours(thresholded.copy(),
cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
# return None, if no contours detected
if len(cnts) == 0:
return
else:
# based on contour area, get the maximum contour which is the hand
segmented = max(cnts, key=cv2.contourArea)
return (thresholded, segmented)
if __name__ == "__main__" :
index = 0
aWeight = 0.5
camera = cv2.VideoCapture(0)
top, right, bottom, left = 10, 470, 250, 750
num_frames = 0
r = ""
while True:
(grabbed, frame) = camera.read()
frame = cv2.flip(frame, 1)
cv2.putText(frame,"predictio is "+r, (20,100), cv2.FONT_HERSHEY_PLAIN , 1.5, 100)
clone = frame.copy()
(height, width) = frame.shape[:2]
roi = frame[top:bottom, right:left]
gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (7, 7), 0)
if num_frames < 30:
run_avg(gray, aWeight)
else:
hand = segment(gray)
if hand is not None:
index +=1
if index % 30 == 0 :
(thresholded, segmented) = hand
thresholded = cv2.resize(thresholded, (64, 64))
cv2.drawContours(clone, [segmented + (right, top)], -1, (0, 0, 255))
cv2.imshow("Thresholded", thresholded)
sleep(3)
path = "test"+str(index)+".jpg"
cv2.imwrite(path,thresholded)
r = os.popen("python predict.py "+path).read()[:-1]
print("prediction is ",r)
os.popen("rm -fr "+path).read()
print("images taken: {}".format(index))
cv2.rectangle(clone, (left, top), (right, bottom), (0,255,0), 2)
num_frames += 1
cv2.imshow("recording", clone)
keypress = cv2.waitKey(1) & 0xFF
if keypress == ord("q") :
break
cv2.destroyWindow("recording")
cv2.destroyWindow("Thresholded")
camera = None
|
RemonIbrahimNashed/HandGestureUseingCNN
|
live.py
|
live.py
|
py
| 2,732 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36733100195
|
import os
import sys
import logging
import MySQLdb
#import datetime
logger = logging.getLogger(__name__)
locz = []
locz_file = ''
# 'locz' table fields: chat_id, chat_title, user_id, user_name, date_time, latitude, longitude
def add_loc(mess):
locstr = 'chat.id:' + str(mess.chat.id) + ',chat.title:' + str(mess.chat.title) + ',user.id:' + str(mess.from_user.id) + ',user.username:' + str(mess.from_user.username) + ',message.date:' + str(mess.date) + ',location.latitude:' + str(mess.location.latitude) + ',location.longitude:' + str(mess.location.longitude)
locz.append(locstr)
# logger.info('locstr: ' + locstr)
#sql = """insert into locz values({0}, '{1}', {2}, '{3}', '{4}', {5}, {6}).format(mess.chat.id, mess.chat.title, mess.from_user.id, mess.from_user.username, datetime.datetime.strptime(mess.date, '%Y-%m-%d %H:%M:%S'), mess.location.latitude, mess.location.longitude)
sql = """insert into locz values({0}, '{1}', {2}, '{3}', '{4}', {5}, {6})""".format(mess.chat.id, mess.chat.title, mess.from_user.id, mess.from_user.username, mess.date, mess.location.latitude, mess.location.longitude)
try:
db = MySQLdb.connect(host="nikodim.mysql.pythonanywhere-services.com", user="nikodim", passwd="IkuRa700", db="nikodim$ikuradb", charset='utf8')
try:
db.query(sql)
db.commit()
except:
logger.error('Location record to DB failure. ' + str(sys.exc_info()[0]) + '. sql: ' + sql)
finally:
db.close()
except:
logger.error('DB connection error. ' + str(sys.exc_info()[0]))
save_loc()
logger.info('Location added. ' + locstr)
def select_locz(chat_id, user_id):
sql = """select latitude, longitude, date_time from locz where chat_id = {0} and user_id = {1} order by date_time""".format(chat_id, user_id)
try:
db = MySQLdb.connect(host="nikodim.mysql.pythonanywhere-services.com", user="nikodim", passwd="IkuRa700", db="nikodim$ikuradb", charset='utf8')
try:
db.query(sql)
r = db.store_result()
rows = r.fetch_row(maxrows=0)
except:
logger.error('Select locations from DB failure. ' + str(sys.exc_info()[0]) + '. sql: ' + sql)
finally:
db.close()
if not rows:
return 'нет локов'
else:
res = ''
for tup in rows:
res = res + '|' + str(tup[0]) + ',' + str(tup[1])
return res
except:
logger.error('DB connection error. ' + str(sys.exc_info()[0]))
return 'ошибка'
def init_locz():
global locz
global locz_file
THIS_FOLDER = os.path.dirname(os.path.abspath(__file__))
locz_file = os.path.join(THIS_FOLDER, '-locs', 'locz.tdb')
lf = open(locz_file, 'r', encoding='utf-8')
locz = lf.readlines()
lf.close()
locz = [l.replace('\n', '') for l in locz]
def save_loc():
global locz
global locz_file
locz = list(set(locz))
loczz = [l + '\n' for l in locz]
lf = open(locz_file, 'w')
lf.writelines(loczz)
lf.close()
# print('locz.db saved')
init_locz()
|
nikodim500/pyIkuraTeleBot
|
locationstore.py
|
locationstore.py
|
py
| 3,142 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72646625788
|
# some functions from discovery/scripts/cdisco/cdisco.py
import numpy as np
import torch
import torchvision
import PIL.Image as Image
from my_datasets import transform
from my_datasets import transform_normalize
def get_model_state(model, paths, y, dim_c, dim_w, dim_h, SAVEFOLD=''):
batch_size = 32
tot_acc = 0
i=0
batch_start=0
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
embeddings = np.zeros((len(y),2048))
gradients = np.zeros((len(y), 2048))
predictions = np.zeros((len(y), 1000))
    conv_embeddings = np.zeros((len(y), dim_c))
gradients_wrt_conv_layer = np.zeros((len(y), dim_c, dim_w, dim_h), dtype=np.float32)
conv_maps = np.zeros((len(y),dim_c,dim_w,dim_h))
print(f"embeddings shape: {embeddings.shape}")
print(f"gradients shape: {gradients.shape}")
print(f"predictions shape: {predictions.shape}")
while batch_start+batch_size < len(y)+batch_size:
# preprocessing the inputs
print(batch_start)
inputs = torch.stack([transform_normalize(transform(Image.open(paths[i]).convert("RGB"))) for i in range(batch_start, min(batch_start+batch_size, len(y)))])
inputs = inputs.clone().detach().requires_grad_(True)
batch_y=y[batch_start:min(batch_start+batch_size, len(y))]
# transfering to GPU
inputs=inputs.to(device)
model=model.to(device)
# inference pass
outs = model(inputs)
# extracting embeddings
# note: convolutional outputs should be avg pooled for this to actually make sense
pooled_embeddings=torch.nn.functional.adaptive_avg_pool2d(outs['conv'], (1, 1))
conv_embeddings[batch_start:min(batch_start+batch_size, len(y)),:]=pooled_embeddings[:,:,0,0].cpu().detach().numpy()
embeddings[batch_start:min(batch_start+batch_size, len(y)),:]=outs['avgpool'][:,:,0,0].cpu().detach().numpy()
# computing prediction loss
loss = torch.nn.CrossEntropyLoss()
pred = outs['fc']
len_=pred.shape[0]
target=np.zeros((len_, 1000))
for i in range(len(pred)):
target[i,int(batch_y[i])]=1.
target=torch.tensor(target, requires_grad=True).to(device)
outloss = loss(pred, target)
# Storing predictions
softmaxf = torch.nn.Softmax(dim=1)
predictions[batch_start:min(batch_start+batch_size, len(y)),:]=softmaxf(pred).detach().cpu()
# Computing the gradients and storing them
grads_wrt_conv = torch.autograd.grad(outloss, outs['conv'], retain_graph=True)[0]
gradients_wrt_conv_layer[batch_start:min(batch_start+batch_size, len(y)),:,:,:] = grads_wrt_conv[:,:,:,:].cpu()
conv_maps[batch_start:min(batch_start+batch_size, len(y)),:,:,:] = outs['conv'].cpu().detach()
grads = torch.autograd.grad(outloss, outs['avgpool'], retain_graph=True)[0]
gradients[batch_start:min(batch_start+batch_size, len(y)),:] = grads[:,:,0,0].cpu()
batch_start += batch_size
print(f"gradients shape {gradients.shape}, conv_embs shape {conv_embeddings.shape}, conv_maps.shape {conv_maps.shape}")
"""
SAVE INTERMEDIATE RESULTS
"""
np.save(f"{SAVEFOLD}/predictions.npy", predictions)
np.save(f"{SAVEFOLD}/gradients_wrt_conv_layer.npy", gradients_wrt_conv_layer)
np.save(f"{SAVEFOLD}/conv_maps.npy", conv_maps)
|
lomahony/sw-interpretability
|
scripts/get_embeddings.py
|
get_embeddings.py
|
py
| 3,411 |
python
|
en
|
code
| 4 |
github-code
|
6
|
43493696921
|
# GCDMOD in python
# shivamgor498
# https://www.codechef.com/AUG18A/problems/GCDMOD/
def power(x, y, m) :
if (y == 0) :
return 1
p = power(x, y // 2, m) % m
p = (p * p) % m
if(y % 2 == 0) :
return p
else :
return ((x * p) % m)
def modInverse(a, m) :
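    # modular inverse via Fermat's little theorem; valid only when m is prime
    # e.g. modInverse(3, 11) == 4 since (3 * 4) % 11 == 1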
return power(a, m - 2, m)
def computeGCD(x, y):
while(y):
x, y = y, x % y
return x
MOD = 1000000007
for _ in range(int(input())):
a, b, n = input().split()
a, b, n = int(a), int(b), int(n)
p = a-b
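    # gcd(a^n + b^n, a - b): reduce a^n + b^n modulo p = a - b first, since
    # gcd(x mod p, p) == gcd(x, p); when a == b (p == 0) the gcd is a^n + b^n
    # itself, reported modulo 1e9+7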
if(p):
temp = power(a,n,p)
temp1 = power(b,n,p)
temp = (temp+p)%p
temp1 = (temp1+p)%p
temp = (temp%p + temp1%p)%p
ans1 = computeGCD(temp,p)%MOD
print (ans1)
else :
temp = power(a,n,MOD)
temp1 = power(b,n,MOD)
temp = (temp%MOD + temp1%MOD)%MOD
print(temp)
|
shivamgor498/Codechef
|
GCDMOD.py
|
GCDMOD.py
|
py
| 877 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24293856933
|
def reshape_matrix(mat, x, y):
number_of_elements = len(mat) * len(mat[0])
if x * y != number_of_elements:
return None
result = [[None for _ in range(x)] for _ in range(y)]
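    # fill the y-by-x result row by row, consuming the source matrix in row-major order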
current_row = 0
current_col = 0
for i in range(len(mat)):
for j in range(len(mat[0])):
if current_col == x:
current_col = 0
current_row += 1
result[current_row][current_col] = mat[i][j]
print(result)
current_col += 1
print(result)
return result
def main():
assert reshape_matrix([[1, 2], [3, 4]], 1, 4) == [[1], [2], [3], [4]]
assert not reshape_matrix([[1, 2], [3, 4]], 2, 3)
if __name__ == '__main__':
main()
|
ckallum/Daily-Interview-Pro
|
solutions/reshaping_matrix.py
|
reshaping_matrix.py
|
py
| 730 |
python
|
en
|
code
| 16 |
github-code
|
6
|
73919543869
|
from django.shortcuts import render
from resources.models import Resource
def resources(request):
resources = Resource.objects.all().order_by('order').filter(hidden=False)
context = {
'resources': resources
}
return render(request, 'resources.html', context)
|
ctiller15/Humanity-first-tracker
|
resources/views.py
|
views.py
|
py
| 288 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29099740995
|
from torch.utils.data import Dataset
from typing import List
import torch
import pandas as pd
class InferenceDataset(Dataset):
def __init__(self, texts: List[list], tokenizer, max_length: int):
self.texts = texts
self.tokenizer = tokenizer
self.max_length = max_length
def __len__(self):
return len(self.texts)
def __getitem__(self, item_index):
inputs = self.tokenizer.encode_plus(
text=self.texts[item_index],
max_length=self.max_length,
padding="max_length",
return_tensors="pt",
add_special_tokens=True,
truncation=True
)
return {"inputs_ids": inputs["input_ids"].flatten(),
"attention_mask": inputs["attention_mask"].flatten()}
class PairSarcasmDataset(Dataset):
def __init__(self, texts: list, text_pairs: list, targets: list, tokenizer, max_len):
self.texts = texts
self.text_pairs = text_pairs
self.targets = targets
self.tokenizer = tokenizer
self.max_len = max_len
def __len__(self):
return len(self.texts)
def __getitem__(self, item_index):
text = self.texts[item_index]
text_pair = self.text_pairs[item_index]
target = self.targets[item_index]
inputs_ids = self.tokenizer.encode_plus(text=text,
text_pair=text_pair,
add_special_tokens=True,
max_length=2 * self.max_len,
return_tensors="pt",
padding="max_length",
truncation=True,
return_token_type_ids=True).input_ids
inputs_ids = inputs_ids.flatten()
return {"inputs_ids": inputs_ids, "targets": torch.tensor(target)}
class MultiSarcasmDataset(Dataset):
def __init__(self, data: pd.DataFrame, label_columns, tokenizer, max_len):
self.data = data
self.tokenizer = tokenizer
self.max_len = max_len
self.label_columns = label_columns
def __len__(self):
return len(self.data)
def __getitem__(self, item_index):
data_row = self.data.iloc[item_index]
text = data_row.tweets
target = data_row[self.label_columns]
inputs_ids = self.tokenizer.encode_plus(text=text,
add_special_tokens=True,
max_length=self.max_len,
return_tensors="pt",
padding="max_length",
truncation=True,
return_token_type_ids=True).input_ids
inputs_ids = inputs_ids.flatten()
return {"inputs_ids": inputs_ids, "label_sarcasm": torch.tensor(target[0]),
"label_irony": torch.tensor(target[1]),
"label_satire": torch.tensor(target[2]),
"label_understatement": torch.tensor(target[3]),
"label_overstatement": torch.tensor(target[4]),
"label_rhetorical_question": torch.tensor(target[5])}
|
MaryNJ1995/Sarcasm_Detection
|
src/inference/dataset.py
|
dataset.py
|
py
| 3,398 |
python
|
en
|
code
| 1 |
github-code
|
6
|
5257520483
|
import boto3
from secretss import accessKey, secretKey
# upload files to AWS S3 bucket
# pass the imported credentials explicitly (boto3 can also read them from the
# environment or ~/.aws/credentials)
s3 = boto3.client('s3', aws_access_key_id=accessKey, aws_secret_access_key=secretKey)
bucket_name = "mmc-video-bucket"
file_path = r'E:\Programming files\Home-Surveillance\basicvideo.mp4'
object_key = 'basicvideo.mp4'
s3.upload_file(file_path, bucket_name, object_key)
|
Varun-Naik/Home-Surveillance
|
upload_to_s3.py
|
upload_to_s3.py
|
py
| 297 |
python
|
en
|
code
| 1 |
github-code
|
6
|
39359091601
|
import time
from openpyxl import Workbook
from selenium import webdriver
import openpyxl
# from selenium.webdriver.common import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import StaleElementReferenceException
driver=webdriver.Chrome()
driver.get('https://www.homechoice.co.za/home')
driver.maximize_window()
driver.implicitly_wait(5)
searchbox=driver.find_element(By.ID,'CC-headerWidget-Search')
searchbox.send_keys("beds")
clickbtn=driver.find_element(By.ID,'searchSubmit').click()
filterbtn=driver.find_element(By.XPATH,'//span[contains(text(),"HomeChoice")]')
filterbtn.click()
bedProducts=driver.find_elements(By.XPATH,'//h3[contains(@itemprop,"name")]')
print("beds present in current page",len(bedProducts))
mybeds=[]
myprice=[]
for bed in bedProducts:
# print(bed.text)
mybeds.append(bed.text)
print("=*"*50)
time.sleep(2)
bedPrices=driver.find_elements(By.XPATH,'//div[@itemprop="cash-price"]')
my_element_id = '//span[contains(@id,"CC-product-price-max")]'
ignored_exceptions=(NoSuchElementException,StaleElementReferenceException)
print("prices present in a current page",len(bedPrices))
bedPrices = WebDriverWait(driver,10,ignored_exceptions=ignored_exceptions)\
.until(expected_conditions.presence_of_all_elements_located((By.XPATH, my_element_id)))
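# re-locating the price elements through an explicit wait avoids
# StaleElementReferenceException if the page re-renders between lookups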
for price in bedPrices:
# print(price.text)
myprice.append(price.text)
finallist=zip(mybeds,myprice)
# for data in list(finallist):
# print(data)
print("part1 completed")
wb=Workbook()
wb["Sheet"].title="BEDS DATA"
sh1=wb.active
sh1.append(["name","price"])
for x in list(finallist):
sh1.append(x)
wb.save("beddetail.xlsx")
print("part2 is completed")
|
Paviterence/Selenium-Python-BasicCodes
|
webScrapping.py
|
webScrapping.py
|
py
| 1,886 |
python
|
en
|
code
| 1 |
github-code
|
6
|
6511550014
|
point1 = []
point2 = []
with open('day5-input.txt','r') as f:
for line in f.readlines():
p1, p2 = line.split(' -> ')
p1 = [int(i) for i in p1.strip('\n').split(',')]
p2 = [int(i) for i in p2.strip('\n').split(',')]
if p1[0] == p2[0] or p1[1] == p2[1]:
point1.append(p1)
point2.append(p2)
points = {}
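# points maps the stringified coordinate "[x, y]" to how many lines cover it;
# coordinates covered by two or more lines are tallied at the end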
for a,b in zip(point1, point2):
while a[0] < b[0]:
print(str(a))
if str(a) in points:
points[str(a)] += 1
else:
points[str(a)] = 1
a[0] += 1
while a[1] < b[1]:
if str(a) in points:
points[str(a)] += 1
else:
points[str(a)] = 1
a[1] += 1
while b[0] < a[0]:
if str(b) in points:
points[str(b)] += 1
else:
points[str(b)] = 1
b[0] += 1
while b[1] <= a[1]:
if str(b) in points:
points[str(b)] += 1
else:
points[str(b)] = 1
b[1] += 1
counter = 0
for point in points:
if points[point] >=2:
counter += 1
print(counter)
|
kebab01/advent-of-code-2021
|
day5_part1-2.py
|
day5_part1-2.py
|
py
| 896 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20519423740
|
"""!
@brief Examples of usage and demonstration of abilities of K-Medoids algorithm in cluster analysis.
@authors Andrei Novikov ([email protected])
@date 2014-2020
@copyright BSD-3-Clause
"""
from pyclustering.samples.definitions import SIMPLE_SAMPLES, FCPS_SAMPLES
from pyclustering.cluster import cluster_visualizer
from pyclustering.cluster.kmedoids import kmedoids
from pyclustering.utils import read_sample, calculate_distance_matrix
from pyclustering.utils import timedcall, distance_metric, type_metric
def template_clustering(start_medoids, path, tolerance=0.25, show=True, **kwargs):
ccore = kwargs.get('ccore', True)
data_type = kwargs.get('data_type', 'points')
original_data = read_sample(path)
sample = original_data
if data_type == 'distance_matrix':
sample = calculate_distance_matrix(sample)
metric = distance_metric(type_metric.EUCLIDEAN_SQUARE, data=sample)
kmedoids_instance = kmedoids(sample, start_medoids, tolerance, metric=metric, ccore=ccore, data_type=data_type)
(ticks, result) = timedcall(kmedoids_instance.process)
clusters = kmedoids_instance.get_clusters()
print("Iterations:", kmedoids_instance.get_iterations())
print([len(cluster) for cluster in clusters])
print(clusters)
medoids = kmedoids_instance.get_medoids()
print("Sample: ", path, "\t\tExecution time: ", ticks, "\n")
if show is True:
visualizer = cluster_visualizer(1)
visualizer.append_clusters(clusters, original_data, 0)
visualizer.append_cluster([original_data[index] for index in start_medoids], marker='*', markersize=15)
visualizer.append_cluster(medoids, data=original_data, marker='*', markersize=15)
visualizer.show()
return original_data, clusters
def cluster_sample1():
template_clustering([2, 9], SIMPLE_SAMPLES.SAMPLE_SIMPLE1)
def cluster_sample2():
template_clustering([3, 12, 20], SIMPLE_SAMPLES.SAMPLE_SIMPLE2)
def cluster_sample3():
template_clustering([4, 12, 25, 37], SIMPLE_SAMPLES.SAMPLE_SIMPLE3)
def cluster_sample4():
template_clustering([4, 15, 30, 40, 50], SIMPLE_SAMPLES.SAMPLE_SIMPLE4)
def cluster_sample5():
template_clustering([4, 18, 34, 55], SIMPLE_SAMPLES.SAMPLE_SIMPLE5)
def cluster_elongate():
template_clustering([8, 56], SIMPLE_SAMPLES.SAMPLE_ELONGATE)
def cluster_lsun():
template_clustering([10, 275, 385], FCPS_SAMPLES.SAMPLE_LSUN)
def cluster_target():
template_clustering([10, 160, 310, 460, 560, 700], FCPS_SAMPLES.SAMPLE_TARGET)
def cluster_two_diamonds():
template_clustering([10, 650], FCPS_SAMPLES.SAMPLE_TWO_DIAMONDS)
def cluster_wing_nut():
template_clustering([19, 823], FCPS_SAMPLES.SAMPLE_WING_NUT)
def cluster_chainlink():
template_clustering([30, 900], FCPS_SAMPLES.SAMPLE_CHAINLINK)
def cluster_hepta():
template_clustering([0, 35, 86, 93, 125, 171, 194], FCPS_SAMPLES.SAMPLE_HEPTA)
def cluster_tetra():
template_clustering([0, 131, 214, 265], FCPS_SAMPLES.SAMPLE_TETRA)
def cluster_atom():
template_clustering([0, 650], FCPS_SAMPLES.SAMPLE_ATOM)
def cluster_engy_time():
template_clustering([10, 3000], FCPS_SAMPLES.SAMPLE_ENGY_TIME)
def display_fcps_clustering_results():
(lsun, lsun_clusters) = template_clustering([10, 275, 385], FCPS_SAMPLES.SAMPLE_LSUN, 0.1, False)
(target, target_clusters) = template_clustering([10, 160, 310, 460, 560, 700], FCPS_SAMPLES.SAMPLE_TARGET, 0.1, False)
(two_diamonds, two_diamonds_clusters) = template_clustering([10, 650], FCPS_SAMPLES.SAMPLE_TWO_DIAMONDS, 0.1, False)
(wing_nut, wing_nut_clusters) = template_clustering([19, 823], FCPS_SAMPLES.SAMPLE_WING_NUT, 0.1, False)
(chainlink, chainlink_clusters) = template_clustering([30, 900], FCPS_SAMPLES.SAMPLE_CHAINLINK, 0.1, False)
(hepta, hepta_clusters) = template_clustering([0, 35, 86, 93, 125, 171, 194], FCPS_SAMPLES.SAMPLE_HEPTA, 0.1, False)
(tetra, tetra_clusters) = template_clustering([0, 131, 214, 265], FCPS_SAMPLES.SAMPLE_TETRA, 0.1, False)
(atom, atom_clusters) = template_clustering([0, 650], FCPS_SAMPLES.SAMPLE_ATOM, 0.1, False)
visualizer = cluster_visualizer(8, 4)
visualizer.append_clusters(lsun_clusters, lsun, 0)
visualizer.append_clusters(target_clusters, target, 1)
visualizer.append_clusters(two_diamonds_clusters, two_diamonds, 2)
visualizer.append_clusters(wing_nut_clusters, wing_nut, 3)
visualizer.append_clusters(chainlink_clusters, chainlink, 4)
visualizer.append_clusters(hepta_clusters, hepta, 5)
visualizer.append_clusters(tetra_clusters, tetra, 6)
visualizer.append_clusters(atom_clusters, atom, 7)
visualizer.show()
cluster_sample1()
cluster_sample2()
cluster_sample3()
cluster_sample4()
cluster_sample5()
cluster_elongate()
cluster_lsun()
cluster_target()
cluster_two_diamonds()
cluster_wing_nut()
cluster_chainlink()
cluster_hepta()
cluster_tetra()
cluster_atom()
cluster_engy_time()
display_fcps_clustering_results()
|
annoviko/pyclustering
|
pyclustering/cluster/examples/kmedoids_examples.py
|
kmedoids_examples.py
|
py
| 5,155 |
python
|
en
|
code
| 1,113 |
github-code
|
6
|
8495271737
|
import bpy
from bpy_extras.object_utils import world_to_camera_view
import numpy as np
from util import poissonDiscSampling
import math
import random
from mathutils import Euler, Vector
import os
import glob
import sys
class ForegroundObjectPlacementRandomizer:
"""
A randomizer class which randomly spawns virtual human objects.
The foreground generation process of the blender scene involves selecting a subset from a pool of 3D human assets.
These chosen 3D human assets are placed randomly within the region above the background objects.
The placement positions of the foreground objects and their distances from each other are determined through
Poisson distribution sampling within the specified spatial area.
Attributes
----------
__scene (bpy.types.Scene): The blender scene data-block of current virtual environment.
__camera (bpy.types.Camera): The blender camera data-block.
__clip_start (float): Camera near clipping distance.
__clip_end (float): Camera far clipping distance.
    num_foreground_object_in_scene_range (dict of str: int): The min/max range for the number of foreground (human) objects within the blender scene.
    __num_foreground_object_in_scene (int): The number of foreground (human) objects within the blender scene.
foreground_area (list of float): Spatial distribution area of foreground objects.
__foreground_domain_size (numpy.ndarray): Spatial distribution area of foreground objects(convert foreground_area to ndarray).
foreground_poisson_disk_sampling_radius (float): Foreground objects separation distance.
asset_foreground_object_folder_path (str): The path to foreground object assets.
__foreground_object_collection (bpy.types.Collection): The blender collection data-block of foreground objects.
__n_particle (int): Number of generated particles of the poisson disks sampling.
__particle_coordinates (numpy.ndarray): Coordinates of the poisson disks sampling.
    __particle_coordinates_can_see_in_view (list of list of float): Sampled coordinates that fall inside the camera view.
Methods
-------
__error_check(): Check assigned background object assets folder path isn't empty.
__load_object(): Load asset from other blendfile to the current blendfile.
__posson_disc_sampling(): Using poisson disk sampling algorithm to generate the sampling.
__import_foreground_object_asset(): Import a number of __n_particle foreground objects into current blender scene.
__check_particle_in_cam_view(): Check if the particles from the Poisson disk sampling are within the camera's view.
foreground_object_placement_randomize(): Generate foreground.
"""
def __init__(self,
num_foreground_object_in_scene_range = {"min": 1 , "max": 5}, # Must <= 5
foreground_area = [9, 7, 4],
foreground_poisson_disk_sampling_radius = 1.5,
asset_foreground_object_folder_path = "C:/Users/user/Documents/project/PeopleSansPeople/Asset/Human/Procedural"
):
self.__scene = bpy.data.scenes["Scene"]
self.__camera = bpy.data.objects['Camera']
self.__clip_start = bpy.data.objects['Camera'].data.clip_start
self.__clip_end = bpy.data.objects['Camera'].data.clip_end
self.num_foreground_object_in_scene_range = num_foreground_object_in_scene_range
self.__num_foreground_object_in_scene = None
self.foreground_area = foreground_area
self.__foreground_domain_size = np.array(self.foreground_area)
self.foreground_poisson_disk_sampling_radius = foreground_poisson_disk_sampling_radius
self.asset_foreground_object_folder_path = asset_foreground_object_folder_path
self.__foreground_object_collection = bpy.data.collections["HumanCollection"]
self.__n_particle = None
self.__particle_coordinates = None # np.array
self.__particle_coordinates_can_see_in_view = list()
def __error_check(self,asset_path_list):
"""Check assigned background object assets folder path isn't empty.
Args:
asset_path_list (list of str): list of the path to background object assets.
"""
num_asset_in_folder = len(asset_path_list)
if num_asset_in_folder < 1:
print(f'ERROR!!! can not find any foreground asset in {self.asset_foreground_object_folder_path}')
input("Press Enter to continue...")
sys.exit()
def __load_object(self,filepath):
"""Load asset from other blendfile to the current blendfile.
Args:
filepath (str): The path to background object assets.
References
----------
https://studio.blender.org/training/scripting-for-artists/5eabe54d521eafd0953f6d45/
https://docs.blender.org/api/current/bpy.types.BlendDataLibraries.html
https://blender.stackexchange.com/questions/17876/import-object-without-bpy-ops-wm-link-append/33998#33998
https://blender.stackexchange.com/questions/34540/how-to-link-append-a-data-block-using-the-python-api?noredirect=1&lq=1
"""
# Append object from .blend file
with bpy.data.libraries.load(filepath, link = False,assets_only = True) as (data_from, data_to):
data_to.objects = data_from.objects
# Link object to current scene
for obj in data_to.objects:
if obj is not None:
self.__foreground_object_collection.objects.link(obj)
def __posson_disc_sampling(self):
"""Generate the sampling with a spatially variable sampling radius."""
        # The poisson_disc_sampling function occasionally breaks (roughly every 2000-3000 cycles) and returns an empty list [],
        # so keep retrying until len(self.__particle_coordinates) >= 1
while self.__n_particle == None or self.__n_particle == 0:
self.__particle_coordinates = poissonDiscSampling.poisson_disc_sampling(radius = self.foreground_poisson_disk_sampling_radius,
sample_domain_size = self.__foreground_domain_size,
sample_rejection_threshold = 30)
self.__n_particle = len(self.__particle_coordinates)
print(f"nParticle Prev : {self.__n_particle}") # Show posson disc sampling caculated particle num
loc_offset = np.array([self.__foreground_domain_size[0]/2,self.__foreground_domain_size[1]/2,-2])
self.__particle_coordinates -= loc_offset
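        # shifting by loc_offset centres the XY samples on the origin and raises
        # them 2 units in Z (subtracting the negative Z offset), placing them above the background region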
def __import_foreground_object_asset(self):
"""Import a number of __n_particle foreground objects into current blender scene."""
        # Check that n_particle is bigger than num_foreground_object_in_scene
        if self.__n_particle < self.__num_foreground_object_in_scene:
            print('Warning!!! nParticle:{} must be bigger than fg_obj_in_scene_num:{}'.format(self.__n_particle,self.__num_foreground_object_in_scene))
input("Press Enter to continue...")
sys.exit()
# Get foreground object asset path
foreground_object_path_list = glob.glob(os.path.join(self.asset_foreground_object_folder_path, "*.blend"))
self.__error_check(asset_path_list = foreground_object_path_list)
num_fg_obj = len(foreground_object_path_list)
print("num fg obj in folder: {}".format(num_fg_obj))
# Shuffle foreground_object_path_list
random.shuffle(foreground_object_path_list)
# Check num_foreground_object_in_scene is bigger than num_fg_obj
if self.__num_foreground_object_in_scene >= num_fg_obj:
            # Loop to import foreground objects
num_loop = self.__num_foreground_object_in_scene // num_fg_obj
num_remain = self.__num_foreground_object_in_scene % num_fg_obj
for i in range(num_loop):
for fg_obj_path in foreground_object_path_list:
self.__load_object(filepath = fg_obj_path)
if num_remain != 0:
for i in range(num_remain):
self.__load_object(filepath = foreground_object_path_list[i])
else:
# Randomly select n(n=num_foreground_object_in_scene) fg_obj from foreground_object_path_list, then import to scene
foreground_object_path_list_selected = random.sample(foreground_object_path_list, self.__num_foreground_object_in_scene)
for fg_obj_path in foreground_object_path_list_selected:
self.__load_object(filepath = fg_obj_path)
def __check_particle_in_cam_view(self):
"""Check if the particles from the Poisson disk sampling are within the camera's view.
References
----------
https://blender.stackexchange.com/questions/284884/what-does-world-to-camera-view-depend-on
https://blender.stackexchange.com/questions/258000/how-to-update-world-transformation-matrices-without-calling-a-scene-update/258002#258002
"""
# Update camera object matrix_world
self.__camera.matrix_world = self.__camera.matrix_basis
for coordinates in self.__particle_coordinates:
# World space to ndc space
vector_p = Vector(coordinates)
co_ndc = world_to_camera_view(self.__scene, self.__camera, vector_p)
            # Check whether the point is inside the camera frustum
if (0.0 < co_ndc.x < 1.0 and 0.0 < co_ndc.y < 1.0 and self.__clip_start < co_ndc.z < self.__clip_end):
self.__particle_coordinates_can_see_in_view.append(coordinates)
# Update __particle_coordinates and __n_particle var value
self.__particle_coordinates = np.array(self.__particle_coordinates_can_see_in_view)
self.__n_particle = len(self.__particle_coordinates_can_see_in_view)
def foreground_object_placement_randomize(self):
"""Generate foreground.
References
----------
[1]https://stackoverflow.com/questions/14262654/numpy-get-random-set-of-rows-from-2d-array
"""
self.__num_foreground_object_in_scene = random.randint(self.num_foreground_object_in_scene_range["min"], self.num_foreground_object_in_scene_range["max"])
# PoissonDiskSampling
self.__posson_disc_sampling()
# Select particles which can see in cam view
self.__check_particle_in_cam_view()
# Import background object asset
self.__import_foreground_object_asset()
# Randomly select n(n=num_foreground_object_in_scene) location from __particle_coordinates [1]
selected_indices = np.random.choice(self.__particle_coordinates.shape[0],
size = self.__num_foreground_object_in_scene,
replace = False)
fg_location = self.__particle_coordinates[selected_indices]
print("fg_num: {} ".format(len(fg_location)))
print("fg_location:\n {} ".format(fg_location))
        # Move all foreground objects to fg_location
fg_obj_list = []
for fg_obj in self.__foreground_object_collection.objects:
if fg_obj.type == "ARMATURE": # Select armature object only
fg_obj_list.append(fg_obj)
for i in range(self.__num_foreground_object_in_scene):
obj_location = (fg_location[i][0],fg_location[i][1], fg_location[i][2])
fg_obj_list[i].location = obj_location
print("Particles in cam view num : {}".format(self.__n_particle)) # Show particle in cam view num
print("Foreground Object Placement Randomize COMPLERED !!!")
if __name__ == '__main__':
randomizer = ForegroundObjectPlacementRandomizer()
randomizer.foreground_object_placement_randomize()
|
MichaelLiLee/Synthetic-Data-Generator-for-Human-Detection
|
HumanSDG/HumanSDG_020_ForegroundObjectPalcementRandomizer.py
|
HumanSDG_020_ForegroundObjectPalcementRandomizer.py
|
py
| 11,870 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35463051615
|
import math
from cmath import exp
import kwant
def hopping(sitei, sitej, phi, salt):
xi, yi = sitei.pos
xj, yj = sitej.pos
return -exp(-0.5j * phi * (xi - xj) * (yi + yj))
def onsite(site, phi, salt):
return 0.3 * kwant.digest.gauss(repr(site), salt) + 4
def test_qhe(W=16, L=8):
def central_region(pos):
x, y = pos
return -L < x < L and abs(y) < W - 5.5 * math.exp(-x**2 / 5**2)
lat = kwant.lattice.square(norbs=1)
syst = kwant.Builder()
syst[lat.shape(central_region, (0, 0))] = onsite
syst[lat.neighbors()] = hopping
lead = kwant.Builder(kwant.TranslationalSymmetry((-1, 0)))
lead[(lat(0, y) for y in range(-W + 1, W))] = 4
lead[lat.neighbors()] = hopping
syst.attach_lead(lead)
syst.attach_lead(lead.reversed())
syst = syst.finalized()
#### The following chunk of code can be uncommented to visualize the
#### conductance plateaus.
# from matplotlib import pyplot
# import numpy
# reciprocal_phis = numpy.linspace(0.1, 7, 200)
# conductances = []
# for phi in 1 / reciprocal_phis:
# smatrix = kwant.smatrix(syst, 1.0, [phi, ""])
# conductances.append(smatrix.transmission(1, 0))
# pyplot.plot(reciprocal_phis, conductances)
# pyplot.show()
for r_phis, T_nominal, max_err in [((1.3, 2.1), 1, 1e-7),
((3.2, 3.7), 2, 1e-3),
((5.2, 5.5), 3, 1e-1)]:
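        # each reciprocal-flux window sits on a quantum Hall conductance plateau,
        # so the transmission should equal the nominal integer within max_err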
for r_phi in r_phis:
params = dict(phi=1.0 / r_phi, salt="")
pc = syst.precalculate(1.0, params=params, what='all')
for result in [kwant.smatrix(pc, 1, params=params),
kwant.solvers.default.greens_function(pc, 1, params=params)]:
assert abs(T_nominal - result.transmission(1, 0)) < max_err
|
kwant-project/kwant
|
kwant/tests/test_comprehensive.py
|
test_comprehensive.py
|
py
| 1,848 |
python
|
en
|
code
| 76 |
github-code
|
6
|
21594560177
|
from django.shortcuts import render, redirect
import csv
from django.http import HttpResponse
from django.template.loader import render_to_string
# from weasyprint import HTML
# Create your views here.
from .models import Members, Loans, Deposits
from django.db.models import Avg, Sum
from .forms import MemberForm
def home(request):
total_dep = Deposits.objects.aggregate(mytotal=Sum('Amount_deposit'))
total_loan = Loans.objects.aggregate(myloan=Sum('Amount_loan'))
maximum_dep = Deposits.objects.aggregate(mymax=Avg('Amount_deposit'))
member_list=Members.objects.all()
total_members=member_list.count()
context={'member_list':member_list,'total_members':total_members,'total_dep':total_dep,'maximum_dep':maximum_dep,'total_loan':total_loan}
return render(request,'information_system/home.html',context)
def members(request,pk):
members=Members.objects.get(id=pk)
deposits=Deposits.objects.get(id=pk)
deposit = members.deposits_set.all()
deptotal=deposits.Amount_deposit
# myFilter=MemberFilter(request., qs=deposit)
# deposit=myFilter.qs
dep_count=deposit.count()
fname_by=members.FirstName
lname_by=members.LastName
dep_by=fname_by+lname_by
dep_amount=deposits.Amount_deposit
total_depos = Deposits.objects.aggregate(mytotal=Sum('Amount_deposit'))
context={'members':members,'dep_amount':dep_amount,'dep_by':dep_by,'deposit':deposit,'total_depos':total_depos,'deptotal':deptotal}
return render(request,'information_system/Members.html',context)
def export(request):
response=HttpResponse(content_type='text/csv')
writer=csv.writer(response)
writer.writerow(['Account Number','First Name','Last Name','Date_start'])
for member in Members.objects.all().values_list('AccountNumber','FirstName','LastName','Date_start'):
writer.writerow(member)
response['Content-Disposition'] = 'attachment; filename="Member_List.csv"'
return response
def update(request, pk):
member=Members.objects.get(id=pk)
form=MemberForm(instance=member)
if request.method == 'POST':
        form=MemberForm(request.POST, instance=member)
if form.is_valid():
form.save()
return redirect('/')
context = {'form':form}
return render(request,'information_system/memberform.html',context)
def loan(request):
loan_list=Loans.objects.all()
total_loans=loan_list.count()
context={'total_loans':total_loans,'loan_list':loan_list}
return render(request,'information_system/loan.html',context)
def deposit(request):
deposit_list=Deposits.objects.all()
##To get total deposit
total_dep = Deposits.objects.aggregate(mytotal=Sum('Amount_deposit'))
maximum_dep=Deposits.objects.aggregate(mymax=Avg('Amount_deposit'))
context={'total_dep':total_dep,'maximum_dep':maximum_dep,'deposit_list':deposit_list}
return render(request,'information_system/Deposit.html',context)
|
laloluka/sol
|
information_system/views.py
|
views.py
|
py
| 2,960 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14035574166
|
from ADMIN import *
from VIP_csv import *
from USER import *
if __name__ == '__main__':
sign = 0
user = 0
# loop program, can exit with "end"
while True:
if sign == 0:
print("Sign in as:")
who = ""
# not able to sign in as long as name is admin vip_user or name from DB
# admin: able to create names and categories
# vip user: able to create remove tasks or to add deadlines
# users (names from NameDB added by admin): complete task
# view_task, sort_task, filter_task, info - overall access
while True:
who = input()
find = 0
if path.exists("NameDB.txt"):
f = open("NameDB.txt", "r")
lines = f.readlines()
for line in lines:
if line.strip("\n") == who:
find = 1
f.close()
if who == "admin" or who == "vip_user" or find == 1:
sign = 1
break
elif who == "end":
sign = 0
break
else:
print("Login types: [admin, vip_user, name_user]")
if who == "end":
break
# giving privileges
if who == "admin":
user = 1
if who == "vip_user":
user = 2
if find == 1:
user = 3
# displaying rights
if user == 1:
print("Admin Full Rights:")
print(" add_name \t\t-->Adds name to NameDB\n remove_name \t-->Removes name from NameDB\n list_name "
"\t-->Lists names from NameDB")
print(" list_task \t-->including: Delete Task - 'D'\n\t\t\t\t\t\t\t Sort Column by Task, Date, "
"Name and Category - 'S'")
if user == 2:
print("Vip_user Partial Rights:")
print(" list_name")
print(" add_task -->including: Add Task, Date, Name and Category - 'A'\n\t\t\t\t\t\tDelete Task - "
"'D'\n\t\t\t\t\t\tSort Column by Task, Date, Name and Category - 'S'")
if user == 3:
print(who, "Limited Rights:")
print(" list_task -->including: Done Task - 'D'\n\t\t\t\t\t\t\tSort Column by Task, Date, "
"Name and Category - 'S'")
print("Additional 'exit' to sign out")
print("Additional 'end' to end session")
# sign = 0 not sign in, 1 sign in
if sign == 1:
command = input()
'full access'
if command == "exit":
sign = 0
if command == "end":
sign = 0
break
            # these two will be handled later
            if command == "info":
                print("info later")
if command == "altceva":
print("unknown command")
# User 1
if command == "add_name":
if user == 1:
name = input()
if name == "admin" or name == "vip_user":
print("admin/vip_user key words")
else:
add_name(name)
else:
print("No admin rights")
if command == "remove_name":
if user == 1:
name = input()
remove_name(name)
else:
print("No admin rights")
if command == "list_name":
if user == 1 or user == 2:
list_name()
else:
print("No rights")
# User 2
if command == "add_task":
if user == 2:
add_task()
else:
print("No admin rights")
# User 3
if command == "list_task":
if user == 3 or user == 1:
list_task()
else:
print("No admin rights")
|
ChitzAK/Curs_Python
|
TODOES.py
|
TODOES.py
|
py
| 4,249 |
python
|
en
|
code
| 0 |
github-code
|
6
|
37512481914
|
import os
import pytest
from contextlib import contextmanager
from tempfile import TemporaryDirectory, NamedTemporaryFile
from unittest.mock import patch
from zipfile import ZipFile
from repo2docker.contentproviders import Hydroshare
from repo2docker.contentproviders.base import ContentProviderException
def test_content_id():
with patch.object(Hydroshare, "urlopen") as fake_urlopen:
fake_urlopen.return_value.url = (
"https://www.hydroshare.org/resource/b8f6eae9d89241cf8b5904033460af61"
)
def read():
return '{"dates": [{"type": "modified", "start_date": "2019-09-25T16:09:17.006152Z"}]}'
fake_urlopen.return_value.read = read
hydro = Hydroshare()
hydro.detect("10.4211/hs.b8f6eae9d89241cf8b5904033460af61")
assert hydro.content_id == "b8f6eae9d89241cf8b5904033460af61.v1569427757"
def test_detect_hydroshare():
with patch.object(Hydroshare, "urlopen") as fake_urlopen:
fake_urlopen.return_value.url = (
"https://www.hydroshare.org/resource/b8f6eae9d89241cf8b5904033460af61"
)
def read():
return '{"dates": [{"type": "modified", "start_date": "2019-09-25T16:09:17.006152Z"}]}'
fake_urlopen.return_value.read = read
# valid Hydroshare DOIs trigger this content provider
expected = {
"host": {
"hostname": [
"https://www.hydroshare.org/resource/",
"http://www.hydroshare.org/resource/",
],
"django_irods": "https://www.hydroshare.org/django_irods/download/bags/",
"version": "https://www.hydroshare.org/hsapi/resource/{}/scimeta/elements",
},
"resource": "b8f6eae9d89241cf8b5904033460af61",
"version": "1569427757",
}
assert (
Hydroshare().detect(
"https://www.hydroshare.org/resource/b8f6eae9d89241cf8b5904033460af61"
)
== expected
)
# assert a call to urlopen was called to fetch version
assert fake_urlopen.call_count == 1
assert (
Hydroshare().detect("10.4211/hs.b8f6eae9d89241cf8b5904033460af61")
== expected
)
# assert 2 more calls were made, one to resolve the DOI and another to fetch the version
assert fake_urlopen.call_count == 3
assert (
Hydroshare().detect(
"https://doi.org/10.4211/hs.b8f6eae9d89241cf8b5904033460af61"
)
== expected
)
# assert 2 more calls were made, one to resolve the DOI and another to fetch the version
assert fake_urlopen.call_count == 5
with patch.object(Hydroshare, "urlopen") as fake_urlopen:
# Don't trigger the Hydroshare content provider
assert Hydroshare().detect("/some/path/here") is None
assert Hydroshare().detect("https://example.com/path/here") is None
# don't handle DOIs that aren't from Hydroshare
fake_urlopen.return_value.url = (
"http://joss.theoj.org/papers/10.21105/joss.01277"
)
def read():
return '{"dates": [{"type": "modified", "start_date": "2019-09-25T16:09:17.006152Z"}]}'
fake_urlopen.return_value.read = read
assert Hydroshare().detect("https://doi.org/10.21105/joss.01277") is None
@contextmanager
def hydroshare_archive(prefix="b8f6eae9d89241cf8b5904033460af61/data/contents"):
with NamedTemporaryFile(suffix=".zip") as zfile:
with ZipFile(zfile.name, mode="w") as zip:
zip.writestr("{}/some-file.txt".format(prefix), "some content")
zip.writestr("{}/some-other-file.txt".format(prefix), "some more content")
yield zfile
class MockInfo:
def __init__(self, content_type):
self.content_type = content_type
def get_content_type(self):
return self.content_type
class MockResponse:
def __init__(self, content_type, status_code):
self.content_type = content_type
self.status_code = status_code
self.mock_info = MockInfo(self.content_type)
def getcode(self):
return self.status_code
def info(self):
return self.mock_info
def test_fetch_bag():
# we "fetch" a local ZIP file to simulate a Hydroshare resource
with hydroshare_archive() as hydro_path:
with patch.object(
Hydroshare,
"urlopen",
side_effect=[
MockResponse("application/html", 200),
MockResponse("application/zip", 200),
],
):
with patch.object(
Hydroshare, "_urlretrieve", side_effect=[(hydro_path, None)]
):
hydro = Hydroshare()
hydro.resource_id = "b8f6eae9d89241cf8b5904033460af61"
spec = {
"host": {
"hostname": [
"https://www.hydroshare.org/resource/",
"http://www.hydroshare.org/resource/",
],
"django_irods": "https://www.hydroshare.org/django_irods/download/bags/",
},
"resource": "123456789",
}
with TemporaryDirectory() as d:
output = []
for l in hydro.fetch(spec, d):
output.append(l)
unpacked_files = set(os.listdir(d))
expected = set(["some-other-file.txt", "some-file.txt"])
assert expected == unpacked_files
def test_fetch_bag_failure():
with hydroshare_archive():
with patch.object(
Hydroshare, "urlopen", side_effect=[MockResponse("application/html", 500)]
):
hydro = Hydroshare()
spec = {
"host": {
"hostname": [
"https://www.hydroshare.org/resource/",
"http://www.hydroshare.org/resource/",
],
"django_irods": "https://www.hydroshare.org/django_irods/download/bags/",
},
"resource": "123456789",
}
with TemporaryDirectory() as d:
with pytest.raises(
ContentProviderException,
match=r"Failed to download bag\. status code 500\.",
):
# loop for yield statements
for l in hydro.fetch(spec, d):
pass
def test_fetch_bag_timeout():
with hydroshare_archive():
with patch.object(
Hydroshare, "urlopen", side_effect=[MockResponse("application/html", 200)]
):
hydro = Hydroshare()
spec = {
"host": {
"hostname": [
"https://www.hydroshare.org/resource/",
"http://www.hydroshare.org/resource/",
],
"django_irods": "https://www.hydroshare.org/django_irods/download/bags/",
},
"resource": "123456789",
}
with TemporaryDirectory() as d:
with pytest.raises(
ContentProviderException,
match=r"Bag taking too long to prepare, exiting now, try again later\.",
):
# loop for yield statements
for l in hydro.fetch(spec, d, timeout=0):
pass
|
igorkatinas/jupyter
|
tests/unit/contentproviders/test_hydroshare.py
|
test_hydroshare.py
|
py
| 7,638 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12902368672
|
#!/usr/bin/python3
import sqlite3
import gspread
from oauth2client.service_account import ServiceAccountCredentials
scope = ['https://spreadsheets.google.com/feeds',
'https://www.googleapis.com/auth/drive']
dbfile = 'TimeTrack4237.db'
dbconn = sqlite3.connect(dbfile)
student_hours = None
with dbconn:
dbcursor = dbconn.cursor()
dbcursor.execute("SELECT name, SUM( ROUND( CAST( (JULIANDAY(checkout) - JULIANDAY(checkin)) * 24 AS REAL), 2)) \
FROM activity, students \
WHERE activity.id = students.id \
AND checkin IS NOT NULL \
AND checkout IS NOT NULL \
GROUP BY name \
ORDER BY name")
student_hours = dbcursor.fetchall()
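    # JULIANDAY differences are in days, so multiplying by 24 converts each
    # check-in/check-out interval to hours before summing per student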
if student_hours is not None:
credentials = ServiceAccountCredentials.from_json_keyfile_name('credentials/timetrack4237-12f97a6ef02f.json', scope)
gc = gspread.authorize(credentials)
workbook = gc.open("TimeTrack4237")
workbook.sheet1.clear()
workbook.values_update(
'Sheet1!A1',
params={'valueInputOption': 'RAW'},
body={'values': student_hours}
)
|
washide/TimeTrack4237
|
UploadTotalHours.py
|
UploadTotalHours.py
|
py
| 1,173 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28041597167
|
import unittest
import os
from conans.test.utils.test_files import temp_folder
from conans.util.files import save
from time import sleep
class SaveTestCase(unittest.TestCase):
def setUp(self):
folder = temp_folder()
self.filepath = os.path.join(folder, "file.txt")
# Save some content and keep timestamp
self.content = "my content"
save(self.filepath, self.content)
self.timestamp = os.path.getmtime(self.filepath)
        sleep(1) # precision is seconds, so we need to sleep
def only_if_modified_true_test(self):
save(self.filepath, self.content, only_if_modified=True)
self.assertEqual(self.timestamp, os.path.getmtime(self.filepath))
def only_if_modified_false_test(self):
save(self.filepath, self.content, only_if_modified=False)
self.assertNotEqual(self.timestamp, os.path.getmtime(self.filepath))
def modified_only_true_test(self):
save(self.filepath, "other content", only_if_modified=True)
self.assertNotEqual(self.timestamp, os.path.getmtime(self.filepath))
def modified_only_false_test(self):
save(self.filepath, "other content", only_if_modified=False)
self.assertNotEqual(self.timestamp, os.path.getmtime(self.filepath))
|
pianoslum/conan
|
conans/test/util/files_test.py
|
files_test.py
|
py
| 1,276 |
python
|
en
|
code
| null |
github-code
|
6
|
73652360829
|
# Given a fixed-length integer array arr, duplicate each occurrence of zero,
# shifting the remaining elements to the right.
# Note: do not write elements beyond the length of the array.
# Requirement: modify the input array in place; do not return anything from the function.
class Solution(object):
def duplicateZeros(self, arr):
"""
:type arr: List[int]
:rtype: None Do not return anything, modify arr in-place instead.
"""
i = 0
while(i < len(arr)):
if arr[i] == 0:
if i != len(arr) - 1:
for j in range(len(arr) - 1, i + 1, -1):
arr[j] = arr[j - 1]
arr[i + 1] = 0
i += 1
i += 1
return arr
arr = [1,0,2,3,0,4,5,0]
a = Solution()
print(a.duplicateZeros(arr))
|
xxxxlc/leetcode
|
array/duplicateZeros.py
|
duplicateZeros.py
|
py
| 912 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
14374651405
|
"""Bridgy App Engine config.
"""
import logging
class StubsFilter(logging.Filter):
"""Suppress these INFO logs:
Sandbox prevented access to file "/usr/local/Caskroom/google-cloud-sdk"
If it is a static file, check that `application_readable: true` is set in your app.yaml
"""
def filter(self, record):
msg = record.getMessage()
if (msg.startswith('Sandbox prevented access to file')
or msg.startswith('If it is a static file, check that')):
return 0
return 1
logging.getLogger().addFilter(StubsFilter())
|
snarfed/bridgy-fed
|
appengine_config.py
|
appengine_config.py
|
py
| 580 |
python
|
en
|
code
| 219 |
github-code
|
6
|
17707768416
|
from tensorflow.keras.layers import Conv2D, Conv2DTranspose, concatenate, Dropout
from model.create_layers import create_conv_layers
class Decoder:
def __init__(self, inputs, conv_layers, output_channels, dropout=0.3, name="Decoder"):
self.inputs = inputs
self.dropout = dropout
self.name = name
feature1, feature2, feature3, feature4 = conv_layers
block1 = self._create_block(inputs=inputs, conv_output=feature4, filters=[512, 512], kernel_size=(3, 3),
strides=(2, 2), dropout=0.3)
block2 = self._create_block(inputs=block1, conv_output=feature3, filters=[256, 256], kernel_size=(3, 3),
strides=(2, 2), dropout=0.3)
block3 = self._create_block(inputs=block2, conv_output=feature2, filters=[128, 128], kernel_size=(3, 3),
strides=(2, 2), dropout=0.3)
block4 = self._create_block(inputs=block3, conv_output=feature1, filters=[64, 64], kernel_size=(3, 3),
strides=(2, 2), dropout=0.3)
self.outputs = Conv2D(filters=output_channels, kernel_size=(1, 1), activation='softmax')(block4)
def _create_block(self, inputs, conv_output, filters, kernel_size, strides, dropout):
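        # U-Net style up-block: transposed-conv upsampling, skip-connection
        # concatenation with the matching encoder feature map, dropout, then
        # stacked 3x3 convolutions via create_conv_layers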
conv_t = Conv2DTranspose(filters[0], kernel_size, strides=strides, padding='same')(inputs)
output = concatenate([conv_t, conv_output])
output = Dropout(dropout)(output)
output = create_conv_layers(output, filters, kernel_size=(3, 3))
return output
def get_output(self):
return self.outputs
|
amahiner7/UNet_oxford_iiit_pet-Tensorflow
|
model/decoder.py
|
decoder.py
|
py
| 1,643 |
python
|
en
|
code
| 0 |
github-code
|
6
|
650276737
|
#! /bin/python
# IMPORTANT do threadctl import first (before numpy imports)
from threadpoolctl import threadpool_limits
import os
import sys
import json
import luigi
import nifty.tools as nt
import cluster_tools.utils.volume_utils as vu
import cluster_tools.utils.function_utils as fu
from cluster_tools.cluster_tasks import SlurmTask, LocalTask, LSFTask
from elf.io.label_multiset_wrapper import LabelMultisetWrapper
from elf.label_multiset import create_multiset_from_labels, serialize_multiset
class CreateMultisetBase(luigi.Task):
""" CreateMultiset base class
"""
task_name = 'create_multiset'
src_file = os.path.abspath(__file__)
allow_retry = False
# input and output volumes
input_path = luigi.Parameter()
input_key = luigi.Parameter()
output_path = luigi.Parameter()
output_key = luigi.Parameter()
# dependency
dependency = luigi.TaskParameter()
def requires(self):
return self.dependency
@staticmethod
def default_task_config():
config = LocalTask.default_task_config()
config.update({'compression': 'gzip'})
return config
def run_impl(self):
# get the global config and init configs
shebang, block_shape, roi_begin, roi_end = self.global_config_values()
self.init(shebang)
# get shape and make block config
shape = vu.get_shape(self.input_path, self.input_key)
# load the create_multiset config
config = self.get_task_config()
compression = config.get('compression', 'gzip')
# require output dataset
with vu.file_reader(self.output_path) as f:
f.require_dataset(self.output_key, shape=shape, chunks=tuple(block_shape),
compression=compression, dtype='uint8')
# update the config with input and output paths and keys
# as well as block shape
config.update({'input_path': self.input_path, 'input_key': self.input_key,
'output_path': self.output_path, 'output_key': self.output_key,
'block_shape': block_shape})
block_list = vu.blocks_in_volume(shape, block_shape, roi_begin, roi_end)
self._write_log('scheduling %i blocks to be processed' % len(block_list))
n_jobs = min(len(block_list), self.max_jobs)
# prime and run the jobs
self.prepare_jobs(n_jobs, block_list, config)
self.submit_jobs(n_jobs)
# wait till jobs finish and check for job success
self.wait_for_jobs()
self.check_jobs(n_jobs)
class CreateMultisetLocal(CreateMultisetBase, LocalTask):
"""
CreateMultiset on local machine
"""
pass
class CreateMultisetSlurm(CreateMultisetBase, SlurmTask):
"""
CreateMultiset on slurm cluster
"""
pass
class CreateMultisetLSF(CreateMultisetBase, LSFTask):
"""
CreateMultiset on lsf cluster
"""
pass
#
# Implementation
#
@threadpool_limits.wrap(limits=1) # restrict the numpy threadpool to 1 to avoid oversubscription
def _create_multiset_block(blocking, block_id, ds_in, ds_out):
fu.log("start processing block %i" % block_id)
block = blocking.getBlock(block_id)
bb = vu.block_to_bb(block)
labels = ds_in[bb]
# we can't encode the paintra ignore label
paintera_ignore_label = 18446744073709551615
pignore_mask = labels == paintera_ignore_label
if pignore_mask.sum() > 0:
labels[pignore_mask] = 0
if labels.sum() == 0:
fu.log("block %i is empty" % block_id)
fu.log_block_success(block_id)
return
# compute multiset from input labels
multiset = create_multiset_from_labels(labels)
ser = serialize_multiset(multiset)
chunk_id = tuple(bs // ch for bs, ch in zip(block.begin, ds_out.chunks))
ds_out.write_chunk(chunk_id, ser, True)
fu.log_block_success(block_id)
def write_metadata(ds_out, max_id):
attrs = ds_out.attrs
attrs['maxId'] = max_id
attrs['isLabelMultiset'] = True
@threadpool_limits.wrap(limits=1) # restrict the numpy threadpool to 1 to avoid oversubscription
def create_multiset(job_id, config_path):
fu.log("start processing job %i" % job_id)
fu.log("reading config from %s" % config_path)
with open(config_path, 'r') as f:
config = json.load(f)
    # read the input config
input_path = config['input_path']
input_key = config['input_key']
block_shape = list(config['block_shape'])
block_list = config['block_list']
# read the output config
output_path = config['output_path']
output_key = config['output_key']
shape = list(vu.get_shape(output_path, output_key))
# get the blocking
blocking = nt.blocking([0, 0, 0], shape, block_shape)
# submit blocks
with vu.file_reader(input_path, 'r') as f_in, vu.file_reader(output_path) as f_out:
ds_in = f_in[input_key]
if ds_in.attrs.get('isLabelMultiset', False):
ds_in = LabelMultisetWrapper(ds_in)
ds_out = f_out[output_key]
for block_id in block_list:
_create_multiset_block(blocking, block_id, ds_in, ds_out)
if job_id == 0:
max_id = ds_in.attrs['maxId']
write_metadata(ds_out, max_id)
# log success
fu.log_job_success(job_id)
if __name__ == '__main__':
path = sys.argv[1]
assert os.path.exists(path), path
job_id = int(os.path.split(path)[1].split('.')[0].split('_')[-1])
create_multiset(job_id, path)
|
constantinpape/cluster_tools
|
cluster_tools/label_multisets/create_multiset.py
|
create_multiset.py
|
py
| 5,506 |
python
|
en
|
code
| 32 |
github-code
|
6
|
3026246186
|
# -*- coding: utf-8
from woo import utils,pack,export,qt
import gts,os
def Plane(v1,v2,v3,v4):
pts = [ [Vector3(v1),Vector3(v2),Vector3(v3),Vector3(v4)] ]
return pack.sweptPolylines2gtsSurface(pts,capStart=True,capEnd=True)
# Parameters
tc=0.001# collision time
en=0.3 # normal restitution coefficient
es=0.3 # tangential restitution coefficient
frictionAngle=radians(35)#
density=2700
kw=utils.getViscoelasticFromSpheresInteraction(10e3,tc,en,es)
params=utils.getViscoelasticFromSpheresInteraction(10e3,tc,en,es)
# facets material
facetMat=O.materials.append(ViscElMat(frictionAngle=frictionAngle,**params))
# default spheres material
dfltSpheresMat=O.materials.append(ViscElMat(density=density,frictionAngle=frictionAngle))
O.dt=.2*tc # time step
Rs=0.02 # mean particle radius
Rf=0.01 # dispersion (Rs±Rf*Rs)
nSpheres=1000# number of particles
# Create geometry
pln=Plane( (-.5, -.5, 0), (.5, -.5, -.05), (.5, .5, 0), (-.5, .5, -.05) );
plnIds=O.bodies.append(pack.gtsSurface2Facets(pln.faces(),material=facetMat,color=(0,1,0)))
fct=Plane( (-.25, -.25, .5), (.25, -.25, .5), (.25, .25, .5), (-.25, .25, .5) );
fctIds=O.bodies.append(pack.gtsSurface2Facets(fct.faces(),material=facetMat,color=(1,0,0),noBound=True))
# Create spheres
sp=pack.SpherePack();
sp.makeCloud(Vector3(-.5, -.5, 0),Vector3(.5, .5, .2), Rs, Rf, int(nSpheres), False)
spheres=O.bodies.append([utils.sphere(s[0],s[1],color=(0.929,0.412,0.412),material=dfltSpheresMat) for s in sp])
for id in spheres:
s=O.bodies[id]
p=utils.getViscoelasticFromSpheresInteraction(s.state['mass'],tc,en,es)
s.mat['kn'],s.mat['cn'],s.mat['ks'],s.mat['cs']=p['kn'],p['cn'],p['ks'],p['cs']
# Create engines
O.engines=[
ForceResetter(),
InsertionSortCollider([Bo1_Sphere_Aabb(),Bo1_Facet_Aabb()]),
InteractionLoop(
[Ig2_Sphere_Sphere_ScGeom(), Ig2_Facet_Sphere_ScGeom()],
[Ip2_ViscElMat_ViscElMat_ViscElPhys()],
[Law2_ScGeom_ViscElPhys_Basic()],
),
GravityEngine(gravity=[0,0,-9.81]),
NewtonIntegrator(damping=0),
ResetRandomPosition(factoryFacets=fctIds,velocity=(0,0,-2),virtPeriod=0.01,ids=spheres,point=(0,0,-.5),normal=(0,0,1),maxAttempts=100),
]
#renderer = qt.Renderer()
#qt.View()
#O.saveTmp()
#O.run()
|
Azeko2xo/woodem
|
scripts/test-OLD/ResetRandomPosition.py
|
ResetRandomPosition.py
|
py
| 2,200 |
python
|
en
|
code
| 2 |
github-code
|
6
|
19678508262
|
from os import path
import os
from .core import ZephyrBinaryRunner, get_env_or_bail
DEFAULT_PYOCD_GDB_PORT = 3333
class PyOcdBinaryRunner(ZephyrBinaryRunner):
'''Runner front-end for pyocd-flashtool.'''
def __init__(self, target, flashtool='pyocd-flashtool',
gdb=None, gdbserver='pyocd-gdbserver',
gdb_port=DEFAULT_PYOCD_GDB_PORT, tui=None,
bin_name=None, elf_name=None,
board_id=None, daparg=None, debug=False):
super(PyOcdBinaryRunner, self).__init__(debug=debug)
self.target_args = ['-t', target]
self.flashtool = flashtool
self.gdb_cmd = [gdb] if gdb is not None else None
self.gdbserver = gdbserver
self.gdb_port = gdb_port
self.tui_args = [tui] if tui is not None else []
self.bin_name = bin_name
self.elf_name = elf_name
board_args = []
if board_id is not None:
board_args = ['-b', board_id]
self.board_args = board_args
daparg_args = []
if daparg is not None:
daparg_args = ['-da', daparg]
self.daparg_args = daparg_args
def replaces_shell_script(shell_script, command):
return (command in {'flash', 'debug', 'debugserver'} and
shell_script == 'pyocd.sh')
def port_args(self):
return ['-p', str(self.gdb_port)]
def create_from_env(command, debug):
'''Create runner from environment.
Required:
- PYOCD_TARGET: target override
Optional:
- PYOCD_DAPARG: arguments to pass to pyocd tool, default is none
- PYOCD_BOARD_ID: ID of board to flash, default is to prompt
Required for 'flash':
- O: build output directory
- KERNEL_BIN_NAME: name of kernel binary
Optional for 'flash':
- PYOCD_FLASHTOOL: flash tool path, defaults to pyocd-flashtool
Required for 'debug':
- O: build output directory
- KERNEL_ELF_NAME
- GDB: gdb executable
Optional for 'debug', 'debugserver':
- TUI: one additional argument to GDB (e.g. -tui)
- GDB_PORT: pyocd gdb port, defaults to 3333
- PYOCD_GDBSERVER: gdb server executable, defaults to pyocd-gdbserver
'''
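        # Example with hypothetical values: PYOCD_TARGET=nrf52, O=build,
        # KERNEL_BIN_NAME=zephyr.bin and GDB=arm-none-eabi-gdb would yield a
        # runner that flashes build/zephyr.bin and serves gdb on port 3333.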
target = get_env_or_bail('PYOCD_TARGET')
o = os.environ.get('O', None)
bin_ = os.environ.get('KERNEL_BIN_NAME', None)
elf = os.environ.get('KERNEL_ELF_NAME', None)
bin_name = None
elf_name = None
if o is not None:
if bin_ is not None:
bin_name = path.join(o, bin_)
if elf is not None:
elf_name = path.join(o, elf)
flashtool = os.environ.get('PYOCD_FLASHTOOL', 'pyocd-flashtool')
board_id = os.environ.get('PYOCD_BOARD_ID', None)
daparg = os.environ.get('PYOCD_DAPARG', None)
gdb = os.environ.get('GDB', None)
gdbserver = os.environ.get('PYOCD_GDBSERVER', 'pyocd-gdbserver')
gdb_port = os.environ.get('GDB_PORT', DEFAULT_PYOCD_GDB_PORT)
tui = os.environ.get('TUI', None)
return PyOcdBinaryRunner(target, flashtool=flashtool, gdb=gdb,
gdbserver=gdbserver, gdb_port=gdb_port,
tui=tui, bin_name=bin_name, elf_name=elf_name,
board_id=board_id, daparg=daparg, debug=debug)
def run(self, command, **kwargs):
if command not in {'flash', 'debug', 'debugserver'}:
raise ValueError('{} is not supported'.format(command))
if command == 'flash':
self.flash(**kwargs)
else:
self.debug_debugserver(command, **kwargs)
def flash(self, **kwargs):
if self.bin_name is None:
raise ValueError('Cannot flash; bin_name is missing')
cmd = ([self.flashtool] +
self.daparg_args +
self.target_args +
self.board_args +
[self.bin_name])
print('Flashing Target Device')
self.check_call(cmd)
def print_gdbserver_message(self):
print('pyOCD GDB server running on port {}'.format(self.gdb_port))
def debug_debugserver(self, command, **kwargs):
server_cmd = ([self.gdbserver] +
self.daparg_args +
self.port_args() +
self.target_args +
self.board_args)
if command == 'debugserver':
self.print_gdbserver_message()
self.check_call(server_cmd)
else:
if self.gdb_cmd is None:
raise ValueError('Cannot debug; gdb is missing')
if self.elf_name is None:
raise ValueError('Cannot debug; elf is missing')
client_cmd = (self.gdb_cmd +
self.tui_args +
[self.elf_name] +
['-ex', 'target remote :{}'.format(self.gdb_port),
'-ex', 'load',
'-ex', 'monitor reset halt'])
self.print_gdbserver_message()
self.run_server_and_client(server_cmd, client_cmd)
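# --- Hedged usage sketch (appended; not part of the original Zephyr runner) ---
# The create_from_env() docstring above lists the environment variables this
# runner reads. The snippet below only illustrates driving that path by hand;
# the target name, build directory and binary name are hypothetical
# placeholders, and no external pyocd tool is invoked here.
if __name__ == '__main__':
    os.environ.setdefault('PYOCD_TARGET', 'nrf52')          # assumed target name
    os.environ.setdefault('O', '/tmp/build')                # assumed build output dir
    os.environ.setdefault('KERNEL_BIN_NAME', 'zephyr.bin')  # assumed kernel binary name
    runner = PyOcdBinaryRunner.create_from_env('flash', debug=True)
    print('would flash', runner.bin_name, 'with', runner.flashtool, runner.target_args)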
|
rogerioprando/zephyr
|
scripts/support/runner/pyocd.py
|
pyocd.py
|
py
| 5,225 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19400189989
|
from typing import List
import random
# 398. Random Pick Index
# https://leetcode-cn.com/problems/random-pick-index/
# Reservoir sampling
class Solution:
def __init__(self, nums: List[int]):
self.nums = nums
def pick(self, target: int) -> int:
ans = -1
k = 1
for i, each in enumerate(self.nums):
if each == target:
rand = random.randint(1, k)
if rand == 1:
# print('hit')
ans = i
k += 1
return ans
nums = [1, 2, 3, 3, 3]
# Your Solution object will be instantiated and called as such:
obj = Solution(nums)
param_1 = obj.pick(3)
print(param_1)
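# --- Hedged sanity check (appended; not part of the original solution) --------
# With k = 1, reservoir sampling should return each matching index with equal
# probability. Repeating pick() many times and counting the results gives an
# empirical check; the exact counts will of course vary between runs.
from collections import Counter
counts = Counter(obj.pick(3) for _ in range(30000))
print(counts)  # indices 2, 3 and 4 should each appear roughly 10000 times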
|
Yigang0622/LeetCode
|
randomNumIndexing.py
|
randomNumIndexing.py
|
py
| 693 |
python
|
en
|
code
| 1 |
github-code
|
6
|
25353649574
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 19 12:30:02 2021
@author: Nassim
"""
import os
from tkinter import *
from tkinter import filedialog, ttk
import numpy as np
import esat
def main():
root = Tk()
root.title("ESAT")
root.configure(bg="lightsteelblue")
root.geometry("800x400")
main_folder = filedialog.askdirectory()
welcome_label = Label(
root, text="Welcome!", font=("Helvetica", 12), bg="lightsteelblue"
).grid(column=1, row=0)
starts = []
stops = []
folders = [folder for folder in os.listdir(main_folder)]
input_frame = Frame(root, bg="lightsteelblue")
input_frame.grid(column=0, row=1)
head_label = Label(input_frame, text="Filenames", bg="lightsteelblue")
head_label.grid(column=0, row=2, columnspan=3)
start_label = Label(input_frame, text="Time of first [h]", bg="lightsteelblue")
start_label.grid(column=2, row=3)
stop_label = Label(input_frame, text="Time of last [h]", bg="lightsteelblue")
stop_label.grid(column=3, row=3)
for index, folder in enumerate(folders):
row = index + 5
fold_label = Label(input_frame, text=folder, bg="lightsteelblue", width=5)
fold_label.grid(column=1, row=row)
start_entry = Entry(input_frame, borderwidth=3)
start_entry.grid(column=2, row=row)
starts.append(start_entry)
stop_entry = Entry(input_frame, borderwidth=3)
stop_entry.grid(column=3, row=row)
stops.append(stop_entry)
grow_entry = Entry(input_frame, borderwidth=3)
grow_entry.grid(column=2, row=row + 2)
grow_label = Label(input_frame, text="upper limit [%]", bg="lightsteelblue")
grow_label.grid(column=1, row=row + 2)
shrink_entry = Entry(input_frame, borderwidth=3)
shrink_entry.grid(column=2, row=row + 3)
shrink_label = Label(input_frame, text="lower limit [%]", bg="lightsteelblue")
shrink_label.grid(column=1, row=row + 3)
def init():
import imp
imp.reload(esat)
print("reloaded esat")
t_start = []
t_end = []
if len(folders) == 0:
start = float(starts[0].get())
stop = float(stops[0].get())
t_start.append(start)
t_end.append(stop)
else:
for i in range(len(folders)):
t_start.append(float(starts[i].get()))
t_end.append(float(stops[i].get()))
times = [t_start, t_end]
try:
growth = float(grow_entry.get())
shrink = float(shrink_entry.get())
esat.run(
main_folder, times, growth=growth, shrink=shrink
)
except ValueError:
esat.run(main_folder, times)
run_but = Button(input_frame, text="Run analysis", borderwidth=3, command=init)
run_but.grid(column=3, row=row + 1)
def restart():
new_main = filedialog.askdirectory()
if new_main == main_folder:
pass
else:
for widget in input_frame.winfo_children():
widget.destroy()
main()
def help_text():
help_frame = Frame(root, height=800, width=400)
help_frame.grid(column=0, row=0)
help_label = Label(
help_frame,
text="""
Welcome to this Embryonic Structure Analysis Tool, or ESAT for short.
To help you with this application, here are some basic instructions:
With the button \'Open files\', under the Files menu, you can browse
through your computer to select the folder where the pictures you want
to be analysed are saved.
        Please note: for the analysis to work properly, it is essential that you
        select a folder which contains one or more sub-folders, each holding the
        images to be analysed. For example, a folder "Embryos" might contain the
        three folders "embryo_1", "embryo_2" and "embryo_3",
        each of which contains a number of pictures.
        Selecting the "Embryos" main folder then runs the analysis
        on all 3 embryos separately.
In the box \'Time of first\', fill in the number situated on
the bottom right corner of the first picture. And in the entry box
\'Time of last\', fill in the number situated on the bottom right corner
of the last picture.
        Note: please use points (.) to separate decimal numbers, not commas (,).
        Furthermore, the preferred maximum growth and collapse of the embryo are
        adjustable in the entry boxes below the time entry boxes.
        These limits cap the percentage difference between two consecutive data
        points; e.g. the growth between two data points will be at most 'upper limit'.
        The default value is 20%.
        The results are automatically exported to the same location as the main folder
        as an Excel table with 2 sheets:
        the first sheet gives a column of all measured surface areas per folder,
        the second contains the slopes of each folder.
        Lastly, a folder named [selected_folder]_analysed is added to the directory,
        which contains a folder per embryo holding all images with the detected circles drawn in them,
        and a folder containing the plots of all embryos.
""",
font=("helvetica", 9),
bg="lightsteelblue",
).grid(column=1, row=0)
back_button = Button(
help_frame,
text="Back to analysis",
bg="lightsteelblue",
command=help_frame.destroy,
)
back_button.grid(column=0, row=0)
root.option_add("*tearOff", FALSE)
menu = Menu(root)
root.config(menu=menu)
subfile = Menu(menu)
menu.add_cascade(menu=subfile, label="Folders")
subfile.add_command(label="open", command=restart)
# subfile.add_command(label='close', command = restart_program)
    # option: change file if the detected circle turns out to be wrong
# sub-dropdown-menu
subhelp = Menu(menu)
menu.add_cascade(menu=subhelp, label="Help")
subhelp.add_command(label="Help", command=help_text)
    # link the help function to this submenu
root.mainloop()
def gridit(item, row, column):
item.grid(column=column, row=row)
main()
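# --- Hedged headless sketch (appended; not part of the original script) -------
# The init() callback above ultimately calls esat.run(main_folder, times,
# growth=..., shrink=...). For batch use without the Tkinter GUI the same call
# can be made directly; the folder path and times below are hypothetical
# placeholders for a layout with a single embryo sub-folder.
def run_headless():
    folder = "/path/to/Embryos"   # assumed: contains one sub-folder of images per embryo
    times = [[0.0], [24.0]]       # [times of first picture, times of last picture] per folder
    esat.run(folder, times, growth=20.0, shrink=20.0)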
|
NassimOumessoud/esat
|
scripts/main.py
|
main.py
|
py
| 6,588 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24826877634
|
import numpy as np
from timeit import default_timer as timer
import utils
from utils import *
from PyCuNN import *
from scipy.spatial.distance import euclidean as euc
import pickle
class rnn(object):
def __init__(self, layers):
super(rnn, self).__init__()
self.layers = layers
self.w1 = init_weights([self.layers[0],self.layers[1]])
self.b1 = init_weights([1,self.layers[1]])
self.w2 = init_weights([self.layers[1],self.layers[2]])
self.b2 = init_weights([1,self.layers[2]])
self.wr = init_weights([self.layers[1],self.layers[1]])
self.br = init_weights([1,self.layers[1]])
self.gw1 = zeros([self.layers[0],self.layers[1]])
self.gw2 = zeros([self.layers[1],self.layers[2]])
self.gwr = zeros([self.layers[1],self.layers[1]])
self.gb1 = zeros([1,self.layers[1]])
self.gb2 = zeros([1,self.layers[2]])
self.gbr = zeros([1,self.layers[1]])
self.gOutput = zeros([1,self.layers[2]])
self.gInput = zeros([1,self.layers[2]])
self.gRecurrent = zeros([1,self.layers[1]])
self.delta = zeros([1,self.layers[1]])
self.updates_tm1 = [self.gw2,self.gb2,self.gw1,self.gb1,self.gwr,self.gbr]
self.output = zeros([1,self.layers[2]])
self.outputs = []
self.h = zeros([1,self.layers[1]])
self.r = zeros([1,self.layers[1]])
self.hs = [zeros([1,self.layers[1]])]
self.inputs=[]
self.lr = 0.01
def forward(self,x):
assert x.shape[1] == self.layers[0]
fp(x,self.w1,self.b1,self.h)
fp(self.hs[-1],self.wr,self.br,self.r)
mmadd(self.h,self.r,self.h)
mtanh(self.h,self.h)
fp(self.h,self.w2,self.b2,self.output)
mtanh(self.output,self.output)
self.inputs.append(mcopy(x))
self.hs.append(mcopy(self.h))
self.outputs.append(mcopy(self.output))
return self.output
def bptt(self,t):
for q in range(len(t)-2):
assert t[-1].shape[1] == self.layers[2]
mmsubtract(t[-1],self.outputs[-1],self.gOutput)
msmult(self.gOutput,-1.,self.gOutput)
bp(self.gOutput,self.w2,self.gw2,self.gb2,self.hs[-1],self.delta)
mmadd(self.delta,self.gRecurrent,self.delta)
mtanh_deriv(self.delta,self.hs[-1],self.delta)
bp(self.delta,self.wr,self.gwr,self.gbr,self.hs[-2],self.gRecurrent)
bp(self.delta,self.w1,self.gw1,self.gb1,self.inputs[-1],self.gInput)
#print(np.argmax(asarray(self.inputs[-1])))
self.hs.pop()
self.inputs.pop()
self.outputs.pop()
t.pop()
def updateWeights(self):
mclip(self.gwr)
mclip(self.gw2)
mclip(self.gw1)
update_weights(self.w2,self.gw2,self.lr)
update_weights(self.b2,self.gb2,self.lr)
update_weights(self.w1,self.gw1,self.lr)
update_weights(self.b1,self.gb1,self.lr)
update_weights(self.wr,self.gwr,self.lr)
update_weights(self.br,self.gbr,self.lr)
#print(asarray(self.wr))
self.forget()
def train(self,ds,epochs,lr=0.001,decay=0.99):
#assert ds_x.shape[0] is ds_t.shape[0], "Size Mismatch: Ensure number of examples in input and target datasets is equal"
self.lr = lr
acc = 0
w = 0
time = 0.
wps = 0.
for epoch in range(epochs):
start = timer()
correct = 0
count = 0
for seq in range(len(ds)-1):
x = ds[seq]
targets = []
st = timer()
for t in range(len(x)-1):
count += 1
w += 1
inval = encode(x[t])
#print('inval',asarray(inval))
tarval = encode(x[t+1])
#print('tarval',asarray(tarval))
self.forward(inval)
#print('output',asarray(self.output))
targets.append(mcopy(tarval))
if most_similar(self.outputs[-1]) == x[t+1]:
#print('correct')
correct += 1
#print(targets)
acc = float(correct)/float(count)
self.bptt(targets)
#print('output',asarray(self.outputs[-1]))
#print(asarray(self.outputs[-1]))
#print('Outputs:',decode(self.outputs[-2]),decode(self.outputs[-1]),'Input',x[-2],'Target',decode(x[-1]))
#print('gw2',self.gw2.asarray(),'gb2',self.gb2.asarray(),'iifog',cm.sum(self.hidden_layer.gi_IFOG,axis=1).sum(axis=0).asarray(),'hifog',self.hidden_layer.hm1_IFOG.asarray())
self.updateWeights()
time += timer()-st
wps = float(w)/time
#print('wps:',wps,"eta:",(float(utils.total)/wps)/60,'min')
#if (seq % 100 == 0) and (self.lr > 0.005):
#self.lr = self.lr * decay
time = timer() - start
sent = [ds[10][0]]
for i in range(15):
x = encode(sent[-1])
y = self.forward(x)
sent.append(most_similar(y))
self.forget()
print('Trained Epoch:',epoch+1,"With Accuracy:",acc, 'in', time, 'seconds', 'Learning Rate:',self.lr, 'wps',wps)
print('Generated Sentence:',sent)
def reset_grads(self):
mzero(self.gw1)
mzero(self.gw2)
mzero(self.gwr)
mzero(self.gb1)
mzero(self.gb2)
mzero(self.gbr)
mzero(self.gOutput)
mzero(self.gInput)
mzero(self.gRecurrent)
mzero(self.delta)
self.updates_tm1 = [self.gw2,self.gb2,self.gw1,self.gb1,self.gwr,self.gbr]
def reset_activations(self):
mzero(self.output)
self.outputs = []
mzero(self.h)
mzero(self.r)
mzero(self.delta)
mzero(self.gRecurrent)
mzero(self.gInput)
self.hs = [zeros([1,self.layers[1]])]
self.inputs=[]
def forget(self):
self.reset_grads()
self.reset_activations()
def test_rnn():
ds = load_sentences_data('../data/ptb.train.short.txt',use_embeddings=True)
net = rnn([500,1000,500])
net.train(ds,150)
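# --- Hedged reference sketch (appended; illustration only) --------------------
# forward() above appears to compute h_t = tanh(x_t @ w1 + b1 + h_{t-1} @ wr + br)
# and y_t = tanh(h_t @ w2 + b2) on the GPU via the PyCuNN kernels. The NumPy
# restatement below is only a shape/equation check of that single step.
def numpy_rnn_step(x, h_prev, w1, b1, wr, br, w2, b2):
    h = np.tanh(x @ w1 + b1 + h_prev @ wr + br)
    y = np.tanh(h @ w2 + b2)
    return y, h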
|
tylerpayne/PyCuNN
|
nn/rnn.py
|
rnn.py
|
py
| 5,216 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5479249067
|
"""
Proximal Policy Optimization Algorithms (PPO):
https://arxiv.org/pdf/1707.06347.pdf
Related Tricks(May not be useful):
Mastering Complex Control in MOBA Games with Deep Reinforcement Learning (Dual Clip)
https://arxiv.org/pdf/1912.09729.pdf
A Closer Look at Deep Policy Gradients (Value clip, Reward normalizer)
https://openreview.net/pdf?id=ryxdEkHtPS
Revisiting Design Choices in Proximal Policy Optimization
https://arxiv.org/pdf/2009.10897.pdf
Learning Complex Dexterous Manipulation with Deep Reinforcement Learning and Demonstrations (DAPG):
https://arxiv.org/pdf/1709.10087.pdf
"""
from collections import defaultdict
from copy import deepcopy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from maniskill2_learn.env import build_replay
from maniskill2_learn.networks import build_actor_critic, build_model
from maniskill2_learn.utils.torch import build_optimizer
from maniskill2_learn.utils.data import DictArray, GDict, to_np, to_torch
from maniskill2_learn.utils.meta import get_logger, get_world_rank, get_world_size
from maniskill2_learn.utils.torch import BaseAgent, RunningMeanStdTorch, RunningSecondMomentumTorch, barrier, get_flat_grads, get_flat_params, set_flat_grads
from ..builder import MFRL
@MFRL.register_module()
class PPO(BaseAgent):
def __init__(
self,
actor_cfg,
critic_cfg,
env_params,
gamma=0.99,
lmbda=0.95,
max_kl=None,
obs_norm=False,
rew_norm=True,
adv_norm=True,
recompute_value=True,
eps_clip=0.2,
critic_coeff=0.5,
entropy_coeff=0.0,
num_epoch=10,
critic_epoch=-1,
actor_epoch=-1,
num_mini_batch=-1,
critic_warmup_epoch=0,
batch_size=256,
max_grad_norm=0.5,
rms_grad_clip=None,
dual_clip=None,
critic_clip=False,
shared_backbone=False,
detach_actor_feature=True,
debug_grad=False,
demo_replay_cfg=None,
dapg_lambda=0.1,
dapg_damping=0.995,
ignore_dones=True,
visual_state_coeff=-1,
visual_state_mlp_cfg=None,
**kwargs
):
super(PPO, self).__init__()
assert dual_clip is None or dual_clip > 1.0, "Dual-clip PPO parameter should greater than 1.0."
assert max_grad_norm is None or rms_grad_clip is None, "Only one gradient clip mode is allowed!"
        assert (num_epoch > 0 and (actor_epoch < 0 and critic_epoch < 0)) or (
            num_epoch < 0 and (actor_epoch > 0 and critic_epoch > 0)
        ), "We need only one set of the parameters num_epoch > 0, (actor_epoch > 0 and critic_epoch > 0)."
if not rew_norm:
assert not critic_clip, "Value clip is available only when `reward_normalization` is True"
actor_cfg = deepcopy(actor_cfg)
critic_cfg = deepcopy(critic_cfg)
actor_optim_cfg = actor_cfg.pop("optim_cfg", None)
critic_optim_cfg = critic_cfg.pop("optim_cfg", None)
obs_shape = env_params["obs_shape"]
self.is_discrete = env_params["is_discrete"]
self.gamma = gamma
self.lmbda = lmbda
self.adv_norm = adv_norm
self.obs_rms = RunningMeanStdTorch(obs_shape, clip_max=10) if obs_norm else None
self.rew_rms = RunningMeanStdTorch(1) if rew_norm else None
self.critic_coeff = critic_coeff
self.entropy_coeff = entropy_coeff
self.eps_clip = eps_clip
self.dual_clip = dual_clip
self.critic_clip = critic_clip
self.max_kl = max_kl
self.recompute_value = recompute_value
self.max_grad_norm = max_grad_norm
self.rms_grad_clip = rms_grad_clip
self.debug_grad = debug_grad
self.num_mini_batch = num_mini_batch
self.batch_size = batch_size # The batch size for policy gradient
self.critic_warmup_epoch = critic_warmup_epoch
self.num_epoch = num_epoch
self.critic_epoch = critic_epoch
self.actor_epoch = actor_epoch
# Use extra state to get better feature
self.regress_visual_state = visual_state_coeff > 0 and visual_state_mlp_cfg is not None and "visual_state" in obs_shape
self.visual_state_coeff = visual_state_coeff
if self.regress_visual_state:
assert shared_backbone, "Only Visuomotor policy supports extra state fitting"
# For DAPG
self.dapg_lambda = nn.Parameter(to_torch(dapg_lambda), requires_grad=False)
self.dapg_damping = dapg_damping
self.demo_replay = build_replay(demo_replay_cfg)
if self.demo_replay is not None:
for key in ['obs', 'actions']:
assert key in self.demo_replay.memory, f"DAPG needs {key} in your demo!"
# For done signal process.
self.ignore_dones = ignore_dones
# Build networks
actor_cfg.update(env_params)
critic_cfg.update(env_params)
self.actor, self.critic = build_actor_critic(actor_cfg, critic_cfg, shared_backbone)
if self.regress_visual_state:
visual_state_mlp_cfg.mlp_spec += [obs_shape["visual_state"]]
self.extra_fit = build_model(visual_state_mlp_cfg)
if rms_grad_clip is not None:
self.grad_rms = RunningSecondMomentumTorch(get_flat_params(self, trainable=True).shape, clip_max=rms_grad_clip)
self.shared_backbone = shared_backbone
self.detach_actor_feature = detach_actor_feature
self.actor_optim = build_optimizer(self.actor, actor_optim_cfg)
self.critic_optim = build_optimizer(self.critic, critic_optim_cfg)
def compute_critic_loss(self, samples):
# For update_actor_critic and update critic
assert isinstance(samples, (dict, GDict))
values = self.critic(
samples["obs"], episode_dones=samples["episode_dones"], save_feature=True
)
feature = self.critic.values[0].backbone.pop_attr("saved_feature")
visual_feature = self.critic.values[0].backbone.pop_attr("saved_visual_feature")
if self.detach_actor_feature and feature is not None:
feature = feature.detach()
if self.critic_clip and isinstance(self.critic_clip, float):
v_clip = samples["old_values"] + (values - samples["old_values"]).clamp(-self.critic_clip, self.critic_clip)
vf1 = (samples["returns"] - values).pow(2)
vf2 = (samples["returns"] - v_clip).pow(2)
critic_loss = torch.max(vf1, vf2)
else:
critic_loss = (samples["returns"] - values).pow(2)
critic_loss = critic_loss.mean() if samples["is_valid"] is None else critic_loss[samples["is_valid"]].mean()
return critic_loss, feature, visual_feature
def update_actor_critic(self, samples, demo_samples=None, with_critic=False):
"""
Returns True if self.max_kl is not None and
policy update causes large kl divergence between new policy and old policy,
in which case we stop the policy update and throw away the current replay buffer
"""
is_valid = samples["is_valid"]
self.actor_optim.zero_grad()
self.critic_optim.zero_grad()
ret = {}
critic_loss, actor_loss, demo_actor_loss, visual_state_loss, entropy_term = [0.0] * 5
feature, visual_feature, critic_loss, policy_std = [None] * 4
if with_critic:
critic_mse, feature, visual_feature = self.compute_critic_loss(samples)
critic_loss = critic_mse * self.critic_coeff
ret["ppo/critic_err"] = critic_mse.item()
# ret['ppo/critic_loss'] = critic_loss.item()
# Run actor forward
alls = self.actor(
samples["obs"],
episode_dones=samples["episode_dones"],
mode="dist" if self.is_discrete else "dist_std",
feature=feature,
save_feature=feature is None,
require_aux_loss=True, # auxiliary backbone self-supervision, e.g. aux_regress in VisuomotorTransformerFrame
)
if isinstance(alls, dict) and 'aux_loss' in alls.keys(): # auxiliary backbone self-supervision, e.g. aux_regress in VisuomotorTransformerFrame
alls, backbone_aux_loss = alls['feat'], alls['aux_loss']
else:
backbone_aux_loss = None
if not self.is_discrete:
new_distributions, policy_std = alls
else:
new_distributions, policy_std = alls, None
del alls
if visual_feature is None:
visual_feature = self.actor.backbone.pop_attr("saved_visual_feature")
# Compute actor loss
dist_entropy = new_distributions.entropy().mean()
recent_log_p = new_distributions.log_prob(samples["actions"])
log_ratio = recent_log_p - samples["old_log_p"]
ratio = log_ratio.exp()
# print("ratio", ratio[:20], flush=True)
# Estimation of KL divergence = p (log p - log q) with method in Schulman blog: http://joschu.net/blog/kl-approx.html
with torch.no_grad():
approx_kl_div = (ratio - 1 - log_ratio).mean().item()
clip_frac = (torch.abs(ratio - 1) > self.eps_clip).float().mean().item()
if policy_std is not None:
ret["ppo/policy_std"] = policy_std.mean().item()
ret["ppo/entropy"] = dist_entropy.item()
ret["ppo/mean_p_ratio"] = ratio.mean().item()
ret["ppo/max_p_ratio"] = ratio.max().item()
ret["ppo/log_p"] = recent_log_p.mean().item()
ret["ppo/clip_frac"] = clip_frac
ret["ppo/approx_kl"] = approx_kl_div
sign = GDict(self.max_kl is not None and approx_kl_div > self.max_kl * 1.5).allreduce(op="BOR", wrapper=False)
if sign:
return True, ret
if ratio.ndim == samples["advantages"].ndim - 1:
ratio = ratio[..., None]
surr1 = ratio * samples["advantages"]
surr2 = ratio.clamp(1 - self.eps_clip, 1 + self.eps_clip) * samples["advantages"]
surr = torch.min(surr1, surr2)
if self.dual_clip:
surr = torch.max(surr, self.dual_clip * samples["advantages"])
actor_loss = -surr[is_valid].mean()
entropy_term = -dist_entropy * self.entropy_coeff
ret["ppo/actor_loss"] = actor_loss.item()
ret["ppo/entropy_loss"] = entropy_term.item()
# DAPG actor loss
if demo_samples is not None:
new_demo_distributions = self.actor(demo_samples["obs"], mode="dist")
nll_loss_demo = -new_demo_distributions.log_prob(demo_samples["actions"]).mean()
demo_actor_loss = nll_loss_demo * self.dapg_lambda
with torch.no_grad():
ret["dapg/demo_nll_loss"] = nll_loss_demo.item()
ret["dapg/demo_actor_loss"] = demo_actor_loss.item()
# State regression loss
if self.regress_visual_state:
assert feature is not None
visual_state_mse = F.mse_loss(self.extra_fit(visual_feature), samples["obs/visual_state"], reduction="none")
visual_state_mse = visual_state_mse[is_valid].mean()
ret["ppo-extra/visual_state_mse"] = visual_state_mse
visual_state_loss = visual_state_mse * self.visual_state_coeff
ret["ppo-extra/visual_state_loss"] = visual_state_loss.item()
# Backbone auxiliary supervision loss
if backbone_aux_loss is not None:
ret["ppo-extra/backbone_auxiliary_loss"] = backbone_aux_loss.item()
loss = actor_loss + entropy_term + critic_loss + visual_state_loss + demo_actor_loss
if backbone_aux_loss is not None:
loss = loss + backbone_aux_loss
loss.backward()
net = self if with_critic else self.actor
ret["grad/grad_norm"] = net.grad_norm
if math.isnan(ret["grad/grad_norm"]):
print("############ Debugging nan grad ############", flush=True)
print("Dist mean", new_distributions.mean, flush=True)
print("Dist std", new_distributions.stddev, flush=True)
print("Samples[actions]", samples["actions"], flush=True)
print("Recent_log_p", recent_log_p, flush=True)
print("Samples[old_log_p]", samples["old_log_p"], flush=True)
            for k, v in ret.items():
print(k, v, flush=True)
if self.shared_backbone:
if getattr(self.actor.backbone, "visual_nn", None) is not None:
ret["grad/visual_grad"] = self.actor.backbone.visual_nn.grad_norm
if getattr(self.actor.backbone, "final_mlp", None) is not None:
ret["grad/actor_mlp_grad"] = self.actor.backbone.final_mlp.grad_norm
elif self.actor.final_mlp is not None:
ret["grad/actor_mlp_grad"] = self.actor.final_mlp.grad_norm
if with_critic:
if getattr(self.critic.values[0].backbone, "final_mlp", None) is not None:
ret["grad/critic_mlp_grad"] = self.critic.values[0].backbone.final_mlp.grad_norm
elif self.critic.values[0].final_mlp is not None:
ret["grad/critic_mlp_grad"] = self.critic.values[0].final_mlp.grad_norm
if self.max_grad_norm is not None:
nn.utils.clip_grad_norm_(net.parameters(), self.max_grad_norm)
elif self.rms_grad_clip is not None:
grads = get_flat_grads(self)
grads = self.grad_rms.add(grads)
set_flat_grads(self, grads)
ret["grad/clipped_grad_norm"] = net.grad_norm
self.actor_optim.step()
if with_critic:
self.critic_optim.step()
return False, ret
def update_critic(self, samples, demo_samples=None):
self.critic_optim.zero_grad()
critic_mse = self.compute_critic_loss(samples)[0]
critic_loss = critic_mse * self.critic_coeff
critic_loss.backward()
ret = {}
ret["grad/grad_norm"] = self.critic.grad_norm
if self.max_grad_norm is not None:
nn.utils.clip_grad_norm_(self.critic.parameters(), self.max_grad_norm)
elif self.rms_grad_clip is not None:
assert False
grads = get_flat_grads(self)
grads = self.grad_rms.add(grads)
set_flat_grads(self, grads)
ret["grad/clipped_grad_norm"] = self.critic.grad_norm
ret["ppo/critic_loss"] = critic_loss.item()
ret["ppo/critic_mse"] = critic_mse.item()
self.critic_optim.step()
return ret
def update_parameters(self, memory, updates, with_v=False):
world_size = get_world_size()
logger = get_logger()
ret = defaultdict(list)
process_batch_size = self.batch_size if GDict(memory["obs"]).is_big else None
if self.num_mini_batch < 0:
max_samples = GDict(len(memory)).allreduce(op="MAX", device=self.device, wrapper=False) if world_size > 1 else len(memory)
num_mini_batch = int((max_samples + self.batch_size - 1) // self.batch_size)
else:
num_mini_batch = self.num_mini_batch
logger.info(f"Number of batches in one PPO epoch: {num_mini_batch}!")
if len(memory) < memory.capacity:
memory["episode_dones"][len(memory) :] = True
# Do transformation for all valid samples
memory["episode_dones"] = (memory["episode_dones"] + memory["is_truncated"]) > 1 - 0.1
if self.has_obs_process:
self.obs_rms.sync()
obs = GDict({"obs": memory["obs"], "next_obs": memory["next_obs"]}).to_torch(device="cpu", wrapper=False)
obs = GDict(self.process_obs(obs, batch_size=process_batch_size)).to_numpy(wrapper=False)
memory.update(obs)
with torch.no_grad():
memory["old_distribution"], memory["old_log_p"] = self.get_dist_with_logp(
obs=memory["obs"], actions=memory["actions"], batch_size=process_batch_size
)
ret["ppo/old_log_p"].append(memory["old_log_p"].mean().item())
demo_memory = self.demo_replay
if demo_memory is not None:
with torch.no_grad():
demo_memory = self.demo_replay.sample(min(len(self.demo_replay), len(memory)))
if self.has_obs_process:
demo_memory = demo_memory.to_torch(device="cpu")
demo_memory = self.process_obs(demo_memory, batch_size=process_batch_size)
demo_memory = demo_memory.to_numpy()
if self.ignore_dones:
demo_memory["dones"] = demo_memory["dones"] * 0
def run_over_buffer(epoch_id, mode="v"):
nonlocal memory, ret, demo_memory, logger
assert mode in ["v", "pi", "v+pi"]
if "v" in mode and (epoch_id == 0 or self.recompute_value):
with self.critic.no_sync():
memory.update(
self.compute_gae(
obs=memory["obs"],
next_obs=memory["next_obs"],
rewards=memory["rewards"],
dones=memory["dones"],
episode_dones=memory["episode_dones"],
update_rms=True,
batch_size=process_batch_size,
ignore_dones=self.ignore_dones,
)
)
if self.adv_norm:
# print(mean_adv, std_adv)
mean_adv = memory["advantages"].mean(0)
std_adv = memory["advantages"].std(0) + 1e-8
mean_adv, std_adv = GDict([mean_adv, std_adv]).allreduce(wrapper=False)
# print(mean_adv, std_adv)
# exit(0)
memory["advantages"] = (memory["advantages"] - mean_adv) / std_adv
ret["ppo/adv_mean"].append(mean_adv.item())
ret["ppo/adv_std"].append(std_adv.item())
ret["ppo/max_normed_adv"].append(np.abs(memory["advantages"]).max().item())
ret["ppo/v_target"].append(memory["returns"].mean().item())
ret["ppo/ori_returns"].append(memory["original_returns"].mean().item())
def run_one_iter(samples, demo_samples):
if "pi" in mode:
flag, infos = self.update_actor_critic(samples, demo_samples, with_critic=(mode == "v+pi"))
for key in infos:
ret[key].append(infos[key])
elif mode == "v":
flag, infos = False, self.update_critic(samples, demo_samples)
for key in infos:
ret[key].append(infos[key])
return flag
for samples in memory.mini_batch_sampler(self.batch_size, drop_last=True, auto_restart=True, max_num_batches=num_mini_batch):
samples = DictArray(samples).to_torch(device=self.device, non_blocking=True)
demo_samples = None
if demo_memory is not None:
indices = np.random.randint(0, high=len(demo_memory), size=self.batch_size)
demo_samples = demo_memory.slice(indices).to_torch(device=self.device, non_blocking=True)
if run_one_iter(samples, demo_samples):
return True
return False
if self.critic_warmup_epoch > 0:
logger.info("**Warming up critic at the beginning of training; this causes reported ETA to be slower than actual ETA**")
for i in range(self.critic_warmup_epoch):
run_over_buffer(i, "v")
if self.num_epoch > 0:
for i in range(self.num_epoch):
num_actor_epoch = i + 1
if run_over_buffer(i, "v+pi"):
break
else:
for i in range(self.critic_epoch):
run_over_buffer(i, "v")
for i in range(self.actor_epoch):
num_actor_epoch = i + 1
if run_over_buffer(i, "pi"):
break
self.critic_warmup_epoch = 0
ret = {key: np.mean(ret[key]) for key in ret}
with torch.no_grad():
ret["param/max_policy_abs"] = torch.max(torch.abs(get_flat_params(self.actor))).item()
ret["param/policy_norm"] = torch.norm(get_flat_params(self.actor)).item()
if isinstance(self.critic, nn.Module):
ret["param/max_critic_abs"] = torch.max(torch.abs(get_flat_params(self.critic))).item()
ret["param/critic_norm"] = torch.norm(get_flat_params(self.critic)).item()
for key in ["old_distribution", "old_log_p", "old_values", "old_next_values", "original_returns", "returns", "advantages"]:
if key in memory.memory:
memory.memory.pop(key)
ret["ppo/num_actor_epoch"] = num_actor_epoch
if self.demo_replay is not None:
# For DAPG
ret["dapg/demo_lambda"] = self.dapg_lambda.item()
self.dapg_lambda *= self.dapg_damping
if with_v:
# For PPG
ret["vf"] = to_np(memory["original_returns"])
# exit(0)
return ret
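# --- Hedged illustration (appended; not part of ManiSkill2-Learn) -------------
# update_actor_critic() above implements the clipped PPO surrogate plus the
# optional dual clip from the paper referenced in the module docstring. The
# standalone function below restates just that loss term; the default
# dual_clip value of 3.0 is an illustrative assumption.
def dual_clip_ppo_surrogate(ratio, advantages, eps_clip=0.2, dual_clip=3.0):
    surr1 = ratio * advantages
    surr2 = ratio.clamp(1 - eps_clip, 1 + eps_clip) * advantages
    surr = torch.min(surr1, surr2)
    # Mirrors the code above: the dual clip bounds the surrogate from below.
    surr = torch.max(surr, dual_clip * advantages)
    return -surr.mean()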
|
haosulab/ManiSkill2-Learn
|
maniskill2_learn/methods/mfrl/ppo.py
|
ppo.py
|
py
| 21,464 |
python
|
en
|
code
| 53 |
github-code
|
6
|
43724719541
|
from PyQt5.QtCore import QThread, QMutex, pyqtSignal
from binance.client import Client
import pyupbit
import pybithumb
import requests
from bs4 import BeautifulSoup
from debug import debuginfo
class binanceThread(QThread):
binance_data = pyqtSignal(dict)
def __init__(self):
QThread.__init__(self)
self.mutex = QMutex()
self.binance = Client()
self.binanceList = list()
self.exchange_rate = float(1100)
self.isRun = True
def delSymbol(self, symbol):
if symbol+"BTC" in self.binanceList:
self.binanceList.remove(symbol+"BTC")
def _start(self):
self.isRun = True
self.start()
def stop(self):
self.isRun = False
def get_symbol_list(self):
binanceList = list()
try:
for i in self.binance.get_all_tickers():
symbol = i['symbol']
if symbol[-3:] == 'BTC':
binanceList.append(symbol[:-3])
if symbol == 'BTCUSDT':
binanceList.append(symbol[:-4])
except Exception as e:
debuginfo(e)
pass
return binanceList
def save_list(self, list):
for i in list:
if i == 'BTC':
self.binanceList.append('BTCUSDT')
else:
self.binanceList.append(i+'BTC')
def get_dollor(self):
try:
res = requests.get('http://finance.naver.com/')
text = res.text
soup = BeautifulSoup(text, 'html.parser')
td = soup.select_one(
"#content > div.article2 > div.section1 > div.group1 > table > tbody > tr > td")
exchange_rate = ''
for i in td.text:
if i == ',':
pass
else:
exchange_rate += i
self.exchange_rate = float(exchange_rate)
except Exception as e:
debuginfo(e)
def get_prices(self):
prices = dict()
try:
for i in self.binance.get_all_tickers():
prices[i['symbol']] = i['price']
except Exception as e:
debuginfo(e)
pass
return prices
def get_orderbooks(self):
orderbooks = dict()
try:
for i in self.binance.get_orderbook_tickers():
orderbooks[i['symbol']] = dict()
orderbooks[i['symbol']]['bidPrice'] = i['bidPrice']
orderbooks[i['symbol']]['bidQty'] = i['bidQty']
orderbooks[i['symbol']]['askPrice'] = i['askPrice']
orderbooks[i['symbol']]['askQty'] = i['askQty']
except Exception as e:
debuginfo(e)
pass
return orderbooks
def calculate_krw(self, price, BTCUSDT, exchange_rate):
return str(round(float(price) * BTCUSDT * exchange_rate, 2))
def run(self):
while self.isRun:
self.mutex.lock()
binanceDict = dict()
self.get_dollor()
prices = self.get_prices()
orderbooks = self.get_orderbooks()
try:
BTCUSDT = float(prices['BTCUSDT'])
binanceDict['BTC'] = dict()
binanceDict['BTC']['price'] = str(round(BTCUSDT * self.exchange_rate, 2))
binanceDict['BTC']['ask'] = str(
round(float(orderbooks['BTCUSDT']['askPrice']) * self.exchange_rate, 2)) + '/' + str(
round(float(orderbooks['BTCUSDT']['askQty']), 2))
binanceDict['BTC']['bid'] = str(
round(float(orderbooks['BTCUSDT']['bidPrice']) * self.exchange_rate, 2)) + '/' + str(
round(float(orderbooks['BTCUSDT']['bidQty']), 2))
except Exception as e:
debuginfo(e)
for i in self.binanceList:
if i == 'BTCUSDT':
continue
try:
symbol = i.replace('BTC', '')
binanceDict[symbol] = dict()
binanceDict[symbol]['price'] = self.calculate_krw(prices[i], BTCUSDT, self.exchange_rate)
binanceDict[symbol]['ask'] = self.calculate_krw(orderbooks[i]['askPrice'], BTCUSDT, self.exchange_rate) + '/' + str(round(float(orderbooks[i]['askQty']), 2))
binanceDict[symbol]['bid'] = self.calculate_krw(orderbooks[i]['bidPrice'], BTCUSDT, self.exchange_rate) + '/' + str(round(float(orderbooks[i]['bidQty']), 2))
except Exception as e:
debuginfo(e)
pass
self.binance_data.emit(binanceDict)
self.mutex.unlock()
class upbitThread(QThread):
upbit_data = pyqtSignal(dict)
def __init__(self):
QThread.__init__(self)
self.mutex = QMutex()
self.upbit = pyupbit
self.upbitList = list()
self.isRun = True
def delSymbol(self, symbol):
if "KRW-"+symbol in self.upbitList:
self.upbitList.remove("KRW-"+symbol)
def _start(self):
self.isRun = True
self.start()
def stop(self):
self.isRun = False
def get_symbol_list(self):
upbitList = list()
try:
for i in self.upbit.get_tickers(fiat="KRW"):
upbitList.append(i.split('KRW-')[1])
except Exception as e:
debuginfo(e)
pass
return upbitList
def save_list(self, list):
for i in list:
self.upbitList.append('KRW-'+i)
def run(self):
while self.isRun:
self.mutex.lock()
upbitDict = dict()
prices = self.upbit.get_current_price(self.upbitList)
orderbooks = self.upbit.get_orderbook(self.upbitList)
if orderbooks and prices:
for i in orderbooks:
try:
symbol = i['market'].split('-')[1]
orderbook = i['orderbook_units'][0]
ask = str(orderbook['ask_price']) + '/' + str(round(orderbook['ask_size'], 2))
bid = str(orderbook['bid_price']) + '/' + str(round(orderbook['bid_size'], 2))
upbitDict[symbol] = dict()
upbitDict[symbol]['price'] = str(round(prices[i['market']], 2))
upbitDict[symbol]['ask'] = ask
upbitDict[symbol]['bid'] = bid
except Exception as e:
debuginfo(e)
self.upbit_data.emit(upbitDict)
self.mutex.unlock()
class bithumbThread(QThread):
bithumb_data = pyqtSignal(dict)
def __init__(self):
QThread.__init__(self)
self.mutex = QMutex()
self.bithumb = pybithumb.Bithumb
self.bithumbList = list()
self.isRun = True
def delSymbol(self, symbol):
if symbol in self.bithumbList:
self.bithumbList.remove(symbol)
def _start(self):
self.isRun = True
self.start()
def stop(self):
self.isRun = False
def get_symbol_list(self):
bithumbList = list()
try:
bithumbList = self.bithumb.get_tickers()
except Exception as e:
debuginfo(e)
pass
return bithumbList
def save_list(self, list):
self.bithumbList = list
def run(self):
while self.isRun:
self.mutex.lock()
bithumbDict = dict()
prices = self.bithumb.get_current_price('ALL')
orderbooks = self.bithumb.get_orderbook('ALL')
if orderbooks and prices:
orderbooks = orderbooks['data']
for i in self.bithumbList:
try:
price = prices[i]['closing_price']
orderbook = orderbooks[i]
ask = orderbook['asks'][0]['price'] + '/' + str(round(float(orderbook['asks'][0]['quantity']), 2))
bid = orderbook['bids'][0]['price'] + '/' + str(round(float(orderbook['bids'][0]['quantity']), 2))
bithumbDict[i] = dict()
bithumbDict[i]['price'] = price
bithumbDict[i]['ask'] = ask
bithumbDict[i]['bid'] = bid
except Exception as e:
debuginfo(e)
pass
self.bithumb_data.emit(bithumbDict)
self.mutex.unlock()
if __name__ == "__main__":
binance = binanceThread()
upbit = upbitThread()
bithumb = bithumbThread()
binanceList = binance.get_symbol_list()
upbitList = upbit.get_symbol_list()
bithumbList = bithumb.get_symbol_list()
binanceUpbitDuplicate = list()
binanceBithumbDuplicate = list()
upbitBithumbDuplicate = list()
for i in binanceList:
if i in upbitList:
binanceUpbitDuplicate.append(i)
if i in bithumbList:
binanceBithumbDuplicate.append(i)
for i in upbitList:
if i in bithumbList:
upbitBithumbDuplicate.append(i)
newBinanceList = list(set(binanceUpbitDuplicate+binanceBithumbDuplicate))
newUpbitList = list(set(binanceUpbitDuplicate+upbitBithumbDuplicate))
newBithumbList = list(set(binanceBithumbDuplicate+upbitBithumbDuplicate))
binance.save_list(newBinanceList)
upbit.save_list(newUpbitList)
bithumb.save_list(newBithumbList)
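    # --- Hedged illustration (appended; not part of the original script) ------
    # calculate_krw() above converts a BTC-quoted Binance price into KRW as
    # price_in_btc * BTCUSDT * USD/KRW. The numbers below are made-up examples,
    # purely to show the arithmetic.
    example_price_in_btc = 0.0025   # hypothetical ALT/BTC price
    example_btc_usdt = 60000.0      # hypothetical BTC/USDT price
    example_usd_krw = 1300.0        # hypothetical USD/KRW exchange rate
    print(binance.calculate_krw(example_price_in_btc, example_btc_usdt, example_usd_krw))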
|
JunTae90/coin_viewer
|
thread.py
|
thread.py
|
py
| 9,535 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8201566770
|
from typing import Dict
import os
import shutil
from hexlib.db import Table, PersistentState
import pickle
from tesseract import get_tesseract_langs
import sqlite3
from config import LOG_FOLDER, logger
from sist2 import SearchBackendType, Sist2SearchBackend
RUNNING_FRONTENDS: Dict[str, int] = {}
TESSERACT_LANGS = get_tesseract_langs()
DB_SCHEMA_VERSION = "5"
from pydantic import BaseModel
def _serialize(item):
if isinstance(item, BaseModel):
return pickle.dumps(item)
if isinstance(item, bytes):
raise Exception("FIXME: bytes in PickleTable")
return item
def _deserialize(item):
if isinstance(item, bytes):
return pickle.loads(item)
return item
class PickleTable(Table):
def __getitem__(self, item):
row = super().__getitem__(item)
if row:
return dict((k, _deserialize(v)) for k, v in row.items())
return row
def __setitem__(self, key, value):
value = dict((k, _serialize(v)) for k, v in value.items())
super().__setitem__(key, value)
def __iter__(self):
for row in super().__iter__():
yield dict((k, _deserialize(v)) for k, v in row.items())
def sql(self, where_clause, *params):
for row in super().sql(where_clause, *params):
yield dict((k, _deserialize(v)) for k, v in row.items())
def get_log_files_to_remove(db: PersistentState, job_name: str, n: int):
if n < 0:
return []
counter = 0
to_remove = []
for row in db["task_done"].sql("WHERE has_logs=1 ORDER BY started DESC"):
if row["name"].endswith(f"[{job_name}]"):
counter += 1
if counter > n:
to_remove.append(row)
return to_remove
def delete_log_file(db: PersistentState, task_id: str):
db["task_done"][task_id] = {
"has_logs": 0
}
try:
os.remove(os.path.join(LOG_FOLDER, f"sist2-{task_id}.log"))
except:
pass
def migrate_v1_to_v2(db: PersistentState):
shutil.copy(db.dbfile, db.dbfile + "-before-migrate-v2.bak")
# Frontends
db._table_factory = PickleTable
frontends = [row["frontend"] for row in db["frontends"]]
del db["frontends"]
db._table_factory = Table
for frontend in frontends:
db["frontends"][frontend.name] = frontend
list(db["frontends"])
# Jobs
db._table_factory = PickleTable
jobs = [row["job"] for row in db["jobs"]]
del db["jobs"]
db._table_factory = Table
for job in jobs:
db["jobs"][job.name] = job
list(db["jobs"])
db["sist2_admin"]["info"] = {
"version": "2"
}
def create_default_search_backends(db: PersistentState):
es_backend = Sist2SearchBackend.create_default(name="elasticsearch",
backend_type=SearchBackendType("elasticsearch"))
db["search_backends"]["elasticsearch"] = es_backend
sqlite_backend = Sist2SearchBackend.create_default(name="sqlite", backend_type=SearchBackendType("sqlite"))
db["search_backends"]["sqlite"] = sqlite_backend
def migrate_v3_to_v4(db: PersistentState):
shutil.copy(db.dbfile, db.dbfile + "-before-migrate-v4.bak")
create_default_search_backends(db)
try:
conn = sqlite3.connect(db.dbfile)
conn.execute("ALTER TABLE task_done ADD COLUMN has_logs INTEGER DEFAULT 1")
conn.commit()
conn.close()
except Exception as e:
logger.exception(e)
db["sist2_admin"]["info"] = {
"version": "4"
}
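# --- Hedged illustration (appended; not part of sist2-admin) ------------------
# PickleTable above transparently pickles pydantic models into table rows and
# un-pickles them on read. _DemoModel is a hypothetical stand-in for the real
# row objects (e.g. Sist2SearchBackend); the function only shows the round trip
# that _serialize/_deserialize rely on.
class _DemoModel(BaseModel):
    name: str = "default"
def _pickle_roundtrip_demo():
    original = _DemoModel(name="elasticsearch")
    restored = _deserialize(_serialize(original))
    assert restored.name == original.name
    return restored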
|
simon987/sist2
|
sist2-admin/sist2_admin/state.py
|
state.py
|
py
| 3,537 |
python
|
en
|
code
| 652 |
github-code
|
6
|
6806255656
|
"""
Пожалуйста, приступайте к этой задаче после того, как вы сделали и получили ревью ко всем остальным задачам
в этом репозитории. Она значительно сложнее.
Есть набор сообщений из чата в следующем формате:
```
messages = [
{
"id": "efadb781-9b04-4aad-9afe-e79faef8cffb",
"sent_at": datetime.datetime(2022, 10, 11, 23, 11, 11, 721),
"sent_by": 46, # id пользователя-отправителя
"reply_for": "7b22ae19-6c58-443e-b138-e22784878581", # id сообщение, на которое это сообщение является ответом (может быть None)
"seen_by": [26, 91, 71], # идентификаторы пользователей, которые видели это сообщение
"text": "А когда ревью будет?",
}
]
```
Так же есть функция `generate_chat_history`, которая вернёт список из большого количества таких сообщений.
Установите библиотеку lorem, чтобы она работала.
Нужно:
1. Вывести айди пользователя, который написал больше всех сообщений.
2. Вывести айди пользователя, на сообщения которого больше всего отвечали.
3. Вывести айди пользователей, сообщения которых видело больше всего уникальных пользователей.
4. Определить, когда в чате больше всего сообщений: утром (до 12 часов), днём (12-18 часов) или вечером (после 18 часов).
5. Вывести идентификаторы сообщений, который стали началом для самых длинных тредов (цепочек ответов).
Весь код стоит разбить на логические части с помощью функций.
"""
import random
import uuid
import datetime
from pprint import pprint
from collections import defaultdict
import lorem
def generate_chat_history():
messages_amount = random.randint(200, 1000)
users_ids = list(
{random.randint(1, 10000) for _ in range(random.randint(5, 20))}
)
sent_at = datetime.datetime.now() - datetime.timedelta(days=100)
messages = []
for _ in range(messages_amount):
sent_at += datetime.timedelta(minutes=random.randint(0, 240))
messages.append({
"id": uuid.uuid4(),
"sent_at": sent_at,
"sent_by": random.choice(users_ids),
"reply_for": random.choice(
[
None,
(
random.choice([m["id"] for m in messages])
if messages else None
),
],
),
"seen_by": random.sample(users_ids,
random.randint(1, len(users_ids))),
"text": lorem.sentence(),
})
return messages
def find_id_user_most_messages(messages: list) -> list:
messages_per_user = defaultdict(int)
for message in messages:
messages_per_user[message['sent_by']] += 1
max_messages = 0
    user_ids_with_max_messages = []
for key, value in messages_per_user.items():
if value > max_messages:
max_messages = value
user_ids_with_max_messages = [key]
elif value == max_messages:
user_ids_with_max_messages.append(key)
return user_ids_with_max_messages
def find_id_user_most_messages_replies(messages: list) -> list:
replies_per_message = defaultdict(int)
for message in messages:
if message['reply_for'] is None:
continue
replies_per_message[message['reply_for']] += 1
most_replied_messages = set()
most_replied_count = 0
for key, value in replies_per_message.items():
if value > most_replied_count:
most_replied_count = value
most_replied_messages = {key}
elif value == most_replied_count:
most_replied_messages.add(key)
most_replied_users = []
for message in messages:
if message['id'] in most_replied_messages:
most_replied_users.append(message['sent_by'])
return most_replied_users
def find_id_user_most_see_messages(messages: list) -> list:
users = defaultdict(set)
for message in messages:
if users.get(message['sent_by']) is None:
users[message['sent_by']] = set(message['seen_by'])
else:
users[message['sent_by']] = users[message['sent_by']].union(message['seen_by'])
most_see_message_user = []
max_len_seen_by = 0
for key, value in users.items():
if len(value) > max_len_seen_by:
most_see_message_user = []
max_len_seen_by = len(value)
most_see_message_user.append(key)
elif len(value) == max_len_seen_by:
most_see_message_user.append(key)
return most_see_message_user
def when_most_messages(messages: list) -> str:
count_morning = 0
count_day = 0
count_evening = 0
for message in messages:
time = message['sent_at']
time = time.time()
if datetime.time(0, 0, 0) < time < datetime.time(12, 0, 0):
count_morning += 1
elif datetime.time(12, 0, 0) <= time <= datetime.time(18, 0, 0):
count_day += 1
else:
count_evening += 1
if count_morning > count_day and count_morning > count_evening:
        return 'Morning'
    elif count_day > count_evening:
        return 'Afternoon'
    else:
        return 'Evening'
# helper function: for a given message id, return the id of the message it replies to (its parent)
def find_id_message(messages: list, id_message) -> str:
for message in messages:
if message['id'] == id_message:
return message['reply_for']
def find_id_messages_which_have_most_threads(messages: list) -> list:
dict_result = defaultdict(int)
for message in messages:
if message['reply_for'] is None:
continue
else:
id_message = message['reply_for']
count_threads = 0
while True:
count_threads += 1
id_message_find = find_id_message(messages, id_message)
if id_message_find is None:
break
else:
id_message = id_message_find
dict_result[id_message] = count_threads
id_message = []
max_value = 0
for key, value in dict_result.items():
if value > max_value:
max_value = value
id_message.clear()
id_message.append(key)
elif value == max_value:
id_message.append(key)
return id_message
if __name__ == "__main__":
# pprint(generate_chat_history())
print(find_id_user_most_messages(generate_chat_history()))
print(find_id_user_most_messages_replies(generate_chat_history()))
print(find_id_user_most_see_messages(generate_chat_history()))
print(when_most_messages(generate_chat_history()))
print(find_id_messages_which_have_most_threads(generate_chat_history()))
|
hodakoov/basic_exercises
|
for_dict_challenges_bonus.py
|
for_dict_challenges_bonus.py
|
py
| 7,598 |
python
|
ru
|
code
| null |
github-code
|
6
|
8412088860
|
from rest_framework import serializers
from .models import (
Product,
ProductImage,
Size,
Category
)
class CategoryListSerializer(serializers.HyperlinkedModelSerializer):
url = serializers.HyperlinkedIdentityField(
view_name='products:category-detail-view',
lookup_field='slug'
)
class Meta:
model = Category
fields = (
'id',
'title',
'url',
)
class CategoryDetailSerializer(CategoryListSerializer):
products = serializers.SerializerMethodField()
class Meta:
model = Category
fields = (
'id',
'title',
'products',
)
def get_products(self, obj):
        # Pass the serializer context through so nested hyperlinked fields can build absolute URLs
return ProductListSerializer(obj.product_set.all(), many=True, context=self.context).data
class ProductListSerializer(serializers.HyperlinkedModelSerializer):
url = serializers.HyperlinkedIdentityField(
view_name='products:product-detail-view', lookup_field='slug')
class Meta:
model = Product
fields = (
'id',
'slug',
'title',
'price',
'image',
'url',
)
class ProductDetailSerializer(ProductListSerializer):
sizes = serializers.SerializerMethodField()
productImages = serializers.SerializerMethodField()
categories = CategoryListSerializer(many=True)
class Meta:
model = Product
fields = (
'id',
'title',
'price',
'image',
'slug',
'categories',
'sizes',
'description',
'productImages',
)
def get_sizes(self, obj):
return SizeSerializer(obj.size_set.all(), many=True).data
def get_productImages(self, obj):
return ProductImageSerializer(
obj.productimage_set.all(),
many=True
).data
class SizeSerializer(serializers.ModelSerializer):
class Meta:
model = Size
fields = (
'id',
'size',
'slug',
'stock',
)
class ProductImageSerializer(serializers.ModelSerializer):
class Meta:
model = ProductImage
fields = (
'id',
'image',
)
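# --- Hedged usage sketch (appended; normally this would live in views.py) -----
# The hyperlinked serializers above need a request in their context and
# slug-based detail routes. Assuming URL names like
# 'products:product-detail-view' are registered elsewhere, a minimal generic
# view wiring could look like this:
from rest_framework import generics
class ProductDetailView(generics.RetrieveAPIView):
    queryset = Product.objects.all()
    serializer_class = ProductDetailSerializer
    lookup_field = 'slug'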
|
fanimashaun-r7/Nf_Kicks_Api
|
app/products/serializers.py
|
serializers.py
|
py
| 2,365 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39131633270
|
import random
from itertools import zip_longest
from typing import List
from config import MuZeroConfig
from game.game import AbstractGame
import _pickle as cPickle
import os
import numpy as np
class ReplayBuffer(object):
def __init__(self, config: MuZeroConfig, fighter):
self.window_size = config.window_size
self.batch_size = config.batch_size
self.buffer = []
self.loaded_games = []
self.current_games = []
self.memory_path = config.memory_path
self.fighter = fighter
def save_game(self, game):
if sum([len(i.root_values) for i in self.buffer]) > self.window_size:
self.buffer.pop(0)
if game.player1_historic_network == True:
game.game_player1_priority = 0
game.player1_priorities = list(np.full(len(game.root_values), 0))
else:
game.game_player1_priority = 1e3*len(game.root_values)
game.player1_priorities = list(np.full(len(game.root_values), 1e3))
player1_zero_move_idx = [i for i, j in enumerate(game.child_visits) if j[0][0] == 1.]
for idx in player1_zero_move_idx:
game.player1_priorities[idx] = 0
if game.player2_historic_network == True:
game.game_player2_priority = 0
game.player2_priorities = list(np.full(len(game.root_values), 0))
else:
game.game_player2_priority = 1e3*len(game.root_values)
game.player2_priorities = list(np.full(len(game.root_values), 1e3))
player2_zero_move_idx = [i for i, j in enumerate(game.child_visits) if j[1][0] == 1.]
for idx in player2_zero_move_idx:
game.player2_priorities[idx] = 0
self.buffer.append(game)
def update_buffer(self):
new_files = [f for f in os.listdir(self.memory_path) if f not in self.loaded_games]
new_files = [f for f in new_files if (f.split('_')[-1][:-4] == self.fighter) | (f.split('_')[-2] == self.fighter)]
new_files.sort(key = lambda x: int(x.split('_')[1]))
if len(new_files) > self.window_size // 1100:
self.loaded_games = self.loaded_games + new_files[:-self.window_size // 1100]
new_files = new_files[-self.window_size // 1100:]
if len(new_files) != 0:
for new_file in new_files:
with open(os.path.join(self.memory_path,new_file), 'rb') as game_file:
game = cPickle.load(game_file)
self.save_game(game)
self.loaded_games.append(new_file)
if sum([len(i.root_values) for i in self.buffer]) > self.window_size:
self.current_games.pop(0)
self.current_games.append(new_file)
def sample_batch(self, num_unroll_steps: int, unroll_step_size : int, td_steps: int, fighter):
# Generate some sample of data to train on
games = self.sample_games(fighter)
game_pos = [(g, self.sample_position(self.buffer[g], fighter), 'player1' if self.buffer[g].player1 == fighter else 'player2') for g in games]
game_data = [(self.buffer[g].make_image(i, p), [action.index for action in [j[int(p[-1]) - 1] for j in self.buffer[g].history[i:i + num_unroll_steps]]],
self.buffer[g].make_target(i, num_unroll_steps, unroll_step_size, td_steps, p))
for (g, i, p) in game_pos]
sample_weights = [self.buffer[g].player1_priorities[i] if p == 'player1' else self.buffer[g].player2_priorities[i] for (g, i, p) in game_pos]
game_weights = [self.buffer[g].game_player1_priority if p == 'player1' else self.buffer[g].game_player2_priority for (g, i, p) in game_pos]
weight_batch = 1 / (np.array(sample_weights) * np.array(game_weights))
weight_batch = weight_batch / np.max(weight_batch)
# Pre-process the batch
image_batch, actions_time_batch, targets_batch = zip(*game_data)
targets_init_batch, *targets_time_batch = zip(*targets_batch)
actions_time_batch = list(zip_longest(*actions_time_batch, fillvalue=0))
# Building batch of valid actions and a dynamic mask for hidden representations during BPTT
batch = image_batch, targets_init_batch, targets_time_batch, actions_time_batch
return batch, game_pos, weight_batch**0.4
def sample_games(self, fighter) -> List[AbstractGame]:
# Sample game from buffer either uniformly or according to some priority.
game_probs = np.array([game.game_player1_priority if game.player1 == fighter else game.game_player2_priority for game in self.buffer])
game_probs /= np.sum(game_probs)
return np.random.choice(len(self.buffer), size=self.batch_size, p = game_probs)
def sample_position(self, game: AbstractGame, fighter) -> int:
# Sample position from game either uniformly or according to some priority.
if game.player1 == fighter:
pos_probs = game.player1_priorities / sum(game.player1_priorities)
if game.player2 == fighter:
pos_probs = game.player2_priorities / sum(game.player2_priorities)
return np.random.choice(len(pos_probs), p=pos_probs)
def sample_position_value_bias(self, game: AbstractGame) -> int:
# Sample position from game either uniformly or according to some priority.
history = [i.index for i in game.history]
counts = np.bincount(history)
common = np.argmax(counts)
above_avg = [i[0] for i in np.argwhere(history==common)]
below_avg = [i[0] for i in np.argwhere(history!=common)]
if random.randint(0,5) != 5:
return np.random.choice(below_avg)
else:
return np.random.choice(above_avg)
def update_priorities(self, priorities, idx_info, fighter):
for i in range(len(idx_info)):
game_id, game_pos, _ = idx_info[i]
priority = priorities[i,:]
start_idx = game_pos
if self.buffer[game_id].player1 == fighter:
end_idx = min(game_pos+len(priority), len(self.buffer[game_id].player1_priorities))
self.buffer[game_id].player1_priorities[start_idx:end_idx] = priority[:end_idx-start_idx]
self.buffer[game_id].game_player1_priority = np.mean(self.buffer[game_id].player1_priorities) * len(self.buffer[game_id].root_values)
if self.buffer[game_id].player2 == fighter:
end_idx = min(game_pos+len(priority), len(self.buffer[game_id].player2_priorities))
self.buffer[game_id].player2_priorities[start_idx:end_idx] = priority[:end_idx-start_idx]
self.buffer[game_id].game_player2_priority = np.mean(self.buffer[game_id].player2_priorities) * len(self.buffer[game_id].root_values)
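# --- Hedged illustration (appended; not part of the original project) ---------
# sample_batch() above weights each sampled position by the inverse of its
# per-step priority times its per-game priority, normalises by the maximum and
# raises the result to 0.4 as an importance-sampling correction. The NumPy
# restatement below shows that computation on made-up positive priorities.
def example_is_weights(sample_priorities, game_priorities, beta=0.4):
    w = 1.0 / (np.array(sample_priorities) * np.array(game_priorities))
    w = w / np.max(w)
    return w ** beta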
|
Nebraskinator/StreetFighter2AI
|
muzero/training/replay_buffer.py
|
replay_buffer.py
|
py
| 6,951 |
python
|
en
|
code
| 1 |
github-code
|
6
|
27009678128
|
import numpy as np
import run as r
from sklearn.gaussian_process.kernels import ABCMeta, Matern, ConstantKernel, Exponentiation, ExpSineSquared, Hyperparameter, KernelOperator, \
NormalizedKernelMixin, PairwiseKernel, RationalQuadratic, StationaryKernelMixin, RBF, CompoundKernel, DotProduct, Product, GenericKernelMixin, WhiteKernel, \
Kernel, Sum
'''
[id]
112
[name]
GaussianProcessRegressor
[input]
x_train training data; the training feature dataset; 2D array; required; constant
y_train training labels; the training target values; 1D array; required; constant
x_test test data; the test feature dataset; 2D array; required; constant
y_test test labels; the test target values; 1D array; required; constant
kernel kernel; default None, the kernel specifying the covariance function of the GP. If None is passed, the kernel '1.0 * RBF(1.0)' is used as default. Note that the kernel's hyperparameters are optimized during fitting; string; optional; constant
alpha alpha; default 1e-10, value added to the diagonal of the kernel matrix during fitting. Larger values correspond to an increased noise level in the observations. This also prevents potential numerical issues during fitting by ensuring that the computed values form a positive definite matrix. If an array is passed, it must have the same number of entries as the data used for fitting and is used as a datapoint-dependent noise level. Note that this is equivalent to adding a WhiteKernel with c=alpha. Specifying the noise level directly as a parameter is mainly for convenience and for consistency with Ridge; array or float; optional; constant
optimizer optimizer; default 'fmin_l_bfgs_b', either one of the internally supported optimizers for optimizing the kernel's parameters, specified by a string, or an externally defined optimizer passed as a callable. By default, the 'L-BFGS-B' algorithm from scipy.optimize.minimize is used. If None is passed, the kernel's parameters are kept fixed. The available internal optimizer is 'fmin_l_bfgs_b'; string; optional; constant
n_restarts_optimizer number of restarts; default 0, the number of restarts of the optimizer for finding the kernel's parameters which maximize the log-marginal likelihood. The first run starts from the kernel's initial parameters; the remaining ones (if any) start from thetas sampled log-uniformly from the space of allowed theta values. If greater than 0, all bounds must be finite. Note that n_restarts_optimizer == 0 means exactly one run is performed; integer; optional; constant
normalize_y normalize_y; default False, whether the target values y are normalized, i.e. the mean and variance of the target values are set to 0 and 1 respectively. This is recommended when using zero-mean, unit-variance priors. Note that in this implementation the normalization is reversed before the GP predictions are reported; boolean; optional; constant
copy_X_train copy_X_train; default True, if True a persistent copy of the training data is stored in the object, otherwise only a reference to the training data is stored, which may cause predictions to change if the data is modified externally; boolean; optional; constant
random_state random seed; default None, determines the random number generation used to initialize the centers. Pass an int for reproducible results across multiple function calls; integer; optional; constant
[output]
train_predict predictions; predictions on the training set; 1D array (numeric)
test_predict predictions; predictions on the test set; 1D array (numeric)
train_score accuracy; score of the predictions on the training set; number
test_score accuracy; score of the predictions on the test set; number
X_train_ X_train_; feature vectors or other representations of the training data (also required for prediction); 2D array
y_train_ y_train_; target values in the training data (also required for prediction); 1D array
L_ L_; lower-triangular Cholesky decomposition of the kernel in 'X_train_'; 2D array
kernel_ kernel_; the kernel used for prediction, with the same structure as the kernel passed as a parameter but with optimized hyperparameters; string
alpha_ alpha; dual coefficients of the training data points in kernel space; 1D array
log_marginal_likelihood_value_ log-marginal likelihood; the log-marginal likelihood of 'self.kernel_.theta'; float
[outline]
[describe]
Gaussian process regression (GPR).
The implementation is based on Algorithm 2.1 of "Gaussian Processes for Machine Learning" (GPML) by Rasmussen and Williams.
In addition to the standard scikit-learn estimator API, GaussianProcessRegressor: * allows prediction without prior fitting (based on the GP prior); * provides an additional method sample_y(X), which evaluates samples drawn from the GPR (prior or posterior) at given inputs; * exposes a method log_marginal_likelihood(theta), which can be used externally for other ways of selecting hyperparameters, e.g. via Markov chain Monte Carlo.
'''
def main(x_train, y_train, x_test, y_test,
kernel=None, alpha=1e-10, optimizer="fmin_l_bfgs_b", n_restarts_optimizer=0, normalize_y=False, copy_X_train=True, random_state=None
):
if type(x_train) is str:
x_train = eval(x_train)
if type(y_train) is str:
y_train = eval(y_train)
if type(x_test) is str:
x_test = eval(x_test)
if type(y_test) is str:
y_test = eval(y_test)
if type(kernel) is str:
kernel = eval(kernel)
if type(alpha) is str:
alpha = eval(alpha)
if type(n_restarts_optimizer) is str:
n_restarts_optimizer = eval(n_restarts_optimizer)
if type(normalize_y) is str:
normalize_y = eval(normalize_y)
if type(copy_X_train) is str:
copy_X_train = eval(copy_X_train)
if type(random_state) is str:
random_state = eval(random_state)
return r.run(x_train=x_train, y_train=y_train, x_test=x_test, y_test=y_test, kernel=kernel,
alpha=alpha,
optimizer=optimizer,
n_restarts_optimizer=n_restarts_optimizer,
normalize_y=normalize_y,
copy_X_train=copy_X_train,
random_state=random_state)
if __name__ == '__main__':
import numpy as np
import json
array = np.loadtxt('D:\\123_2.csv', delimiter=',')
array = array[0:20, :]
y = array[:, -1].tolist()
x = np.delete(array, -1, axis=1).tolist()
array = array.tolist()
back = main(x, y, x, y)
print(back)
for i in back:
print(i + ":" + str(back[i]))
json.dumps(back)
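# --- Hedged illustration (appended; not part of the original wrapper) ---------
# The wrapper above forwards its arguments to run.run(); the underlying
# estimator is scikit-learn's GaussianProcessRegressor. A direct, minimal use
# of that estimator with an RBF kernel on synthetic data looks like this:
def sklearn_gpr_demo():
    from sklearn.gaussian_process import GaussianProcessRegressor
    rng = np.random.RandomState(0)
    X = rng.uniform(0, 5, size=(40, 1))
    y = np.sin(X).ravel() + 0.05 * rng.randn(40)
    gpr = GaussianProcessRegressor(kernel=1.0 * RBF(1.0), alpha=1e-2, random_state=0)
    gpr.fit(X, y)
    return gpr.predict(X[:5]), gpr.score(X, y)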
|
lisunshine1234/mlp-algorithm-python
|
machine_learning/regression/gaussian_processes/GaussianProcessRegressor/main.py
|
main.py
|
py
| 6,034 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
21402453945
|
import torch
import math
from torch import nn
import torch.nn.functional as F
from transformers.activations import get_activation
from .utils import init_weights
def _mask(logits, mask):
return mask * logits - 1e3 * (1 - mask)
# VarMisuse -----------------------------------------------------------------
class _LocRepairPointerHead(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.prediction = nn.Linear(config.hidden_size, 2)
self.apply(init_weights)
def forward(self, input_states):
hidden = self.dense(input_states)
hidden = get_activation("gelu")(hidden)
logits = self.prediction(hidden)
logits = logits.transpose(2, 1)
return logits
class VarMisuseBaseModel(nn.Module):
def __init__(self, config, encoder):
super().__init__()
self.config = config
self.encoder = encoder
@torch.no_grad()
def score(self, logits, labels):
probs = nn.Softmax(dim = 2)(logits)
# Location metrics
loc_predict = probs[:, 0, :]
loc_labels = labels[:, 0, :]
locate = loc_predict.argmax(dim=1)
locate = torch.nn.functional.one_hot(locate, num_classes=loc_predict.shape[1]).float()
locate_acc = (locate * loc_labels).sum(dim=1)
buggy_labels = 1 - loc_labels[:, 0]
# Buggy classification
false_alarms = 1 - ((1 - buggy_labels)*locate_acc).sum() / ((1 - buggy_labels).sum() + 1e-9)
bug_acc = (buggy_labels * locate_acc).sum() / (buggy_labels.sum() + 1e-9)
# Classification
cls_predict = loc_predict[:, 0].round()
cls_labels = loc_labels[:, 0]
cls_acc = (cls_predict * cls_labels).mean() + ((1 - cls_predict) * buggy_labels).mean()
#Repair pointer
rep_probs = probs[:, 1, :]
rep_labels = labels[:, 1, :]
target_probs = (rep_labels * rep_probs).sum(dim=-1)
target_predict = target_probs.round()
target_acc = (target_predict * buggy_labels).sum() / (1e-9 + buggy_labels.sum())
joint_acc = (buggy_labels * locate_acc * target_predict).sum() / (1e-9 + buggy_labels.sum())
return {
"classification_acc": cls_acc.item(),
"localization_acc": locate_acc.mean().item(),
"bug_acc": bug_acc.item(),
"false_alarm_rate": false_alarms.item(),
"repair_acc": target_acc.item(),
"loc_repair_acc": joint_acc.item(),
"avg_prediction": cls_predict.mean().item()
}
def loc_repair_acc(self, tokens, position_ids = None, labels = None):
pass
def forward(self, tokens, token_mask = None, position_ids = None, labels = None):
prediction = self.loc_repair_logits(tokens, position_ids, labels)
# Mask prediction
if token_mask is not None:
token_mask = token_mask.float().unsqueeze(1).expand_as(prediction)
prediction = _mask(prediction, token_mask)
# Calculate a loss if necessary
if labels is not None:
log_probs = nn.LogSoftmax(dim=2)(prediction)
norm = labels.sum(dim=-1, keepdim = True)
per_token_loss = (-labels * log_probs) / (norm + 1e-9)
per_example_loss = per_token_loss.sum(dim=-1)
per_task_loss = per_example_loss.mean(dim = 0)
return per_task_loss.sum(), prediction
return prediction
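# (Label layout, as read off the code above: labels[:, 0, :] is the one-hot
# localization target and labels[:, 1, :] the repair-pointer target, both over
# token positions, with position 0 doubling as the "no bug" slot.)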
class VarMisuseModel(VarMisuseBaseModel):
def __init__(self, config, encoder):
super().__init__(config, encoder)
self.head = _LocRepairPointerHead(config)
def loc_repair_logits(self, tokens, position_ids = None, labels = None):
attention_mask = tokens.sum(dim=2).clamp_(0, 1)
encoding, _ = self.encoder(
tokens = tokens,
attention_mask = attention_mask.bool(),
position_ids = position_ids
)
return self.head(encoding)
# General model that works with inner repairs and localization --------------------------------
class _LocateHead(nn.Module):
def __init__(self, config):
super().__init__()
self.ffn_in = nn.Linear(2 * config.hidden_size, config.hidden_size)
self.ffn_out = nn.Linear(config.hidden_size, 1)
self.apply(init_weights)
def forward(self, context_embed, token_embed, token_mask = None, labels = None):
assert context_embed.shape[1] == token_embed.shape[1]
# Localization prediction --------------------------------
diff_vector = token_embed - context_embed
diff_vector = torch.cat([context_embed, diff_vector], dim = 2)
hidden = self.ffn_in(diff_vector)
hidden = nn.Tanh()(hidden)
hidden = self.ffn_out(hidden)
hidden = hidden.squeeze(-1)
if token_mask is not None: hidden = _mask(hidden, token_mask)
# Loss calculation ---------------------------------------
if labels is not None:
locate_labels = labels[:, 0, :]
log_probs = nn.LogSoftmax(dim=1)(hidden)
loss = (-locate_labels * log_probs).sum(dim=1)
return loss.mean(), hidden
return None, hidden
class _RepairHead(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
if config.decoder_vocab_size > 0: # We have a target vocab
self.decoder = nn.Linear(config.hidden_size, config.decoder_vocab_size, bias = False)
self.apply(init_weights)
def forward(self, error_embed, context_embed, token_mask = None, labels = None, target_labels = None):
# Compute a local pointer --------------------------------
repair_logits = torch.bmm(error_embed.unsqueeze(1), context_embed.transpose(2, 1)).squeeze()
repair_logits /= math.sqrt(error_embed.shape[1])
if len(repair_logits.shape) < 2:
repair_logits = repair_logits.unsqueeze(0)
if token_mask is not None and not self.config.token_annotate:
repair_logits = _mask(repair_logits, token_mask)
if labels is not None:
repair_labels = labels[:, 1, :]
# Compute a global vocab index ---------------------------
if hasattr(self, "decoder"):
decoder_logits = self.decoder(error_embed)
repair_logits = torch.cat([repair_logits, decoder_logits], dim = 1)
if labels is not None and target_labels is not None:
ohe_labels = F.one_hot(target_labels, num_classes=self.config.decoder_vocab_size)
ohe_labels[:, 0] = 0
repair_labels = torch.cat([repair_labels, ohe_labels], dim = 1)
# Loss computation ---------------------------------------
if labels is not None:
repair_log_probs = nn.LogSoftmax(dim = 1)(repair_logits)
norm = repair_labels.sum(dim = -1).clamp_(0, 1)
# Collect log probs
# log sum_(t_i = w)(P(t_i)) = log sum_(t_i = w)(exp log P(t_i))
# = LSE(log P(t_i))
repair_log_probs = _mask(repair_log_probs, repair_labels)
per_example_loss = -norm * torch.logsumexp(repair_log_probs, dim = 1)
return per_example_loss.mean(), repair_logits
return None, repair_logits
class LocateRepairModel(nn.Module):
def __init__(self, config, encoder):
super().__init__()
self.config = config
self.encoder = encoder
self.locate_head = _LocateHead(config)
self.repair_head = _RepairHead(config)
@torch.no_grad()
def score(self, logits, labels):
locate_logits, repair_logits = logits
# Score for localization
loc_predict = nn.Softmax(dim = 1)(locate_logits)
loc_labels = labels[:, 0, :]
locate = loc_predict.argmax(dim=1)
locate = torch.nn.functional.one_hot(locate, num_classes=loc_predict.shape[1]).float()
locate_acc = (locate * loc_labels).sum(dim=1)
buggy_labels = 1 - loc_labels[:, 0]
# Buggy classification
false_alarms = 1 - ((1 - buggy_labels)*locate_acc).sum() / ((1 - buggy_labels).sum() + 1e-9)
bug_acc = (buggy_labels * locate_acc).sum() / (buggy_labels.sum() + 1e-9)
# Classification
cls_predict = loc_predict[:, 0].round()
cls_labels = loc_labels[:, 0]
cls_acc = (cls_predict * cls_labels).mean() + ((1 - cls_predict) * buggy_labels).mean()
# Repair scores
rep_probs = nn.Softmax(dim = 1)(repair_logits)
rep_labels = labels[:, 1, :]
if rep_probs.shape[1] != rep_labels.shape[1]:
target_labels = labels[:, 2, :]
target_labels = target_labels[loc_labels.bool()]
ohe_labels = F.one_hot(target_labels, num_classes=self.config.decoder_vocab_size)
ohe_labels[:, 0] = 0
rep_labels = torch.cat([rep_labels, ohe_labels], dim = 1)
target_probs = (rep_labels * rep_probs).sum(dim=-1)
target_predict = target_probs.round()
target_acc = (target_predict * buggy_labels).sum() / (1e-9 + buggy_labels.sum())
joint_acc = (buggy_labels * locate_acc * target_predict).sum() / (1e-9 + buggy_labels.sum())
return {
"classification_acc": cls_acc.item(),
"localization_acc": locate_acc.mean().item(),
"bug_acc": bug_acc.item(),
"false_alarm_rate": false_alarms.item(),
"repair_acc": target_acc.item(),
"loc_repair_acc": joint_acc.item(),
"avg_prediction": cls_predict.mean().item()
}
def forward(self, tokens, token_mask = None, position_ids = None, labels = None):
attention_mask = tokens.sum(dim=2).clamp_(0, 1)
context_embed, token_embed = self.encoder(
tokens = tokens,
attention_mask = attention_mask.bool(),
position_ids = position_ids,
token_type_ids = token_mask if self.config.token_annotate else None,
)
locate_loss, locate_logits = self.locate_head(context_embed,
token_embed,
token_mask,
labels)
# Either use the gold localization or the predicted to get the error position
error_repair_labels = None
if labels is not None: # We are training
locate_mask = labels[:, 0, :].bool()
if self.config.decoder_vocab_size > 0:
assert labels.shape[1] >= 2, "If a target vocabulary is specified we expect that target labels are provided."
error_repair_labels = labels[:, 2, :]
error_repair_labels = error_repair_labels[locate_mask]
else: # We are at inference
locate = locate_logits.argmax(dim=1)
locate_mask = F.one_hot(locate, num_classes=tokens.shape[1]).bool()
error_hidden = context_embed[locate_mask]
# ----------------------------------------------------------------
repair_loss, repair_logits = self.repair_head(
error_hidden,
context_embed,
token_mask,
labels,
error_repair_labels
)
if labels is not None:
return locate_loss + repair_loss, (locate_logits, repair_logits)
return (locate_logits, repair_logits)
# Masked repair ----------------------------------------------------------------
class MaskedRepairModel(nn.Module):
def __init__(self, config, encoder):
super().__init__()
self.config = config
self.encoder = encoder
self.repair_head = _RepairHead(config)
@torch.no_grad()
def score(self, repair_logits, labels):
# Repair mask
loc_labels = labels[:, 0, :]
buggy_labels = 1 - loc_labels[:, 0]
# Repair scores
rep_probs = nn.Softmax(dim = 1)(repair_logits)
rep_labels = labels[:, 1, :]
if rep_probs.shape[1] != rep_labels.shape[1]:
target_labels = labels[:, 2, :]
target_labels = target_labels[loc_labels.bool()]
ohe_labels = F.one_hot(target_labels, num_classes=self.config.decoder_vocab_size)
            ohe_labels[:, 0] = 0
rep_labels = torch.cat([rep_labels, ohe_labels], dim = 1)
target_probs = (rep_labels * rep_probs).sum(dim=-1)
target_predict = target_probs.round()
target_acc = (target_predict * buggy_labels).sum() / (1e-9 + buggy_labels.sum())
return {
"repair_acc": target_acc.item()
}
def forward(self, tokens, token_mask = None, position_ids = None, labels = None, repair_mask = None):
attention_mask = tokens.sum(dim=2).clamp_(0, 1)
context_embed, _ = self.encoder(
tokens = tokens,
attention_mask = attention_mask.bool(),
position_ids = position_ids,
token_type_ids = token_mask if self.config.token_annotate else None,
)
# Either use the gold localization or the predicted to get the error position
error_repair_labels = None
if labels is not None: # We are training
locate_mask = labels[:, 0, :].bool()
if self.training and self.config.decoder_vocab_size > 0:
assert labels.shape[1] >= 2, "If a target vocabulary is specified we expect that target labels are provided."
error_repair_labels = labels[:, 2, :]
error_repair_labels = error_repair_labels[locate_mask]
else: # We are at inference
if repair_mask is None:
raise ValueError("Location labels are required to identify mask position.")
locate_mask = repair_mask.bool()
error_hidden = context_embed[locate_mask]
# ----------------------------------------------------------------
repair_loss, repair_logits = self.repair_head(
error_hidden,
context_embed,
token_mask,
labels,
error_repair_labels
)
if labels is not None:
return repair_loss, repair_logits
return repair_logits
|
cedricrupb/ctxmutants
|
ctxmutants/modelling/meta_models.py
|
meta_models.py
|
py
| 14,434 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39048517647
|
import re
import logging
from datetime import datetime, timezone
__all__ = ('datetime_to_ns',)
logger = logging.getLogger('aionationstates')
class DataClassWithId:
def __eq__(self, other):
# Ids in NS are pretty much always not globally unique.
if type(self) is not type(other):
return NotImplemented
return self.id == other.id
def __hash__(self):
return hash((self.id,))
def __repr__(self):
return f'<{self.__class__.__name__} id={self.id}>'
def normalize(identifier):
identifier = identifier.lower().replace(' ', '_')
if not re.match('^[a-z0-9_-]+$', identifier):
raise ValueError(f'provided identifier {identifier} contains invalid'
' characters.')
return identifier
def banner_url(id):
return f'https://www.nationstates.net/images/banners/{id}.jpg'
def timestamp(line):
return datetime.utcfromtimestamp(int(line))
def utc_seconds(datetime_):
return int(datetime_.replace(tzinfo=timezone.utc).timestamp())
def unscramble_encoding(text):
"""This is a workaround for a bug in the NS server-side code.
(This entire lib is, honestly.)
Specifically, somewhere in the process W-1252 encoded text is
wrongly interpreted to be ISO-8859-1, resulting in *some* characters
being deterministically unintentionally replaced with useless to the
user Unicode control chars.
This is a very common problem. Common enough, in fact, to be
accounted for in the HTML treatment of Character References as
defined by the specification. Well, it is technically a parse
error, but nobody really cares since the correct, expected character
is returned. For this reason, the bug is not present (or at least
not visible) on the NS web interface, and only shows itself when
dealing with the API.
Interestingly enough, these characters are not always serialized as
NCRs, in the dispatch CDATA they are represented literally, meaning
that even modifying the XML parser to include a bit of HTML leniency
would not be enough. Not that anyone would do that regardless.
    This function reverses the process, substituting the unprintable mess
returned by NS for the Unicode characters it must have originated
from.
It's a bit ugly, but gets the job done.
"""
return text.translate(unscramble_table)
unscramble_table = str.maketrans({
'\u0080': '\N{EURO SIGN}',
'\u0082': '\N{SINGLE LOW-9 QUOTATION MARK}',
'\u0083': '\N{LATIN SMALL LETTER F WITH HOOK}',
'\u0084': '\N{DOUBLE LOW-9 QUOTATION MARK}',
'\u0085': '\N{HORIZONTAL ELLIPSIS}',
'\u0086': '\N{DAGGER}',
'\u0087': '\N{DOUBLE DAGGER}',
'\u0088': '\N{MODIFIER LETTER CIRCUMFLEX ACCENT}',
'\u0089': '\N{PER MILLE SIGN}',
'\u008A': '\N{LATIN CAPITAL LETTER S WITH CARON}',
'\u008B': '\N{SINGLE LEFT-POINTING ANGLE QUOTATION MARK}',
'\u008C': '\N{LATIN CAPITAL LIGATURE OE}',
'\u008E': '\N{LATIN CAPITAL LETTER Z WITH CARON}',
'\u0091': '\N{LEFT SINGLE QUOTATION MARK}',
'\u0092': '\N{RIGHT SINGLE QUOTATION MARK}',
'\u0093': '\N{LEFT DOUBLE QUOTATION MARK}',
'\u0094': '\N{RIGHT DOUBLE QUOTATION MARK}',
'\u0095': '\N{BULLET}',
'\u0096': '\N{EN DASH}',
'\u0097': '\N{EM DASH}',
'\u0098': '\N{SMALL TILDE}',
'\u0099': '\N{TRADE MARK SIGN}',
'\u009A': '\N{LATIN SMALL LETTER S WITH CARON}',
'\u009B': '\N{SINGLE RIGHT-POINTING ANGLE QUOTATION MARK}',
'\u009C': '\N{LATIN SMALL LIGATURE OE}',
'\u009E': '\N{LATIN SMALL LETTER Z WITH CARON}',
'\u009F': '\N{LATIN CAPITAL LETTER Y WITH DIAERESIS}',
})
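# A small self-check of the mapping above. The helper below is a hypothetical
# example (the mangled string is made up, not real NS output): text that went
# through the W-1252 -> ISO-8859-1 round trip shows up with C1 control
# characters, and unscramble_encoding() maps them back.
def _unscramble_example():
    mangled = 'It\u0092s a \u0093test\u0094 \u0096 right?'
    return unscramble_encoding(mangled)  # "It's a “test” – right?"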
class aobject:
"""Inheriting this class allows you to define an async __init__.
Code shamelessly ripped from StackOverflow.
Before getting angry at me for abusing python features, remind
yourself that all async/await code is already an abuse of generators
and embrace the simple truth that practicality beats purity.
"""
async def __new__(cls, *a, **kw):
instance = super().__new__(cls)
await instance.__init__(*a, **kw)
return instance
async def __init__(self):
pass
def actually_synchronous(async_function):
def wrapper(*args, **kwargs):
coro_object = async_function(*args, **kwargs)
try:
coro_object.send(None)
except StopIteration as e:
return e.value
else:
raise TypeError("the function supplied isn't actually synchronous")
return wrapper
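# Usage sketch (hypothetical): a coroutine that never awaits anything can be
# driven to completion synchronously, e.g.
#   @actually_synchronous
#   async def three():
#       return 3
#   three()  # -> 3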
async def alist(asyncgen):
return [item async for item in asyncgen]
def datetime_to_ns(then):
"""Transform a :any:`datetime.datetime` into a NationStates-style
string.
For example "6 days ago", "105 minutes ago", etc.
"""
if then == datetime(1970, 1, 1, 0, 0):
return 'Antiquity'
now = datetime.utcnow()
delta = now - then
seconds = delta.total_seconds()
# There's gotta be a better way to do this...
years, seconds = divmod(seconds, 60*60*24*365)
days, seconds = divmod(seconds, 60*60*24)
hours, seconds = divmod(seconds, 60*60)
minutes, seconds = divmod(seconds, 60)
years = int(years)
days = int(days)
hours = int(hours)
minutes = int(minutes)
seconds = round(seconds)
if years > 1:
if days > 1:
return f'{years} years {days} days ago'
elif days == 1:
            return f'{years} years 1 day ago'
        return f'{years} years ago'
if years == 1:
if days > 1:
return f'1 year {days} days ago'
elif days == 1:
return '1 year 1 day ago'
return '1 year ago'
if days > 3:
return f'{days} days ago'
if days > 1:
if hours > 1:
return f'{days} days {hours} hours ago'
elif hours == 1:
return f'{days} days 1 hour ago'
return f'{days} days ago'
if days == 1:
if hours > 1:
return f'1 day {hours} hours ago'
elif hours == 1:
return '1 day 1 hour ago'
return '1 day ago'
if hours > 1:
return f'{hours} hours ago'
if hours == 1:
return f'{minutes + 60} minutes ago'
if minutes > 1:
return f'{minutes} minutes ago'
if minutes == 1:
return '1 minute ago'
return 'Seconds ago'
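# Rough smoke test (hypothetical helper; the exact strings depend on the time of
# the call, so they are only indicated in the comments):
def _datetime_to_ns_examples():
    from datetime import timedelta
    now = datetime.utcnow()
    return (
        datetime_to_ns(datetime(1970, 1, 1)),          # 'Antiquity'
        datetime_to_ns(now - timedelta(days=6)),       # '6 days ago'
        datetime_to_ns(now - timedelta(minutes=105)),  # '105 minutes ago'
    )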
|
micha030201/aionationstates
|
aionationstates/utils.py
|
utils.py
|
py
| 6,383 |
python
|
en
|
code
| 0 |
github-code
|
6
|
37635242690
|
from videos_freeze_analyzer import VideosFreezeAnalyzer
from video_valid_points_list_generator import dowload_url
from video_valid_points_list_generator import VideoValidPointsListGeneratorFfmpeg
from video_freeze_analyzer import VideoFreezeAnalyzer
import json
def main(urls):
files = []
for url in urls:
files.append(dowload_url(url))
videos_list =[]
for file_name in files:
video_valid_list = VideoValidPointsListGeneratorFfmpeg(file_name).generate_valid_points_list()
videos_list.append(VideoFreezeAnalyzer().analyze(video_valid_list))
videos_output = VideosFreezeAnalyzer(videos_list).analyze()
results = json.dumps(videos_output, indent=4)
print(results)
if __name__ == '__main__':
urls = ["https://storage.googleapis.com/hiring_process_data/freeze_frame_input_a.mp4",
"https://storage.googleapis.com/hiring_process_data/freeze_frame_input_b.mp4",
"https://storage.googleapis.com/hiring_process_data/freeze_frame_input_c.mp4"]
main(urls)
|
EderRobins/video_freeze_analyzer
|
main.py
|
main.py
|
py
| 1,064 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72638922747
|
import pandas as pd
from dotenv import load_dotenv
import os
# load env
load_dotenv()
# load dataset
url = "https://raw.githubusercontent.com/erijmo/3690/main/healthcare_dataset.csv"
df = pd.read_csv(url)
# set api key
api_key = os.getenv("OPENAI_API_KEY")
def get_healthcare_response(user_input, user_name, df):
# search for keyword in user input
for column in df.columns:
if column.lower() in user_input:
response = f"{user_name}, your {column.lower()} is {df[column].iloc[0]}"
return response
# if no keyword located, ask for clarification
return "I'm sorry, I couldn't understand your request. Can you please provide more details?"
# prompt response
print("HealthcareBot: Hello! I'm your HealthcareBot. May I know your name, please?")
while True:
user_name = input("User: ")
# check if the user's name is in the system
if user_name.lower() in df["Name"].str.lower().values:
print(f"HealthcareBot: Thank you, {user_name}! How can I assist you today?")
break
else:
print("HealthcareBot: I'm sorry, but I couldn't find your name in the system. Please try again.")
# user interaction loop
while True:
user_input = input("User: ")
# check if any exit-related keywords are present in the user input
if any(keyword in user_input.lower() for keyword in ['exit', 'bye', 'quit']):
print("HealthcareBot: Goodbye! If you have more questions, feel free to ask.")
break
response = get_healthcare_response(user_input, user_name, df)
if response:
print("HealthcareBot:", response)
|
erijmo/3690
|
chatbot.py
|
chatbot.py
|
py
| 1,661 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43242935161
|
from thumbor.utils import logger
try:
import cv2 # noqa
import numpy as np # noqa
CV_AVAILABLE = True
except ImportError:
CV_AVAILABLE = False
class BaseDetector:
def __init__(self, context, index, detectors):
self.context = context
self.index = index
self.detectors = detectors
def verify_cv(self) -> bool:
if CV_AVAILABLE:
return True
logger.error(
"OpenCV (cv2) is not available for thumbor. "
"thumbor.detectors.local_detector.CascadeLoaderDetector "
"can't be executed. Skipping..."
)
return False
async def detect(self):
raise NotImplementedError()
async def next(self):
if not self.detectors or self.index > len(self.detectors) - 2:
return
next_detector = self.detectors[self.index + 1](
self.context, self.index + 1, self.detectors
)
await next_detector.detect()
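# A hypothetical subclass (not part of thumbor) showing how the chain is meant to
# be used: do this detector's own work, then hand control to the next detector.
class NoopDetector(BaseDetector):
    async def detect(self):
        if not self.verify_cv():
            return await self.next()
        # a real detector would inspect the image and update self.context here
        return await self.next()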
|
thumbor/thumbor
|
thumbor/detectors/__init__.py
|
__init__.py
|
py
| 983 |
python
|
en
|
code
| 9,707 |
github-code
|
6
|
34429645121
|
class Node:
def __init__(self, data):
self.data = data
self.nref = None
self.pref = None
class Queue:
def __init__(self):
self.start = None
self.end = None
def is_empty(self):
return self.start is None
def pop(self):
if self.is_empty():
return None
else:
val = self.start.data
if self.start == self.end:
self.start = None
self.end = None
else:
self.start = self.start.nref
self.start.pref = None
return val
def push(self, val):
new_node = Node(val)
if self.is_empty():
self.start = new_node
self.end = new_node
else:
new_node.pref = self.end
self.end.nref = new_node
self.end = new_node
def insert(self, n, val):
if n == 0:
new_node = Node(val)
new_node.nref = self.start
self.start.pref = new_node
self.start = new_node
else:
current = self.start
for i in range(n-1):
current = current.nref
if current is None:
raise IndexError("Index out of range")
new_node = Node(val)
new_node.nref = current.nref
new_node.pref = current
if current.nref is not None:
current.nref.pref = new_node
current.nref = new_node
def print(self):
current = self.start
while current is not None:
print(current.data, end=' ')
current = current.nref
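# Short usage sketch (hypothetical, not part of the assignment):
if __name__ == '__main__':
    q = Queue()
    for value in (1, 2, 3):
        q.push(value)
    q.insert(1, 99)   # splice 99 in right after the first element
    q.print()         # prints: 1 99 2 3
    print()
    print(q.pop())    # prints: 1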
|
scary327/python_skillbox
|
mod5/task2.py
|
task2.py
|
py
| 1,687 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38666481157
|
import sys
def qtm(seq):
result = 0
for move in seq:
if move.startswith("("):
result += 0 # this line does nothing, but I added it for clarity that we weight AUF as 0
elif "2" in move:
result += 2
else:
result += 1
return result
def htm(seq):
result = 0
for move in seq:
if move.startswith("("):
result += 0 # this line does nothing, but I added it for clarity that we weight AUF as 0
else:
result += 1
return result
eval_dict = {
"R": 1,
"R'": 1,
"R2": 1.5,
"L": 1.1,
"L'": 1.1,
"L2": 1.6,
"U": 1,
"U'": 1,
"U2": 1.3,
"D": 1.1,
"D'": 1.1,
"D2": 1.9,
"F": 1.6,
"F'": 1.6,
"F2": 1.6,
"B": 2,
"B'": 2.2,
"B2": 3.5,
}
def eval_sequence(seq):
result = 0
for move in seq:
if move.startswith("("):
result += 0 # this line does nothing, but I added it for clarity that we weight AUF as 0
else:
result += eval_dict[move]
return result
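# e.g. eval_sequence(["(U)", "R", "U'", "R'"]) == 3.0: the parenthesised AUF is
# weighted 0 and each remaining move contributes its eval_dict weight.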
if __name__ == '__main__' :
if len(sys.argv) == 2:
file_to_process = sys.argv[1]
else:
print("please provide a file as an argument")
exit(1)
all_sequences = []
with open(file_to_process, "r", encoding="UTF-8") as f:
for line in f:
line = line.strip()
if line and not line.startswith("INFO"):
seq = line.split()
#remove U.
if seq[0].startswith("U"):
seq[0] = "(%s)" % seq[0]
if seq[-1].startswith("U"):
seq[-1] = "(%s)" % seq[-1]
all_sequences.append([seq, eval_sequence(seq)])
for (alg, score) in sorted(all_sequences, key = lambda x:x[1]):
print("score=%.2f qtm=%d htm=%d\n%s" % (score, qtm(alg), htm(alg), " ".join(alg)))
|
kuba97531/kubesolver
|
src/py/sort_algs.py
|
sort_algs.py
|
py
| 1,942 |
python
|
en
|
code
| 4 |
github-code
|
6
|
70063293948
|
import socket
import tkinter as tk
from tkinter import *
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Connect to the server
host = '10.0.65.12'
port = 5556
s.connect((host, port))
print('Connected to the server')
name = input("What's your name: ")
print(name)
s.send(str.encode(name))
flag = s.recv(1042).decode()
if flag:
root = Tk()
root.title("21 Dares")
canvas = Canvas(width=800,height=250)
root.geometry("800x600")
para="Basically, a online multiplayer game where players will take turns saying numbers 1,2 or\n 3 and it will add up to the main thing, the person whose number reaches 21 has to do a dare.\n Dare will ask that person to turn on the webcam and the other players will ask that person to \ndo a dare. In 3 mins that person has to do the dare. "
label_head = tk.Label(root, text="Welcome To 21 Dares",font=('Arial,',40),foreground='gold3')
label_head.pack()
canvas.create_rectangle(140,70,310,240,outline ="black",fill ="white",width = 2)
canvas.create_rectangle(330,70,500,240,outline ="black",fill ="white",width = 2)
canvas.create_rectangle(520,70,690,240,outline ="black",fill ="white",width = 2)
canvas.pack()
def start_game():
root.destroy()
root2 = Tk()
root2.title("21 Dares")
root2.geometry("1500x800")
w= root2.winfo_screenwidth()
h= root2.winfo_screenheight()
canvs2 = tk.Canvas(root2, height=900, width=1500)
count_bx = canvs2.create_rectangle(600,50,850,300,fill="DarkSeaGreen1")
ply1 = canvs2.create_oval(600,350,850,600,fill="lavender blush1")
ply1_lb = canvs2.create_text(725,475,text="0",font=('Calibri',90))
ply2 = canvs2.create_oval(900,350,1150,600,fill="lavender blush1")
ply2_lb = canvs2.create_text(1025,475,text="0",font=('Calibri',90))
ply3 = canvs2.create_oval(300,350,550,600,fill="lavender blush1")
ply3_lb = canvs2.create_text(425,475,text="0",font=('Calibri',90))
lb_count = canvs2.create_text(720,180,text="0",font=('Calibri',90))
bt_1=tk.Button(root2,text="1",font=('Calibri',25),foreground='black',background="light goldenrod yellow")
bt_1.place(x=420, y=700)
bt_2=tk.Button(root2,text="2",font=('Calibri',25),foreground='black',background="light goldenrod yellow")
bt_2.place(x=720, y=700)
bt_3=tk.Button(root2,text="3",font=('Calibri',25),foreground='black',background="light goldenrod yellow")
bt_3.place(x=1020, y=700)
canvs2.pack()
root2.mainloop()
label_desp = tk.Label(root,text=para,font=('Arial',20),foreground='dark olive green')
label_desp.pack(padx=5,pady=120)
start_button=tk.Button(root,text="Start the game",font=('Calibri',35),foreground='sandy brown',command = start_game)
start_button.pack(padx=5,pady=20)
root.mainloop()
# Close the socket
s.close()
|
CrazyKanav/21_Dares
|
DKINTER/client.py
|
client.py
|
py
| 2,924 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21341173003
|
import torch
from torch.optim import SGD
import torch.nn.functional as F
from sklearn.metrics import accuracy_score
from models_torch.FFM import FFM_Layer
from utils.load_data import load_criteo_data
if __name__ == '__main__':
(X_train, y_train), (X_test, y_test), feature_info = load_criteo_data('dataset/criteo_sample.csv',
sparse_return='category')
X_train = torch.tensor(X_train, dtype=torch.float32)
X_test = torch.tensor(X_test, dtype=torch.float32)
y_train = torch.tensor(y_train, dtype=torch.float32)
y_test = torch.tensor(y_test, dtype=torch.float32)
    # parameters
k = 8
n_epoch = 10
lr = 0.01
    # initialization
model = FFM_Layer(dense_features=feature_info['dense_feature'], sparse_features=feature_info['sparse_feature'],
sparse_feature_dim=feature_info['max_one_hot_dim'], k=k)
optim = SGD(lr=lr, params=model.parameters(), weight_decay=1e-4)
criterion = F.binary_cross_entropy
    # train the model
for epoch in range(n_epoch):
model.train()
logits = torch.reshape(model(X_train), (-1,))
loss = criterion(logits, y_train)
        # update the weights
        optim.zero_grad()  # clear accumulated gradients
loss.backward()
optim.step()
if epoch % 1 == 0 and epoch:
print('epoch: {}, loss: {}'.format(epoch, loss))
    # evaluate the model
model.eval()
with torch.no_grad():
pred = torch.reshape(model(X_test), (-1,))
loss = criterion(pred, y_test)
pred = [1 if x > 0.5 else 0 for x in pred]
print('acc: {}, loss: {}'.format(accuracy_score(y_test, pred), loss))
|
KrianJ/CtrEstimate
|
predict_ffm_torch.py
|
predict_ffm_torch.py
|
py
| 1,739 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10695567948
|
import subprocess
from multiprocessing import Pool
import os
import numpy as np
import sys
def Thread(arg):
print(arg)
file = open('output/' + str(0) + '.log', 'w')
subprocess.call(arg, shell=True, stdout=file)
def main():
seed = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
batch = np.array([10, 50, 100, 200])
# batch = batch.repeat(9)
batch = np.tile(batch, 9)
hidden = np.array([25, 50, 100])
hidden = hidden.repeat(4)
hidden = np.tile(hidden, 3)
optim = {0: 'adam', 1: 'adagrad', 2: 'adadelta', 3: 'sgd'}
op_idx = np.array([0, 1, 2, 3])
op_idx = op_idx.repeat(12)
lr = np.array([0.1, 0.01, 0.001])
ed_pass = np.array([4, 8, 10])
idx = [x for x in range(36)]
arglist = []
st = int(sys.argv[1])
print(st)
end = int(sys.argv[2])
print(end)
for i in range(st, end):
opt_st = optim[op_idx[i]]
pcmd = "python dt_pl_parser.py --train data/wsj10_tr --tag_num 1 --hidden " + str(
hidden[i]) + " " + "--batch " + str(
batch[i]) + " " + "--optim " + opt_st + " " + "--do_eval --use_trigram " + "--sample_idx " + str(idx[i])
arglist.append(pcmd)
print(pcmd)
p = Pool(4)
p.map(Thread, arglist, chunksize=1)
p.close()
p.join()
if __name__ == '__main__':
main()
|
mikufan/NCRFAE_DepParsing
|
noderun_pl_model.py
|
noderun_pl_model.py
|
py
| 1,323 |
python
|
en
|
code
| 3 |
github-code
|
6
|
43193622036
|
#!/usr/bin/env python
import rospy
import smach
from mavros_msgs.msg import WaypointList
from std_msgs.msg import Bool,String
from PrintColours import *
#from aerialcore_common.srv import ConfigMission, ConfigMissionResponse
#
# def mission_callback(req):
# rospy.loginfo(" /mission/new service was called")
# return ConfigMissionResponse(success=True)
class GCSConnection(smach.State):
def __init__(self,autopilot,uav_id):
smach.State.__init__(
self, outcomes=['mission_upload', 'shutdown'])
#rospy.Service('uav_2/mission/new', ConfigMission, mission_callback)
self.autopilot = autopilot
self.uav_id = uav_id
def execute(self, ud):
rospy.loginfo('[GCSconnection] - GCSconnection state')
#rospy.set_param("/uav_{}_sm/autopilot".format(self.uav_id),"GCSconnection") changed
airframe_pub = rospy.Publisher("/uav_{}_sm/com/airframe_type".format(self.uav_id), String, queue_size=10)
mission_state_pub = rospy.Publisher("/uav_{}_sm/com/mission_state".format(self.uav_id), String, queue_size=10)
if self.autopilot == "px4":
airframe = self.autopilot + "/vtol"
if self.autopilot == "dji":
airframe = self.autopilot + "/M210"
# transition to gcs_connection state
while not rospy.is_shutdown():
airframe_pub.publish(airframe)
mission_state_pub.publish("uav_{} connected to the GCS".format(self.uav_id))
if self.autopilot == "px4":
waypointList_msg = rospy.wait_for_message("/uav_{}/mavros/mission/waypoints".format(self.uav_id), WaypointList)# modify
rospy.loginfo(CBLUE+"There are %d waypoints in the mission"+CEND, len(waypointList_msg.waypoints))
if len(waypointList_msg.waypoints) > 0:
rospy.loginfo("Vehicle with PX4 with MISSION LOADED has %d waypoints", len(waypointList_msg.waypoints))
return 'mission_upload'
else:
rospy.loginfo("Vehicle with PX4 have no mission loaded")
rospy.sleep(1)
elif self.autopilot == "dji":
rospy.loginfo(CBLUE+"Vehicle with DJI"+CEND)
cmd_msg = rospy.wait_for_message("/uav_{}/dji_sm/upload_mission".format(self.uav_id), Bool)
if cmd_msg.data == True:
rospy.loginfo(CBLUE+"Vehicle with DJI has mission loaded"+CEND)
return 'mission_upload'
# if (True):# Modify
# rospy.sleep(2)
# return 'mission_upload'
return 'shutdown'
|
miggilcas/muav_state_machine
|
scripts/AgentStates/gcs_connection.py
|
gcs_connection.py
|
py
| 2,658 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3121294529
|
import os
import sys
import time
from functools import partial
from multiprocessing import Pool
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.service import Service
from webdriver_manager.chrome import ChromeDriverManager
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--headless')
chrome_options.add_argument('window-size=1920,1080')
chrome_options.add_argument('start-maximised')
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--disable-dev-shm-usage')
def scroll_to_page_end_n_times(browser, s, page_load_wait_seconds):
for _ in range(s):
print(f'ScrollHeight before scrolling: {browser.execute_script("return document.documentElement.scrollHeight")}')
browser.execute_script('window.scrollTo(0, document.body.scrollHeight);')
print(f'Scrolled, waiting for {page_load_wait_seconds} seconds to load page')
time.sleep(page_load_wait_seconds)
print(f'ScrollHeight after scrolling: {browser.execute_script("return document.documentElement.scrollHeight")}')
return
def links_collection(main_page_link, num_links, start_at_link_num, scroll_limit, page_load_wait_seconds, element_load_wait_seconds):
print('Working in background...')
n = 0
s = 0
link_num = start_at_link_num
links_dict = {}
chrome_driver = webdriver.Chrome(service=Service(ChromeDriverManager().install()), options=chrome_options)
with chrome_driver as browser:
browser.get(main_page_link)
while n < num_links:
try:
link_path = '/html/body/div[1]/div/div[1]/div/div[3]/div/div/div/div[1]/div[1]/div[2]/div/div/div/div/div/div/div[' + str(
link_num) + ']/a/div[1]/div/div/div/div/div[2]/div[1]/div/div/div/div[2]/span/span/object/a'
page = WebDriverWait(browser, element_load_wait_seconds).until(EC.visibility_of_element_located((By.XPATH, link_path)))
event_url_long = page.get_attribute('href')
event_url_cutoff = event_url_long[event_url_long.find('events/') + 7:].find('/')
event_url = event_url_long[:event_url_long.find('events/') + 7 + event_url_cutoff + 1]
event_title = page.text
print(f'Fetching link for event: {event_title}')
links_dict[event_title + str(n)] = event_url
link_num += 1
n += 1
except Exception as e:
# print(f'links_collection exception:\n{e}')
if s < scroll_limit:
s += 1
scroll_to_page_end_n_times(browser, s, page_load_wait_seconds)
else:
break
print(f'\nNumber of links: {str(len(links_dict.items()))}')
return links_dict
def get_location(browser, element_load_wait_seconds):
try:
link_path = '/html/body/div[1]/div/div[1]/div/div[3]/div/div/div/div[1]/div[1]/div[2]/div/div/div[1]/div[1]/div[2]/div/div[2]/div/div[1]/div/div/div[3]'
page = WebDriverWait(browser, element_load_wait_seconds).until(EC.visibility_of_element_located((By.XPATH, link_path)))
e_location = page.text
except Exception as e:
e_location = 'n/a'
#print(f'get_location exception:\n{e}')
return e_location
def get_datetime(browser, element_load_wait_seconds):
try:
link_path = '/html/body/div[1]/div/div[1]/div/div[3]/div/div/div/div[1]/div[1]/div[2]/div/div/div[1]/div[1]/div[2]/div/div[2]/div/div[1]/div/div/div[1]/span'
page = WebDriverWait(browser, element_load_wait_seconds).until(EC.visibility_of_element_located((By.XPATH, link_path)))
e_datetime_str = page.text
except Exception as e:
e_datetime_str = 'n/a'
# print(f'get_datetime exception:\n{e}')
return e_datetime_str
def get_host_and_num_people_responded(browser, element_load_wait_seconds):
try:
link_path = '/html/body/div[1]/div/div[1]/div/div[3]/div/div/div/div[1]/div[1]/div[2]/div/div/div[2]/div/div/div/div/div[1]/div/div/div'
page = WebDriverWait(browser, element_load_wait_seconds).until(EC.visibility_of_element_located((By.XPATH, link_path)))
children = page.find_elements(By.XPATH, './child::*')
e_host = ''
e_num_people_responded = ''
for child in children:
textContent = child.text
if 'Event by' in textContent:
e_host = textContent[textContent.find('Event by') + 9:]
elif 'people responded' in textContent:
e_num_people_responded = textContent[:textContent.find('people responded')].strip()
elif 'person responded' in textContent:
e_num_people_responded = textContent[:textContent.find('person responded')].strip()
else:
pass
if e_host == '':
e_host = 'n/a'
if e_num_people_responded == '':
e_num_people_responded = 'n/a'
except Exception as e:
e_host = 'n/a'
e_num_people_responded = 'n/a'
# print(f'get_host_and_num_people_responded exception:\n{e}')
return e_host, e_num_people_responded
def get_description(browser, element_load_wait_seconds):
try:
link_path = '/html/body/div[1]/div/div[1]/div/div[3]/div/div/div/div[1]/div[1]/div[2]/div/div/div[2]/div/div/div/div/div[1]/div/div/div/div[last()]/div/span'
page = WebDriverWait(browser, element_load_wait_seconds).until(EC.visibility_of_element_located((By.XPATH, link_path)))
children = page.find_elements(By.XPATH, './child::*')
for child in children:
try:
see_more_btn = child.find_element(By.XPATH, "./div[@role='button']")
see_more_btn.click()
except:
pass
children = page.find_elements(By.XPATH, './child::*')
e_description = ''
for child in children:
e_description += child.text
e_description += '\n'
if 'See less' in e_description:
e_description = e_description[:e_description.find(' See less')]
elif 'See more' in e_description:
e_description = e_description[:e_description.find('... See more')]
else:
pass
if e_description == '':
e_description = 'n/a'
except Exception as e:
e_description = 'n/a'
# print(f'get_description exception:\n{e}')
return e_description
def get_image_url(browser, element_load_wait_seconds):
try:
link_path = '/html/body/div[1]/div/div[1]/div/div[3]/div/div/div/div[1]/div[1]/div[2]/div/div/div[1]/div[1]/div[1]/div/div/div[2]/div/a/div/div/div/div/div/img'
page = WebDriverWait(browser, element_load_wait_seconds).until(EC.visibility_of_element_located((By.XPATH, link_path)))
img_path = page.get_attribute('src')
except Exception as ignore:
try:
link_path = '/html/body/div[1]/div/div[1]/div/div[3]/div/div/div/div[1]/div[1]/div[2]/div/div/div[1]/div[1]/div[1]/div/div/div[2]/div/a/div/div/div/div/img'
page = WebDriverWait(browser, element_load_wait_seconds).until(EC.visibility_of_element_located((By.XPATH, link_path)))
img_path = page.get_attribute('src')
except Exception as e:
img_path = 'n/a'
# print(f'get_image_url exception:\n{e}')
return img_path
def crawl_links(element_load_wait_seconds, current_link):
e_location = 'n/a'
e_datetime = 'n/a'
e_host = 'n/a'
e_num_people_responded = 'n/a'
e_description = 'n/a'
img_path= 'n/a'
try:
chrome_driver = webdriver.Chrome(service=Service(ChromeDriverManager().install()), options=chrome_options)
with chrome_driver as browser:
print(f'Crawling on: {current_link}')
# Using Selenium
browser.get(current_link)
e_location = get_location(browser, element_load_wait_seconds)
e_datetime = get_datetime(browser, element_load_wait_seconds)
e_host, e_num_people_responded = get_host_and_num_people_responded(browser, element_load_wait_seconds)
e_description = get_description(browser, element_load_wait_seconds)
img_path = get_image_url(browser, element_load_wait_seconds)
except Exception as e:
print(f'crawl_links exception:\n{e}') # if the links are not found in a page, print exception
return e_location, e_host, e_num_people_responded, e_datetime, e_description, img_path
def main(dict):
num_links = dict['num_links']
start_at_link_num = dict['start_at_link_num']
scroll_limit = dict['scroll_limit']
page_load_wait_seconds = dict['page_load_wait_seconds']
element_load_wait_seconds = dict['element_load_wait_seconds']
event_search_keyword = dict['event_search_keyword']
main_page_link = f'https://www.facebook.com/events/search/?q={event_search_keyword}'
pool_size = dict['pool_size']
links_dict = links_collection(main_page_link, num_links, start_at_link_num, scroll_limit, page_load_wait_seconds, element_load_wait_seconds)
print('\nInitiating scraping...')
#pool = Pool(processes=pool_size) # creates pool of n processes at a time
#func = partial(crawl_links, element_load_wait_seconds)
#e_details_list = pool.map(func, list(links_dict.values())) # maps the function crawl_links (with arg element_load_wait_seconds) with the links_dict.items() input
e_details_list = [crawl_links(element_load_wait_seconds, link) for link in list(links_dict.values())]
return_dict = { 'payload' : [] }
e_details_labels = ['location', 'host', 'numPeopleResponded', 'datetime', 'details', 'imgPath']
for (e_name, e_link), e_details in zip(links_dict.items(), e_details_list):
event_dict = {}
event_dict['link'] = e_link
event_dict['name'] = e_name[:-1]
for e_detail_item_label, e_detail_item in zip(e_details_labels, e_details):
event_dict[e_detail_item_label] = e_detail_item
return_dict['payload'].append(event_dict)
return return_dict
|
davi1972/greener-app
|
greener-scraper/greener-scraper-cli.py
|
greener-scraper-cli.py
|
py
| 9,583 |
python
|
en
|
code
| 1 |
github-code
|
6
|
18216861751
|
#Fall2019W7
#C-Elevator Trouble
def failMsg():
print("use the stairs")
def main():
params = input().split()
f = int(params[0])
s = int(params[1])
g = int(params[2])
u = int(params[3])
d = int(params[4])
floorDif = g - s
curr = s
buttonPresses = 0
if (floorDif % 2 == 1 and u % 2 == 0 and d % 2 == 0):
failMsg()
#incomplete
#use breadth first search
#queues or dictionaries??
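
# A hypothetical completion (not the author's code): breadth-first search over
# the floors 1..f, where one button press moves up by u or down by d; the caller
# would print the result, or call failMsg() when None is returned.
from collections import deque

def bfs_presses(f, s, g, u, d):
    dist = {s: 0}                       # floor -> fewest presses to reach it
    queue = deque([s])
    while queue:
        floor = queue.popleft()
        if floor == g:
            return dist[floor]
        for nxt in (floor + u, floor - d):
            if 1 <= nxt <= f and nxt not in dist:
                dist[nxt] = dist[floor] + 1
                queue.append(nxt)
    return None                         # goal floor unreachable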
|
andrew-qu2000/Programming-Club
|
Poly Programming Club/Fall2019W7C.py
|
Fall2019W7C.py
|
py
| 450 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41146228063
|
from flask import Flask, g, render_template, request, send_from_directory, url_for
import sqlite3, os, datetime
from werkzeug.utils import redirect, secure_filename
SITENAME = 'SaLeeMas - PicShare'
# Define the folder into which the photos
# will be uploaded
UPLOAD_FOLDER = 'uploads'
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif'}
app = Flask(__name__)
# Careful: the local directory must be specified!
DATABASE = 'app.db'
# We define a global variable that makes
# our files accessible even from the templates
# (taken from the Flask docs)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
# Connect to the DB (code taken from the Flask docs)
def get_db():
db = getattr(g, '_database', None)
if db is None:
db = g._database = sqlite3.connect(DATABASE)
return db
# The function that specifies the allowed file types
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
# The homepage route
@app.route("/", methods=["GET", "POST"])
def show_pictures():
db = get_db()
print("All categories")
if request.method == 'GET':
categories = db.execute("SELECT name from categories order by id")
pictures = db.execute("SELECT id, title, filename \
from pictures order by upload_date desc")
# print(pictures.fetchall())
return render_template("index.html",
all_pictures=pictures, all_categories=categories)
# The homepage route with the category name as an argument
@app.route("/<category>", methods=["GET", "POST"])
def show_category_pictures(category):
db = get_db()
if request.method == 'GET':
# print("Chosen category", category)
categories = db.execute("SELECT name from categories order by id")
if category:
print("category", category)
pictures = db.execute("SELECT pictures.id, title, filename \
from pictures left join categories \
on category_id = categories.id \
where categories.name = (?) \
order by upload_date desc", [category])
# print(pictures.fetchall())
return render_template("index.html",
all_pictures=pictures,
all_categories=categories,
chosen_category=category)
# The route for the path of the image to return, with
# the "uploads/" directory name followed by the image file name
@app.route('/uploads/<filename>')
def download_file(filename):
print("send_from_directory",
send_from_directory(app.config["UPLOAD_FOLDER"], filename))
return send_from_directory(app.config["UPLOAD_FOLDER"], filename)
# The upload page route
@app.route("/upload", methods=["GET", "POST"])
def upload():
db = get_db()
categories_cursor = db.execute("select name from categories order by id;")
categories_name = categories_cursor.fetchall()
print("I am the result of your query: ", categories_name)
list_of_categories = []
for category in categories_name:
name = category[0]
list_of_categories.append(name)
print("i am the list of cat : ", list_of_categories)
if request.method == 'POST':
file = request.files['file']
print("I am the files.filename : ", file.filename)
if allowed_file(file.filename):
            filename = secure_filename(file.filename)  # this is the path
title = request.form.get("title")
description = request.form.get("description")
print(description, " - ", request.form.get("description"))
category = request.form.get("category")
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
upload_date = datetime.datetime.now()
# print("datetime.datetime.now()", datetime.datetime.now())
            # save the file in the DB
db = get_db()
if category:
cursor1 = db.execute("SELECT id from categories \
where name = ?", [category])
category_id = cursor1.fetchone()
# print(category_id[0])
db.execute("INSERT into pictures (title, filename, upload_date, category_id, description) \
values (? , ? , ? , ? , ? )",
[title, filename, upload_date, int(category_id[0]), description])
            # # check whether the title already exists
# cursor_title = db.execute(
# "SELECT id, title FROM pictures WHERE title = (?)", [title])
# print("I'am the cursor: ", cursor_title)
            # # We store it in a variable and display it
            # # with fetchone; if the result is not None, return 404
# title_request = cursor_title.fetchone()
# print("hey I'm the request title ", title_request)
# if title_request is not None: abort(404)
db.commit()
return render_template("picture_uploaded.html")
return render_template('upload.html', list_of_categories=list_of_categories)
# The picture page route
@app.route("/picture/<picture_id>", methods=["GET", "POST"])
def picture_id(picture_id):
if picture_id and request.method == 'POST':
comment = request.form.get("comment")
# print("I am the comment :", comment)
comment_date = datetime.datetime.now()
# print("datetime.datetime.now()", comment_date)
        # save the file in the DB
db = get_db()
db.execute("INSERT into comments (comment, comment_date, picture_id) \
values (? , ? , ?)",
[comment, comment_date, picture_id])
db.commit()
if picture_id and request.method == 'GET':
# print("I am the id of the chosen picture :", picture_id)
db = get_db()
pictures = db.execute("SELECT title, filename, upload_date, description, categories.name \
from pictures inner join categories \
on category_id = categories.id \
where pictures.id = (?)", [picture_id])
# print(pictures)
comments = db.execute("SELECT comment, comment_date \
from comments inner join pictures \
on picture_id = pictures.id \
where pictures.id = (?) \
order by comment_date desc", [picture_id])
# print(comments)
return render_template("picture.html",
all_pictures=pictures,
all_comments=comments)
# print("not picture_id")
return redirect("/picture/" + picture_id)
if __name__ == "__main__":
app.run(debug=True)
|
Sabrina-MORSLI/PicShare
|
picshare/run.py
|
run.py
|
py
| 7,210 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17522204148
|
import json
import sqlite3
from urllib import response
from fastapi.testclient import TestClient
import time
import pytest
from main import app, conn, c
from models import AtualizarFilme, AtualizarPlaneta, Filme, Planeta, Excluido, InserirPlaneta
client = TestClient(app)
# def test_create_schema():
# c.executescript("""
# BEGIN TRANSACTION;
# DROP TABLE IF EXISTS "Filme";
# CREATE TABLE IF NOT EXISTS "Filme" (
# "id" INTEGER NOT NULL,
# "Nome" TEXT NOT NULL,
# "Data_de_lancamento" TEXT NOT NULL,
# "Excluido" INTEGER NOT NULL,
# PRIMARY KEY("id")
# );
# DROP TABLE IF EXISTS "Planeta";
# CREATE TABLE IF NOT EXISTS "Planeta" (
# "id" INTEGER NOT NULL,
# "Nome" TEXT NOT NULL,
# "Clima" TEXT NOT NULL,
# "Diametro" INTEGER NOT NULL,
# "Populacao" INTEGER NOT NULL,
# "Excluido" INTEGER NOT NULL,
# PRIMARY KEY("id")
# );
# DROP TABLE IF EXISTS "Planeta_Apareceu_Filme";
# CREATE TABLE IF NOT EXISTS "Planeta_Apareceu_Filme" (
# "id" INTEGER NOT NULL UNIQUE,
# "PlanetaID" INTEGER NOT NULL,
# "FilmeID" INTEGER NOT NULL,
# "Excluido" INTEGER NOT NULL,
# PRIMARY KEY("id" AUTOINCREMENT)
# );
# INSERT INTO "Filme" VALUES (1,'A morte do jedi','2020-04-23 10:20:30.400000+02:30',0);
# INSERT INTO "Filme" VALUES (2,'O jedi não morreu','2021-04-23 10:20:30.400000+02:30',0);
# INSERT INTO "Filme" VALUES (3,'O jedi nunca morreu','1970-01-01 00:33:41+00:00',0);
# INSERT INTO "Filme" VALUES (4,'Ou será que morreu?','1970-01-01 00:33:41+00:00',0);
# INSERT INTO "Filme" VALUES (5,'Não morreu, eu sabia!','2032-04-23 10:20:30.400000+02:30',0);
# INSERT INTO "Planeta" VALUES (1,'Marte','vento',55,66,0);
# INSERT INTO "Planeta" VALUES (2,'Marte 2','vento',10000,564612,0);
# INSERT INTO "Planeta" VALUES (3,'Planeta Voador','string',787878,152314856,0);
# INSERT INTO "Planeta" VALUES (5,'Nao lembro','murky',5489645,5164,0);
# INSERT INTO "Planeta" VALUES (6,'Planetoide','string',48654,1,1);
# INSERT INTO "Planeta_Apareceu_Filme" VALUES (1,1,1,0);
# INSERT INTO "Planeta_Apareceu_Filme" VALUES (2,1,2,0);
# INSERT INTO "Planeta_Apareceu_Filme" VALUES (3,2,2,0);
# INSERT INTO "Planeta_Apareceu_Filme" VALUES (4,6,1,1);
# INSERT INTO "Planeta_Apareceu_Filme" VALUES (5,6,2,1);
# INSERT INTO "Planeta_Apareceu_Filme" VALUES (10,6,3,1);
# INSERT INTO "Planeta_Apareceu_Filme" VALUES (11,6,4,1);
# INSERT INTO "Planeta_Apareceu_Filme" VALUES (12,6,5,1);
# COMMIT;
# """)
def test_read_root():
response = client.get('/')
assert response.status_code == 200
assert response.json() == {'Hello,': 'World!'}
def test_read_planets_error_without_bool():
response = client.get('/api/v1/planets')
    # Without ?show_deleted=true
assert response.status_code == 422
def test_read_planets_deleted_true():
response = client.get('/api/v1/planets?show_deleted=true')
assert response.status_code == 200
print(type(response.json()))
# assert response.json() == [
# {
# "id": 1,
# "Nome": "Marte",
# "Clima": "vento",
# "Diametro": 55,
# "Populacao": 66,
# "Excluido": 0,
# "Filmes_em_que_apareceu": [
# 1,
# 2
# ]
# },
# {
# "id": 2,
# "Nome": "Marte",
# "Clima": "vento",
# "Diametro": 55,
# "Populacao": 66,
# "Excluido": 0,
# "Filmes_em_que_apareceu": [
# 2
# ]
# },
# {
# "id": 3,
# "Nome": "sexomaluco",
# "Clima": "string",
# "Diametro": 0,
# "Populacao": 0,
# "Excluido": 1,
# "Filmes_em_que_apareceu": []
# },
# {
# "id": 5,
# "Nome": "string",
# "Clima": "murky",
# "Diametro": 0,
# "Populacao": 0,
# "Excluido": 1,
# "Filmes_em_que_apareceu": []
# },
# {
# "id": 6,
# "Nome": "string",
# "Clima": "string",
# "Diametro": 0,
# "Populacao": 0,
# "Excluido": 1,
# "Filmes_em_que_apareceu": [
# 1,
# 2,
# 3,
# 4,
# 5
# ]
# }
# ]
def test_read_planets_deleted_false():
response = client.get('/api/v1/planets?show_deleted=false')
assert response.status_code == 200
def test_read_planet():
response = client.get('/api/v1/planets/1')
assert response.status_code == 200
assert response.json() == {
"id": 1,
"Nome": "Marte",
"Clima": "vento",
"Diametro": 55,
"Populacao": 66,
"Excluido": 0,
"Filmes_em_que_apareceu": [
1,
2
]
}
def test_create_planet_movie_doesnt_exist():
json={
"id": 61,
"Nome": "string",
"Diametro": 0,
"Populacao": 0,
"FilmesID": [
0
],
"Excluido": 1
}
response = client.post('/api/v1/planets',
json=json
)
assert response.status_code == 400
#assert response.json() == {"detail": 'Pelo menos um dos filmes inseridos não existe',}
# def test_create_planet():
# json={
# "id": 44,
# "Nome": "teste",
# "Diametro": 0,
# "Populacao": 0,
# "FilmesID": [
# 1
# ],
# "Excluido": 0
# }
# response = client.post('/api/v1/planets',
# json=json
# )
# assert response.status_code == 200
|
MarceloTerra0/FastAPI_TesteTuring
|
test_main.py
|
test_main.py
|
py
| 5,453 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27465756937
|
import keras.backend as K
import tensorflow as tf
import cv2
import imageio
import numpy as np
def square_sum(x):
return K.sum(K.square(x), axis=-1, keepdims=True)
def euclSq(x):
x, y = x
x = K.batch_flatten(x)
y = K.batch_flatten(y)
return square_sum(x - y)
def l2_normalize(x):
inv_sqrt = 1. / K.sqrt(K.maximum(square_sum(x), 1e-6))
return x * inv_sqrt
def gram_matrix(x):
filters = x.shape[3]
size = x.shape[1]
V = K.reshape(x, (-1, size * size, 1, filters))
V = K.permute_dimensions(V, (0, 3, 2, 1))
VT = K.permute_dimensions(V, (0, 2, 1, 3))
return K.sum(V * VT, axis=3)
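# Shape note (assuming NHWC feature maps): x is (batch, size, size, filters) and
# the result is (batch, filters, filters), i.e. one Gram matrix per example.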
def triplet_loss(x):
return K.maximum(x[0] - x[1] + 1, 0)
def gram(x):
m, n = map(int, x.shape[2:])
G = gram_matrix(x)
return G / (4 * m**2 * n**2)
def get_image(filepath):
with open(filepath, 'rb') as f:
img = imageio.imread(f)
img = crop_resize(img)
return np.clip(img / 255, 0, 1)
def crop_resize(img):
height, width = img.shape[:2]
if height > width:
center = height // 2
up = center - width // 2
down = center + width // 2
img = img[up:down, :, :]
elif height < width:
center = width // 2
left = center - height // 2
right = center + height // 2
img = img[:, left:right, :]
    img = cv2.resize(img, (256, 256), interpolation=cv2.INTER_LANCZOS4)
return img
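# Minimal usage sketch (assumed shapes; not part of the original project):
if __name__ == '__main__':
    dummy = np.zeros((480, 640, 3), dtype=np.uint8)  # wider than tall
    print(crop_resize(dummy).shape)                  # (256, 256, 3)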
|
ebatuhankaynak/DeepPotato
|
src/util.py
|
util.py
|
py
| 1,405 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39939937920
|
from mpl_toolkits.mplot3d import axes3d
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import csv
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from mpl_toolkits.mplot3d import Axes3D
import plotly.graph_objects as go
import plotly.express as px
import publico as func
pd.options.mode.chained_assignment = None # default='warn'
from dateutil import parser
def MediaFileRede(res_select, interval_time=5):
res_select.drop_duplicates(subset=None, keep="first", inplace=True)
    # create the new columns
res_select['Timer2'] = 0
res_select['Media2'] = 0.0
velo_total = 0.0
count=0
timer_atual = 0.0
timer_ant = 0.0
elapset_atual= 0.0
elapset_cumulativo = 0.0
count_timer=interval_time
for index, row in res_select.iterrows():
timer_atual = row['Tempo']
if (timer_ant!=0.0):
elapset_atual = float(row['Tempo']) - float(timer_ant)
# print(abs(elapset_atual))
elapset_cumulativo+=float(elapset_atual)
if ((elapset_cumulativo >= interval_time)):
# print('Chegou')
# break
media_velo = velo_total / count
res_select.at[index,"Media2"] = media_velo
res_select.at[index,"Timer2"] = count_timer
elapset_cumulativo=0.0
timer_ant = 0.0
velo_total=0.0
media_velo=0.0
count=0
count_timer+=interval_time
if (timer_atual != timer_ant):
timer_ant = timer_atual
velo_total = velo_total + row['Download']
count+=1
# remove zeros
res_select = res_select[(res_select['Timer2']!=0) & (res_select['Timer2']<=280) & (res_select['Media2']<300) ]
return res_select
EXP="70"
print("Loading Dataframe...")
# BASELINE GERAL ***************************************************
df1 = pd.read_csv("../repositorio/" + EXP + "/REDE_GERAL.csv")
df1['Download'] = df1['Download'].astype(float)
df1['Upload'] = df1['Upload'].astype(float)
df1['Tempo'] = df1['Tempo'].astype(float)
df1['Source'] = "BASELINE"
# df1_filtro = df1.loc[(df1['Bytes'] > 0)]
df1_select = df1[['Download', 'Source', 'Tempo']]
df1_select = MediaFileRede(df1_select)
# *************************************************************************
# BASELINE 1TO 2 **********************************************************
df2 = pd.read_csv("../repositorio/" + EXP + "/REDE_BASELINE_1TO2.csv")
df2['Download'] = df2['Download'].astype(float)
df2['Upload'] = df2['Upload'].astype(float)
# df2['Duracao'] = df2['Duracao'].astype(float)
df2['Tempo'] = df2['Tempo'].astype(float)
# df2['Bytes'] = df2['Bytes'].astype(float)
df2['Source'] = "1TO2"
# df4_filtro = 7df4.loc[(df4['Bytes'] > 0)]
df2_select = df2[['Download', 'Source', 'Tempo']]
df2_select = MediaFileRede(df2_select)
#********************************************************************
print("Loading Dataframe...")
# BASELINE RANDOM **********************************************************
df3 = pd.read_csv("../repositorio/" + EXP + "/REDE_BASELINE_RANDOM.csv")
df3['Download'] = df3['Download'].astype(float)
df3['Upload'] = df3['Upload'].astype(float)
# df3['Duracao'] = df3['Duracao'].astype(float)
df3['Tempo'] = df3['Tempo'].astype(float)
# df3['Bytes'] = df3['Bytes'].astype(float)
df3['Source'] = "RAND"
# df4_filtro = df4.loc[(df4['Bytes'] > 0)]
df3_select = df3[['Download', 'Source', 'Tempo']]
df3_select = MediaFileRede(df3_select)
#********************************************************************
print("Loading Dataframe...")
# BASELINE THRESHOLD **********************************************************
df4 = pd.read_csv("../repositorio/" + EXP + "/REDE_BASELINE_THRESHOLD.csv")
df4['Download'] = df4['Download'].astype(float)
df4['Upload'] = df4['Upload'].astype(float)
# df4['Duracao'] = df4['Duracao'].astype(float)
df4['Tempo'] = df4['Tempo'].astype(float)
# df4['Bytes'] = df4['Bytes'].astype(float)
df4['Source'] = "LIM-5"
# df4_filtro = df4.loc[(df4['Bytes'] > 0)]
df4_select = df4[['Download', 'Source', 'Tempo']]
df4_select = MediaFileRede(df4_select)
#********************************************************************
print("Loading Dataframe...")
# DBSCAN **********************************************************
df5 = pd.read_csv("../repositorio/" + EXP + "/REDE_DBSCAN.csv")
df5['Download'] = df5['Download'].astype(float)
df5['Upload'] = df5['Upload'].astype(float)
df5['Tempo'] = df5['Tempo'].astype(float)
df5['Source'] = "DBSCAN"
# df1_filtro = df1.loc[(df1['Bytes'] > 0)]
df5_select =df5[['Download', 'Source', 'Tempo']]
df5_select = MediaFileRede(df5_select)
#********************************************************************
# # # DBSCAN FILTER **********************************************************
# # df6 = pd.read_csv("../repositorio/" + EXP + "/REDE_DBSCAN_FILTER.csv")
# # df6['Download'] = df6['Download'].astype(float)
# # df6['Upload'] = df6['Upload'].astype(float)
# # df6['Duracao'] = df6['Duracao'].astype(float)
# # df6['STime'] = df6['STime'].astype(float)
# # df6['Bytes'] = df6['Bytes'].astype(float)
# # df6['Source'] = "DBSCAN - FILTER"
# # df6_filtro = df6.loc[(df6['Bytes'] > 0)]
# # df6_select = df6_filtro[['Upload','Bytes','Source', 'STime','Duracao']]
# # df6_select = MediaFileRede(df6_select)
# # #********************************************************************
# XMEANS **********************************************************
df7 = pd.read_csv("../repositorio/" + EXP + "/REDE_XMEANS.csv")
df7['Download'] = df7['Download'].astype(float)
df7['Upload'] = df7['Upload'].astype(float)
# df7['Duracao'] = df7['Duracao'].astype(float)
df7['Tempo'] = df7['Tempo'].astype(float)
# df7['Bytes'] = df7['Bytes'].astype(float)
df7['Source'] = "XMEANS"
# df1_filtro = df1.loc[(df1['Bytes'] > 0)]
df7_select =df7[['Download', 'Source', 'Tempo']]
df7_select = MediaFileRede(df7_select)
#********************************************************************
print("Loading Chart...")
# res = pd.concat([df1_select,df5_select,df7_select], sort=False)
# res = pd.concat([df1_select,df2_select,df3_select,df4_select, df5_select,df7_select], sort=False)
res = pd.concat([df1_select,df2_select,df3_select,df4_select], sort=False)
fig = px.line(res.reset_index(), x="Timer2", y="Media2", color="Source", title='Network Traffic').for_each_trace(lambda t: t.update(name=t.name.replace("Source=","")))
# https://plotly.com/python/axes/
# https://plotly.com/python/line-charts/
# fig.update_layout(
# # title = "AnaliseAlgorithms ",
# yaxis = dict(
# # range=[0,9],
# # tick0=0, dtick=2.5,
# title_text='Upload Rate',
# ),
# xaxis = dict(
# title_text='Normalized Simulation Time (<i>i</i>)',
# ),
# )
fig.update_layout(
# title = "AnaliseAlgorithms ",
yaxis = dict(
# # range=[0,9],
# tick0=0, dtick=5,
title_text='Network Traffic',
),
font=dict(size=16),
xaxis = dict(
title_text='Normalized Simulation Time (<i>t</i>)',
),
# plot_bgcolor='rgba(0,1,0,0)' # 76 64=todos
legend=dict(
x=0.76,
y=1.1,
font=dict(size=16),
orientation='h'
),
# annotations=[dict(
# xref='paper',
# yref='paper',
# )
# ]
)
fig.show()
|
urbancomp/fogarch
|
FogLayer/visualization/chart3_old.py
|
chart3_old.py
|
py
| 7,553 |
python
|
en
|
code
| 1 |
github-code
|
6
|
34213861281
|
import math
n = int(input())
for _ in range(n):
line = input()
k = int(math.sqrt(len(line)))
chunks = [line[i:i+k] for i in range(0, len(line), k)]
s = ""
for j in reversed(range(k)):
for chunk in chunks:
s += chunk[j]
print(s)
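# Added illustration (not part of the original Kattis submission): the same decoding
# logic as a reusable helper, plus a worked trace. For k = 2 and the encoded line
# "abcd", the chunks are ["ab", "cd"]; reading column j = 1 then j = 0 yields "bdac".
def decode(line):
    k = int(math.sqrt(len(line)))
    chunks = [line[i:i+k] for i in range(0, len(line), k)]
    return "".join(chunk[j] for j in reversed(range(k)) for chunk in chunks)
# assert decode("abcd") == "bdac"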
|
david-vinje/kattis-problems
|
Solutions/EncodedMessage.py
|
EncodedMessage.py
|
py
| 250 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3755394850
|
import asyncio
import traceback
from neptune_py.skeleton.skeleton import NeptuneServiceSkeleton
from neptune_py.skeleton.messager import (
NeptuneWriterBaseAbstract, NeptuneMessageType
)
import struct
import collections
class TLV:
_format = '!HI'
meta_size = struct.calcsize(_format)
tlv = collections.namedtuple('tlv_tuple', 'tag length')
MagicTag = 13
@classmethod
def pack(cls, tag, data):
return struct.pack(cls._format, tag, len(data)) + data
@classmethod
def unpack(cls, data):
if len(data) < cls.meta_size:
return None
tag, length = struct.unpack(cls._format, data)
return cls.tlv(tag=tag, length=length)
class TlvWriter(NeptuneWriterBaseAbstract):
def __init__(self, writer):
super().__init__()
self.writer = writer
self.closed = False
def write(self, message):
self.writer.write(TLV.pack(TLV.MagicTag, message))
def close(self):
if self.closed:
return
self.closed = True
if self.writer.can_write_eof():
self.writer.write_eof()
else:
self.writer.close()
class NeptuneTlvBase(NeptuneServiceSkeleton):
def __init__(self, host, port, messager_manager, name=None):
super().__init__(name)
self.host = host
self.port = port
self.messager_manager = messager_manager
self.messager_id = 0
async def connection_handler(self, reader, writer):
peername = writer.get_extra_info("peername")
self.get_logger().debug(f'{peername} connected')
messager_id = self.messager_id
tlv_writer = TlvWriter(writer)
self.messager_manager.on_connected(messager_id, tlv_writer)
self.messager_id += 1
try:
while True:
meta = await reader.readexactly(TLV.meta_size)
tlv = TLV.unpack(meta)
# print(tlv)
data = await reader.readexactly(tlv.length)
self.messager_manager.on_message(messager_id, data)
except asyncio.IncompleteReadError as e:
if e.partial:
# empty data indicates peer closed the connection, otherwise the data
# is illegal.
self.get_logger().debug(f'{peername} illegal data')
except Exception as e:
self.get_logger().error(traceback.format_exc())
finally:
self.get_logger().debug(f'{peername} closed')
self.messager_manager.on_disconnected(messager_id)
writer.close()
await writer.wait_closed()
def init(self):
self.get_logger().debug(f'init {self.__class__.__name__} {self.name}')
async def finish(self):
self.get_logger().debug(f'stopping {self.__class__.__name__} {self.name}...')
class NeptuneTlvService(NeptuneTlvBase):
"""
tlv message server
"""
async def logic(self):
# https://docs.python.org/3.6/library/asyncio-protocol.html
# 'Changed in version 3.6: The socket option TCP_NODELAY is now set by default.'
server = await asyncio.start_server(self.connection_handler, self.host, self.port)
async with server:
self.get_logger().debug(f'NeptuneTlvService {self.name} starts to server')
await server.serve_forever()
class NeptuneTlvClient(NeptuneTlvBase):
"""
tlv message client
"""
async def logic(self):
reader, writer = await asyncio.open_connection(self.host, self.port)
await self.connection_handler(reader, writer)
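# Added usage sketch (not part of the original module): a TLV.pack/TLV.unpack
# round-trip showing the 6-byte '!HI' header followed by the payload; the payload
# bytes are arbitrary example data.
def _tlv_roundtrip_demo():
    payload = b"hello"
    frame = TLV.pack(TLV.MagicTag, payload)
    meta = TLV.unpack(frame[:TLV.meta_size])  # unpack expects exactly the header bytes
    assert meta.tag == TLV.MagicTag
    assert meta.length == len(payload)
    assert frame[TLV.meta_size:TLV.meta_size + meta.length] == payload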
|
kstardust/neptune
|
neptune_py/skeleton/transporter/neptune_tlv.py
|
neptune_tlv.py
|
py
| 3,598 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18805702748
|
# 9 - Create a list containing 5 names and add this list inside the list generated in exercise 4
import random
lista1 = ['Maria', 'João', 'Marcio', 'Marta', 'Ana']
lista2 = []
contador = 0
while contador < 10:
n = random.randint(10, 1580)
lista2.append(n)
contador += 1
lista2.append(lista1)
print(lista2)
|
chrystian-souza/exercicios_em_python
|
exerciciosAula4/exercicio09.py
|
exercicio09.py
|
py
| 326 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
44833012944
|
import socket
from time import sleep
TCP_IP = '192.168.1.103'
TCP_PORT = 5005
BUFFER_SIZE = 40 # Normally 1024, but we want fast response
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((TCP_IP, TCP_PORT))
s.listen(1)
c1 = ""
c2 = ""
print("INICIA SERVER")
conn, addr = s.accept()
print('Connection address:', addr)
while 1:
print("RECIBE")
data = conn.recv(BUFFER_SIZE)
longitud = len(data)
if longitud > 0:
c1 = data[0:5]
if longitud > 5:
c2 = data[5:10]
print("received data:", data)
if c1 == b'B_ACE' or c2 == b'B_ACE':
print("Hay que acelerar!")
if c1 == b'B_RET' or c2 == b'B_RET':
print("Hay que frenar!")
if c1 == b'EXIT':
print("CIERRA CONEXION")
conn.close()
break
else:
c1 = ""
c2 = ""
# data = bytes(str(BUFFER_SIZE))
conn.send(data) # echo
sleep(3)
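# Added client sketch (not part of the original repo): exercises the 5-byte commands
# this server expects ('B_ACE', 'B_RET', 'EXIT'); host/port reuse the values above
# and are only illustrative.
def _demo_client(host=TCP_IP, port=TCP_PORT):
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as c:
        c.connect((host, port))
        c.send(b'B_ACEB_RET')       # two commands packed into one 10-byte message
        print(c.recv(BUFFER_SIZE))  # the server echoes the data back
        c.send(b'EXIT')             # asks the server to close the connection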
|
juanmanuelramallo/Monster-Pi
|
Pruebas/server.py
|
server.py
|
py
| 924 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3286995844
|
from utility import classifier as cls
import numpy as np
import random
# the last action in the action space means "terminate"
# a custom-built environment
class MyEnv:
def __init__(self, state_size, max, data, classifier):
self.state_size = state_size
        self.action_size = state_size + 1  # includes one terminate action
        self.max = max  # select at most `max` features; exceeding that ends the episode
self.data = data
self.classifier = classifier
self.dict = {}
self.reset()
def random_action(self):
while True:
action = random.randint(0, self.action_size - 1)
if action == self.action_size - 1 or self.state[action] == 0:
break
return action
def step(self, action_index):
        if action_index == self.action_size - 1:  # terminate
self.done = True
else:
self.state[action_index] = 1
self.count += 1
            if self.count == self.max_count:  # the selection limit has been reached
self.done = True
        # reward defaults to 0
# if current_count>self.max:
# reward = self.max - current_count
# else:
reward = self.get_reward()
if reward == -1:
# print("no flag")
reward = cls.get_reward(self.state, self.classifier, self.data, self.max_count)
self.add_dict(reward)
# reward = random.random()*100
return np.array(self.state), reward, self.done
def reset(self):
self.state = [0 for _ in range(self.state_size)]
        self.max_count = min(self.max, self.state_size)  # maximum number of features
        self.count = 0  # number of features selected so far
self.done = False
return np.array(self.state)
def render(self):
print("This is me: {}".format(self.state))
def get_reward(self):
temp = [str(x) for x in self.state]
temp = '.'.join(temp)
reward = self.dict.get(temp, -1)
return reward
def add_dict(self, reward):
temp = [str(x) for x in self.state]
temp = '.'.join(temp)
self.dict[temp] = reward
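# Added standalone illustration (not part of the original class): the reward cache
# above keys each binary feature mask by joining it with dots, e.g. [1, 0, 1] -> "1.0.1".
def _cache_key_demo():
    mask = [1, 0, 1]
    key = '.'.join(str(x) for x in mask)
    cache = {key: 0.87}  # 0.87 is a made-up reward value
    assert cache.get('.'.join(str(x) for x in [1, 0, 1]), -1) == 0.87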
|
jsllby/select-features
|
utility/env.py
|
env.py
|
py
| 2,162 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26531296671
|
from pyhpecfm import fabric
from lib.actions import HpecfmBaseAction
class fabricIpLookup(HpecfmBaseAction):
def run(self):
cfm_fabrics = fabric.get_fabric_ip_networks(self.client)
if isinstance(cfm_fabrics, list):
fabric_data = []
            # Loop through cfm_fabrics and process each IP network
for fabip in cfm_fabrics:
desc = fabip['description']
if desc == '':
desc = 'HPE Composable Fabric'
out ={
'u_desc':desc,
'u_fabu_uid':fabip['fabric_uuid'],
'u_name':fabip['name'],
'u_mode':fabip['mode'],
'u_sub_address':fabip['subnet']['address'],
'u_mask_prefix':fabip['subnet']['prefix_length']
}
fabric_data.append(out)
return (True, fabric_data)
        return (False, cfm_fabrics)
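    # Illustrative shape of a successful result from run() (all values are made up):
    #   (True, [{'u_desc': 'HPE Composable Fabric', 'u_fabu_uid': '<uuid>',
    #            'u_name': 'fab1', 'u_mode': 'manual',
    #            'u_sub_address': '10.0.0.0', 'u_mask_prefix': 24}])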
|
HewlettPackard/stackstorm-hpe-cfm
|
actions/get_fabric_ips.py
|
get_fabric_ips.py
|
py
| 979 |
python
|
en
|
code
| 1 |
github-code
|
6
|
35777431960
|
from pydoc import tempfilepager
from PIL import Image
import numpy
import cv2
slot_1_box = (905, 215, 930, 235)
slot_2_box = (933, 215, 958, 235)
slot_3_box = (961, 215, 986, 235)
slots_poss = (slot_1_box, slot_2_box, slot_3_box)
def get_crop(_source, _box):
return Image.open(_source).convert('RGB').crop(_box) # .save(tmp_path)
def calculate(image1, image2):
image1 = cv2.cvtColor(numpy.asarray(image1), cv2.COLOR_RGB2BGR)
image2 = cv2.cvtColor(numpy.asarray(image2), cv2.COLOR_RGB2BGR)
hist1 = cv2.calcHist([image1], [0], None, [256], [0.0, 255.0])
hist2 = cv2.calcHist([image2], [0], None, [256], [0.0, 255.0])
    # compute the degree of overlap between the two histograms
degree = 0
for i in range(len(hist1)):
if hist1[i] != hist2[i]:
degree = degree + \
(1 - abs(hist1[i] - hist2[i]) / max(hist1[i], hist2[i]))
else:
degree = degree + 1
degree = degree / len(hist1)
return degree
def classify_hist_with_split(image1, image2, size=(256, 256)):
# image1 = Image.open(image1)
image2 = Image.open(image2)
    # resize both images, split them into R/G/B channels, then compute the per-channel similarity
image1 = cv2.cvtColor(numpy.asarray(image1), cv2.COLOR_RGB2BGR)
image2 = cv2.cvtColor(numpy.asarray(image2), cv2.COLOR_RGB2BGR)
image1 = cv2.resize(image1, size)
image2 = cv2.resize(image2, size)
sub_image1 = cv2.split(image1)
sub_image2 = cv2.split(image2)
sub_data = 0
for im1, im2 in zip(sub_image1, sub_image2):
sub_data += calculate(im1, im2)
sub_data = sub_data / 3
return sub_data
class analyzer():
def __init__(self,sourcepath,slotpath):
self.sourcepath = sourcepath
self.slotpath = slotpath
pass
def analyze(self, img):
source_path = self.sourcepath + img
res = [0 for _ in range(len(slots_poss))]
for i in range(len(slots_poss)):
img1_path = get_crop(source_path, slots_poss[i])
for level in range(4):
img2_path = self.slotpath + 'slot_lv' + str(level + 1)+'.png'
result = classify_hist_with_split(img1_path, img2_path)
if result[0] > 0.8:
res[i] = (level+1)
                # print(img + str(level) + " similarity: " + "%.2f%%" % (result * 100))
# print(img, res)
# Image.open(source_path).crop((905, 215, 986, 235)
# ).save(tmppath + str(res) + "-" + img)
return res
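# Added self-check (not part of the original file): comparing an image with itself
# should give a histogram overlap of ~1.0 from calculate(); size and colour are arbitrary.
if __name__ == "__main__":
    _img = Image.new('RGB', (16, 16), color=(120, 30, 200))
    print(float(calculate(_img, _img)))  # expected: 1.0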
|
BruceCheng1995/cyber_hunter
|
src/analyze_slot.py
|
analyze_slot.py
|
py
| 2,531 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36403480999
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Technical support: dwz.cn/qkEfX1u0  Hands-on project discussion QQ group: 6089740 144081101
# CreateDate: 2019-12-29
def fib2(n):
if n < 2: # base case
return n
return fib2(n - 2) + fib2(n - 1) # recursive case
if __name__ == "__main__":
print(fib2(5))
print(fib2(10))
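# Added for illustration (not part of the original file): a memoized variant that
# avoids fib2's exponential recomputation of overlapping subproblems.
from functools import lru_cache
@lru_cache(maxsize=None)
def fib2_memo(n):
    if n < 2:
        return n
    return fib2_memo(n - 2) + fib2_memo(n - 1)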
|
china-testing/python-testing-examples
|
interview/fib2.py
|
fib2.py
|
py
| 325 |
python
|
en
|
code
| 35 |
github-code
|
6
|
26041579406
|
from __future__ import annotations
import itertools
import re
from collections import defaultdict
from typing import Iterable, Iterator, Sequence, Tuple, TypeVar
from pkg_resources import Requirement
from typing_extensions import Protocol
from pants.backend.python.subsystems.setup import PythonSetup
from pants.backend.python.target_types import InterpreterConstraintsField
from pants.build_graph.address import Address
from pants.engine.engine_aware import EngineAwareParameter
from pants.engine.target import Target
from pants.util.docutil import bin_name
from pants.util.frozendict import FrozenDict
from pants.util.memo import memoized
from pants.util.ordered_set import FrozenOrderedSet, OrderedSet
from pants.util.strutil import softwrap
# This protocol allows us to work with any arbitrary FieldSet. See
# https://mypy.readthedocs.io/en/stable/protocols.html.
class FieldSetWithInterpreterConstraints(Protocol):
@property
def address(self) -> Address:
...
@property
def interpreter_constraints(self) -> InterpreterConstraintsField:
...
_FS = TypeVar("_FS", bound=FieldSetWithInterpreterConstraints)
RawConstraints = Tuple[str, ...]
# The current maxes are 2.7.18 and 3.6.15. We go much higher, for safety.
_PATCH_VERSION_UPPER_BOUND = 30
@memoized
def interpreter_constraints_contains(
a: RawConstraints, b: RawConstraints, interpreter_universe: tuple[str, ...]
) -> bool:
"""A memoized version of `InterpreterConstraints.contains`.
This is a function in order to keep the memoization cache on the module rather than on an
instance. It can't go on `PythonSetup`, since that would cause a cycle with this module.
"""
return InterpreterConstraints(a).contains(InterpreterConstraints(b), interpreter_universe)
@memoized
def parse_constraint(constraint: str) -> Requirement:
"""Parse an interpreter constraint, e.g., CPython>=2.7,<3.
We allow shorthand such as `>=3.7`, which gets expanded to `CPython>=3.7`. See Pex's
interpreter.py's `parse_requirement()`.
"""
try:
parsed_requirement = Requirement.parse(constraint)
except ValueError:
parsed_requirement = Requirement.parse(f"CPython{constraint}")
return parsed_requirement
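# Illustration (added): shorthand constraints get the implicit CPython prefix, e.g.
#   parse_constraint(">=3.7")     -> Requirement for "CPython>=3.7"
#   parse_constraint("PyPy>=3.7") -> parsed as written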
# Normally we would subclass `DeduplicatedCollection`, but we want a custom constructor.
class InterpreterConstraints(FrozenOrderedSet[Requirement], EngineAwareParameter):
@classmethod
def for_fixed_python_version(
cls, python_version_str: str, interpreter_type: str = "CPython"
) -> InterpreterConstraints:
return cls([f"{interpreter_type}=={python_version_str}"])
def __init__(self, constraints: Iterable[str | Requirement] = ()) -> None:
# #12578 `parse_constraint` will sort the requirement's component constraints into a stable form.
# We need to sort the component constraints for each requirement _before_ sorting the entire list
# for the ordering to be correct.
parsed_constraints = (
i if isinstance(i, Requirement) else parse_constraint(i) for i in constraints
)
super().__init__(sorted(parsed_constraints, key=lambda c: str(c)))
def __str__(self) -> str:
return " OR ".join(str(constraint) for constraint in self)
def debug_hint(self) -> str:
return str(self)
@property
def description(self) -> str:
return str(sorted(str(c) for c in self))
@classmethod
def merge(cls, ics: Iterable[InterpreterConstraints]) -> InterpreterConstraints:
return InterpreterConstraints(
cls.merge_constraint_sets(tuple(str(requirement) for requirement in ic) for ic in ics)
)
@classmethod
def merge_constraint_sets(
cls, constraint_sets: Iterable[Iterable[str]]
) -> frozenset[Requirement]:
"""Given a collection of constraints sets, merge by ORing within each individual constraint
set and ANDing across each distinct constraint set.
For example, given `[["CPython>=2.7", "CPython<=3"], ["CPython==3.6.*"]]`, return
`["CPython>=2.7,==3.6.*", "CPython<=3,==3.6.*"]`.
"""
# A sentinel to indicate a requirement that is impossible to satisfy (i.e., one that
# requires two different interpreter types).
impossible = parse_constraint("IMPOSSIBLE")
# Each element (a Set[ParsedConstraint]) will get ANDed. We use sets to deduplicate
# identical top-level parsed constraint sets.
# First filter out any empty constraint_sets, as those represent "no constraints", i.e.,
# any interpreters are allowed, so omitting them has the logical effect of ANDing them with
# the others, without having to deal with the vacuous case below.
constraint_sets = [cs for cs in constraint_sets if cs]
if not constraint_sets:
return frozenset()
parsed_constraint_sets: set[frozenset[Requirement]] = set()
for constraint_set in constraint_sets:
# Each element (a ParsedConstraint) will get ORed.
parsed_constraint_set = frozenset(
parse_constraint(constraint) for constraint in constraint_set
)
parsed_constraint_sets.add(parsed_constraint_set)
if len(parsed_constraint_sets) == 1:
return next(iter(parsed_constraint_sets))
def and_constraints(parsed_constraints: Sequence[Requirement]) -> Requirement:
merged_specs: set[tuple[str, str]] = set()
expected_interpreter = parsed_constraints[0].project_name
for parsed_constraint in parsed_constraints:
if parsed_constraint.project_name != expected_interpreter:
return impossible
merged_specs.update(parsed_constraint.specs)
formatted_specs = ",".join(f"{op}{version}" for op, version in merged_specs)
return parse_constraint(f"{expected_interpreter}{formatted_specs}")
ored_constraints = (
and_constraints(constraints_product)
for constraints_product in itertools.product(*parsed_constraint_sets)
)
ret = frozenset(cs for cs in ored_constraints if cs != impossible)
if not ret:
# There are no possible combinations.
attempted_str = " AND ".join(f"({' OR '.join(cs)})" for cs in constraint_sets)
raise ValueError(
softwrap(
f"""
These interpreter constraints cannot be merged, as they require
conflicting interpreter types: {attempted_str}
"""
)
)
return ret
@classmethod
def create_from_targets(
cls, targets: Iterable[Target], python_setup: PythonSetup
) -> InterpreterConstraints | None:
"""Returns merged InterpreterConstraints for the given Targets.
If none of the given Targets have InterpreterConstraintsField, returns None.
NB: Because Python targets validate that they have ICs which are a subset of their
dependencies, merging constraints like this is only necessary when you are _mixing_ code
which might not have any inter-dependencies, such as when you're merging un-related roots.
"""
fields = [
tgt[InterpreterConstraintsField]
for tgt in targets
if tgt.has_field(InterpreterConstraintsField)
]
if not fields:
return None
return cls.create_from_compatibility_fields(fields, python_setup)
@classmethod
def create_from_compatibility_fields(
cls, fields: Iterable[InterpreterConstraintsField], python_setup: PythonSetup
) -> InterpreterConstraints:
"""Returns merged InterpreterConstraints for the given `InterpreterConstraintsField`s.
NB: Because Python targets validate that they have ICs which are a subset of their
dependencies, merging constraints like this is only necessary when you are _mixing_ code
which might not have any inter-dependencies, such as when you're merging un-related roots.
"""
constraint_sets = {field.value_or_global_default(python_setup) for field in fields}
# This will OR within each field and AND across fields.
merged_constraints = cls.merge_constraint_sets(constraint_sets)
return InterpreterConstraints(merged_constraints)
@classmethod
def group_field_sets_by_constraints(
cls, field_sets: Iterable[_FS], python_setup: PythonSetup
) -> FrozenDict[InterpreterConstraints, tuple[_FS, ...]]:
results = defaultdict(set)
for fs in field_sets:
constraints = cls.create_from_compatibility_fields(
[fs.interpreter_constraints], python_setup
)
results[constraints].add(fs)
return FrozenDict(
{
constraints: tuple(sorted(field_sets, key=lambda fs: fs.address))
for constraints, field_sets in sorted(results.items())
}
)
def generate_pex_arg_list(self) -> list[str]:
args = []
for constraint in self:
args.extend(["--interpreter-constraint", str(constraint)])
return args
def _valid_patch_versions(self, major: int, minor: int) -> Iterator[int]:
for p in range(0, _PATCH_VERSION_UPPER_BOUND + 1):
for req in self:
if req.specifier.contains(f"{major}.{minor}.{p}"): # type: ignore[attr-defined]
yield p
def _includes_version(self, major: int, minor: int) -> bool:
return any(True for _ in self._valid_patch_versions(major, minor))
def includes_python2(self) -> bool:
"""Checks if any of the constraints include Python 2.
This will return True even if the code works with Python 3 too, so long as at least one of
the constraints works with Python 2.
"""
return self._includes_version(2, 7)
def minimum_python_version(self, interpreter_universe: Iterable[str]) -> str | None:
"""Find the lowest major.minor Python version that will work with these constraints.
The constraints may also be compatible with later versions; this is the lowest version that
still works.
"""
for major, minor in sorted(_major_minor_to_int(s) for s in interpreter_universe):
if self._includes_version(major, minor):
return f"{major}.{minor}"
return None
def snap_to_minimum(self, interpreter_universe: Iterable[str]) -> InterpreterConstraints | None:
"""Snap to the lowest Python major.minor version that works with these constraints.
Will exclude patch versions that are expressly incompatible.
"""
for major, minor in sorted(_major_minor_to_int(s) for s in interpreter_universe):
for p in range(0, _PATCH_VERSION_UPPER_BOUND + 1):
for req in self:
if req.specifier.contains(f"{major}.{minor}.{p}"): # type: ignore[attr-defined]
# We've found the minimum major.minor that is compatible.
req_strs = [f"{req.project_name}=={major}.{minor}.*"]
# Now find any patches within that major.minor that we must exclude.
invalid_patches = sorted(
set(range(0, _PATCH_VERSION_UPPER_BOUND + 1))
- set(self._valid_patch_versions(major, minor))
)
req_strs.extend(f"!={major}.{minor}.{p}" for p in invalid_patches)
req_str = ",".join(req_strs)
snapped = parse_constraint(req_str)
return InterpreterConstraints([snapped])
return None
def _requires_python3_version_or_newer(
self, *, allowed_versions: Iterable[str], prior_version: str
) -> bool:
if not self:
return False
patch_versions = list(reversed(range(0, _PATCH_VERSION_UPPER_BOUND)))
# We only look at the prior Python release. For example, consider Python 3.8+
# looking at 3.7. If using something like `>=3.5`, Py37 will be included.
# `==3.6.*,!=3.7.*,==3.8.*` is unlikely, and even that will work correctly as
# it's an invalid constraint so setuptools returns False always. `['==2.7.*', '==3.8.*']`
# will fail because not every single constraint is exclusively 3.8.
prior_versions = [f"{prior_version}.{p}" for p in patch_versions]
allowed_versions = [
f"{major_minor}.{p}" for major_minor in allowed_versions for p in patch_versions
]
def valid_constraint(constraint: Requirement) -> bool:
if any(
constraint.specifier.contains(prior) for prior in prior_versions # type: ignore[attr-defined]
):
return False
if not any(
constraint.specifier.contains(allowed) for allowed in allowed_versions # type: ignore[attr-defined]
):
return False
return True
return all(valid_constraint(c) for c in self)
def requires_python38_or_newer(self, interpreter_universe: Iterable[str]) -> bool:
"""Checks if the constraints are all for Python 3.8+.
This will return False if Python 3.8 is allowed, but prior versions like 3.7 are also
allowed.
"""
py38_and_later = [
interp for interp in interpreter_universe if _major_minor_to_int(interp) >= (3, 8)
]
return self._requires_python3_version_or_newer(
allowed_versions=py38_and_later, prior_version="3.7"
)
def to_poetry_constraint(self) -> str:
specifiers = []
wildcard_encountered = False
for constraint in self:
specifier = str(constraint.specifier) # type: ignore[attr-defined]
if specifier:
specifiers.append(specifier)
else:
wildcard_encountered = True
if not specifiers or wildcard_encountered:
return "*"
return " || ".join(specifiers)
def enumerate_python_versions(
self, interpreter_universe: Iterable[str]
) -> FrozenOrderedSet[tuple[int, int, int]]:
"""Return a set of all plausible (major, minor, patch) tuples for all Python 2.7/3.x in the
specified interpreter universe that matches this set of interpreter constraints.
This also validates our assumptions around the `interpreter_universe`:
- Python 2.7 is the only Python 2 version in the universe, if at all.
- Python 3 is the last major release of Python, which the core devs have committed to in
public several times.
"""
if not self:
return FrozenOrderedSet()
minors = []
for major_minor in interpreter_universe:
major, minor = _major_minor_to_int(major_minor)
if major == 2:
if minor != 7:
raise AssertionError(
softwrap(
f"""
Unexpected value in `[python].interpreter_versions_universe`:
{major_minor}. Expected the only Python 2 value to be '2.7', given that
all other versions are unmaintained or do not exist.
"""
)
)
minors.append((2, minor))
elif major == 3:
minors.append((3, minor))
else:
raise AssertionError(
softwrap(
f"""
Unexpected value in `[python].interpreter_versions_universe`:
{major_minor}. Expected to only include '2.7' and/or Python 3 versions,
given that Python 3 will be the last major Python version. Please open an
issue at https://github.com/pantsbuild/pants/issues/new if this is no longer
true.
"""
)
)
valid_patches = FrozenOrderedSet(
(major, minor, patch)
for (major, minor) in sorted(minors)
for patch in self._valid_patch_versions(major, minor)
)
if not valid_patches:
raise ValueError(
softwrap(
f"""
The interpreter constraints `{self}` are not compatible with any of the
interpreter versions from `[python].interpreter_versions_universe`.
Please either change these interpreter constraints or update the
`interpreter_versions_universe` to include the interpreters set in these
constraints. Run `{bin_name()} help-advanced python` for more information on the
`interpreter_versions_universe` option.
"""
)
)
return valid_patches
def contains(self, other: InterpreterConstraints, interpreter_universe: Iterable[str]) -> bool:
"""Returns True if the `InterpreterConstraints` specified in `other` is a subset of these
`InterpreterConstraints`.
This is restricted to the set of minor Python versions specified in `universe`.
"""
if self == other:
return True
this = self.enumerate_python_versions(interpreter_universe)
that = other.enumerate_python_versions(interpreter_universe)
return this.issuperset(that)
def partition_into_major_minor_versions(
self, interpreter_universe: Iterable[str]
) -> tuple[str, ...]:
"""Return all the valid major.minor versions, e.g. `('2.7', '3.6')`."""
result: OrderedSet[str] = OrderedSet()
for major, minor, _ in self.enumerate_python_versions(interpreter_universe):
result.add(f"{major}.{minor}")
return tuple(result)
def major_minor_version_when_single_and_entire(self) -> None | tuple[int, int]:
"""Returns the (major, minor) version that these constraints cover, if they cover all of
exactly one major minor version, without rules about patch versions.
This is a best effort function, e.g. for using during inference that can be overridden.
Examples:
All of these return (3, 9): `==3.9.*`, `CPython==3.9.*`, `>=3.9,<3.10`, `<3.10,>=3.9`
All of these return None:
- `==3.9.10`: restricted to a single patch version
- `==3.9`: restricted to a single patch version (0, implicitly)
- `==3.9.*,!=3.9.2`: excludes a patch
- `>=3.9,<3.11`: more than one major version
- `>=3.9,<3.11,!=3.10`: too complicated to understand it only includes 3.9
- more than one requirement in the list: too complicated
"""
try:
return _major_minor_version_when_single_and_entire(self)
except _NonSimpleMajorMinor:
return None
def _major_minor_to_int(major_minor: str) -> tuple[int, int]:
return tuple(int(x) for x in major_minor.split(".", maxsplit=1)) # type: ignore[return-value]
class _NonSimpleMajorMinor(Exception):
pass
_ANY_PATCH_VERSION = re.compile(r"^(?P<major>\d+)\.(?P<minor>\d+)(?P<any_patch>\.\*)?$")
def _parse_simple_version(version: str, require_any_patch: bool) -> tuple[int, int]:
match = _ANY_PATCH_VERSION.fullmatch(version)
if match is None or (require_any_patch and match.group("any_patch") is None):
raise _NonSimpleMajorMinor()
return int(match.group("major")), int(match.group("minor"))
def _major_minor_version_when_single_and_entire(ics: InterpreterConstraints) -> tuple[int, int]:
if len(ics) != 1:
raise _NonSimpleMajorMinor()
req = next(iter(ics))
just_cpython = req.project_name == "CPython" and not req.extras and not req.marker
if not just_cpython:
raise _NonSimpleMajorMinor()
# ==major.minor or ==major.minor.*
if len(req.specs) == 1:
operator, version = next(iter(req.specs))
if operator != "==":
raise _NonSimpleMajorMinor()
return _parse_simple_version(version, require_any_patch=True)
# >=major.minor,<major.(minor+1)
if len(req.specs) == 2:
(operator_lo, version_lo), (operator_hi, version_hi) = iter(req.specs)
if operator_lo != ">=":
# if the lo operator isn't >=, they might be in the wrong order (or, if not, the check
# below will catch them)
operator_lo, operator_hi = operator_hi, operator_lo
version_lo, version_hi = version_hi, version_lo
if operator_lo != ">=" and operator_hi != "<":
raise _NonSimpleMajorMinor()
major_lo, minor_lo = _parse_simple_version(version_lo, require_any_patch=False)
major_hi, minor_hi = _parse_simple_version(version_hi, require_any_patch=False)
if major_lo == major_hi and minor_lo + 1 == minor_hi:
return major_lo, minor_lo
raise _NonSimpleMajorMinor()
# anything else we don't understand
raise _NonSimpleMajorMinor()
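# Added usage sketch mirroring the merge_constraint_sets docstring above:
#   InterpreterConstraints.merge_constraint_sets(
#       [["CPython>=2.7", "CPython<=3"], ["CPython==3.6.*"]]
#   )
#   # -> a frozenset of Requirements equivalent to
#   #    {"CPython>=2.7,==3.6.*", "CPython<=3,==3.6.*"}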
|
pantsbuild/pants
|
src/python/pants/backend/python/util_rules/interpreter_constraints.py
|
interpreter_constraints.py
|
py
| 21,381 |
python
|
en
|
code
| 2,896 |
github-code
|
6
|
16916661051
|
import subprocess
import sys
import json
import platform
import os
from crmetrics import CRBase
class CRLogs(CRBase):
def _get_container_logs(self, pod, namespace, containers, kubeconfig):
for c in containers:
container = c['name']
cmd = 'kubectl logs ' + pod + ' -n ' + namespace + ' -c ' + container + ' ' + kubeconfig
#print(cmd)
print("======== Pod::" + pod + "/container::" + container + " ===========")
try:
out = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True).communicate()[0]
if out:
print(out)
except Exception as e:
print(e)
def get_logs(self, pod, namespace, kubeconfig):
cmd = 'kubectl get pods ' + pod + ' -n ' + namespace + ' -o json ' + kubeconfig
#print(cmd)
try:
out = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True).communicate()[0]
if out:
json_output = json.loads(out)
containers = json_output['spec']['containers']
self._get_container_logs(pod, namespace, containers, kubeconfig)
if 'initContainers' in json_output['spec']:
init_containers = json_output['spec']['initContainers']
self._get_container_logs(pod, namespace, init_containers, kubeconfig)
except Exception as e:
print(e)
def get_resources_composition(self, kind, instance, namespace, kubeconfig):
platf = platform.system()
kubeplus_home = os.getenv('KUBEPLUS_HOME')
cmd = ''
json_output = {}
if platf == "Darwin":
cmd = kubeplus_home + '/plugins/kubediscovery-macos composition '
elif platf == "Linux":
cmd = kubeplus_home + '/plugins/kubediscovery-linux composition '
else:
print("OS not supported:" + platf)
return json_output
cmd = cmd + kind + ' ' + instance + ' ' + namespace + ' ' + kubeconfig
#print(cmd)
out = ''
try:
out = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True).communicate()[0]
out = out.decode('utf-8')
except Exception as e:
print(e)
if out:
print(out)
try:
json_output = json.loads(out)
except Exception as e:
print(e)
return json_output
def get_pods1(self, resources):
pod_list = []
for resource in resources:
#print(resource)
if resource['Kind'] == 'Pod':
present = False
for p in pod_list:
if p['Name'] == resource['Name']:
present = True
break
if not present:
pod_list.append(resource)
#print(pod_list)
return pod_list
if __name__ == '__main__':
crLogs = CRLogs()
#crLogs.get_logs(sys.argv[1], sys.argv[2])
#resources = sys.argv[1]
relation = sys.argv[1]
kind = sys.argv[2]
instance = sys.argv[3]
namespace = sys.argv[4]
kubeconfig = sys.argv[5]
#print(kind + " " + instance + " " + namespace + " " + kubeconfig)
resources = {}
#if relation == 'connections':
# resources = crLogs.get_resources_connections(kind, instance, namespace, kubeconfig)
# #print(resources)
#if relation == 'composition':
# resources = crLogs.get_resources_composition(kind, instance, namespace, kubeconfig)
# #print(resources)
#resource_json = json.loads(resources)
pods = crLogs.get_pods(kind, instance, kubeconfig)
for pod in pods:
pod_name = pod['Name']
pod_namespace = pod['Namespace']
#print(pod_name)
crLogs.get_logs(pod_name, pod_namespace, kubeconfig)
print("---------------------------------------")
|
cloud-ark/kubeplus
|
plugins/crlogs.py
|
crlogs.py
|
py
| 3,366 |
python
|
en
|
code
| 555 |
github-code
|
6
|
31089813709
|
#!/bin/python3
# Prune only reasonable lexical mappings using both lex.e2f & lex.f2e
import pickle
def is_char_in_lang_range(c):
'''hindi unicode range is 0900 - 097F
or 2304 - 2431 in integers'''
lb = 2304
ub = 2431
ic = ord(c)
return ic >= lb and ic <= ub
def is_lang(word):
'''Checks if a word is in Devanagari alphabets.
That means avoid numbers, NULL etc.'''
return all([is_char_in_lang_range(c) for c in word])
lex_dict = {}
prev_token = ''
count = 0
mapped_tokens = ''
max_prob = 0
outfile = open('lex_mappings.txt', 'w', encoding='utf-8')
with open('lex.e2f') as e2f, open('lex.f2e') as f2e:
for line1, line2 in zip(e2f, f2e):
line1 = line1.rstrip().lstrip()
line2 = line2.rstrip().lstrip()
src1, tgt1, prob1 = line1.split(' ')
tgt2, src2, prob2 = line2.split(' ')
try:
assert src1==src2 and tgt1==tgt2, line1
except Exception:
continue
# Ignore numbers, comma, english mixed words etc.
if not is_lang(src1):
print('skipped: ' + src1)
continue
count += 1
if src1 != prev_token:
# Time to update the lex_dict
if len(mapped_tokens) > 0: # and count < 10:
lex_dict[prev_token] = mapped_tokens
tran_line = prev_token + ' ' + mapped_tokens # ' '.join(list_en)
# print(tran_line)
outfile.write(tran_line + '\n')
mapped_tokens = ''
count = 0
max_prob = 0
prev_token = src1
total_prob = float(prob1) * float(prob2)
if total_prob >= 0.01 and total_prob > max_prob:
mapped_tokens = tgt1
max_prob = total_prob
if len(mapped_tokens) > 0:
lex_dict[prev_token] = mapped_tokens
tran_line = prev_token + ' ' + mapped_tokens # ' '.join(list_en)
print(tran_line)
outfile.write(tran_line + '\n')
outfile.close()
with open('lex_dict.pickle', 'wb') as handle:
pickle.dump(lex_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
print(len(lex_dict.keys()))
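# Added sanity check (not part of the original script) for the Devanagari filter above:
#   assert is_lang("नमस्ते") is True       # every character falls in U+0900-U+097F
#   assert is_lang("hello123") is False   # ASCII letters/digits are outside that range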
|
bnjasim/phraseOut
|
get_lex_dict.py
|
get_lex_dict.py
|
py
| 2,175 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3919536002
|
# standard python libs
import os
import re
import html
import json
import random
import hashlib
import lxml.html
import lxml.etree
import unicodedata
import urllib.request
from datetime import datetime
from urllib.parse import urlparse
from urllib.parse import urlsplit
# non-standard libs which must be installed
from textstat.textstat import textstat
import lxml.html
# custom webxray classes
from webxray.ParseURL import ParseURL
from webxray.Utilities import Utilities
class OutputStore:
"""
This class receives data from the browser, processes it, and stores it in the db
"""
def __init__(self, db_name, db_engine):
self.db_name = db_name
self.utilities = Utilities()
self.url_parser = ParseURL()
self.debug = False
if db_engine == 'sqlite':
from webxray.SQLiteDriver import SQLiteDriver
self.sql_driver = SQLiteDriver(self.db_name)
elif db_engine == 'postgres':
from webxray.PostgreSQLDriver import PostgreSQLDriver
self.sql_driver = PostgreSQLDriver(self.db_name)
else:
print('INVALID DB ENGINE FOR %s, QUITTING!' % db_engine)
quit()
self.config = self.sql_driver.get_config()
# __init__
def close(self):
"""
Just to make sure we close the db connection.
"""
self.sql_driver.close()
# close
def store_scan(self, params):
"""
This function pre-processes data from the browser, inserts it into
database, and handles linking various entries across tables.
"""
# unpack params
browser_output = params['browser_output']
client_id = params['client_id']
crawl_id = params['crawl_id']
crawl_timestamp = params['crawl_timestamp']
crawl_sequence = params['crawl_sequence']
# client_ip is optional
if 'client_ip' in params:
client_ip = params['client_ip']
else:
client_ip = None
if self.debug: print('going to store scan %s' % browser_output['start_url'])
# keep track of domains
page_3p_cookie_domains = set()
page_3p_dom_storage_domains = set()
page_3p_request_domains = set()
page_3p_response_domains = set()
page_3p_websocket_domains = set()
# convert from timestamp to datetime object that will go to the db
accessed = datetime.fromtimestamp(browser_output['accessed'])
# first make sure we don't have it already
if self.sql_driver.page_exists(browser_output['start_url'],accessed):
return {'success': False, 'result': 'exists in db already'}
# if we have no responses the page didn't load at all and we skip
# unless we are using basic driver and then it's ok
if len(browser_output['responses']) == 0 and browser_output['browser_type'] != 'basic':
return {'success': False, 'result': 'no responses received'}
# ignore any malformed unicode characters
page_source = browser_output['page_source'].encode('utf-8', 'ignore').decode()
# store source
if self.config['store_source']:
if self.debug: print('going to store source %s' % browser_output['start_url'])
page_source_md5 = self.store_file(page_source, False, 'page_source')
else:
page_source_md5 = None
# store readability_html
if self.config['store_page_text'] and browser_output['page_text']:
if self.debug: print('going to store readability_html')
# ignore any malformed unicode characters
readability_html = browser_output['readability_html'].encode('utf-8', 'ignore').decode().strip()
readability_source_md5 = self.store_file(readability_html, False, 'readability_html')
# store_page_text handles some addition operations
if self.debug: print('going to store page_text')
page_text_id = self.store_page_text(readability_html,readability_source_md5)
else:
page_text_id = None
# process info on the start_url domain
if self.debug: print('going to parse start/final_url %s' % browser_output['start_url'])
start_url = browser_output['start_url']
start_url_domain_info = self.url_parser.get_parsed_domain_info(start_url)
if start_url_domain_info['success'] == False:
err_msg = 'unable to parse start_url_domain_info info for %s with error %s' % (browser_output['start_url'], start_url_domain_info['result'])
if self.debug: print(err_msg)
self.sql_driver.log_error({
'client_id' : client_id,
'target' : start_url,
'task' : 'output_store',
'msg' : err_msg
})
return {'success': False, 'result': 'could not parse start_url'}
else:
# needed for comparisons later on
start_url_domain = start_url_domain_info['result']['domain']
# add start_url domain and get id
start_url_domain_id = self.sql_driver.add_domain(start_url_domain_info['result'])
# process info on the final_url domain
# note: we use the final_url domain as the benchmark for determine 1p/3p
final_url = browser_output['final_url']
final_url_domain_info = self.url_parser.get_parsed_domain_info(final_url)
if final_url_domain_info['success'] == False:
err_msg = 'unable to parse final_url_domain_info info for %s with error %s' % (browser_output['final_url'], final_url_domain_info['result'])
if self.debug: print(err_msg)
self.sql_driver.log_error({
'client_id' : client_id,
'target' : start_url,
'task' : 'output_store',
'msg' : err_msg
})
return {'success': False, 'result': 'could not parse final_url'}
else:
final_url_domain = final_url_domain_info['result']['domain']
# self.sql_driver.add_domain both stores the new domain and returns its db row id
# if it is already in db just return the existing id
final_url_domain_id = self.sql_driver.add_domain(final_url_domain_info['result'])
# check if the page has redirected to a new domain
if start_url_domain != final_url_domain:
page_domain_redirect = True
else:
page_domain_redirect = False
# this is semi-redundant but ensures that any config changes made while
# a result is queued are followed
if self.config['client_reject_redirects'] and page_domain_redirect:
return {'success': False, 'result': 'rejecting redirect'}
# if the final page is https (often after a redirect), mark it appropriately
if browser_output['final_url'][:5] == 'https':
page_is_ssl = True
else:
page_is_ssl = False
# (optionally) process and store links, this allows us to go back later and do deeper scans
# as well as do more with policies
# links starts as empty list
links = []
# keep track of link counts as helpful for filtering pages
link_count_internal = 0
link_count_external = 0
if self.config['store_links']:
if self.debug: print('going to process links %s' % browser_output['start_url'])
# we use the list of policy_link_terms to flag that a link *might*
# be for a policy, we check if it actually is policy in PolicyCollector.py
policy_link_terms = self.utilities.get_policy_link_terms()
# process links, duplicates get ignored by db
for link in browser_output['all_links']:
# skip if href not valid
if not self.utilities.is_url_valid(link['href']): continue
# unpack values and catch any unicode errors
link_text = link['text'].encode('utf-8', 'ignore').decode()
link_url = link['href'].encode('utf-8', 'ignore').decode()
# get rid of trailing # and /
if link_url.strip()[-1:] == '#': link_url = link_url.strip()[:-1]
if link_url.strip()[-1:] == '/': link_url = link_url.strip()[:-1]
# sometimes the text will be a dict (very rarely)
# so we convert to string
link_text = str(link_text).strip()
# clean up white space and remove line breaks
link_text = re.sub('\n|\r|\t|\s+',' ',link_text.strip())
link_url = re.sub('\n|\r|\t|\s+',' ',link_url.strip())
# catch nulls
link_text = link_text.replace('\x00','NULL_REPLACED_FOR_PSQL')
link_url = link_url.replace('\x00','NULL_REPLACED_FOR_PSQL')
# update counts
if link['internal']:
link_count_internal += 1
else:
link_count_external += 1
# flag links that could be policies, default False
link_is_policy = False
# determine if a policy term appears in the link
for policy_term in policy_link_terms:
if policy_term in link_text.lower():
link_is_policy = True
break
link_domain_info = self.url_parser.get_parsed_domain_info(link_url)
if link_domain_info['success'] == False:
# don't bother with storing errors
link_domain_id = None
else:
# self.sql_driver.add_domain both stores the new domain and returns its db row id
# if it is already in db just return the existing id
link_domain_id = self.sql_driver.add_domain(link_domain_info['result'])
links.append({
'url' : link_url,
'text' : link_text,
'is_internal' : link['internal'],
'is_policy' : link_is_policy,
'domain_id' : link_domain_id
})
# if we got the screen shot we get the hash and store it to the file table
screen_shot_md5 = None
if browser_output['screen_shot'] and self.config['store_screen_shot']:
if self.debug: print('going to store screen shot %s' % browser_output['start_url'])
# store file to get md5
screen_shot_md5 = self.store_file(browser_output['screen_shot'],True,'screen_shot')
# if we have timestamp it is also an 'accessed' field from
# a page load so we convert that as well
if crawl_timestamp:
crawl_timestamp = datetime.fromtimestamp(crawl_timestamp)
# ignore any malformed unicode characters
if browser_output['title']:
browser_output['title'] = browser_output['title'].encode('utf-8', 'ignore').decode()
if browser_output['meta_desc']:
browser_output['meta_desc'] = browser_output['meta_desc'].encode('utf-8', 'ignore').decode()
if browser_output['lang']:
browser_output['lang'] = browser_output['lang'].encode('utf-8', 'ignore').decode()
# now we know link counts we can store the page
if self.debug: print('going to store page %s' % browser_output['start_url'])
page_id = self.sql_driver.add_page({
'accessed' : accessed,
'browser_type' : browser_output['browser_type'],
'browser_version' : browser_output['browser_version'],
'browser_prewait' : browser_output['prewait'],
'browser_no_event_wait' : browser_output['no_event_wait'],
'browser_max_wait' : browser_output['max_wait'],
'page_load_strategy' : browser_output['page_load_strategy'],
'title' : browser_output['title'],
'meta_desc' : browser_output['meta_desc'],
'lang' : browser_output['lang'],
'start_url' : browser_output['start_url'],
'final_url' : browser_output['final_url'],
'is_ssl' : page_is_ssl,
'page_domain_redirect' : page_domain_redirect,
'link_count_internal' : link_count_internal,
'link_count_external' : link_count_external,
'load_time' : browser_output['load_time'],
'start_url_domain_id' : start_url_domain_id,
'final_url_domain_id' : final_url_domain_id,
'client_id' : client_id,
'client_timezone' : browser_output['client_timezone'],
'client_ip' : client_ip,
'page_text_id' : page_text_id,
'screen_shot_md5' : screen_shot_md5,
'page_source_md5' : page_source_md5,
'crawl_id' : crawl_id,
'crawl_timestamp' : crawl_timestamp,
'crawl_sequence' : crawl_sequence
})
# STORE LINKS
if self.config['store_links']:
if self.debug: print('going to store links %s' % browser_output['start_url'])
for link in links:
link_id = self.sql_driver.add_link(link)
if link_id: self.sql_driver.join_link_to_page(page_id,link_id)
# PROCESS DOM_STORAGE
if self.config['store_dom_storage']:
if self.debug: print('going to process dom storage %s' % browser_output['start_url'])
for dom_storage in browser_output['dom_storage']:
# parse domain from the security_origin, which is equivalent to a url
domain_info = self.url_parser.get_parsed_domain_info(dom_storage['security_origin'])
if domain_info['success'] == False:
err_msg = 'unable to parse domain info for %s with error %s' % (dom_storage['security_origin'], domain_info['result'])
if self.debug: print(err_msg)
self.sql_driver.log_error({
'client_id' : client_id,
'target' : start_url,
'task' : 'output_store',
'msg' : err_msg
})
continue
else:
# self.sql_driver.add_domain both stores the new domain and returns its db row id
# if it is already in db just return the existing id
dom_storage['domain_id'] = self.sql_driver.add_domain(domain_info['result'])
# mark if third-party storage
if final_url_domain != domain_info['result']['domain']:
dom_storage['is_3p'] = True
else:
dom_storage['is_3p'] = False
# key to page
dom_storage['page_id'] = page_id
# replace null b/c postgres will die otherwise
dom_storage['key'] = dom_storage['key'].replace('\x00','NULL_REPLACED_FOR_PSQL')
dom_storage['value'] = dom_storage['value'].replace('\x00','NULL_REPLACED_FOR_PSQL')
				# there are types of illegal utf-8 characters that psql doesn't like, e.g. trying to store
# '\uded5' gives this error when storing in psql:
# 'UnicodeEncodeError: 'utf-8' codec can't encode character '\uded5' in position 0: surrogates not allowed'
#
# to overcome the above, we use python's backslashreplace to keep the original data in
# a way that won't cause our queries to die
# see https://docs.python.org/3/library/codecs.html#error-handlers
dom_storage['key'] = dom_storage['key'].encode('utf-8','backslashreplace')
dom_storage['value'] = dom_storage['value'].encode('utf-8','backslashreplace')
# now that we've encoded with backslashes we decode to get the semi-original data
dom_storage['key'] = dom_storage['key'].decode('utf-8')
dom_storage['value'] = dom_storage['value'].decode('utf-8')
# all done with this item
self.sql_driver.add_dom_storage(dom_storage)
# update domains
if dom_storage['is_3p']:
page_3p_dom_storage_domains.add((domain_info['result']['domain'],domain_info['result']['domain_owner_id']))
# PROCESS LOAD FINISH
if self.debug: print('going to process load finish data %s' % browser_output['start_url'])
load_finish_data = {}
for load_finish_event in browser_output['load_finish_events']:
load_finish_data[load_finish_event['request_id']] = load_finish_event['encoded_data_length']
# RESPONSE EXTRA HEADERS
if self.debug: print('going to process response extra header data %s' % browser_output['start_url'])
http_cookies = []
internal_id_to_resp_ex_headers = {}
for response_extra_header in browser_output['response_extra_headers']:
response_extra_header['page_id'] = page_id
response_extra_header['cookies_set'] = None
# to check for domain leakage in headers we make a big string keyed to the internal id
if response_extra_header['request_id'] not in internal_id_to_resp_ex_headers:
internal_id_to_resp_ex_headers[response_extra_header['request_id']] = str(response_extra_header['headers'])
else:
internal_id_to_resp_ex_headers[response_extra_header['request_id']] += str(response_extra_header['headers'])
for item in response_extra_header['headers']:
if item.lower() == 'set-cookie':
response_extra_header['cookies_set'] = response_extra_header['headers'][item]
# when we add cookies later on we mark those that came from response headers,
# note we try/pass on this in case we can't parse
for cookie in response_extra_header['cookies_set'].split('\n'):
if 'domain' in cookie.lower():
try:
name = re.match('^(.+?)=',cookie)[0][:-1]
domain = re.match('^.+domain=(.+?)(;|$)',cookie.lower())[1]
if domain[0] == '.': domain = domain[1:]
http_cookies.append((domain,name))
except:
pass
if self.config['store_response_xtra_headers']:
self.sql_driver.add_response_extra_header(response_extra_header)
# PROCESS RESPONSES
response_received_req_ids = []
if self.debug: print('going to process response data %s' % browser_output['start_url'])
for response in browser_output['responses']:
			# default values that may get overwritten
response['file_md5'] = None
response['is_data'] = False
response['is_3p'] = None
response['is_ssl'] = None
response['page_domain_in_headers'] = False
# first handle non-http urls and optionally store content
if re.match('^(data|about|chrome|blob|javascript).+', response['url']):
if 'base64' in response['url'].lower() or 'image' in response['type'].lower():
is_base64 = True
else:
is_base64 = False
# store_file follows the config as far as actually storing the file goes
# and will either return the md5 or None
# make sure we're following our configuration
if self.config['store_files'] and (self.config['store_base64'] or is_base64 == False):
response['file_md5'] = self.store_file(response['url'],is_base64,response['type'])
else:
response['file_md5'] = None
response['url'] = None
response['is_data'] = True
response['domain_id'] = None
else:
# parse, store, and get id of domain; if fails skip
domain_info = self.url_parser.get_parsed_domain_info(response['url'])
if domain_info['success'] == False:
err_msg = 'unable to parse domain info for %s with error %s' % (response['url'], domain_info['result'])
if self.debug: print(err_msg)
self.sql_driver.log_error({
'client_id' : client_id,
'target' : start_url,
'task' : 'output_store',
'msg' : err_msg
})
continue
else:
response_domain = domain_info['result']['domain']
response['domain_id'] = self.sql_driver.add_domain(domain_info['result'])
# now add ip
if response['remote_ip_address']:
self.sql_driver.add_domain_ip_addr(response['domain_id'],response['remote_ip_address'])
# mark third-party responses based on final_url domain
if response_domain != final_url_domain:
response['is_3p'] = True
else:
response['is_3p'] = False
# determine if encrypted
if response['url'][:5] == 'https' or response['url'][:3] == 'wss':
response['is_ssl'] = True
else:
response['is_ssl'] = False
# keep track of the request ids of each reponse to mark as received
response_received_req_ids.append(response['request_id'])
# we do no more processing at this point
if not self.config['store_responses']:
continue
# lower case the type, simplifies db queries
response['type'] = response['type'].lower()
# store the security details if they exist
if response['security_details'] and self.config['store_security_details']:
response['security_details_id'] = self.sql_driver.add_security_details(response['security_details'])
else:
response['security_details_id'] = None
# store the size of the request
if response['request_id'] in load_finish_data:
response['final_data_length'] = load_finish_data[response['request_id']]
else:
response['final_data_length'] = None
# parse off args/etc
# consider anything before the "?" to be the element_url
try:
response['base_url'] = re.search('^(.+?)\?.+$', response['url']).group(1)
except:
response['base_url'] = response['url']
# attempt to parse off the extension
try:
response['extension'] = re.search('\.([0-9A-Za-z]+)$', response['base_url']).group(1).lower()
except:
response['extension'] = None
# First see if this request_id is present in response_bodies, and if
# the entry is not None, then we store it to the db if config says to.
if response['request_id'] in browser_output['response_bodies']:
if browser_output['response_bodies'][response['request_id']]:
# make sure we're following our configuration
is_base64 = browser_output['response_bodies'][response['request_id']]['is_base64']
if self.config['store_files'] and (self.config['store_base64'] or is_base64 == False):
response['file_md5'] = self.store_file(
browser_output['response_bodies'][response['request_id']]['body'],
is_base64,
response['type']
)
else:
response['file_md5'] = None
# link to page
response['page_id'] = page_id
# parse data headers, accounts for upper/lower case variations (eg 'set-cookie', 'Set-Cookie')
response['content_type'] = None
response['cookies_set'] = None
for item in response['response_headers']:
if item.lower() == 'content-type':
response['content_type'] = response['response_headers'][item]
if item.lower() == 'set-cookie':
response['cookies_set'] = response['response_headers'][item]
# if we have request_headers look for cookies sent
response['cookies_sent'] = None
if response['request_headers']:
for item in response['request_headers']:
if item.lower() == 'cookie':
response['cookies_sent'] = response['request_headers'][item]
# parse referer header
response['referer'] = None
for item in response['response_headers']:
if item.lower() == 'referer':
response['referer'] = response['response_headers'][item]
# check if domain leaked in referer
if response['request_id'] in internal_id_to_resp_ex_headers:
if final_url_domain in internal_id_to_resp_ex_headers[response['request_id']]:
response['page_domain_in_headers'] = True
# convert from timestamp to datetime object that will go to the db
response['timestamp'] = datetime.fromtimestamp(response['timestamp'])
# store
self.sql_driver.add_response(response)
# update domains
if response['is_3p']:
page_3p_response_domains.add((domain_info['result']['domain'],domain_info['result']['domain_owner_id']))
# REQUEST EXTRA HEADERS
if self.debug: print('going to process request extra headers data %s' % browser_output['start_url'])
internal_id_to_req_ex_headers = {}
for request_extra_header in browser_output['request_extra_headers']:
request_extra_header['page_id'] = page_id
request_extra_header['cookies_sent'] = None
# to check for domain leakage in headers we make a big string keyed to the internal id
if request_extra_header['request_id'] not in internal_id_to_req_ex_headers:
internal_id_to_req_ex_headers[request_extra_header['request_id']] = str(request_extra_header['headers'])
else:
internal_id_to_req_ex_headers[request_extra_header['request_id']] += str(request_extra_header['headers'])
for item in request_extra_header['headers']:
if item.lower() == 'cookie':
request_extra_header['cookies_sent'] = request_extra_header['headers'][item]
if self.config['store_request_xtra_headers']:
self.sql_driver.add_request_extra_header(request_extra_header)
# PROCESS REQUESTS
if self.config['store_requests']:
if self.debug: print('going to process request data %s' % browser_output['start_url'])
for request in browser_output['requests']:
				# default values that may get overwritten
request['file_md5'] = None
request['is_data'] = False
request['is_3p'] = None
request['is_ssl'] = None
request['page_domain_in_headers'] = False
# first handle non-http urls and optionally store content
if re.match('^(data|about|chrome|blob|javascript).+', request['url']):
if 'base64' in request['url'].lower() or 'image' in request['url'].lower():
is_base64 = True
else:
is_base64 = False
# store_file follows the config as far as actually storing the file goes
# and will either return the md5 or None
# make sure we're following our configuration
if self.config['store_files'] and (self.config['store_base64'] or is_base64 == False):
request['file_md5'] = self.store_file(request['url'],is_base64,request['type'])
else:
request['file_md5'] = None
request['url'] = None
request['is_data'] = True
request['domain_id'] = None
else:
# parse, store, and get id of domain; if fails skip
domain_info = self.url_parser.get_parsed_domain_info(request['url'])
if domain_info['success'] == False:
err_msg = 'unable to parse domain info for %s with error %s' % (request['url'], domain_info['result'])
if self.debug: print(err_msg)
self.sql_driver.log_error({
'client_id' : client_id,
'target' : start_url,
'task' : 'output_store',
'msg' : err_msg
})
continue
else:
request_domain = domain_info['result']['domain']
request['domain_id'] = self.sql_driver.add_domain(domain_info['result'])
# mark third-party requests based on final_url domain
if request_domain != final_url_domain:
request['is_3p'] = True
else:
request['is_3p'] = False
# determine if encrypted
if request['url'][:5] == 'https' or request['url'][:3] == 'wss':
request['is_ssl'] = True
else:
request['is_ssl'] = False
# replace null b/c postgres will die otherwise
if request['post_data']:
request['post_data'] = request['post_data'].replace('\x00','NULL_REPLACED_FOR_PSQL')
# consider anything after the "?" to be the GET data
try:
get_string = re.search('^.+\?(.+)$', request['url']).group(1)
get_string = get_string.replace('\x00','NULL_REPLACED_FOR_PSQL')
get_data = {}
for key_val in get_string.split('&'):
get_data[key_val.split('=')[0]] = key_val.split('=')[1]
request['get_data'] = json.dumps(get_data)
except:
request['get_data'] = None
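# hypothetical illustration: for a request url such as
#   https://tracker-example.com/pixel?uid=123&ref=home
# the loop above produces get_data = '{"uid": "123", "ref": "home"}'; a query string
# without a '=' pair raises in the loop and falls through to the except branch,
# leaving get_data as None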
# mark if response received
if request['request_id'] in response_received_req_ids:
request['response_received'] = True
else:
request['response_received'] = None
# mark if the loading finished
if request['request_id'] in load_finish_data:
request['load_finished'] = True
else:
request['load_finished'] = None
# lower case the type, simplifies db queries
if request['type']: request['type'] = request['type'].lower()
# parse off args/etc
# consider anything before the "?" to be the base_url
try:
request['base_url'] = re.search('^(.+?)\?.+$', request['url']).group(1)
except:
request['base_url'] = request['url']
# attempt to parse off the extension
try:
request['extension'] = re.search('\.([0-9A-Za-z]+)$', request['base_url']).group(1).lower()
except:
request['extension'] = None
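# hypothetical illustration: for https://cdn-example.com/lib/app.min.js?v=2 the regexes
# above give base_url = 'https://cdn-example.com/lib/app.min.js' and extension = 'js'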
# link to page
request['page_id'] = page_id
# parse referer header
request['referer'] = None
for item in request['headers']:
if item.lower() == 'referer':
request['referer'] = request['headers'][item]
# check if domain leaked in headers
if request['request_id'] in internal_id_to_req_ex_headers:
if final_url_domain in internal_id_to_req_ex_headers[request['request_id']]:
request['page_domain_in_headers'] = True
# convert from timestamp to datetime object that will go to the db
request['timestamp'] = datetime.fromtimestamp(request['timestamp'])
# all done
self.sql_driver.add_request(request)
# update domains
if request['is_3p']:
page_3p_request_domains.add((domain_info['result']['domain'],domain_info['result']['domain_owner_id']))
# PROCESS WEBSOCKETS
if self.config['store_websockets']:
if self.debug: print('going to process websocket data %s' % browser_output['start_url'])
ws_id_map = {}
for websocket in browser_output['websockets']:
domain_info = self.url_parser.get_parsed_domain_info(websocket['url'])
if domain_info['success'] == False:
err_msg = 'unable to parse domain info for %s with error %s' % (websocket['url'], domain_info['result'])
if self.debug: print(err_msg)
self.sql_driver.log_error({
'client_id' : client_id,
'target' : start_url,
'task' : 'output_store',
'msg' : err_msg
})
continue
else:
# self.sql_driver.add_domain both stores the new domain and returns its db row id
# if it is already in the db it just returns the existing id
websocket['domain_id'] = self.sql_driver.add_domain(domain_info['result'])
# mark if third-party connection
if final_url_domain != domain_info['result']['domain']:
websocket['is_3p'] = True
else:
websocket['is_3p'] = False
websocket['page_id'] = page_id
this_websocket_id = self.sql_driver.add_websocket(websocket)
# update domains
if websocket['is_3p']:
page_3p_websocket_domains.add((domain_info['result']['domain'],domain_info['result']['domain_owner_id']))
if websocket['request_id'] not in ws_id_map:
ws_id_map[websocket['request_id']] = this_websocket_id
else:
print('ERROR WS_REQ_ID ALREADY IN MAP')
# PROCESS WEBSOCKET EVENTS
if self.config['store_websockets'] and self.config['store_websocket_events']:
for websocket_event in browser_output['websocket_events']:
websocket_event['page_id'] = page_id
if websocket_event['request_id'] in ws_id_map:
websocket_event['websocket_id'] = ws_id_map[websocket_event['request_id']]
else:
websocket_event['websocket_id'] = None
# convert from timestamp to datetime object that will go to the db
websocket_event['timestamp'] = datetime.fromtimestamp(websocket_event['timestamp'])
self.sql_driver.add_websocket_event(websocket_event)
# PROCESS EVENT SOURCE MSGS
if self.config['store_event_source_msgs']:
if self.debug: print('going to process event source data %s' % browser_output['start_url'])
for event_source_msg in browser_output['event_source_msgs']:
event_source_msg['page_id'] = page_id
# convert from timestamp to datetime object that will go to the db
event_source_msg['timestamp'] = datetime.fromtimestamp(event_source_msg['timestamp'])
self.sql_driver.add_event_source_msg(event_source_msg)
# PROCESS COOKIES
if self.config['store_cookies']:
if self.debug: print('going to process cookies %s' % browser_output['start_url'])
for cookie in browser_output['cookies']:
# get the ip, fqdn, domain, pubsuffix, and tld
# we need the domain to figure out if cookies/elements are third-party
# note: url_parser fails on non-http input, so as a workaround we prepend
# 'http://' to the cookie domain before parsing it as a url
domain_info = self.url_parser.get_parsed_domain_info('http://'+cookie['domain'])
if domain_info['success'] == False:
err_msg = 'unable to parse domain info for %s with error %s' % (cookie['domain'], domain_info['result'])
if self.debug: print(err_msg)
self.sql_driver.log_error({
'client_id' : client_id,
'target' : start_url,
'task' : 'output_store',
'msg' : err_msg
})
continue
else:
# self.sql_driver.add_domain both stores the new domain and returns its db row id
# if it is already in the db it just returns the existing id
cookie['domain_id'] = self.sql_driver.add_domain(domain_info['result'])
# mark if third-party cookie
if final_url_domain != domain_info['result']['domain']:
cookie['is_3p'] = True
else:
cookie['is_3p'] = False
# key to page
cookie['page_id'] = page_id
# fix var names
cookie['http_only'] = cookie['httpOnly']
# attempt to convert cookie expiry from timestamp to datetime object, note we
# need try/except as python datetime object cannot have year > 9999 and some
# cookies do that
cookie['expires_timestamp'] = None
if cookie['expires']:
try:
cookie['expires_timestamp'] = datetime.fromtimestamp(cookie['expires'])
except:
pass
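# hypothetical illustration: an expiry a year out converts cleanly, but some trackers
# set expiry values that map past year 9999, which makes datetime.fromtimestamp raise,
# so expires_timestamp simply stays None for those cookies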
# sameSite is optional, so fall back to None when it is absent
if 'sameSite' in cookie:
cookie['same_site'] = cookie['sameSite']
else:
cookie['same_site'] = None
# see if this cookie was set via http response
if cookie['domain'][0] == '.':
cookie_tuple = (cookie['domain'][1:],cookie['name'])
else:
cookie_tuple = (cookie['domain'],cookie['name'])
if cookie_tuple in http_cookies:
cookie['is_set_by_response'] = True
else:
cookie['is_set_by_response'] = False
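# hypothetical illustration: a cookie with domain '.shop-example.com' and name 'sid'
# is normalized to the tuple ('shop-example.com', 'sid'); if that tuple was already
# recorded from a Set-Cookie response header earlier in the scan, the cookie is
# flagged as set by response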
# all done with this cookie
self.sql_driver.add_cookie(cookie)
# update domains
if cookie['is_3p']:
page_3p_cookie_domains.add((domain_info['result']['domain'],domain_info['result']['domain_owner_id']))
if self.debug: print('done storing scan %s' % browser_output['start_url'])
return {
'success' : True,
'page_id' : page_id,
'page_3p_request_domains' : page_3p_request_domains,
'page_3p_response_domains' : page_3p_response_domains,
'page_3p_websocket_domains' : page_3p_websocket_domains,
'page_3p_dom_storage_domains' : page_3p_dom_storage_domains,
'page_3p_cookie_domains' : page_3p_cookie_domains
}
# store_scan
def store_file(self,body,is_base64,type):
"""
Hashes and stores file, returns file_md5.
"""
# in theory we shouldn't get here if it is base64, so this is a fail-safe check
if not self.config['store_base64']:
if is_base64 or type.lower()=='image':
return None
# note hash is on original data, which we modify to remove \x00 before we store
file_md5 = hashlib.md5(body.encode()).hexdigest()
# store to db, note query will be ignored on conflict
# but since we calculate the md5 as above that is fine
self.sql_driver.add_file({
'md5' : file_md5,
'body' : body.replace('\x00','NULL_REPLACED_FOR_PSQL'),
'type' : type.lower(),
'is_base64' : is_base64
})
return file_md5
# store_file
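# a minimal usage sketch (argument values are assumptions, not from the original source):
#   file_md5 = self.store_file('<html>...</html>', False, 'page_source')
# the md5 is computed on the original body, while the stored copy has any '\x00'
# bytes replaced so postgres will accept it; None is returned when the config disables
# base64 storage and the body is base64 or an image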
def store_policy(self, browser_output, client_id, client_ip=None):
"""
We attempt to figure out if the text provided is a policy, if so
we store it to the database.
"""
# keep values in a dict here
policy = {}
# the attempt to get the policy was a success, so extract data from
# the dict; since postgres cannot handle '\x00' we convert several
# fields to string and use .replace('\x00',' ') to clean the input
policy['client_id'] = client_id
policy['client_ip'] = client_ip
policy['browser_type'] = browser_output['browser_type']
policy['browser_version'] = browser_output['browser_version']
policy['browser_prewait'] = browser_output['prewait']
policy['start_url'] = browser_output['start_url']
policy['final_url'] = browser_output['final_url']
policy['title'] = browser_output['title']
policy['meta_desc'] = browser_output['meta_desc']
policy['lang'] = browser_output['lang']
policy['fk_score'] = None
policy['fre_score'] = None
policy['word_count'] = None
policy['type'] = None
policy['match_term'] = None
policy['match_text'] = None
policy['match_text_type'] = None
policy['confidence'] = None
policy['page_text_id'] = None
policy['page_source_md5'] = None
# if readability failed we bail
if not browser_output['readability_html'] or not browser_output['page_text']:
self.sql_driver.close()
return {
'success' : False,
'result' : 'No readability result'
}
# ignore any malformed unicode characters
readability_html = browser_output['readability_html'].encode('utf-8', 'ignore').decode().strip()
page_text = browser_output['page_text'].encode('utf-8', 'ignore').decode().strip()
page_source = browser_output['page_source'].encode('utf-8', 'ignore').decode()
# bail on empty text
if len(page_text) == 0:
self.sql_driver.close()
return {
'success' : False,
'result' : 'Empty page text'
}
# load the source into lxml so we can do additional processing,
# if we fail we bail
try:
lxml_doc = lxml.html.fromstring(readability_html)
except:
return ({
'success': False,
'result': 'Could not parse readability_html with lxml'
})
# if the text is less than 500 words we ignore it
if len(page_text.split(' ')) < 500:
self.sql_driver.close()
return {
'success' : False,
'result' : 'Page text < 500 words'
}
# once we have the text we figure out if it is
# a policy, start false, override on match
is_policy = False
# first look for matches on page title
# we give this confidence of 100 as it is
# definitely a match
if policy['title']:
policy_type_result = self.determine_policy_type_from_text(policy['title'])
if policy_type_result['success'] == True:
is_policy = True
policy['type'] = policy_type_result['result']['policy_type']
policy['match_term'] = policy_type_result['result']['match_term']
policy['match_text'] = policy_type_result['result']['match_text']
policy['match_text_type'] = 'title'
policy['confidence'] = 100
# deep checks may generate false positives so
# they have confidence of 0 until they can
# be verified, note we may do this here
# or later on
deep_checks = True
if deep_checks:
policy['confidence'] = 0
# convert the url path to a sentence by replacing
# common delimiters with spaces and attempt matches
if self.debug: print('going to do checks on url path')
if not is_policy:
url_path_string = re.sub('[-|_|/|\.]',' ',urlsplit(policy['start_url']).path)
if len(url_path_string) > 0:
policy_type_result = self.determine_policy_type_from_text(url_path_string)
if policy_type_result['success'] == True:
is_policy = True
policy['type'] = policy_type_result['result']['policy_type']
policy['match_term'] = policy_type_result['result']['match_term']
policy['match_text'] = policy_type_result['result']['match_text']
policy['match_text_type'] = 'url_path'
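# hypothetical illustration: a start_url of https://shop-example.com/legal/privacy-policy
# yields the path string ' legal privacy policy', which can then match a verification
# term such as 'privacy policy' and mark the page as a policy candidate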
if self.debug: print('going to do checks on meta desc')
if not is_policy and policy['meta_desc']:
policy_type_result = self.determine_policy_type_from_text(policy['meta_desc'])
if policy_type_result['success'] == True:
is_policy = True
policy['type'] = policy_type_result['result']['policy_type']
policy['match_term'] = policy_type_result['result']['match_term']
policy['match_text'] = policy_type_result['result']['match_text']
policy['match_text_type'] = 'meta_desc'
# iterate over all types of heading tags to extract text
# and check for policy matches. note we go in order of
# importance (eg h1->h7->span,etc)
if self.debug: print('going to do checks on heading tags')
if not is_policy:
for tag_type in ['h1','h2','h3','h4','h5','h6','h7','span','strong','em']:
if is_policy: break
tags = lxml_doc.cssselect(tag_type)
if len(tags) > 0:
for tag in tags:
tag_text = tag.text_content()
# if it is > 15 words it is likely not a heading
if len(tag_text.split(' ')) > 15: break
policy_type_result = self.determine_policy_type_from_text(tag_text)
if policy_type_result['success'] == True:
is_policy = True
policy['type'] = policy_type_result['result']['policy_type']
policy['match_term'] = policy_type_result['result']['match_term']
policy['match_text'] = policy_type_result['result']['match_text']
policy['match_text_type'] = tag_type
# if it is a policy we do additional processing
# before storing in db, otherwise we fail
# gracefully
if is_policy:
if self.debug: print('going to store readability_html')
readability_source_md5 = self.store_file(readability_html, False, 'readability_html')
if self.debug: print('going to store page_text')
# store_page_text handles some additional operations
policy['page_text_id'] = self.store_page_text(readability_html, readability_source_md5)
if self.debug: print(f"page_text_id is {policy['page_text_id']}")
if self.debug: print('going to store page_source')
policy['page_source_md5'] = self.store_file(page_source, False, 'page_source')
if self.debug: print('going to do reading ease scores')
# get readability scores; scores at or below zero are
# invalid so we null them
policy['fre_score'] = textstat.flesch_reading_ease(page_text)
if policy['fre_score'] <= 0:
policy['fre_score'] = None
policy['fk_score'] = textstat.flesch_kincaid_grade(page_text)
if policy['fk_score'] <= 0:
policy['fk_score'] = None
if self.debug: print('going to store policy')
# add to db and get id for this policy
policy_id = self.sql_driver.add_policy(policy)
if self.debug: print('going to link policy to pages')
# attach policy to all links with this url; note we can filter
# to only do internal links
for page_id, crawl_id in self.sql_driver.get_page_ids_from_link_url(policy['start_url'],internal_links_only=True):
self.sql_driver.attach_policy_to_page(policy_id,page_id)
self.sql_driver.attach_policy_to_crawl(policy_id,crawl_id)
if self.debug:
print(f'\t👍 Success: {policy["start_url"]}')
self.sql_driver.close()
return {'success': True}
else:
if self.debug:
print(f'\t👎 Fail: {policy["start_url"]}')
self.sql_driver.close()
return {
'success': False,
'result': 'Not policy'
}
# store_policy
def determine_policy_type_from_text(self, text):
"""
Determine if a given text fragment indicates
a given type of policy.
Returns dict.
"""
# clear whitespace
text = re.sub('\s+',' ',text)
# retrieve values from policy_terms.json
policy_verification_terms = self.utilities.get_policy_verification_terms()
policy_type_keys = []
for key in policy_verification_terms:
policy_type_keys.append(key)
# randomize the order we do our checks
random.shuffle(policy_type_keys)
# look for matches against verification terms
for policy_type in policy_type_keys:
for term in policy_verification_terms[policy_type]:
if term in text.lower():
return({
'success': True,
'result' :{
'policy_type': policy_type,
'match_term': term,
'match_text': text
}
})
# no match
return ({'success': False})
# determine_policy_type_from_text
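# hypothetical illustration: assuming policy_terms.json maps a 'privacy_policy' key to
# the term 'privacy policy', a call like
#   determine_policy_type_from_text('Privacy Policy | Shop')
# would return {'success': True, 'result': {'policy_type': 'privacy_policy',
# 'match_term': 'privacy policy', 'match_text': 'Privacy Policy | Shop'}};
# with no match it returns {'success': False}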
def store_page_text(self,readability_html,readability_source_md5):
# the actual 'page_text' output from readability doesn't properly separate words
# that use markup as a space, eg '<h3>this</h3><p>that</p>' becomes 'thisthat'
# whereas 'this that' is what a user would see in the browser.
# to overcome the above issue we manually strip out html and do some
# cleaning of our own.
page_text = re.sub('<!--.+-->',' ', readability_html)
page_text = re.sub('<svg.+</svg>',' ', page_text)
page_text = re.sub('<.+?>', ' ', page_text)
page_text = re.sub('[\n|\r]', ' ', page_text)
page_text = re.sub('\s+', ' ', page_text)
page_text = unicodedata.normalize('NFKD',html.unescape(page_text.strip()))
# postgres can't handle nulls
page_text = page_text.replace('\x00','NULL_REPLACED_FOR_PSQL')
# return the id
return self.sql_driver.add_page_text({
'text' : page_text.replace('\x00',' '),
'word_count' : len(page_text.split()),
'readability_source_md5' : readability_source_md5
})
# store_page_text
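# hypothetical illustration: readability_html of '<h3>Privacy</h3><p>We collect data.</p>'
# is reduced by the regexes above to 'Privacy We collect data.' (word_count 4) before
# being handed to add_page_text along with the readability source md5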
# OutputStore
repo_name: thezedwards/webXray | sub_path: webxray/OutputStore.py | file_name: OutputStore.py | file_ext: py | file_size_in_byte: 43016 | program_lang: python | lang: en | doc_type: code | stars: 1 | dataset: github-code | pt: 6

seq_id: 72715840829
from django.db import models
from django import forms
from django.contrib.auth import get_user_model
# Create your models here.
class Challenge(models.Model):
title = models.CharField(max_length=200)
author = models.ForeignKey(  # author info will be retrieved from the user model
get_user_model(),
on_delete=models.PROTECT # if the author user is deleted, preserve the challenge created
)
pitch = models.CharField(max_length=200)
description = models.TextField(default="")
website = models.URLField()
image_url = models.ImageField()
date_created = models.DateTimeField()
deadline = models.DateTimeField()
class Meta:
ordering = ["date_created"]
# create a Challenge Form model to store its structure
class ChallengeForm(forms.ModelForm):
class Meta:
model = Challenge
fields = (
'title', 'author', 'pitch', 'description', 'website', 'image_url', 'deadline'
)
# how do we store the tasks from the front end?
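# A minimal sketch of how ChallengeForm might be used (hypothetical, not part of this
# app; it would normally live in views.py, and the template path and URL name below
# are assumptions):
#
#     from django.shortcuts import render, redirect
#     from django.utils import timezone
#
#     def create_challenge(request):
#         if request.method == 'POST':
#             form = ChallengeForm(request.POST, request.FILES)
#             if form.is_valid():
#                 challenge = form.save(commit=False)
#                 # date_created is not a form field, so set it explicitly
#                 challenge.date_created = timezone.now()
#                 challenge.save()
#                 return redirect('challenge-list')
#         else:
#             form = ChallengeForm()
#         return render(request, 'challenges/challenge_form.html', {'form': form})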
repo_name: hackathon-team-1/ReadingChallenge | sub_path: readingchallenge/challenges/models.py | file_name: models.py | file_ext: py | file_size_in_byte: 1024 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6