seq_id (string, 7-11 chars) | text (string, 156-1.7M chars) | repo_name (string, 7-125 chars) | sub_path (string, 4-132 chars) | file_name (string, 4-77 chars) | file_ext (string, 6 classes) | file_size_in_byte (int64, 156-1.7M) | program_lang (string, 1 class) | lang (string, 38 classes) | doc_type (string, 1 class) | stars (int64, 0-24.2k, may be null) | dataset (string, 1 class) | pt (string, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|---|---|
36388169565
|
from fastapi import APIRouter
from api.docker.models import DockerStatsModel
from api.docker.retrieval import get_container_stats, ping_docker
router = APIRouter(
prefix="/docker",
tags=["Docker"],
)
@router.get("/", tags=["Ping"])
def get_docker_health():
status = ping_docker()
return {"status": "ok" if status else "error"}
@router.get("/stats/", response_model=DockerStatsModel)
def get_docker_stats():
return get_container_stats()
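# Hypothetical usage sketch (not part of this file): the router above would be
# mounted on a FastAPI application, exposing GET /docker/ and GET /docker/stats/.
#
#   from fastapi import FastAPI
#   from api.docker.router import router
#
#   app = FastAPI()
#   app.include_router(router)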
|
noahtigner/homelab
|
api/docker/router.py
|
router.py
|
py
| 462 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7418507725
|
# Title: IS 6713 Homework #1
# Author: Kalea Sebesta
# Date: Oct. 20, 2017
# Due Date: Oct 31, 2017
'''
Program Description: The purpose is to maintain company info
for a small business (first and last name, position, department,
and salary of each employee). The program continuously reads input
from the user, outputs data, or quits the program depending on the
option the user selects. Menu items: the company name can only be
entered once; the user can input a department, add an employee,
change an employee's name, promote an employee, output a list of all
current employees and their info, output whether an employee exists
(by name) and add the employee if not, or quit the program.
'''
# ---------------------------------------------------------
# FUNCTION DEFINITIONS
# ---------------------------------------------------------
# Function Name: Menu function
# Inputs: None
# Output: Returns the option the user input (str)
# Purpose: Displays menu and prompts for user input
# ---------------------------------------------------------
def Menu():
# Prompts the user for input
option = input('Choose one of the Following Options:\n'
'a. Input Company Name\n'
'b. Input Department Name\n'
'c. Add Employee\n'
'd. Change Employee Name\n'
'e. Promote Employee to New Position\n'
'f. Give Employee Raise\n'
'g. Current Employees and Info\n'
'h. Check Employee Existence\n'
'i. End Program\n')
return option
# ----------------------------------------------------------
# Name: addEmployee function
# Input: Dictionary
# Output: Dictionary
# Purpose: Takes in a global dictionary and prompts the user
#          for an employee's information, then adds the employee
#          to the dictionary if the employee doesn't already exist
# -----------------------------------------------------------
def addEmployee(dictionary):
    lname = str(input('Enter last name:'))
    fname = str(input('Enter first name:'))
    pos = str(input('Enter position: '))
    dept = str(input('Enter department: '))
    # keep prompting until the user enters a numeric salary value
    while True:
        try:
            sal = int(input('Enter annual salary: '))
            break
        except ValueError:
            print('Please try again, the salary must include only numeric values')
# takes values inputted by user and saves them in the employee
# dictionary and returns it. it also creates a new key called
# full name that combines first and last names
dictionary[dept] = {'lname': lname,
'fname': fname,
'fullName': fname + " " + lname,
'Position': pos,
'Department': dept,
'Salary': sal}
return dictionary
# ------------------------------------------------------------
# Name: display_emp_counts
# Input: Two Lists and an index
# Output: Void (print statement
# Purpose: Takes the department list that contains the depart-
# ment names and the list that contains the dictionaries
# of the employees and the index that contains the
# desired department in the department list. It then loops
# through the employee list comparing the department in
# the employee dictionary to the desired department, if
# they are the same then the counter is increased (the
# counter is counting how many employees are in that
# department. Then the department name and number of emp
# is printed out.
# -------------------------------------------------------------
def display_emp_counts(dept_lst, dept_emp_lst, index):
# initializing variables
count = 0
size = len(dept_emp_lst)
# printing the department
print('The Below Employee List for Department:', dept_lst[index])
for i in range(0, size, 1):
# HELP HERE MY LOOP IS REPEATING LAST PERSON AND NOT PUTTING
# IT IN THE CORRECT DEPARTMENT IF THE DEPARTMENT HAS ALREADY
# BEEN ENTERED PREVIOUSLY
# printing each employees info on one line prettily who is in
# desired dept
if dept_emp_lst[i]['Department'] == dept_lst[index]:
print(dept_emp_lst[i]['fullName'],
dept_emp_lst[i]['Department'],
dept_emp_lst[i]['Position'],
"$"+'{:,}'.format(dept_emp_lst[i]['Salary']))
count = count + 1
# printing the department and total employee count
print('The', dept_lst[index], 'department has', count, 'employee(s)\n')
# ----------------------------------------------------------------
# MAIN PROGRAM
# ----------------------------------------------------------------
# initialize variables
counter = 0
dept = tuple()
optionList = ['a','b','c','d','e','f','g','h','i']
count = 0
dept_emp_lst = []
global_emp = dict()
# Display Menu (start while loop)
while True:
choice = Menu()
# ----------------------------------------------------------
# a. Input Company Name
# if option a. is selected more than once an error message is
# displayed and prompts the user to choose another option
    if (choice == 'a' or choice == 'A') and counter == 0:
comName = str(input('Enter the name of the company: '))
counter = counter + 1
    elif (choice == 'a' or choice == 'A') and counter > 0:
# entering same company name results in error message
comName = str(input('Enter the name of the company: '))
print('Please choose another option (b-i) ', comName, 'already exists')
# ----------------------------------------------------------
# b. Input Department Name
    # prompts the user for a department name and checks to see
    # if it exists; if it doesn't, it adds it to the dept tuple,
    # and if it does exist, an error message is displayed
    elif (choice == 'b' or choice == 'B') and counter > 0:
deptName = input('Enter department name: ')
if deptName not in dept:
dept += (deptName,)
else:
print('Department Already Exists')
# ----------------------------------------------------------
# c. Add Employee
# HOW CAN I CHECK AND IF EMPLOYEE ALREADY EXISTS DELETE DUPLICATES
# add employee to the global dictionary and checks to see if
# department is in department list, if it isn't it adds it to the
# department tuple
    elif (choice == 'c' or choice == 'C') and counter > 0:
addEmployee(global_emp)
for key in global_emp:
if global_emp[key]["Department"] not in dept:
print("department not found")
print("adding to dept tuple")
dept += (global_emp[key]["Department"],)
dept_emp_lst.append(global_emp[key])
# ----------------------------------------------------------
# d. Change Employee's Name
    # prompts the user for the employee's name to change; if it is
    # in the system, it prompts the user for the new name, and if
    # it is not in the system, it informs the user that the employee
    # isn't in the system
    elif (choice == 'd' or choice == 'D') and counter > 0:
# prompt user for original name
oName = input('Enter original name:')
if oName in global_emp[key]['fullName']:
# prompt user for new name
nName = input('Enter new name: ')
# loop through employees in dept and find the original name
# replace old name with new name in the empInfo
for oName in global_emp[key]['fullName']:
global_emp[key]['fullName'] = nName
else:
print('That employee does not exist')
# ----------------------------------------------------------
# e. Promote Employee to New Position
# prompts the user for the employee that will be given a new
# position, if the employee is in the system it prompts the
# user for the new position title and replaces the old position
# with the new position, if employee is not found in the system
# the user is informed
    elif (choice == 'e' or choice == 'E') and counter > 0:
# prompt user for employee
name = input('Enter name of employee that is changing positions: ')
if name in global_emp[key]['fullName']:
# prompt user for new position
newPos = input('Enter the title of the new position: ')
# change position in dictionary
for name in global_emp[key]['fullName']:
global_emp[key]['Position'] = newPos
else:
print('That employee does not exist')
# ----------------------------------------------------------
# f. Give Employee Raise
# prompts the user for the employee that will be getting the
# raise, if the employee is in the system it then prompts the
# user for the percentage raise and calculates the new salary
# with the new raise and replaces the old salary with the new
# salary in the employee's dictionary
    elif (choice == 'f' or choice == 'F') and counter > 0:
# prompt user for employee
name = input('Enter name of employee that is getting a raise: ')
# checks to verify name is in the dictionary, if the employee is
# in the dictionary then the user is prompt for the percentage raise
# then calculate the raise and applies it to salary. if employee
# is not in the system the user is notified
if name in global_emp[key]['fullName']:
percR = float(input('Enter the %raise: '))
newSal = round((1 + (percR / 100)) * global_emp[key]['Salary'], 2)
global_emp[key]['Salary'] = newSal
else:
print('That employee does not exist')
# ---------------------------------------------------------
# g. Current Employees and Info
# for each department the employees info are printed out and
# the total count for each department is also displayed
    # ERRORS HERE WHEN DEPARTMENTS ARE ENTERED AS: IT, IT, SALES, MARKETING, IT
    # (THE LAST IT EMPLOYEE IS PRINTED AS A DUPLICATE OF THE MARKETING EMPLOYEE)
    # LOGIC OF MY LOOP IS INCORRECT
    elif (choice == 'g' or choice == 'G') and counter > 0:
size = len(dept_emp_lst)
for index in range(0, size, 1):
display_emp_counts(dept, dept_emp_lst, index)
# ---------------------------------------------------------
# h. Does Employee Exist?
# prompts the user for the name of the employee and checks it
# against the names in the global employee dictionary, if emp-
# loyee is already in the system the user is notified, if it
# is not, it prompts the user to add the new employee
    elif (choice == 'h' or choice == 'H') and counter > 0:
# prompt user for original name
name = input('Enter name of employee: ')
# if yes display info
if name in global_emp[key]['fullName']:
print(name, 'is already an employee in the system')
# if not then prompt to add to employee
else:
print('Employee not found, please add to system')
addEmployee(global_emp)
for key in global_emp:
if global_emp[key]["Department"] not in dept:
print("department not found")
print("adding to dept tuple")
dept += (global_emp[key]["Department"],)
dept_emp_lst.append(global_emp[key])
# ---------------------------------------------------------
# if the user inputs invalid option
# if the user tries to input any information before entering
# company name the user is notified and loops back to the menu
    elif (choice != 'a' and choice != 'A') and counter == 0:
print('Company Name Must be Entered First')
# if user enters a string instead of a value from the option
# list the user is notified and loops back to the menu
elif choice not in optionList:
print('Not a valid option, please choose again')
# --------------------------------------------------------
# i. End Program
# by ending the program an output file is written and the
# title of the file is displayed to the user
elif choice == 'i' or choice == 'I':
# create output file
# open and write
fout = open('sebesta_HW1_output.txt', 'w')
# prints company name and total number of employees
fout.write('Company Name: %s\nNumber of Employees: %d\n\n' %
(comName, len(dept_emp_lst)))
# prints out each employee and info in department
# prints out alphabetically by department and last name
        # packages and imports
        from operator import itemgetter
        newlst = sorted(dept_emp_lst,
                        key=itemgetter('Department', 'lname'),
                        reverse=False)
# calculates the number of employees for each department and
# writes it to the file
for i in dept:
fout.write('The %s department has %d employees\n' %
(i, sum(x.get('Department') == i for x in newlst)))
fout.write('\n')
        # LOGIC ERROR OCCURRING HERE WHEN DEPARTMENTS 'IT, IT, Sales, Marketing, IT' ARE ENTERED
fout.write('\n'.join(d['fullName'] + ", " +
d['Department'] + ", " +
d['Position'] + ", $" +
'{:,}'.format(d['Salary'])for d in newlst))
# print output file name
print("The output file is: 'sebesta_HW1_output.txt' ")
# close file
fout.close()
break
# ---------------------------------------------------------
|
ksebesta/PythonCourseProjects
|
sebesta_HW1_script.py
|
sebesta_HW1_script.py
|
py
| 13,882 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72779432828
|
import os
import requests
import optparse
from progressbar import ProgressBar, Percentage, Bar, ETA, FileTransferSpeed
CHUNK_SIZE = 1024
widgets = ['Downloading : ', Percentage(),
' ', Bar(marker='#',left='[',right=']'),
' ', ETA(), ' ', FileTransferSpeed()]
def download_file(url, file_name=None):
response = requests.head(url)
total_size = int(response.headers.get('content-length'))
pbar = ProgressBar(widgets=widgets, maxval=total_size)
pbar.start()
going_size = 0
if not file_name:
file_name = url.split('/')[-1]
    elif os.path.isfile(file_name):
        file_name = 'new_' + file_name
r = requests.get(url, stream=True)
with open(file_name, 'wb') as f:
for chunk in r.iter_content(chunk_size=CHUNK_SIZE):
going_size += CHUNK_SIZE
pbar.update(going_size)
if chunk:
f.write(chunk)
f.flush()
pbar.finish()
    return file_name
parser = optparse.OptionParser()
parser.add_option('-u', default=False, dest='url')
parser.add_option('-n', default=False, dest='name')
options, remainder = parser.parse_args()
file_ = download_file(options.url, options.name)
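# Hypothetical command-line usage (the -n file name is optional and defaults to
# the last path segment of the URL):
#   python vid_single.py -u https://example.com/video.mp4 -n video.mp4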
|
bitst0rm/video-stream-downloader
|
vid_single.py
|
vid_single.py
|
py
| 1,158 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2542885352
|
from os import path
import warnings
import copy
import cv2
from PIL import Image
from infy_field_extractor.internal.constants import Constants
class ExtractorHelper():
"""Helper class for data extraction"""
@staticmethod
def extract_with_text_coordinates(
image, bboxes_text, get_text_provider, file_data_list, additional_info,
fieldboxes, logger, temp_folderpath, field="checkbox"):
"""
        Method to help extract fields using the image, image path, bounding box coordinates of the text, and
        bounding box coordinates of the field
"""
_, width = image.shape
# filter fieldboxes from bboxes_text
bboxes_text = ExtractorHelper.filter_fieldboxes_from_ocr_words(
fieldboxes, bboxes_text)
# getting phrases
# bboxes_text = ocr_parser_object.get_tokens_from_ocr(
# token_type_value=3, ocr_word_list=bboxes_text)
additional_info['word_bbox_list'] = bboxes_text
bboxes_text = get_text_provider.get_tokens(
3, image, [], file_data_list, additional_info, temp_folderpath)
        # divide a horizontal series of fieldboxes into one bounding box per line
Y_SCALE = 6
H_SCALE = 3
c = fieldboxes[0]
# appends the first line in bboxes_line_list
bboxes_line_list = [[0, c[Constants.BB_Y]-c[Constants.BB_H]//Y_SCALE,
width, c[Constants.BB_H]+c[Constants.BB_H]//H_SCALE]]
# list to add a new line
temp_list = []
# to track the count of the number of lines in bboxes_line_list
count = 0
# to track if any new word found which is not present in any bboxes_line_list
flag = False
for f in fieldboxes:
for i in bboxes_line_list:
count += 1
# if words already there in the bboxes_list_line then set flag
# as True and moves to the next line
if(i[Constants.BB_Y] <= f[Constants.BB_Y] <= i[Constants.BB_Y]+i[Constants.BB_H]):
flag = True
elif(flag is False and count == len(bboxes_line_list)):
temp_list.append(
[0, f[Constants.BB_Y]-f[Constants.BB_H]//Y_SCALE, width,
f[Constants.BB_H]+f[Constants.BB_H]//H_SCALE])
bboxes_line_list = bboxes_line_list + temp_list
temp_list = []
flag = False
count = 0
# getting the final result
# for each line divided calls the __get_status_for_each_line method
result = {}
done_fields_dList = []
count = 0
for bbox_line in bboxes_line_list:
count += 1
logger.info(
"Extracting checkboxes from line "+str(count)+":")
r, done_fieldsList = ExtractorHelper.get_status_for_each_line(
bbox_line, bboxes_text, fieldboxes, image, logger, field)
done_fields_dList = done_fields_dList+done_fieldsList
result.update(r)
return result, done_fields_dList
@staticmethod
def get_status_for_each_line(bbox_line, bboxes_text, fieldboxes, image, logger, field):
"""
It returns a dictionary with text as key and the field's status or bbox as value
"""
# stores the x,y,width and height of the bbox of the line
_ = bbox_line[Constants.BB_X]
y_l = bbox_line[Constants.BB_Y]
_ = bbox_line[Constants.BB_W]
h_l = bbox_line[Constants.BB_H]
# filter fieldboxes present in bbox_line
fieldboxes_line = []
for c in fieldboxes:
if(y_l <= c[Constants.BB_Y] <= y_l+h_l):
fieldboxes_line.append(c)
# filter texts present in bbox_line
texts_line = []
for t in bboxes_text:
# gets all the text even if a small region of the text is in the line, therefore
# matches both the y-coordinate and y-coordinate+height of the text
# lying inside the line's bbox
if((y_l <= t.get("bbox")[Constants.BB_Y] <= (y_l+h_l)) or
(y_l <= (t.get("bbox")[Constants.BB_Y]+t.get("bbox")[Constants.BB_H]) <= (y_l+h_l))):
texts_line.append(t)
# check if the fieldboxes are at the right or left side of the texts
# initializing isfieldRight as True, assuming that the last bbox in the line is of checkbox
isfieldRight = True
last_field = fieldboxes_line[len(fieldboxes_line)-1]
y_c = last_field[Constants.BB_Y]
h_c = last_field[Constants.BB_H]
for t in texts_line:
x_t = t.get("bbox")[Constants.BB_X]
y_t = t.get("bbox")[Constants.BB_Y]
# if the last bbox in the line is a phrase then fieldboxes are on the left side
if((y_c-(h_c//2) <= y_t <= y_c + h_c) and x_t > last_field[Constants.BB_X]):
isfieldRight = False
logger.info(
"Fieldboxes on the right side of value:"+str(isfieldRight))
result = {}
# get the final result
# the variable adds dictionary with key as the text used for radiobutton and value as its bbox
done_fields_dList = []
for f in fieldboxes_line:
            # declare closest variable to consider the key for the fieldbox which is closest to it
closest = texts_line[0]
# if key are to the right of fields, the closest text to the right
# of the field is key for that field
if(isfieldRight is False):
for t in texts_line:
x_t = t.get("bbox")[Constants.BB_X]
t_dist = x_t - (f[Constants.BB_X]+f[Constants.BB_W])
close_dist = closest.get(
"bbox")[Constants.BB_X] - (f[Constants.BB_X]+f[Constants.BB_W])
if(close_dist < 0):
closest = t
if(close_dist > 0 and t_dist > 0 and t_dist < close_dist):
closest = t
# if key are to the left of fields, the closest text to the left of the field
# is key for that field
else:
for t in texts_line:
x_t = t.get("bbox")[Constants.BB_X]
w_t = t.get("bbox")[Constants.BB_W]
t_dist = f[Constants.BB_X] - x_t - w_t
close_dist = f[Constants.BB_X] - (
closest.get("bbox")[Constants.BB_X]+closest.get("bbox")[Constants.BB_W])
if(close_dist < 0):
closest = t
if(close_dist > 0 and t_dist > 0 and t_dist < close_dist):
closest = t
text = closest.get("text")
done_fields_dList.append(closest)
            # if two phrases arranged vertically are meant for that field, it looks for texts
            # which have almost the same x-coordinate but a different y-coordinate
X_SCALE = 2
Y_SCALE = 2
for t in texts_line:
x_t = t.get("bbox")[Constants.BB_X]
y_t = t.get("bbox")[Constants.BB_Y]
w_t = t.get("bbox")[Constants.BB_W]
h_t = t.get("bbox")[Constants.BB_H]
x_ct = closest.get("bbox")[Constants.BB_X]
y_ct = closest.get("bbox")[Constants.BB_Y]
                # compares the closest text's y-coordinate with the current text's,
                # which should differ by more than the height of the phrase,
                # while the x-coordinates should be almost equal
if((x_t-w_t//X_SCALE) <= x_ct <= (x_t+w_t//X_SCALE) and (Y_SCALE*abs(y_t - y_ct) > h_t)):
done_fields_dList.append(t)
if(y_ct < y_t):
text = closest.get("text") + " " + t.get("text")
else:
text = t.get("text") + " " + closest.get("text")
break
# if the field is a checkbox then calls the method to see if checkbox checked or not
if(field == "checkbox"):
isCheck = ExtractorHelper.check_if_true(
image, f, field)
result[text] = isCheck
            # if the field is radio then returns the text as key and radiobutton bbox as value
elif(field == "radio"):
result[text] = f
return result, done_fields_dList
@staticmethod
def check_if_true(
image, field_bbox, field, field_coordinate=[], debug_mode_check=False,
temp_folderpath=None, img_name=None):
"""
checks the status of the checkbox/radio using contour detection method
"""
# to get the image of only field
x, y, w, h = field_bbox[Constants.BB_X], field_bbox[Constants.BB_Y], \
field_bbox[Constants.BB_W], field_bbox[Constants.BB_H]
img = image[y:y+h, x:x+w]
_, threshold = cv2.threshold(
img, 170, 255, cv2.THRESH_BINARY_INV)
contours, _ = cv2.findContours(
threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
if(debug_mode_check is True):
img = cv2.drawContours(img, contours, -1, (180, 105, 255), 3)
cv2.imwrite(f'{temp_folderpath}\\{img_name}_contours.png', img)
if(field == "radio"):
isCheck = False
x, y, r = field_coordinate[0] - \
x, field_coordinate[1] - y, field_coordinate[2]
for i in range(0, len(contours)):
cnt = contours[i]
for c in cnt:
if(int(x-r/3) < c[0][0] < int(x+r/3) and int(y - r/3) < c[0][1] < int(y+r/3)):
isCheck = True
return isCheck
elif(field == "checkbox"):
MY_CONS_1 = 6
cv2.drawContours(img, contours, -1, (100, 255, 150), 5)
# x,y,w,h of the outer most boundary of the checkbox
x, y = 0, 0
# to count the number of squares
count = 0
# to count junk contours
count_false_cnt = 0
# checked_area = 0
for i in range(0, len(contours)):
cnt = contours[i]
epsilon = 0.04*cv2.arcLength(cnt, True)
approx = cv2.approxPolyDP(cnt, epsilon, True)
x1, _, w1, h1 = cv2.boundingRect(cnt)
# counts the if the contours has four edges and if the x-coordinate lies in the range of
# x-coordinate of the outermost boundary of the checkbox
if (len(approx) == Constants.RECT_EDGES and x-(w//MY_CONS_1) <= x1 <= x+(w//MY_CONS_1)):
count += 1
elif w1*h1 < 0.05*w*h:
count_false_cnt += 1
# else:
# checked_area += w1*h1
# if there is another contour other than the margins of the checkboxes, then true
if(len(contours)-count - count_false_cnt > 0):
return True
else:
return False
@staticmethod
def filter_fieldboxes_from_ocr_words(fieldboxes, bboxes_text):
filter_list = []
# filter fieldboxes from bboxes_text
for t in bboxes_text:
for c in fieldboxes:
x_t = t.get("bbox")[Constants.BB_X]
y_t = t.get("bbox")[Constants.BB_Y]
w_t = t.get("bbox")[Constants.BB_W]
h_t = t.get("bbox")[Constants.BB_H]
                # checks if the fieldbox top-left corner lies within the bboxes_text bbox,
                # or the bboxes_text top-left corner lies within the fieldbox
if((x_t <= c[Constants.BB_X] <= (x_t+w_t) and
y_t <= c[Constants.BB_Y] <= (y_t+h_t)) or
(c[Constants.BB_X] <= x_t <= (c[Constants.BB_X]+c[Constants.BB_W]) and
c[Constants.BB_Y] <= y_t <= (c[Constants.BB_Y]+c[Constants.BB_H]))):
filter_list.append(t)
continue
                # mixed case: the fieldbox x-coordinate lies within the bboxes_text x-range
                # while the text y-coordinate lies within the fieldbox y-range, or vice-versa
if((x_t <= c[Constants.BB_X] <= (x_t+w_t) and
c[Constants.BB_Y] <= y_t <= (c[Constants.BB_Y]+c[Constants.BB_H])) or
(c[Constants.BB_X] <= x_t <= (c[Constants.BB_X]+c[Constants.BB_W]) and
y_t <= c[Constants.BB_Y] <= (y_t+h_t))):
filter_list.append(t)
continue
bboxes_text = [x for x in bboxes_text if x not in filter_list]
return bboxes_text
@staticmethod
def check_image_dpi(imagepath, logger):
im = Image.open(imagepath)
try:
dpi = im.info['dpi']
if(dpi[0] < Constants.TESSERACT_MIN_DPI and dpi[1] < Constants.TESSERACT_MIN_DPI):
warning = "The result might be not accurate due to low dpi"
warnings.warn(warning)
logger.warning(warning)
except Exception:
warning = ("Dpi of the image cannot be extracted: "
"The result might be not accurate if the dpi is less than 300")
warnings.warn(warning)
logger.warning(warning)
@staticmethod
def read_image(image_path, logger, temp_folderpath, coordinates=[]):
if(path.exists(image_path) is False):
logger.error("property imagepath not found")
raise Exception("property imagepath not found")
img_name = path.splitext(path.split(image_path)[1])[0]
image = cv2.imread(image_path)
try:
img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
except Exception:
img = image
if(coordinates != []):
if type(coordinates[0]) == dict:
coordinates = coordinates[0]["bbox"]
x_img, y_img, width, height = coordinates[Constants.BB_X], coordinates[
Constants.BB_Y], coordinates[Constants.BB_W], coordinates[Constants.BB_H]
image = img[y_img:y_img+height, x_img:x_img+width]
imagepath = temp_folderpath + "//" + img_name + '_crop.jpg'
PILimage = Image.fromarray(image)
PILimage.save(imagepath, dpi=(300, 300))
# cv2.imwrite(path.join(imagepath), image)
else:
image = img
imagepath = image_path
return (image, imagepath, img_name)
@staticmethod
def get_closest_fieldbox(fieldboxes, field_pos, phrase_bbox):
closest = fieldboxes[0]
if(field_pos == "right"):
for c in fieldboxes:
                c_dist = c[Constants.BB_X] - \
                    (phrase_bbox[Constants.BB_X] + phrase_bbox[Constants.BB_W])
                close_dist = closest[Constants.BB_X] - \
                    (phrase_bbox[Constants.BB_X] + phrase_bbox[Constants.BB_W])
closest = ExtractorHelper.closest_fieldbox_if_left_right(
phrase_bbox, c, close_dist, c_dist, closest)
elif(field_pos == "left"):
for c in fieldboxes:
c_dist = phrase_bbox[Constants.BB_X] - \
(c[Constants.BB_X] + c[Constants.BB_W])
close_dist = phrase_bbox[Constants.BB_X] - \
(closest[Constants.BB_X]+closest[Constants.BB_W])
closest = ExtractorHelper.closest_fieldbox_if_left_right(
phrase_bbox, c, close_dist, c_dist, closest)
elif(field_pos == "bottom"):
for c in fieldboxes:
c_dist = c[Constants.BB_Y] - \
(phrase_bbox[Constants.BB_Y] + phrase_bbox[Constants.BB_H])
close_dist = closest[Constants.BB_Y] - \
(phrase_bbox[Constants.BB_Y] + phrase_bbox[Constants.BB_H])
closest = ExtractorHelper.closest_fieldbox_if_top_bottom(
phrase_bbox, c, close_dist, c_dist, closest)
elif(field_pos == "top"):
for c in fieldboxes:
c_dist = phrase_bbox[Constants.BB_Y] - \
(c[Constants.BB_Y] + c[Constants.BB_H])
close_dist = phrase_bbox[Constants.BB_Y] - \
(closest[Constants.BB_Y]+closest[Constants.BB_H])
closest = ExtractorHelper.closest_fieldbox_if_top_bottom(
phrase_bbox, c, close_dist, c_dist, closest)
else:
dist_list = []
for f in fieldboxes:
dist_dict = {}
dist_dict["fieldbox"] = f
dist_dict["x_dist"] = abs(
f[Constants.BB_X] - phrase_bbox[Constants.BB_X])
dist_dict["y_dist"] = abs(
f[Constants.BB_Y] - phrase_bbox[Constants.BB_Y])
dist_list.append(dist_dict)
dist_list.sort(key=lambda x: (x["x_dist"], x["y_dist"]))
return dist_list[0]["fieldbox"]
return closest
@staticmethod
def closest_fieldbox_if_top_bottom(
phrase_bbox, fieldbox, closest_fieldbox_dist, fieldbox_dist, closest_fieldbox):
close_dist = closest_fieldbox_dist
c = fieldbox
c_dist = fieldbox_dist
closest = closest_fieldbox
if(close_dist < 0):
if(phrase_bbox[Constants.BB_X] >= c[Constants.BB_X] and
phrase_bbox[Constants.BB_X]+phrase_bbox[Constants.BB_W] <= c[Constants.BB_X]+c[Constants.BB_W]):
closest = c
elif(phrase_bbox[Constants.BB_X] <= c[Constants.BB_X] <= phrase_bbox[Constants.BB_X]+phrase_bbox[Constants.BB_W]):
closest = c
elif(phrase_bbox[Constants.BB_X] <= c[Constants.BB_X]+c[Constants.BB_W] <= phrase_bbox[Constants.BB_X]+phrase_bbox[Constants.BB_W]):
closest = c
elif(close_dist > 0 and c_dist > 0 and c_dist <= close_dist):
if(phrase_bbox[Constants.BB_X] >= c[Constants.BB_X] and
phrase_bbox[Constants.BB_X]+phrase_bbox[Constants.BB_W] <= c[Constants.BB_X]+c[Constants.BB_W]):
closest = c
elif(phrase_bbox[Constants.BB_X] <= c[Constants.BB_X] <= phrase_bbox[Constants.BB_X]+phrase_bbox[Constants.BB_W]):
closest = c
elif(phrase_bbox[Constants.BB_X] <= c[Constants.BB_X]+c[Constants.BB_W] <= phrase_bbox[Constants.BB_X]+phrase_bbox[Constants.BB_W]):
closest = c
return closest
@staticmethod
def closest_fieldbox_if_left_right(phrase_bbox, fieldbox, closest_fieldbox_dist, fieldbox_dist, closest_fieldbox):
close_dist = closest_fieldbox_dist
c = fieldbox
c_dist = fieldbox_dist
closest = closest_fieldbox
if(close_dist < 0):
if(phrase_bbox[Constants.BB_Y] >= c[Constants.BB_Y] and
phrase_bbox[Constants.BB_Y]+phrase_bbox[Constants.BB_H] <= c[Constants.BB_Y]+c[Constants.BB_H]):
closest = c
elif(phrase_bbox[Constants.BB_Y] <= c[Constants.BB_Y] <= phrase_bbox[Constants.BB_Y]+phrase_bbox[Constants.BB_H]):
closest = c
            elif(phrase_bbox[Constants.BB_Y] <= c[Constants.BB_Y]+c[Constants.BB_H] <= phrase_bbox[Constants.BB_Y]+phrase_bbox[Constants.BB_H]):
closest = c
elif(close_dist > 0 and c_dist > 0 and c_dist <= close_dist):
if(phrase_bbox[Constants.BB_Y] >= c[Constants.BB_Y] and
phrase_bbox[Constants.BB_Y]+phrase_bbox[Constants.BB_H] <= c[Constants.BB_Y]+c[Constants.BB_H]):
closest = c
elif(phrase_bbox[Constants.BB_Y] <= c[Constants.BB_Y] <= phrase_bbox[Constants.BB_Y]+phrase_bbox[Constants.BB_H]):
closest = c
            elif(phrase_bbox[Constants.BB_Y] <= c[Constants.BB_Y]+c[Constants.BB_H] <= phrase_bbox[Constants.BB_Y]+phrase_bbox[Constants.BB_H]):
closest = c
return closest
@staticmethod
def get_box_region(
image, img_name, debug_mode_check, temp_folderpath,
MIN_BOX_HEIGHT, MIN_BOX_WIDTH, MAX_BOX_HEIGHT=None, MAX_BOX_WIDTH=None):
# get regions
img = image.copy()
(_, img_bin) = cv2.threshold(img, 128, 255,
cv2.THRESH_BINARY | cv2.THRESH_OTSU)
img_bin = 255-img_bin
# Defining a kernel length
kernel_length = img.shape[1]//200
# A verticle kernel of (1 X kernel_length), which will detect
# all the verticle lines from the image.
verticle_kernel = cv2.getStructuringElement(
cv2.MORPH_RECT, (1, kernel_length))
# A horizontal kernel of (kernel_length X 1), which will help
# to detect all the horizontal line from the image.
hori_kernel = cv2.getStructuringElement(
cv2.MORPH_RECT, (kernel_length, 1))
# A kernel of (3 X 3) ones.
# Morphological operation to detect verticle lines from an image
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
img_temp1 = cv2.erode(img_bin, verticle_kernel, iterations=3)
verticle_lines_img = cv2.dilate(
img_temp1, verticle_kernel, iterations=3)
# Morphological operation to detect horizontal lines from an image
img_temp2 = cv2.erode(img_bin, hori_kernel, iterations=3)
horizontal_lines_img = cv2.dilate(img_temp2, hori_kernel, iterations=3)
# Weighting parameters, this will decide the quantity of an image
# to be added to make a new image.
alpha = 0.5
beta = 1.0 - alpha
# This function helps to add two image with specific weight
# parameter to get a third image as summation of two image.
img_final_bin = cv2.addWeighted(
verticle_lines_img, alpha, horizontal_lines_img, beta, 0.0)
img_final_bin = cv2.erode(~img_final_bin, kernel, iterations=2)
(_, img_final_bin) = cv2.threshold(img_final_bin, 128,
255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
if(debug_mode_check is True):
cv2.imwrite(temp_folderpath+"//"+img_name +
"verticle_line.png", verticle_lines_img)
cv2.imwrite(temp_folderpath+"//"+img_name +
"horizontal_line.png", horizontal_lines_img)
cv2.imwrite(temp_folderpath+"//"+img_name +
"img_final_bin.png", img_final_bin)
# Find contours for image, which will detect all the boxes
contours, _ = cv2.findContours(
img_final_bin, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
idx = 0
bboxes_region = []
for c in contours:
# Returns the location and width,height for every contour
x, y, w, h = cv2.boundingRect(c)
if(MAX_BOX_HEIGHT is None and MAX_BOX_WIDTH is None):
if (w >= MIN_BOX_WIDTH and h >= MIN_BOX_HEIGHT):
idx += 1
bboxes_region.append([x, y, w, h])
if(debug_mode_check is True):
new_img = img[y:y+h, x:x+w]
cv2.imwrite(temp_folderpath+"//"+img_name+str(x) +
'_'+str(y) + '.png', new_img)
else:
if (MAX_BOX_WIDTH >= w >= MIN_BOX_WIDTH and MAX_BOX_HEIGHT >= h >= MIN_BOX_HEIGHT):
idx += 1
bboxes_region.append([x, y, w, h])
if(debug_mode_check is True):
new_img = img[y:y+h, x:x+w]
cv2.imwrite(temp_folderpath+"//"+img_name+str(x) +
'_'+str(y) + '.png', new_img)
return bboxes_region
@staticmethod
def get_updated_within_box(within_bbox, scaling_factor):
if(len(within_bbox) > 0):
for i in [0, 2]:
within_bbox[i] = round(
within_bbox[i] * scaling_factor.get('hor', 1))
for i in [1, 3]:
within_bbox[i] = round(
within_bbox[i] * scaling_factor.get('ver', 1))
return within_bbox
@staticmethod
def get_updated_text_bbox(text_bboxes, scaling_factor):
if(len(text_bboxes) > 0):
for bbox in text_bboxes:
for i in [0, 2]:
bbox['bbox'][i] = round(
bbox['bbox'][i] * scaling_factor.get('hor', 1))
for i in [1, 3]:
bbox['bbox'][i] = round(
bbox['bbox'][i] * scaling_factor.get('ver', 1))
return text_bboxes
@staticmethod
def get_invalid_keys(truth_dict, test_dict) -> list:
"""Compare two dictionary objects and return invalid keys by using one of them as reference
Args:
truth_dict (dict): The object containing all valid keys
test_dict (dict): The object to evaluate for presence of invalid keys
Returns:
list: The list of invalid keys
"""
def __get_all_keys_recursively(parent_key, dict_obj):
all_keys = []
for k, val in dict_obj.items():
key = k if parent_key is None or len(
parent_key) == 0 else f"{parent_key}->{k}"
if not key in all_keys:
all_keys.append(key)
if isinstance(val, dict):
all_keys += __get_all_keys_recursively(key, val)
return all_keys
truth_keys = __get_all_keys_recursively(None, truth_dict)
test_keys = __get_all_keys_recursively(None, test_dict)
return list(set(test_keys)-set(truth_keys))
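    # A hypothetical example (not from the original source): validating a config
    # against a reference dictionary,
    #   ExtractorHelper.get_invalid_keys({'a': {'b': 1}}, {'a': {'c': 2}, 'd': 3})
    # returns ['a->c', 'd'] (order not guaranteed, since sets are used internally).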
@staticmethod
def get_updated_config_dict(from_dict, default_dict):
config_dict_temp = copy.deepcopy(default_dict)
for key in from_dict:
if isinstance(from_dict[key], dict):
if config_dict_temp.get(key) is None:
config_dict_temp[key] = from_dict[key]
else:
config_dict_temp[key] = ExtractorHelper.get_updated_config_dict(
from_dict[key], config_dict_temp[key])
else:
if config_dict_temp.get(key) is None:
config_dict_temp[key] = from_dict[key]
return config_dict_temp
|
Infosys/Document-Extraction-Libraries
|
infy_field_extractor/src/infy_field_extractor/internal/extractor_helper.py
|
extractor_helper.py
|
py
| 26,643 |
python
|
en
|
code
| 6 |
github-code
|
6
|
23370877467
|
IDUN = True
RANDOM_SEED = 2021
TEST_SIZE = 0.15
VAL_SIZE = 0.10
TUNING_SIZE = 0.3
# Dataset sizes
LABELED_VALIDATION_ABSOLUTE_SIZE = 345
LABELED_TRAIN_SIZE = 1380
WEAK_NUMBER_OF_ARTICLES_PER_CLASS = 2760
# TF-IDF params
TITLE_MAX_FEATURES = 1000
CONTENT_MAX_FEATURES = 5000
# LR params
MAX_ITER = 4000
SOLVER = 'liblinear'
PENALTY = 'l1'
CV = 5
# LR Tuned params
WEAK_BEST_C = 5
SUPERVISED_BEST_C = 1
# BERT config
BERT_N_WORDS = 512
# Numerical cols
NUMERICAL_COLS = ['id',
'content_exclamation_count',
'title_exclamation_count',
'content_word_count',
'title_word_count',
'content_word_count_with_punctuation',
'title_word_count_with_punctuation',
'content_sentence_count',
'title_sentence_count',
'content_capital_word_count',
'title_capital_word_count',
'content_stop_word_count',
'title_stop_word_count',
'content_stop_word_ratio',
'title_stop_word_ratio',
'content_words_per_sentence',
'content_quote_marks_count',
'content_ttr_score',
'title_ttr_score',
'title_nouns_count',
'title_proper_nouns_count',
'content_avg_word_length',
'title_avg_word_length',
'content_avg_word_length_no_stop_words',
'title_avg_word_length_no_stop_words',
'content_url_count',
'content_verb_ratio',
'content_past_tense_verb_ratio',
'content_past_tense_verb_ratio_of_all_verbs',
'content_adjective_ratio',
'content_adverb_ratio',
'title_verb_ratio',
'title_past_tense_verb_ratio',
'title_past_tense_verb_ratio_of_all_verbs',
'title_adjective_ratio',
'title_adverb_ratio',
'content_capital_word_ratio',
'title_capital_word_ratio',
'content_personal_pronouns_count',
'content_personal_pronouns_ratio',
'content_quote_marks_ratio',
'title_nouns_ratio',
'title_proper_nouns_ratio',
'content_exclamation_ratio',
'title_exclamation_ratio',
'content_sentiment_word_sub',
'content_sentiment_word_pos',
'content_sentiment_word_neg',
'title_sentiment_word_sub',
'title_sentiment_word_pos',
'title_sentiment_word_neg',
'content_sentiment_sentence_sub',
'content_sentiment_sentence_pos',
'content_sentiment_sentence_neg',
'title_sentiment_sentence_sub',
'title_sentiment_sentence_pos',
'title_sentiment_sentence_neg',
'content_sentiment_text_sub',
'content_sentiment_text_pos',
'content_sentiment_text_neg',
'title_sentiment_text_sub',
'title_sentiment_text_pos',
'title_sentiment_text_neg',
'title_swn_pos_score',
'title_swn_neg_score',
'title_swn_obj_score',
'content_swn_pos_score',
'content_swn_neg_score',
'content_swn_obj_score'
]
LEMMATIZED_COLS = ['id', 'content_lemmatized_lowercase_no_stopwords', 'title_lemmatized_lowercase_no_stopwords']
RAW_TEXT_COLS = ['id', 'content', 'title']
LABELED_LEMMATIZED_COLS = ['id', 'content_lemmatized_lowercase_no_stopwords', 'title_lemmatized_lowercase_no_stopwords', 'label']
LABELED_RAW_TEXT_COLS = ['id', 'content', 'title', 'label']
BERT_SAVE_COLS = ['text', 'label']
|
piiingz/fake-news-detection-classifiers
|
config.py
|
config.py
|
py
| 4,044 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19649617548
|
from sys import stdin
from heapq import heappop, heappush
readline = stdin.readline
heap = []
while True:
line = readline()
if line[1] == 'x':
print(-heappop(heap))
elif line[0] == 'i':
heappush(heap, -int(line.split()[1]))
else:
break
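# A hypothetical session for the max-priority queue above (values are negated so
# Python's min-heap behaves as a max-heap): with the input lines
#   insert 5
#   insert 9
#   extract
#   extract
#   end
# the two extract commands print 9 and then 5.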
'''
def insert(heap, key):
heap.append(-1000000)
heapIncreaseKey(heap, key)
def parent(i):
return (i - 1) // 2
def heapIncreaseKey(heap, key):
heap[len(heap) - 1] = key
i = len(heap) - 1
while i > 0 and heap[parent(i)] < heap[i]:
heap[i], heap[parent(i)] = heap[parent(i)], heap[i]
i = parent(i)
def heapExtractMax(heap):
if len(heap) < 1:
return None
else:
MAX = heap[0]
heap[0] = heap.pop()
maxHeapify(heap, 0)
return MAX
def maxHeapify(heap, i):
left = (i + 1) * 2 - 1
right = (i + 1) * 2
largest = left if left < len(heap) and heap[left] > heap[i] else i
largest = right if right < len(heap) and heap[right] > heap[largest] else largest
if largest != i:
heap[i], heap[largest] = heap[largest], heap[i]
maxHeapify(heap, largest)
heap = []
while True:
line = sys.stdin.readline()
if line[0] == 'i':
insert(heap, int(line.split()[1]))
elif line[0:2] == 'ex':
print(str(heapExtractMax(heap)))
else:
break
'''
|
okuchap/cppAlgo
|
19C.py
|
19C.py
|
py
| 1,358 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40026671586
|
import requests
from lxml import etree
BASE_DOMIN = 'https://ygdy8.net'
URL = []
HEADERS = {
'Referer': 'https://c.02kdid.com/b/1/1754/22432/960X90/960X90.html',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.116 Safari/537.36'
}
def get_detail_url(url):
response = requests.get(url, headers=HEADERS)
text = response.content
html = etree.HTML(text)
detail_url = html.xpath('//table[@class="tbspan"]//a/@href')
detail_url = map(lambda url: BASE_DOMIN + url, detail_url)
return detail_url
def parse_detail_page(url):
response = requests.get(url, headers=HEADERS)
text = response.content
html = etree.HTML(text)
movie = {}
movie['title'] = html.xpath('//div[@class="title_all"]//font[@color="#07519a"]/text()')
zoom = html.xpath('//div[@id="Zoom"]')[0]
imgs = html.xpath('//img/@src')
movie['cover'] = imgs[0]
infos = zoom.xpath('.//text()')
def info_parse(text, rule):
return text.replace(rule, '').strip()
for index, info in enumerate(infos):
if info.startswith('◎年 代'):
info = info_parse(info, '◎年 代')
movie['year'] = info
elif info.startswith('◎产 地'):
info = info_parse(info, '◎产 地')
            movie['place'] = info
elif info.startswith('◎类 别'):
info = info_parse(info, '◎类 别')
            movie['category'] = info
elif info.startswith('◎主 演'):
info = info_parse(info, '◎主 演')
actors = [info]
for x in range(index+1, len(infos)):
actor = infos[x].strip()
if actor.startswith('◎'):
break
actors.append(actor)
movie['actors'] = actors
elif info.startswith('◎简 介'):
info = info_parse(info, '◎简 介')
            profile = ''
            for x in range(index+1, len(infos)):
                profile += infos[x].strip()
                if infos[x+1].startswith('【下载地址】'):
                    break
            movie['profile'] = profile
downloadurl = html.xpath('//td[@bgcolor="#fdfddf"]/a/@href')[0]
movie['downloadurl'] = downloadurl
return movie
def spider():
base_url = "https://ygdy8.net/html/gndy/dyzz/list_23_{}.html"
for x in range(1, 2):
url = base_url.format(x)
detail_urls = get_detail_url(url)
for detail_url in detail_urls:
movie = parse_detail_page(detail_url)
if __name__ == '__main__':
spider()
|
mirrorthink/python
|
douban/douban.py
|
douban.py
|
py
| 2,668 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36649336174
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 11 10:56:48 2020
@author: alexanderfalk
"""
import itertools
import time
import sys
class ThreeOPT:
def __init__(self, computed_solution, time_limit=60):
self.solution = computed_solution
self.time_limit = time_limit
def construct(self, time_left):
return self.algorithm(time_left)
def algorithm(self, start_time):
for index, route in enumerate(self.solution.routes):
segments = self.tour_segments(route)
for i, j, k in segments:
self.solution.routes[index] = self.improvement(route, i, j, k)
t1 = time.time() # End time
if t1 - start_time > self.time_limit:
sys.stdout.write("Time Expired\n")
return self.solution
return self.solution
def distance(self, i, j):
return self.solution.instance.pre_distance(i, j)
def tour_segments(self, route):
indices = [index for index in range(len(route))]
return list(itertools.combinations(indices, r = 3))
def improvement(self, route, i, j, k):
A, B, C, D, E, F = route[i-1], route[i], route[j-1], route[j], route[k-1], route[k % len(route)]
dist0 = self.distance(A, B) + self.distance(C, D) + self.distance(E, F)
dist1 = self.distance(A, C) + self.distance(B, D) + self.distance(E, F)
dist2 = self.distance(A, B) + self.distance(C, E) + self.distance(D, F)
dist3 = self.distance(A, D) + self.distance(E, B) + self.distance(C, F)
dist4 = self.distance(F, B) + self.distance(C, D) + self.distance(E, A)
if dist0 > dist1:
route[i:j] = reversed(route[i:j])
elif dist0 > dist2:
route[j:k] = reversed(route[j:k])
elif dist0 > dist4:
route[i:k] = reversed(route[i:k])
elif dist0 > dist3:
route[i:k] = route[j:k] + route[i:j]
return route
|
AlexanderFalk/2020_Project01_CS_HA
|
src/threeopt.py
|
threeopt.py
|
py
| 2,076 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3439580711
|
class Solution(object):
def splitArray(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
n = len(nums)
target = 0
total = sum(nums)
for i in range(1, n-5):
if i != 1 and nums[i-1]==0 and nums[i] == 0:
continue
target += nums[i - 1]
j_start = i + 1
left = total - nums[i] - target
if self.j_helper(nums, target, j_start, left):
return True
return False
def j_helper(self, nums, target, start, left):
j_target = 0
n = len(nums)
for j in range(start+1, n-3):
j_target += nums[j-1]
k_left = left - nums[j] - j_target
k_start = j + 1
if j_target == target and self.k_helper(nums, target, k_start, k_left):
return True
return False
def k_helper(self, nums, target, start, left):
k_target = 0
n = len(nums)
for k in range(start + 1, n-1):
k_target += nums[k-1]
last_left = left - nums[k] - k_target
if k_target == target and last_left == target:
return True
return False
|
cuiy0006/Algorithms
|
leetcode/548. Split Array with Equal Sum.py
|
548. Split Array with Equal Sum.py
|
py
| 1,236 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18769137511
|
from sys import stdin
while(True):
try:
xa,ya,xb,yb,xc,yc,xd,yd = map(float, stdin.readline().split(","))
ab = [xb-xa,yb-ya]
bc = [xc-xb,yc-yb]
cd = [xd-xc,yd-yc]
da = [xa-xd,ya-yd]
cr1 = ab[0]*bc[1] - ab[1]*bc[0]
cr2 = bc[0]*cd[1] - bc[1]*cd[0]
cr3 = cd[0]*da[1] - cd[1]*da[0]
cr4 = da[0]*ab[1] - da[1]*ab[0]
if (cr1<0 and cr2<0 and cr3<0 and cr4<0) or (cr1>0 and cr2>0 and cr3>0 and cr4>0):
print("YES")
else:
print("NO")
except:
break
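# Worked example (not from the original source): for the input line
# "0.0,0.0,1.0,0.0,1.0,1.0,0.0,1.0" (a unit square given counter-clockwise),
# all four cross products are positive, so the program prints "YES".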
|
ehki/AOJ_challenge
|
python/0035.py
|
0035.py
|
py
| 567 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31095201820
|
import struct
import math
import mathutils
UINT32 = {"format": "<L", "size": 4}
SINT32 = {"format": "<l", "size": 4}
UINT16 = {"format": "<H", "size": 2}
SINT16 = {"format": "<h", "size": 2}
FLOAT = {"format": "<f", "size": 4}
BYTE = {"format": "b", "size": 1}
class Vertex_Data:
def __init__(self):
self.references = []
self.uv_pos = []
self.pos = None
self.normals = []
self.bones = []
self.bone_offsets = {}
self.bone_weights = {}
class Bone_Data:
def __init__(self):
self.parent = None
self.name = ""
self.transform = None
self.pos = None
self.local_pos = None
self.local_transform = None
self.vertices = []
self.vertex_offsets = []
self.vertex_weights = []
self.children = []
class Model_Data: # used for both SMS and MDL files since they have similar formats
def __init__(self):
self.texture_names = {}
self.material_names = {}
self.object_names = []
self.user_data = {}
self.uvs = []
self.vertices = []
self.triangles = []
self.materials = []
self.textures = []
self.normals = []
self.vertex_colors = []
self.uv_references = []
self.normal_references = []
self.vertex_references = []
self.active_object = None
self.active_mesh = None
self.model_scale = 1.0
self.object_count = 0
self.lights_count = 0
self.points_count = 0
self.paths_count = 0
self.material_count = 0
self.texture_count = 0
self.total_vertex_count = 0
self.total_face_count = 0
#SMS stuff
self.tag_count = 0
self.version = ""
self.tag_names = []
self.tag_user_data = []
self.bones = []
self.vertex_dict = {}
return
class Reader:
def __init__(self, p_input):
self.file_position = 0
self.txt_data = p_input
self.mdl_data = Model_Data()
self.show_logs = True
def read_num(self, p_type = UINT32):
output = []
for i in range(p_type["size"]):
output.append(self.txt_data[self.file_position + i])
self.file_position += p_type["size"]
output = struct.unpack(p_type["format"], bytes(output))[0]
if self.show_logs:
print("Read " + str(p_type["size"]) + " bytes: " + str(output))
return output
def read_str(self, p_delim = '\0'):
output = ""
current_char = None
while current_char != p_delim:
current_char = bytes([self.txt_data[self.file_position]]).decode("utf-8")
output += current_char
self.file_position += 1
if self.show_logs:
print("Read string:", output)
return output
def read_block(self, p_len):
output = ""
current_char = None
for i in range(p_len):
current_char = bytes([self.txt_data[self.file_position]]).decode("utf-8")
output += current_char
self.file_position += 1
print("Read block: " + output + "(len", len(output), ")")
return output
class Writer:
def __init__(self, p_context):
self.txt_data = [] #bytes([])
self.pos = 0 #position in txt_data in bytes
self.context = p_context
self.mdl_data = Model_Data()
def FILE_END(self):
return len(self.txt_data)
def extend(self, p_len):
self.txt_data += [0 for i in range(p_len)]
def seek(self, p_pos):
if p_pos > self.FILE_END():
self.extend(p_pos - self.FILE_END())
self.pos = p_pos
def write(self, p_content):
self.txt_data[self.pos:self.pos+(len(p_content))] = p_content
self.pos += len(p_content)
def write_num(self, p_input, p_type = UINT32):
print("Writing ", p_input)
output = struct.pack(p_type["format"], p_input)
#if self.pos == self.FILE_END():
# self.extend(len(output))
self.write(output)
print("Wrote " + str(p_type["size"]) + " bytes: " + str(list(output)))
def write_str(self, p_input, term = True):
if term:
p_input += '\0'
output = p_input.encode("utf-8")
self.write(list(output))
print("Wrote string: " + str(output) + " of length", len(output))
def clamp(p_num, p_min=-1.0, p_max=1.0):
if p_num <= p_min:
return p_min
elif p_num >= p_max:
return p_max
return p_num
def compress_normal(p_x, p_y, p_z):
multiplier = (3.14159 / 255.0) * 2.0
alpha = math.acos(p_y)
AU = math.floor(alpha / multiplier)
beta = math.acos(clamp(p_x / math.sin(alpha)))
AV = math.floor(beta / multiplier)
return AU, AV
def decompress_normal(AU, AV):
multiplier = (3.14159 / 255.0) * 2.0 # convert from 0-255 to radians
alpha = AU * multiplier
beta = AV * multiplier
x = math.cos(beta) * math.sin(alpha)
y = math.cos(alpha)
z = math.sin(beta) * math.sin(alpha)
return x, y, z
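# A rough round-trip sketch (not from the original source): normals are packed into
# two byte-sized angles, so decompression is lossy and the sign of z is not
# preserved (beta is recovered in [0, pi]).
#   AU, AV = compress_normal(0.0, 0.0, 1.0)   # gives AU = AV = 63
#   x, y, z = decompress_normal(AU, AV)       # roughly (0.02, 0.02, 1.0)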
def approx(p_input, p_precision = 0.02):
rounded_input = round(p_input)
if abs(rounded_input - p_input) < p_precision:
return rounded_input
return p_input
#def sort_vector(p_bone_vec):
# vec = p_bone_vec.to_tuple()
# absvec = (abs(vec[0]), abs(vec[1]), abs(vec[2]))
# x, y, z = 0,0,0
# y = vec[abs_vec.index(max(absvec))]
def get_axis(p_vector):
vec = p_vector.to_tuple()
axis = vec.index(max(vec))
return axis
bone_export_order = [
"malePlayer01_Root Spine2",
"malePlayer01 R UpperArm",
"malePlayer01 Spine1",
"malePlayer01 R ForeArm",
"malePlayer01 R Hand",
"malePlayer01 Pelvis",
"malePlayer01 Spine",
"malePlayer01 R Thigh",
"malePlayer01 R Clavicle",
"malePlayer01 L Thigh",
"malePlayer01 L UpperArm",
"malePlayer01 Neck",
"malePlayer01 L Clavicle",
"malePlayer01 Head",
"malePlayer01 L ForeArm",
"malePlayer01 L Hand",
"malePlayer01_Root"
]
#def convert_matrix(p_matrix):
# mat_copy = p_matrix.copy()
# mat_copy[2] *= -1
# #for i in range(len(mat_copy)):
# # mat_copy[i][i] *= -1
# return mat_copy
##this class is meant to allow for easy conversions between two
##-sets of values, e.g names to indices, and vice versa.
##-this way they can be accessed by either
#class Double_Dict:
# def __init__(self, p_id1=1, p_id2=2):
# self.id1 = p_id1
# self.id2 = p_id2
# self.dict1 = {}
# self.dict2 = {}
# def get_item(self, p_dict_id, p_item_key):
# if p_dict_id == self.id1:
# return self.dict2[p_item_key]
# elif p_dict_id == self.id2:
# return self.dict1[p_item_key]
# else:
# return None
# def set_item(self, p_value1, p_value2):
# self.dict1[p_value2] = p_value1
# self.dict2[p_value1] = p_value2
#
|
ElectricVersion/Blender-FATE-plugin
|
util.py
|
util.py
|
py
| 7,045 |
python
|
en
|
code
| 4 |
github-code
|
6
|
15518608756
|
from gnuradio_core import *
from exceptions import *
#from hier_block2 import *
#from top_block import *
from gateway import basic_block, sync_block, decim_block, interp_block
from tag_utils import tag_to_python, tag_to_pmt
import gras
RT_OK = 0
RT_NOT_IMPLEMENTED = 1
RT_NO_PRIVS = 2
RT_OTHER_ERROR = 3
def enable_realtime_scheduling():
"""
This call is for backward compat purposes.
See gras/thread_pool.hpp for greater options.
"""
#any prio greater than 0 means realtime scheduling
prio_value = 0.5
#test that prio
if not gras.ThreadPool.test_thread_priority(prio_value):
return RT_NO_PRIVS
#create a new thread pool with thread priority set
config = gras.ThreadPoolConfig()
config.thread_priority = prio_value
tp = gras.ThreadPool(config)
tp.set_active()
return RT_OK
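# Hypothetical usage sketch (not part of the original module): callers keep the
# old GNU Radio idiom and compare the return value against the RT_* constants, e.g.
#   if enable_realtime_scheduling() != RT_OK:
#       print("failed to enable realtime scheduling")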
class top_block(gras.TopBlock):
def __init__(self, name="Top"):
gras.TopBlock.__init__(self, name)
def lock(self):
pass
def unlock(self):
self.commit()
def start(self, *args):
if args: self.global_config().maximum_output_items = args[0]
gras.TopBlock.start(self)
def run(self, *args):
if args: self.global_config().maximum_output_items = args[0]
gras.TopBlock.run(self)
class hier_block2(gras.HierBlock):
def __init__(self, name="Hier", in_sig=None, out_sig=None):
gras.HierBlock.__init__(self, name)
self.__in_sig = in_sig
self.__out_sig = out_sig
#backwards compatible silliness
import weakref
self._hb = weakref.proxy(self)
def lock(self):
pass
def unlock(self):
self.commit()
def input_signature(self): return self.__in_sig
def output_signature(self): return self.__out_sig
# create a couple of aliases
serial_to_parallel = stream_to_vector
parallel_to_serial = vector_to_stream
# Force the preference database to be initialized
prefs = gr_prefs.singleton
#alias old gr_add_vXX and gr_multiply_vXX
add_vcc = add_cc
add_vff = add_ff
add_vii = add_ii
add_vss = add_ss
multiply_vcc = multiply_cc
multiply_vff = multiply_ff
multiply_vii = multiply_ii
multiply_vss = multiply_ss
|
manojgudi/sandhi
|
modules/gr36/gnuradio-core/src/python/gnuradio/gr/__init__.py
|
__init__.py
|
py
| 2,197 |
python
|
en
|
code
| 1 |
github-code
|
6
|
18842905496
|
import logging
try:
from settings import DEBUG
except ImportError:
DEBUG = True
from raven.handlers.logging import SentryHandler
from clean.infra.log.utils.colors import color_style
class RequireDebugFalse(logging.Filter):
def filter(self, record):
return not DEBUG
class RequireDebugTrue(logging.Filter):
def filter(self, record):
return DEBUG
class ColorsFormatter(logging.Formatter):
def __init__(self, *args, **kwargs):
super(ColorsFormatter, self).__init__(*args, **kwargs)
self.style = self.configure_style(color_style())
def configure_style(self, style):
style.DEBUG = style.HTTP_NOT_MODIFIED
style.INFO = style.HTTP_INFO
style.WARNING = style.HTTP_NOT_FOUND
style.ERROR = style.ERROR
style.CRITICAL = style.HTTP_SERVER_ERROR
return style
def format(self, record):
message = logging.Formatter.format(self, record)
colorizer = getattr(self.style, record.levelname, self.style.HTTP_SUCCESS)
return colorizer(message)
class CaptureError(SentryHandler):
def emit(self, record):
return super(CaptureError, self).emit(record)
|
bahnlink/pyclean
|
clean/infra/log/utils/__init__.py
|
__init__.py
|
py
| 1,182 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43293144011
|
"""
学员管理系统
系统简介
需求:进⼊系统显示系统功能界⾯,功能如下:
添加学员
删除学员
修改学员信息
完善修改学员信息: 可全部修改,也可单独修改
查询学员信息
完善查找学员信息: 根据姓名查找(如果有两个张三,则全部显示,如果只有一个,则显示一个)
显示所有学员信息
退出系统
"""
"""
这是宠物信息管理系统的主程序
"""
# 导入学员管理系统的功能模块
from student_tools import *
def main():
    # 1. Display the system function menu
while True:
show_menu()
        # 2. Get the user's choice
action_str = input("请选择希望执行的操作:")
print(f"您选择的操作是【{action_str}】")
        # 3. Execute the corresponding feature based on the user's input
if action_str in ["1", "2", "3", "4", "5", "6"]:
            # Add a student
if action_str == "1":
add_student()
            # Delete a student
elif action_str == "2":
del_student()
            # Modify student information
elif action_str == "3":
modify_student()
            # Query student information
elif action_str == "4":
search_student()
            # Show all student information
elif action_str == "5":
show_all_student()
            # Exit the system
elif action_str == "6":
print("欢迎再次使用【学习信息管理系统】")
break
else:
print("您输入的不正确,请重新选择")
if __name__ == '__main__':
main()
|
pisces-jeffen/Learning-Python
|
chapter1_basic/lesson14_学员管理系统/student_main.py
|
student_main.py
|
py
| 1,705 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
40534220786
|
from dataclasses import dataclass, field
from typing import List
from frater.component import ComponentState, IOComponent, IOComponentConfig, ComponentBuilder
from frater.stream import InputStream, OutputStream, StreamConfig, StreamState
@dataclass
class SummationComponentState(ComponentState):
total: int = 0
class SummationComponent(IOComponent):
def __init__(self, config: IOComponentConfig, input_stream: InputStream, output_stream: OutputStream):
super(SummationComponent, self).__init__(config, input_stream, output_stream)
def init_state(self):
return SummationComponentState()
def process(self, data):
self.state.total += data
return self.state.total
@dataclass
class IterableInputStreamConfig(StreamConfig):
data: List[int] = field(default_factory=list)
class IterableInputStream(InputStream):
def __init__(self, config: IterableInputStreamConfig):
super(IterableInputStream, self).__init__(config)
def __iter__(self):
yield StreamState.START
yield from self.config.data
yield StreamState.END
class PrintOutputStream(OutputStream):
def send(self, data):
print(data)
def main():
input_stream = IterableInputStream(
IterableInputStreamConfig.from_dict({'data': list(range(10))}))
output_stream = PrintOutputStream()
component = ComponentBuilder.build(SummationComponent, IOComponentConfig(), input_stream, output_stream)
component.run()
if __name__ == '__main__':
main()
|
Frater-SDK/frater
|
docs/source/getting_started/examples/io_component_example.py
|
io_component_example.py
|
py
| 1,530 |
python
|
en
|
code
| 3 |
github-code
|
6
|
39746911520
|
import threading
from flask import jsonify
from dophon_cloud import enhance, micro_cell_list
a = enhance(import_name=__name__,
properties={'111': 'aaa', 'service_name': 'dxh-service', 'host': '0.0.0.0', 'port': 80,
'reg_url': 'http://127.0.0.1:8301/reg/service/'})
m_c_list = micro_cell_list(a, properties={
'dxh-service': [
{
'/test': [
'/test1'
]
}
],
'xxx-service': [
{
'/c/test': [
'/another/test'
]
}
]
})
@a.route('/b')
def enter_b():
result = m_c_list.request('dxh-service', ['/test', '/test1'])
print(result)
return jsonify(result)
@a.route('/c')
def enter_c():
result = m_c_list.request('xxx-service', ['/c/test', '/another/test'])
print(result)
return jsonify(result)
threading.Thread(target=a.run).start()
|
Ca11MeE/dophon_cloud
|
dophon_cloud/a_test.py
|
a_test.py
|
py
| 912 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18672735980
|
import numpy as np
import pickle
import scipy.signal as sp
import matplotlib.pyplot as plt
with open('datasave', 'rb') as file:
datasym =pickle.load(file)
dataf = np.zeros((91, 1024, 1024))
ref = np.mean(datasym[:17, :, :],axis=0)
for z1 in range(1024):
for z2 in range(1024):
value1 =datasym[30:121, z1, z2]
value2 = np.multiply(np.ones((91,)),ref[z1, z2])
dataf[:, z1, z2] = np.abs(np.fft.fft(value1-value2, axis=0))
for z in range(6):
f = sp.medfilt2d(
np.log(np.mean(dataf[z*3-2:z*3, :, :],axis=0)), kernel_size=11)
plt.figure()
plt.imshow(f)
plt.show()
|
jialanxin/UED-Analysis
|
load.py
|
load.py
|
py
| 615 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11916090818
|
# Loay Mohamed
# Challenge 6:
import ast
# read the matrix as a Python literal, e.g. [[0, 1, 1, 0], [0, 1, 1, 0], ...]
matrix = ast.literal_eval(input("Input your binary n*m matrix:"))
#matrix = [[0, 1, 1, 0], [0, 1, 1, 0], [1, 0, 0, 1], [1, 0, 0, 1]]
# matrix = [[1, 0], [0, 1]]
n = len(matrix)
m = len(matrix[0])
# check and validate matrix dimensions:
if m == 0 or n == 0:
print("Invalid Matrix with dimensions " + str(n) + " , " + str(m))
for i in range(n):
if len(matrix[i]) != m:
print("Invalid Matrix with dimensions " + str(n) + " , " + str(m))
def swapRows(row1, row2):
matrix[[row1, row2]] = matrix[[row2, row1]]
def swapCols(col1, col2):
matrix[:, [col1, col2]] = matrix[:, [col2, col1]]
def checkMatrix():
# if any row in the matrix not equal to the first row or reverse of it..
for i in range(0, n):
for j in range(0, n):
if(matrix[0][0] ^ matrix[0][j] ^ matrix[i][0] ^ matrix[i][j] != 0):
return -1
rowSum = 0
colSum = 0
rowSwap = 0
colSwap = 0
for i in range(0, n):
rowSum = rowSum + matrix[i][0]
colSum = colSum + matrix[0][i]
rowSwap += matrix[i][0] == i % 2
colSwap += matrix[0][i] == i % 2
# count of 1's in each row must equal count 0's or at most differ by 1 (in case odd number matrix board)
if(rowSum != n//2 and rowSum != (n + 1)//2):
return -1
if(colSum != n//2 and colSum != (n + 1)//2):
return -1
if(n % 2 == 1):
if(colSwap % 2):
colSwap = n - colSwap
if(rowSwap % 2):
rowSwap = n - rowSwap
else:
colSwap = min(colSwap, n - colSwap)
rowSwap = min(rowSwap, n - rowSwap)
return (rowSwap + colSwap) // 2
print(checkMatrix())
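# For the 4x4 example in the comment above ([[0, 1, 1, 0], [0, 1, 1, 0], [1, 0, 0, 1], [1, 0, 0, 1]])
# the expected answer is 2 swaps; the 2x2 example [[1, 0], [0, 1]] is already a valid chessboard,
# so it needs 0 swaps.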
|
LoayMoh99/EVA_Hackathon
|
Task1/python_codes/q6.py
|
q6.py
|
py
| 1,667 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19303998344
|
# -*- coding: utf-8 -*-
import pygame, os, sys
import pygame_functions as pyf
import constants as c
import time
import shuffle
import bfs
import dfs
import it_dfs
import a_star
import utils
class Game_Interface:
def __init__(self, nmax, filename):
        # Control variables
self.nmax = nmax
self.mouse_state_plus = False
self.mouse_state_minus = False
#self.alg
self.sprite_list = []
self.shuffler = shuffle.Shuffle(self.nmax)
self.imagesize = c.IMAGE_SIZE
self.time_elapsed = 0
        # Pygame initialization
pyf.screenSize(c.SCREEN_WIDTH, c.SCREEN_HEIGHT)
pyf.setBackgroundColour(c.GRAY)
        # Instantiate the sprite list
for i in range (0, nmax*nmax):
self.sprite_list.append(pyf.makeSprite("images/" + filename + str(i) + ".png"))
        # Load the default sprites
self.plus = pyf.makeSprite("images/plus.png")
self.minus = pyf.makeSprite("images/minus.png")
self.shuffle_button = pyf.makeSprite("images/shuffle.png")
self.BFS_button = pyf.makeSprite("images/BFS.png")
self.DFS_button = pyf.makeSprite("images/DFS.png")
self.DFS_IT_button = pyf.makeSprite("images/BFS_IT.png")
self.A1_button = pyf.makeSprite("images/A_H1.png")
self.A2_button = pyf.makeSprite("images/A_H2.png")
self.text_shuffler_label = pyf.makeLabel(u"Número de iterações: ", 30, 50, 690, "black", "Arial", "clear")
self.text_time = pyf.makeLabel(u"Tempo de execução: ", 30, 700, 400, "black", "Arial", "clear")
self.text_time2 = pyf.makeLabel("segundos", 30, 980, 400, "black", "Arial", "gray")
self.text_memory = pyf.makeLabel(u"Memória utilizada: ", 30, 735, 450, "black", "Arial", "clear")
#self.text_moves = pyf.makeLabel("Movimentos Realizados: ", 30, 735, 500, "black", "Arial", "clear")
#self.text_moves2 = pyf.makeLabel("", 30, 735, 500, "black", "Arial", "gray")
self.text_memory2 = pyf.makeLabel("bytes", 30, 980, 450, "black", "Arial", "gray")
self.number_shuffler_label = pyf.makeLabel(str(c.IT), 30, 332, 692, "black", "Arial", "clear")
        # Scale the sprites for boards larger than 3x3
if self.nmax > 3:
self.initial_transformation()
        # Position the sprites
self.initial_position()
pyf.moveSprite(self.shuffle_button, 570, 710, True)
pyf.moveSprite(self.plus, 515, 710, True)
pyf.moveSprite(self.minus, 460, 710, True)
pyf.moveSprite(self.BFS_button, 800, 100, True)
pyf.moveSprite(self.DFS_button, 1010, 100, True)
pyf.moveSprite(self.DFS_IT_button, 900, 210, True)
pyf.moveSprite(self.A1_button, 800, 320, True)
pyf.moveSprite(self.A2_button, 1010, 320, True)
        # Show the sprites on screen
for i in range(0, nmax*nmax):
pyf.showSprite(self.sprite_list[i])
# print(i)
pyf.showSprite(self.shuffle_button)
pyf.showSprite(self.plus)
pyf.showSprite(self.minus)
pyf.showLabel(self.text_shuffler_label)
pyf.showLabel(self.number_shuffler_label)
pyf.showLabel(self.BFS_button)
pyf.showLabel(self.DFS_button)
pyf.showLabel(self.DFS_IT_button)
pyf.showLabel(self.A1_button)
pyf.showLabel(self.A2_button)
pyf.showLabel(self.text_time)
pyf.showLabel(self.text_time2)
pyf.showLabel(self.text_memory)
pyf.showLabel(self.text_memory2)
#pyf.showLabel(self.text_moves)
#pyf.showLabel(self.text_moves2)
pyf.transformSprite(self.shuffle_button, 0, 0.25)
pyf.transformSprite(self.plus, 0, 0.25)
pyf.transformSprite(self.minus, 0, 0.1)
def initial_position(self):
ini_pos = self.imagesize/2 + c.SPRITE_BORDER
count_index = 1
for i in range (0, self.nmax):
for j in range(0, self.nmax):
pyf.moveSprite(self.sprite_list[count_index], ini_pos + (j * self.imagesize), ini_pos + (i * self.imagesize), True)
count_index += 1
if count_index == self.nmax*self.nmax:
break
pyf.moveSprite(self.sprite_list[0], ini_pos + ((self.nmax - 1) * self.imagesize), ini_pos + ((self.nmax - 1) * self.imagesize), True)
def initial_transformation(self):
factor = (600.0/self.nmax) / self.imagesize
self.imagesize = self.imagesize * factor
for i in range(0, self.nmax * self.nmax):
pyf.transformSprite(self.sprite_list[i], 0, factor)
def run(self):
        # RUNS UNTIL THE ESC KEY IS PRESSED
keys = pygame.key.get_pressed()
current_time = pygame.time.get_ticks()
waittime = 0
while not keys[pygame.K_ESCAPE]:
current_time = pygame.time.get_ticks()
if current_time > waittime:
pygame.event.clear()
keys = pygame.key.get_pressed()
waittime += 20
                # Increment the iteration count
if pyf.spriteClicked(self.plus):
if not self.mouse_state_plus:
self.mouse_state_plus = True
if c.IT >= 1000:
c.IT += 1000
elif c.IT >= 100:
c.IT += 100
elif c.IT >= 10:
c.IT += 10
else:
c.IT += 1
pyf.changeLabel(self.number_shuffler_label, str(c.IT))
else:
self.mouse_state_plus = False
                # Decrement the iteration count
if pyf.spriteClicked(self.minus):
if not self.mouse_state_minus:
self.mouse_state_minus = True
if c.IT > 1000:
c.IT -= 1000
elif c.IT > 100:
c.IT -= 100
elif c.IT > 10:
c.IT -= 10
elif c.IT > 0:
c.IT -= 1
pyf.changeLabel(self.number_shuffler_label, str(c.IT))
else:
self.mouse_state_minus = False
                # Shuffle button
                if pyf.spriteClicked(self.shuffle_button):  # clicking the shuffle sprite calls the shuffle method
self.initial_position()
self.shuffler_method(c.IT)
                # Algorithm buttons
move_list = []
# BFS
if pyf.spriteClicked(self.BFS_button):
bfs_alg = bfs.BFS(self.shuffler.get_matrix(), self.nmax)
start = time.time()
bfs_alg.BFS_algorithm()
end = time.time()
if end - start < 1:
pyf.changeLabel(self.text_time2, "{0:.3f}".format((end - start) * 1000) + "ms")
else:
pyf.changeLabel(self.text_time2, "{0:.3f}".format(end - start) + "s")
pyf.changeLabel(self.text_memory2, "{0:.0f}".format(bfs_alg.get_memory_usage()) + " bytes")
move_list = bfs_alg.get_solution_path()
self.move_numbers(move_list, True)
self.shuffler.reset_matrix()
# DFS
if pyf.spriteClicked(self.DFS_button):
dfs_alg = dfs.DFS(self.shuffler.get_matrix(), self.nmax)
start = time.time()
dfs_alg.DFS_algorithm()
end = time.time()
if end - start < 1:
pyf.changeLabel(self.text_time2, "{0:.3f}".format((end - start) * 1000) + "ms")
else:
pyf.changeLabel(self.text_time2, "{0:.3f}".format(end - start) + "s")
pyf.changeLabel(self.text_memory2, "{0:.0f}".format(dfs_alg.get_memory_usage()) + "bytes")
move_list = dfs_alg.get_solution_path()
self.move_numbers(move_list, True)
self.shuffler.reset_matrix()
# DFS_IT
if pyf.spriteClicked(self.DFS_IT_button):
                    # manually adjust the initial maximum depth here
dfs_it_alg = it_dfs.IT_DFS(self.shuffler.get_matrix(), self.nmax)
start = time.time()
dfs_it_alg.IT_DFS_algorithm()
end = time.time()
if end - start < 1:
pyf.changeLabel(self.text_time2, "{0:.3f}".format((end - start) * 1000) + "ms")
else:
pyf.changeLabel(self.text_time2, "{0:.3f}".format(end - start) + "s")
pyf.changeLabel(self.text_memory2, "{0:.0f}".format(dfs_it_alg.get_memory_usage()) + "bytes")
move_list = dfs_it_alg.get_solution_path()
self.move_numbers(move_list, True)
self.shuffler.reset_matrix()
# A_STAR H1
if pyf.spriteClicked(self.A1_button):
astar_alg = a_star.A_STAR(self.shuffler.get_matrix(), self.nmax)
start = time.time()
astar_alg.a_star_algorithm(utils.chessboard_heuristic)
end = time.time()
if end - start < 1:
pyf.changeLabel(self.text_time2, "{0:.3f}".format((end - start) * 1000) + "ms")
else:
pyf.changeLabel(self.text_time2, "{0:.3f}".format(end - start) + "s")
pyf.changeLabel(self.text_memory2, "{0:.0f}".format(astar_alg.get_memory_usage()) + "bytes")
move_list = astar_alg.get_solution_path()
self.move_numbers(move_list, True)
self.shuffler.reset_matrix()
# A_STAR H2
if pyf.spriteClicked(self.A2_button):
astar_alg = a_star.A_STAR(self.shuffler.get_matrix(), self.nmax)
start = time.time()
astar_alg.a_star_algorithm(utils.manhattan_heuristic)
end = time.time()
if end - start < 1:
pyf.changeLabel(self.text_time2, "{0:.3f}".format((end - start) * 1000) + "ms")
else:
pyf.changeLabel(self.text_time2, "{0:.3f}".format(end - start) + "s")
pyf.changeLabel(self.text_memory2, "{0:.0f}".format(astar_alg.get_memory_usage()) + "bytes")
move_list = astar_alg.get_solution_path()
self.move_numbers(move_list, True)
self.shuffler.reset_matrix()
pyf.endWait()
def shuffler_method(self, n_moves):
self.shuffler.shuffle_algorithm(n_moves)
moves_list = self.shuffler.get_moves_list()
self.move_numbers(moves_list, False)
def change_position(self, m, flag): #m=n?
pos_correction = self.imagesize/2
        n0_x, n0_y = self.sprite_list[0].getPosition()  # X and Y of the blank (0) tile
        x_pos, y_pos = self.sprite_list[m].getPosition()  # X and Y of the tile to swap with 0
        x_temp, y_temp = self.sprite_list[m].getPosition()  # temporary copy
n0_y += pos_correction
n0_x += pos_correction
y_temp = y_temp+pos_correction
x_temp = x_temp+pos_correction
y_pos += pos_correction
x_pos += pos_correction
        pyf.moveSprite(self.sprite_list[0], x_pos, y_pos, True)  # move the blank (0) tile into place
if flag:
if n0_y > y_temp:
for x in range(0, int(self.imagesize/5)):
y_pos += 5
pyf.moveSprite(self.sprite_list[m], x_pos, y_pos, True)
if flag:
time.sleep(c.TIME_CONST)
elif n0_y < y_temp:
for x in range(0, int(self.imagesize/5)):
y_pos -= 5
pyf.moveSprite(self.sprite_list[m], x_pos, y_pos, True)
if flag:
time.sleep(c.TIME_CONST)
elif n0_x > x_temp:
for x in range(0, int(self.imagesize/5)):
x_pos += 5
pyf.moveSprite(self.sprite_list[m], x_pos, y_pos, True)
if flag:
time.sleep(c.TIME_CONST)
elif n0_x < x_temp:
for x in range(0, int(self.imagesize/5)):
x_pos -= 5
pyf.moveSprite(self.sprite_list[m], x_pos, y_pos, True)
if flag:
time.sleep(c.TIME_CONST)
else:
if n0_y > y_temp:
pyf.moveSprite(self.sprite_list[m], x_pos, y_pos + self.imagesize, True)
elif n0_y < y_temp:
pyf.moveSprite(self.sprite_list[m], x_pos, y_pos - self.imagesize, True)
elif n0_x > x_temp:
pyf.moveSprite(self.sprite_list[m], x_pos + self.imagesize, y_pos, True)
else:
pyf.moveSprite(self.sprite_list[m], x_pos - self.imagesize, y_pos, True)
def move_numbers(self, moves, flag):
for move in moves:
self.change_position(move, flag)
def text_objects(self, text, font, color_text):
text_surface = font.render(text, True, color_text)
return text_surface, text_surface.get_rect()
game = Game_Interface(3, c.FILENAME_MAT)
#game = Game_Interface(3, c.FILENAME_JAC)
#game = Game_Interface(3, c.FILENAME_STD)
#game = Game_Interface(4, c.FILENAME_STD)
#game = Game_Interface(5, c.FILENAME_STD)
game.run()
|
pHgon/8Puzzle-FIA
|
Interface/main.py
|
main.py
|
py
| 13,432 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3989954821
|
from typing import TYPE_CHECKING, Any, Dict, List, Self, Union, cast
from attrs import define as _attrs_define
from attrs import field as _attrs_field
from ..types import UNSET, Unset
if TYPE_CHECKING:
from ..schemas.deposit import Deposit
from ..schemas.recent_2_result_type_1 import Recent2ResultType1
@_attrs_define
class Recent2:
"""
Attributes:
result (Union['Deposit', 'Recent2ResultType1', Unset]):
error (Union[Unset, List[str]]):
"""
result: Union["Deposit", "Recent2ResultType1", Unset] = UNSET
error: Union[Unset, List[str]] = UNSET
additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)
def to_dict(self) -> Dict[str, Any]:
from ..schemas.deposit import Deposit
        result: Union[Dict[str, Any], Unset]
        if isinstance(self.result, Unset):
            result = UNSET
        else:
            # both Deposit and Recent2ResultType1 provide to_dict()
            result = self.result.to_dict()
error: Union[Unset, List[str]] = UNSET
if not isinstance(self.error, Unset):
error = self.error
field_dict: Dict[str, Any] = {}
field_dict.update(self.additional_properties)
field_dict.update({})
if result is not UNSET:
field_dict["result"] = result
if error is not UNSET:
field_dict["error"] = error
return field_dict
@classmethod
def from_dict(cls: Self, src_dict: Dict[str, Any]) -> Self:
from ..schemas.deposit import Deposit
from ..schemas.recent_2_result_type_1 import Recent2ResultType1
d = src_dict.copy()
def _parse_result(
data: object,
) -> Union["Deposit", "Recent2ResultType1", Unset]:
if isinstance(data, Unset):
return data
try:
if not isinstance(data, dict):
raise TypeError()
_result_type_0 = data
result_type_0: Union[Unset, Deposit]
if isinstance(_result_type_0, Unset):
result_type_0 = UNSET
else:
result_type_0 = Deposit.from_dict(_result_type_0)
return result_type_0
except: # noqa: E722
pass
if not isinstance(data, dict):
raise TypeError()
_result_type_1 = data
result_type_1: Union[Unset, Recent2ResultType1]
if isinstance(_result_type_1, Unset):
result_type_1 = UNSET
else:
result_type_1 = Recent2ResultType1.from_dict(_result_type_1)
return result_type_1
result = _parse_result(d.pop("result", UNSET))
error = cast(List[str], d.pop("error", UNSET))
recent_2 = cls(
result=result,
error=error,
)
recent_2.additional_properties = d
return recent_2
@property
def additional_keys(self) -> List[str]:
return list(self.additional_properties.keys())
def __getitem__(self, key: str) -> Any:
return self.additional_properties[key]
def __setitem__(self, key: str, value: Any) -> None:
self.additional_properties[key] = value
def __delitem__(self, key: str) -> None:
del self.additional_properties[key]
def __contains__(self, key: str) -> bool:
return key in self.additional_properties
|
tlg7c5/kraken-connector
|
kraken_connector/schemas/recent_2.py
|
recent_2.py
|
py
| 3,654 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43918816491
|
from cleverhans.attacks import CarliniWagnerL2
from tools.cleverhans.adversarial_attack import AdversarialAttack
class CarliniWagnerAttack(AdversarialAttack):
def __init__(self, model, targeted=False, confidence=0, batch_size=1, learning_rate=5e-3, binary_search_steps=5,
max_iterations=1000, abort_early=True, initial_const=1e-2, clip_min=-1, clip_max=1):
super().__init__(model=model, clip_min=clip_min, clip_max=clip_max)
self._targeted = targeted
self._confidence = confidence
self._batch_size = batch_size
self._learning_rate = learning_rate
self._binary_search_steps = binary_search_steps
self._max_iterations = max_iterations
self._abort_early = abort_early
self._initial_const = initial_const
with self.graph.as_default():
self._method = CarliniWagnerL2(self._model, sess=self.session, confidence=self._confidence,
batch_size=self._batch_size, learning_rate=self._learning_rate,
binary_search_steps=self._binary_search_steps,
max_iterations=self._max_iterations, abort_early=self._abort_early,
initial_const=self._initial_const, clip_min=self._clip_min,
clip_max=self._clip_max, targeted=self._targeted)
def get_name(self):
return "{}_{}".format(self.TOOL_NAME, "C&W")
def attack_method(self, labels):
if labels is not None:
if self._targeted:
return self._method.generate(x=self._x_clean, y_target=labels)
else:
return self._method.generate(x=self._x_clean, y=labels)
return self._method.generate(x=self._x_clean)
|
GianmarcoMidena/adversarial-ML-benchmarker
|
tools/cleverhans/carlini_wagner_attack.py
|
carlini_wagner_attack.py
|
py
| 1,845 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73826485946
|
#
# Reading files with Python functions
#
def leitura_arquivo():
arquivo = open("novo_arquivo.txt", "r")
if arquivo.mode == "r":
conteudo = arquivo.read()
print(conteudo)
arquivo.close()
leitura_arquivo()
def leitura_arquivo_grande():
arquivo = open("novo_arquivo.txt", "r")
if arquivo.mode == "r":
conteudo_total = arquivo.readlines()
for conteudo in conteudo_total:
print(conteudo)
arquivo.close()
leitura_arquivo_grande()
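# A small sketch of the same reads using a context manager, which closes the file
# automatically; it assumes the same "novo_arquivo.txt" used by the functions above.
def leitura_arquivo_com_with():
    with open("novo_arquivo.txt", "r") as arquivo:
        for linha in arquivo:
            print(linha)
leitura_arquivo_com_with()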
|
Feltrim/CursoPython-LinkedInLearning
|
Exercicios/arquivos_de_exercicios_descubra_o_python/Cap. 04/leituraArquivo_start.py
|
leituraArquivo_start.py
|
py
| 505 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
72493095869
|
import math
import time
def isPrimeOptimal(n):
sa = int(math.sqrt(n))
count = 0
for i in range(2, sa+1):
if n % i == 0:
count += 1
if count == 0:
return True
else:
return False
def isPrimeJoke(n):
count = 0
for i in range(2, n+1):
if n % i == 0:
count += 1
if count == 1:
return True
else:
return False
num = int(input("enter a num: "))
def timeElapsed(func, *args):
st = time.time()
a = func(*args)
et = time.time()
return a, et - st
print(timeElapsed(isPrimeJoke,num))
# print(timeElapsed(isPrimeOptimal,num))
|
Rahul109866/python-data-structures
|
prime optimization.py
|
prime optimization.py
|
py
| 707 |
python
|
en
|
code
| 0 |
github-code
|
6
|
23399964442
|
import nextcord
from nextcord.ext import commands, application_checks
from nextcord import Interaction, SlashOption
from config.config_handler import ConfigHandler
from middlewares.server_verification import ServerVerification
from components.modals.nuke_model import NukeModel
from middlewares.bot_permissions import BotPermissions
from utils.server_utils import ServerUtils
class Nuke(commands.Cog):
config_handler = ConfigHandler()
verifications = ServerVerification()
permissions = BotPermissions()
utils = ServerUtils()
def __init__(self, bot: commands.Bot):
self.bot = bot
@nextcord.slash_command(
name = "nuke",
description = "Ataque total sobre un servidor.",
guild_ids = config_handler.get_CaC_server_id()
)
@application_checks.has_role(config_handler.get_executor_rol_id())
async def nuke_command(self, ctx: Interaction,
id: str = SlashOption(required = True, description = "Id del servidor objtivo.")):
guild = self.utils.get_server(int(id), self.bot)
self.verifications.check_command_execution_in_allowed_server(guild.id)
self.verifications.check_bag(guild)
if (not self.permissions.has_nuke_permissions(guild)
            and not self.permissions.has_administrator_permission(guild)):  # NOTE: assumed to take the guild, mirroring has_nuke_permissions
raise commands.BotMissingPermissions(["8"])
modal = NukeModel(guild, self.bot)
await ctx.response.send_modal(modal)
def setup(client):
client.add_cog(Nuke(client))
|
Worcer/ASF
|
amanecer sin fronteras/src/commands/raid/nuke.py
|
nuke.py
|
py
| 1,639 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14807523398
|
import multiprocessing
import time
import os
'''
import os
print("work进程编号",os.getpid())
'''
def dance(nums,names):
print("dance进程id:"+str(os.getpid()))
print("dance父进程id:"+str(os.getppid()))
for i in range(nums):
print(names+"跳舞")
time.sleep(0.5)
def sing(nums,names):
print("sing进程id:"+str(os.getpid()))
print("sing进程父id:"+str(os.getppid()))
for i in range(nums):
print(names+"唱歌")
time.sleep(0.5)
if __name__ == "__main__":
print("多进程")
sing_process = multiprocessing.Process(target=sing,args=(5,"小米"))
dance_process = multiprocessing.Process(target=dance,kwargs={"names":"小茗","nums":6})
print("主进程id:"+str(os.getpid()))
sing_process.start()
dance_process.start()
|
kids0cn/leetcode
|
Python语法/python多线程多进程/3.获取进程编号.py
|
3.获取进程编号.py
|
py
| 808 |
python
|
en
|
code
| 0 |
github-code
|
6
|
33564541075
|
import time
#I had ChatGPT3 help me a bit. Now I need to apply this to my original code for the whole story, and I need to remove the prints I used to start new lines. I need
#to put \n at the start of all my input commands. Finally, I need to remove the time.sleeps because I can instead just use typing_print to have the player read
#it in time; I think 0.03 is the best delay, let me know what you think if you read this and could help. I may want to add less punishing gameplay, where instead of
#ending the code every time someone types something wrong, it asks them again, but idk, someone please help me.
def typing_print(text, delay=0.03, end=''):
for letter in text:
print(letter, end=end, flush=True)
time.sleep(delay)
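# A hedged sketch of the refactor described in the note above (the helper name and
# prompts are illustrative, not part of the original game): end messages with "\n"
# instead of printing blank lines, and re-ask on bad input instead of exiting.
def ask_option(prompt, options, delay=0.03):
    # keep prompting until the player types one of the allowed options
    while True:
        typing_print(prompt, delay)
        choice = input("\n> ").strip().lower()
        if choice in options:
            return choice
        typing_print("system: please pick one of: " + ", ".join(options) + "\n", delay)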
def game_intro():
typing_print("MESSAGE FROM CREATOR: PLEASE ONLY TYPE THINGS WHEN YOU ARE ASKED TO, OR WHEN THE CURSOR AUTO-MOVES TO THE TYPE POSITION, OR THE STORY WILL MESS UP.\n")
def get_player_info():
name = input("system: what is your name? ")
age = input("system: how old are you? ")
gender = input("system: are you a, boy, or girl, or other? ")
if gender == "boy":
Pronoun = "he"
elif gender == "girl":
Pronoun = "she"
elif gender == "other":
Pronoun = "they/them"
else:
typing_print("pick an option please")
exit()
return name, age, gender, Pronoun
def game_start(name, gender, Pronoun):
typing_print("MESSAGE FROM CREATOR: ocassionally words double space, due to you placing a space before the word when you type it, please ignore. also eventually in the story, glitches may happen. please tell creator to fix.")
typing_print(" ")
typing_print(" ")
Continue= input("\n system: Begin story? y/n ")
typing_print(" ")
    if Continue in ("y", "yes"):
typing_print("Story Starting... ")
time.sleep(3)
hello = input(f"\n computer: Hello {name} ")
typing_print(" ")
    elif Continue in ("n", "no"):
typing_print("Ending 1: don't want to play.")
exit()
else:
typing_print("system: You're lucky you messed up so early, your options here are to input y or n no capital letters, or spaces this is your warning, this story is very unforgining to people who misspell their options or answers so if you need to copy and paste the option.")
def game_play(name, Pronoun):
typing_print("You look around you for the first time, and notice you are floating, in blackness, all you can see is black. occasionally words appear in front of you. you are in a console, one of many, in which your world is controlled by your actions.")
typing_print(" ")
typing_print(" ")
GAME = input(f"\n computer: now {name}, do you want to play a game? y/n ")
if GAME == "n":
computer_opinion1 = -1
#opinion system?
typing_print(" ")
typing_print("computer: ...")
time.sleep(1)
typing_print(" ")
if GAME == "y":
typing_print(f"computer: HA don't care, entertain yourself, plus we have more important things to do. ")
typing_print(" ")
else:
typing_print(" ")
typing_print("please select a given option.")
typing_print("Ending 0, the failure, answer a question properly.")
time.sleep(5)
game_over()
def game_over():
exit()
def meaning_of_life(name):
time.sleep(5)
typing_print(" ")
typing_print(f"\n computer: so {name} what is the meaning of life?")
time.sleep(2)
life = input(f" system: {name} what do you think? #this should be a verb# ")
time.sleep(2)
typing_print(" ")
typing_print(f"computer: ahh you think it is {life}")
time.sleep(2)
typing_print(" ")
typing_print(f"computer: your opinion is valid {name}.")
time.sleep(2)
typing_print(" ")
typing_print(f"computer: updating databanks to add the definition {life} as the meaning of life.")
def main():
game_intro()
name, age, gender, Pronoun = get_player_info()
game_start(name, gender, Pronoun)
game_play(name, Pronoun)
meaning_of_life(name)
main()
|
Titan-Slayer09/My-TBA-Thing
|
TBAai.py
|
TBAai.py
|
py
| 4,224 |
python
|
en
|
code
| 1 |
github-code
|
6
|
15751070560
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by Alex on 2016/10/18
class A(object):
a = 'a'
def __init__(self, b):
self.b = b
if __name__ == '__main__':
a = A('test a')
print (a.b)
print (a.a)
a.a = 'asdasdf'
b = A('test b')
print (b.b)
print (b.a)
|
bufubaoni/antitools
|
learnpython/pytest.py
|
pytest.py
|
py
| 309 |
python
|
en
|
code
| 17 |
github-code
|
6
|
21092937121
|
def detectCycleInGraph(edges: list[list[int]], node: int) -> bool:
if edges is None or node == 0:
return False
stack = []
visited = [0 for i in range(0, node)]
graph = [[] for i in range(0, node)]
for edge in edges:
_from = edge[0]
_to = edge[1]
graph[_from].append(_to)
current_node = 0
visited[current_node] = 1
stack.append(current_node)
while len(stack) > 0:
current_node = stack[-1]
node_found = False
for j in graph[current_node]:
if j in stack:
print("Cycle found: ", current_node, j)
return True
if visited[j] == 0:
node_found = True
visited[j] = 1
stack.append(j)
break
if not node_found:
stack.pop()
return False
def main():
arr = [[0, 1], [0, 2], [1, 3], [3, 0], [3, 4]]
# arr = [[0, 1], [1, 2], [2, 3], [3, 4]]
print(detectCycleInGraph(arr, 5))
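    # expected: the cyclic example prints "Cycle found:  3 0" and then True,
    # while the commented-out acyclic example would print False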
if __name__ == '__main__':
main()
|
fahadfahim13/Problem_Solve
|
Python/Coding Simplified/Graph/DetectCycleInGraphDFS.py
|
DetectCycleInGraphDFS.py
|
py
| 1,048 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9027119591
|
# encoding: utf-8
import pdb, time, sys, os, codecs, random, re, math
import numpy as np
emotion_idx = dict(zip(['neutral','anger', 'disgust', 'fear', 'joy', 'sadness', 'surprise'], range(7)))
def print_time():
print('\n----------{}----------'.format(time.strftime("%Y-%m-%d %X", time.localtime())))
def batch_index(length, batch_size, test=False):
index = list(range(length))
if not test: np.random.shuffle(index)
for i in range(int( (length + batch_size -1) / batch_size ) ):
ret = index[i * batch_size : (i + 1) * batch_size]
if not test and len(ret) < batch_size : break
yield ret
def list_round(a_list):
a_list = list(a_list)
return [float('{:.4f}'.format(i)) for i in a_list]
def token_seq(text):
return text.split()
def load_w2v(embedding_dim, embedding_dim_pos, data_file_path, embedding_path):
print('\nload embedding...')
words = []
speakers = []
speaker_dict = {}
i = 0
inputFile = open(data_file_path, 'r', encoding='utf-8')
while True:
line = inputFile.readline()
if line == '':
break
line = line.strip().split()
d_len = int(line[1])
inputFile.readline()
for i in range(d_len):
i += 1
new_line = inputFile.readline().strip().split(' | ')
speaker, emotion, utterance = new_line[1], new_line[2], new_line[3]
if speaker in speaker_dict:
speaker_dict[speaker] += 1
else:
speaker_dict[speaker] = 1
speakers.append(speaker)
words.extend([emotion] + token_seq(utterance))
words = set(words)
word_idx = dict((c, k + 1) for k, c in enumerate(words))
word_idx_rev = dict((k + 1, c) for k, c in enumerate(words))
speaker_dict = sorted(speaker_dict.items(), key=lambda x: x[1], reverse=True)
speakers = [item[0] for item in speaker_dict]
spe_idx = dict((c, k + 1) for k, c in enumerate(speakers))
spe_idx_rev = dict((k + 1, c) for k, c in enumerate(speakers))
# main_speakers = ['Monica', 'Ross', 'Chandler', 'Rachel', 'Phoebe', 'Joey']
# spe_idx = dict((c, k + 1) for k, c in enumerate(main_speakers))
# spe_idx_rev = dict((k + 1, c) for k, c in enumerate(main_speakers))
# print('all_speakers: {}'.format(len(spe_idx)))
w2v = {}
inputFile = open(embedding_path, 'r', encoding='utf-8')
emb_cnt = int(inputFile.readline().split()[0])
for line in inputFile.readlines():
line = line.strip().split()
w, ebd = line[0], line[1:]
w2v[w] = ebd
embedding = [list(np.zeros(embedding_dim))]
hit = 0
for item in words:
if item in w2v:
vec = list(map(float, w2v[item]))
hit += 1
else:
vec = list(np.random.rand(embedding_dim) / 5. - 0.1)
embedding.append(vec)
print('data_file: {}\nw2v_file: {}\nall_words_emb {} all_words_file: {} hit_words: {}'.format(data_file_path, embedding_path, emb_cnt, len(words), hit))
embedding_pos = [list(np.zeros(embedding_dim_pos))]
embedding_pos.extend( [list(np.random.normal(loc=0.0, scale=0.1, size=embedding_dim_pos)) for i in range(200)] )
embedding, embedding_pos = np.array(embedding), np.array(embedding_pos)
print("embedding.shape: {} embedding_pos.shape: {}".format(embedding.shape, embedding_pos.shape))
print("load embedding done!\n")
return word_idx_rev, word_idx, spe_idx_rev, spe_idx, embedding, embedding_pos
def load_embedding_from_npy(video_id_mapping_file, video_emb_file, audio_emb_file, path_dir = ''):
def normalize(x):
x1 = x[1:,:]
min_x = np.min(x1, axis=0, keepdims=True)
max_x = np.max(x1, axis=0, keepdims=True)
x1 = (x1-min_x)/(max_x-min_x+1e-8)
x[1:,:] = x1
return x
v_id_map = eval(str(np.load(video_id_mapping_file, allow_pickle=True))) # dia1utt1: 1
v_emb = normalize(np.load(video_emb_file, allow_pickle=True)) # (13620, 4096)
a_emb = normalize(np.load(audio_emb_file, allow_pickle=True)) # (13620, 6373)
print('\nload video_emb_file: {}\nload audio_emb_file: {}\n'.format(video_emb_file, audio_emb_file))
return v_id_map, v_emb, a_emb
def bert_word2id_hier(words, tokenizer, i, x_bert_tmp, x_mask_bert_tmp, x_type_bert_tmp, s_idx_bert_tmp):
tokens_a = tokenizer.tokenize(words)
tokens_a = ["[CLS]"] + tokens_a + ["[SEP]"]
input_ids = tokenizer.convert_tokens_to_ids(tokens_a)
start_idx = s_idx_bert_tmp[i]
sen_len_tmp = len(input_ids)
    s_idx_bert_tmp[i+1] = start_idx + sen_len_tmp  # index of each utterance's first token within the dialogue
    for j in range(sen_len_tmp):
        x_bert_tmp[start_idx+j] = input_ids[j]  # ids of all tokens in the dialogue
        x_mask_bert_tmp[start_idx+j] = 1  # 1 wherever a token is present
        x_type_bert_tmp[start_idx+j] = i % 2  # 0/1 segment type: tokens of every second utterance get 1
def bert_word2id_ind(words, max_sen_len_bert, tokenizer, i, x_bert_sen_tmp, x_mask_bert_sen_tmp):
tokens_a, ret = tokenizer.tokenize(words), 0
if len(tokens_a) > max_sen_len_bert - 2:
ret += 1
tokens_a = tokens_a[0:(max_sen_len_bert - 2)]
tokens_a = ["[CLS]"] + tokens_a + ["[SEP]"]
input_ids = tokenizer.convert_tokens_to_ids(tokens_a)
for j in range(len(input_ids)):
x_bert_sen_tmp[i][j] = input_ids[j]
x_mask_bert_sen_tmp[i][j] = 1
return ret
def cut_by_max_len(x_bert_tmp, x_mask_bert_tmp, x_type_bert_tmp, s_idx_bert_tmp, d_len, max_len=512):
if s_idx_bert_tmp[d_len] > max_len:
new_s_idx_bert_tmp = np.array(s_idx_bert_tmp)
clause_max_len = max_len // d_len
j = 0
for i in range(d_len):
start, end = s_idx_bert_tmp[i], s_idx_bert_tmp[i+1]
if end-start <= clause_max_len:
for k in range(start, end):
x_bert_tmp[j] = x_bert_tmp[k]
x_type_bert_tmp[j] = x_type_bert_tmp[k]
j+=1
new_s_idx_bert_tmp[i+1] = new_s_idx_bert_tmp[i] + end - start
else :
for k in range(start, start+clause_max_len-1):
x_bert_tmp[j] = x_bert_tmp[k]
x_type_bert_tmp[j] = x_type_bert_tmp[k]
j+=1
x_bert_tmp[j] = x_bert_tmp[end-1]
x_type_bert_tmp[j] = x_type_bert_tmp[end-1]
j+=1
new_s_idx_bert_tmp[i+1] = new_s_idx_bert_tmp[i] + clause_max_len
x_bert_tmp[j:] = 0
x_mask_bert_tmp[j:] = 0
x_type_bert_tmp[j:] = 0
s_idx_bert_tmp = new_s_idx_bert_tmp
x_bert_tmp = x_bert_tmp[:max_len]
x_mask_bert_tmp = x_mask_bert_tmp[:max_len]
x_type_bert_tmp = x_type_bert_tmp[:max_len]
return x_bert_tmp, x_mask_bert_tmp, x_type_bert_tmp, s_idx_bert_tmp
def load_data_utt_step1(input_file, tokenizer, word_idx, video_idx, spe_idx, max_doc_len, max_sen_len, max_doc_len_bert, max_sen_len_bert, model_type='', choose_emocate=''):
print('\nload data_file: {}\n'.format(input_file))
doc_id, y_emotion, y_cause, x_bert_sen, x_mask_bert_sen, x_bert, x_mask_bert, x_type_bert, s_idx_bert, x, x_v, sen_len, doc_len, speaker, y_pairs, num_token = [[] for _ in range(16)]
cut_num, cut_num_sen, cut_num_bert_sen, num_emo, num_emo_cause, num_pairs = [0 for _ in range(6)]
inputFile = open(input_file, 'r', encoding='utf-8')
while True:
line = inputFile.readline()
if line == '':
break
line = line.strip().split()
d_id, d_len = line[0], int(line[1])
doc_id.append(d_id)
doc_len.append(d_len)
pairs = eval('[' + inputFile.readline().strip() + ']')
pair_emo, cause = [], []
if pairs != []:
if len(pairs[0]) > 2:
pairs = [(p[0],p[1]) for p in pairs]
pairs = sorted(list(set(pairs)))
pair_emo, cause = zip(*pairs)
y_pairs.append(pairs)
num_pairs += len(pairs)
num_emo_cause += len(list(set(pair_emo)))
y_emotion_tmp, y_cause_tmp = np.zeros((max_doc_len, 2)), np.zeros((max_doc_len, 2))
if choose_emocate:
y_emotion_tmp = np.zeros((max_doc_len, 7))
x_tmp = np.zeros((max_doc_len, max_sen_len),dtype=np.int32)
x_v_tmp, sen_len_tmp, spe_tmp = [np.zeros(max_doc_len, dtype=np.int32) for _ in range(3)]
x_bert_sen_tmp, x_mask_bert_sen_tmp = [np.zeros((max_doc_len, max_sen_len_bert), dtype=np.int32) for _ in range(2)]
s_idx_bert_tmp = np.zeros(max_doc_len, dtype=np.int32)
x_bert_tmp, x_mask_bert_tmp, x_type_bert_tmp = [np.zeros(1024, dtype=np.int32) for _ in range(3)]
for i in range(d_len):
x_v_tmp[i] = video_idx['dia{}utt{}'.format(int(doc_id[-1]), i+1)]
line = inputFile.readline().strip().split(' | ')
spe = line[1]
if spe in spe_idx:
spe_tmp[i] = int(spe_idx[spe])
else:
print('speaker {} error!'.format(spe))
emo_id = emotion_idx[line[2]]
if emo_id>0:
num_emo += 1
if choose_emocate:
y_emotion_tmp[i][emo_id] = 1
else:
y_emotion_tmp[i] = [1,0] if line[2] == 'neutral' else [0,1]
y_cause_tmp[i][int(i+1 in cause)] = 1
words = line[3].replace('|', '')
words_seq = token_seq(words)
num_token.append(len(words_seq))
sen_len_tmp[i] = min(len(words_seq), max_sen_len)
for j, word in enumerate(words_seq):
if j >= max_sen_len:
cut_num_sen += 1
break
x_tmp[i][j] = int(word_idx[word])
bert_word2id_hier(words, tokenizer, i, x_bert_tmp, x_mask_bert_tmp, x_type_bert_tmp, s_idx_bert_tmp)
cut_num_bert_sen += bert_word2id_ind(words, max_sen_len_bert, tokenizer, i, x_bert_sen_tmp, x_mask_bert_sen_tmp)
        cut_num = cut_num + int(s_idx_bert_tmp[d_len]>max_doc_len_bert)  # number of dialogues whose total token count exceeds the maximum length
        x_bert_tmp, x_mask_bert_tmp, x_type_bert_tmp, s_idx_bert_tmp = cut_by_max_len(x_bert_tmp, x_mask_bert_tmp, x_type_bert_tmp, s_idx_bert_tmp, d_len, max_len=max_doc_len_bert)  # truncate long dialogues to roughly max_doc_len_bert // d_len tokens per utterance
y_emotion.append(y_emotion_tmp)
y_cause.append(y_cause_tmp)
x.append(x_tmp)
x_v.append(x_v_tmp)
sen_len.append(sen_len_tmp)
speaker.append(spe_tmp)
x_bert_sen.append(x_bert_sen_tmp)
x_mask_bert_sen.append(x_mask_bert_sen_tmp)
x_bert.append(x_bert_tmp)
x_mask_bert.append(x_mask_bert_tmp)
x_type_bert.append(x_type_bert_tmp)
s_idx_bert.append(s_idx_bert_tmp)
print('cut_num: {} cut_num_sen: {} cut_num_bert_sen: {}\n'.format(cut_num, cut_num_sen, cut_num_bert_sen))
print('num_dia: {} num_utt: {} avg_utt_token: {:.2f} num_emo: {} ({:.2%}) num_emo_cause: {} ({:.2%}) num_pairs: {} \n'.format(len(doc_id), sum(doc_len), sum(num_token)/len(num_token), num_emo, num_emo/sum(doc_len), num_emo_cause, num_emo_cause/num_emo, num_pairs))
x_bert_sen, x_mask_bert_sen, x_bert, x_mask_bert, x_type_bert, s_idx_bert, x, sen_len, doc_len, speaker, x_v, y_emotion, y_cause = map(np.array, [x_bert_sen, x_mask_bert_sen, x_bert, x_mask_bert, x_type_bert, s_idx_bert, x, sen_len, doc_len, speaker, x_v, y_emotion, y_cause])
for var in ['x_bert_sen', 'x_mask_bert_sen', 'x_bert', 'x_mask_bert', 'x_type_bert', 's_idx_bert', 'x', 'sen_len', 'doc_len', 'speaker', 'x_v', 'y_emotion', 'y_cause']:
print('{}.shape {}'.format(var, eval(var).shape))
# print('{}:\n {}'.format(var, eval(var)[:1]))
print('load data done!\n')
return x_bert_sen, x_mask_bert_sen, x_bert, x_mask_bert, x_type_bert, s_idx_bert, x, sen_len, doc_len, speaker, x_v, y_emotion, y_cause, doc_id, y_pairs
def load_data_utt_step2(input_file, word_idx, video_idx, max_sen_len=45, choose_emocate = '', pred_future_cause=1, test_bound=''):
print('\nload data_file: {}\n'.format(input_file))
max_doc_len = 35
x, sen_len, distance, x_emocate, x_v, y, y_pairs, pair_id_all, pair_id, doc_id_list = [[] for i in range(10)]
n_cut = 0
inputFile = open(input_file, 'r', encoding='utf-8')
while True:
line = inputFile.readline()
if line == '': break
line = line.strip().split()
doc_id, d_len = int(line[0]), int(line[1])
doc_id_list.append(doc_id)
pairs = eval(inputFile.readline().strip())
if pairs != []:
if len(pairs[0]) > 2:
pairs = [(p[0],p[1]) for p in pairs]
pairs = sorted(list(set(pairs))) # If the pair contains the indexes of cause span (len(p)=4), we need to remove duplicates when taking the utterance pairs.
if not pred_future_cause:
pairs = [(p[0],p[1]) for p in pairs if (p[1]-p[0])<=0]
y_pairs.append(pairs)
true_cause_list = sorted(list(set([p[1] for p in pairs])))
x_tmp = np.zeros((max_doc_len, max_sen_len),dtype=np.int32)
sen_len_tmp, y_emo_tmp, predy_emo_tmp = [np.zeros(max_doc_len,dtype=np.int32) for _ in range(3)]
emo_list, cause_list, true_emo_list = [[] for _ in range(3)]
for i in range(d_len):
line = inputFile.readline().strip().split(' | ')
predy_emo_tmp[i] = int(line[1].strip())
if int(line[1].strip())>0:
emo_list.append(i+1)
if int(line[2].strip())>0:
cause_list.append(i+1)
if int(emotion_idx[line[4].strip()])>0:
true_emo_list.append(i+1)
y_emo_tmp[i] = emotion_idx[line[4].strip()]
words = line[5]
words_seq = token_seq(words)
sen_len_tmp[i] = min(len(words_seq), max_sen_len)
for j, word in enumerate(words_seq):
if j >= max_sen_len:
n_cut += 1
break
x_tmp[i][j] = int(word_idx[word])
for p in pairs:
new_p = [doc_id, p[0], p[1], y_emo_tmp[p[0]-1]]
pair_id_all.append(new_p)
if test_bound=='EC':
emo_list = true_emo_list
if test_bound=='CE':
cause_list = true_cause_list
pair_flag = False
for i in emo_list:
for j in cause_list:
if pred_future_cause:
pair_flag = True
else:
if i>=j:
pair_flag = True
else:
pair_flag = False
if pair_flag:
if choose_emocate:
pair_id_cur = [doc_id, i, j, predy_emo_tmp[i-1]]
if test_bound=='EC':
pair_id_cur = [doc_id, i, j, y_emo_tmp[i-1]]
else:
pair_id_cur = [doc_id, i, j, y_emo_tmp[i-1]]
pair_id.append(pair_id_cur)
y.append([0,1] if pair_id_cur in pair_id_all else [1,0])
x.append([x_tmp[i-1],x_tmp[j-1]])
sen_len.append([sen_len_tmp[i-1], sen_len_tmp[j-1]])
distance.append(j-i+100)
if test_bound=='EC':
x_emocate.append(y_emo_tmp[i-1])
else:
x_emocate.append(predy_emo_tmp[i-1])
x_v_i = video_idx['dia{}utt{}'.format(doc_id, i)]
x_v_j = video_idx['dia{}utt{}'.format(doc_id, j)]
x_v.append([x_v_i, x_v_j])
x, sen_len, distance, x_emocate, x_v, y = map(np.array, [x, sen_len, distance, x_emocate, x_v, y])
for var in ['x', 'sen_len', 'distance', 'x_emocate', 'x_v', 'y']:
print('{}.shape {}'.format( var, eval(var).shape ))
print('n_pairs: {}, n_cut: {}, (y-negative, y-positive): {}'.format(len(pair_id_all), n_cut, y.sum(axis=0)))
print('load data done!\n')
return x, sen_len, distance, x_emocate, x_v, y, pair_id_all, pair_id, doc_id_list, y_pairs
def cal_prf(pred_y, true_y, doc_len, average='binary'):
pred_num, acc_num, true_num = 0, 0, 0
for i in range(pred_y.shape[0]):
for j in range(doc_len[i]):
if pred_y[i][j]:
pred_num += 1
if true_y[i][j]:
true_num += 1
if pred_y[i][j] and true_y[i][j]:
acc_num += 1
p, r = acc_num/(pred_num+1e-8), acc_num/(true_num+1e-8)
f = 2*p*r/(p+r+1e-8)
return p, r, f
def cal_prf_emocate(pred_y, true_y, doc_len):
conf_mat = np.zeros([7,7])
for i in range(pred_y.shape[0]):
for j in range(doc_len[i]):
conf_mat[true_y[i][j]][pred_y[i][j]] += 1
p = np.diagonal( conf_mat / np.reshape(np.sum(conf_mat, axis = 0) + 1e-8, [1,7]) )
r = np.diagonal( conf_mat / np.reshape(np.sum(conf_mat, axis = 1) + 1e-8, [7,1]) )
f = 2*p*r/(p+r+1e-8)
weight = np.sum(conf_mat, axis = 1) / np.sum(conf_mat)
w_avg_f = np.sum(f * weight)
return np.append(f, w_avg_f)
def prf_2nd_step_emocate(pair_id_all, pair_id, pred_y):
pair_id_filtered = []
for i in range(len(pair_id)):
if pred_y[i]:
pair_id_filtered.append(pair_id[i])
keep_rate = len(pair_id_filtered)/(len(pair_id)+1e-8)
def cal_prf_emocate(pair_id_all, pair_id):
conf_mat = np.zeros([7,7])
for p in pair_id:
if p in pair_id_all:
conf_mat[p[3]][p[3]] += 1
else:
conf_mat[0][p[3]] += 1
for p in pair_id_all:
if p not in pair_id:
conf_mat[p[3]][0] += 1
p = np.diagonal( conf_mat / np.reshape(np.sum(conf_mat, axis = 0) + 1e-8, [1,7]) )
r = np.diagonal( conf_mat / np.reshape(np.sum(conf_mat, axis = 1) + 1e-8, [7,1]) )
f = 2*p*r/(p+r+1e-8)
weight0 = np.sum(conf_mat, axis = 1)
weight = weight0[1:] / np.sum(weight0[1:])
w_avg_p = np.sum(p[1:] * weight)
w_avg_r = np.sum(r[1:] * weight)
w_avg_f = np.sum(f[1:] * weight)
        # ignore the low-frequency disgust/fear classes ['neutral','anger', 'disgust', 'fear', 'joy', 'sadness', 'surprise']
idx = [1,4,5,6]
weight1 = weight0[idx]
weight = weight1 / np.sum(weight1)
w_avg_p_part = np.sum(p[idx] * weight)
w_avg_r_part = np.sum(r[idx] * weight)
        w_avg_f_part = np.sum(f[idx] * weight)  # weighted f1 over the 4 kept emotions
results = list(f[1:]) + [w_avg_p, w_avg_r, w_avg_f, w_avg_p_part, w_avg_r_part, w_avg_f_part]
return results
# return list(np.append(f[1:], [w_avg_f, w_avg_f_part]))
return cal_prf_emocate(pair_id_all, pair_id_filtered) + cal_prf_emocate(pair_id_all, pair_id) + [keep_rate]
def prf_2nd_step(pair_id_all, pair_id, pred_y):
pair_id_filtered = []
for i in range(len(pair_id)):
if pred_y[i]:
pair_id_filtered.append(pair_id[i])
def cal_prf(pair_id_all, pair_id):
acc_num, true_num, pred_num = 0, len(pair_id_all), len(pair_id)
for p in pair_id:
if p in pair_id_all:
acc_num += 1
p, r = acc_num/(pred_num+1e-8), acc_num/(true_num+1e-8)
f1 = 2*p*r/(p+r+1e-8)
return [p, r, f1]
keep_rate = len(pair_id_filtered)/(len(pair_id)+1e-8)
return cal_prf(pair_id_all, pair_id_filtered) + cal_prf(pair_id_all, pair_id) + [keep_rate]
|
NUSTM/MECPE
|
utils/pre_data_bert.py
|
pre_data_bert.py
|
py
| 20,065 |
python
|
en
|
code
| 24 |
github-code
|
6
|
70279501309
|
import os
import six
import logging
from django.utils import timezone
logger = logging.getLogger(__name__)
def queryset_csv_export(qs, fields, cache_funcs=None, filepath=None, fileobj=None, delimiter='|'):
import csv
import inspect
from django.db.models.query import QuerySet
if not filepath:
raise Exception("expecting a filepath")
file_dir = os.path.dirname(filepath)
if not os.path.exists(file_dir):
os.makedirs(file_dir)
csvfile = fileobj or open(filepath, 'w') # will write to disk by default
writer = csv.writer(csvfile, delimiter=delimiter)
def to_string(val):
if val is None:
val = ""
if callable(val):
val = val()
if not isinstance(val, six.string_types):
val = str(val)
# try:
# val = ascii_encode(val)
# except:
# val = str(val)
return ("%r" % val)[1:-1]
def get_arg_count(fn):
from functools import partial
if type(fn) == partial:
return len(inspect.getargspec(fn.func)[0]) - len(fn.args)
return len(inspect.getargspec(fn)[0])
header_names = []
rows = []
import types
if isinstance(qs, list):
total_count = len(qs)
elif isinstance(qs, QuerySet):
total_count = qs.count()
elif isinstance(qs, types.GeneratorType):
total_count = "unknown (generator)"
else:
raise Exception("No one has shown me how to get the count of a %s" % type(qs))
logger.debug("# of rows in qs = %s" % total_count)
count = 0
for obj in qs:
count += 1
start_time = timezone.now()
row = []
cache_dict = {}
if cache_funcs:
def is_cache_evaluated():
all_cache_keys = [cache_func[0] for cache_func in cache_funcs]
return all([cache_key in cache_dict for cache_key in all_cache_keys])
while not is_cache_evaluated():
for cache_func_tpl in cache_funcs:
cache_key, cache_func = cache_func_tpl[0], cache_func_tpl[1]
cache_dependency = cache_func_tpl[2] if len(cache_func_tpl) > 2 else None
if cache_key in cache_dict or (cache_dependency is not None and cache_dependency not in cache_dict):
continue
cache_func_arg_count = get_arg_count(cache_func)
if cache_func_arg_count == 1:
cache_dict[cache_key] = cache_func(obj)
elif cache_func_arg_count == 2:
cache_dict[cache_key] = cache_func(obj, cache_dict)
else:
raise Exception("invalid number of args for cache function")
for field in fields:
if isinstance(field, six.string_types):
if field not in header_names:
header_names.append(field)
if isinstance(obj, dict):
val = obj.get(field, "")
else:
val = getattr(obj, field, "")
row.append(to_string(val)) # append the value as a raw text value to keep linebreaks \r\n on a single line
elif isinstance(field, tuple):
if len(field) != 2:
raise Exception("invalid computed field length of %s. Field value = %s" % (len(field), field))
computed_header_name, fn = field
if computed_header_name not in header_names:
header_names.append(computed_header_name)
fn_arg_count = get_arg_count(fn)
if fn_arg_count == 1:
row.append(to_string(fn(obj)))
elif fn_arg_count == 2:
row.append(to_string(fn(obj, cache_dict)))
else:
raise Exception("expecting 1 or 2 args. actual # = %s" % fn_arg_count)
else:
raise Exception("invalid field type of %s, field value = %s" % (type(field), field))
rows.append(row)
end_time = timezone.now()
logger.debug("finished %s of %s. time = %s" % (count, total_count, str(end_time - start_time)))
writer.writerow(header_names)
writer.writerows(rows)
if fileobj:
return fileobj
# def ascii_encode(string):
# import unicodedata
# return unicodedata.normalize('NFKD', unicode(string)).encode('ascii', 'ignore')
|
mirusresearch/mirus_django_csv
|
mirus_django_csv.py
|
mirus_django_csv.py
|
py
| 4,477 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73652285629
|
# You have a lock with four circular wheels. Each wheel has 10 digits: '0' through '9'. Each wheel can turn freely, e.g. '9' becomes '0' and '0' becomes '9'. Each move turns one digit of one wheel.
# The lock starts at '0000', a string representing the digits of the four wheels.
# The list deadends contains a set of dead-end codes: if the wheels ever show any of them, the lock is permanently locked and can no longer be turned.
# The string target is the code that unlocks the lock. Return the minimum number of turns needed, or -1 if it cannot be unlocked.
class Solution(object):
def openLock(self, deadends, target):
"""
:type deadends: List[str]
:type target: str
:rtype: int
"""
self.visited = set()
q = ['0000']
res = 0
while(q):
size = len(q)
for i in range(0, size):
cur = q.pop(0)
if cur in deadends:
continue
if cur == target:
return res
for j in range(0, 4):
up = self.plusone(cur, j)
if up not in self.visited:
self.visited.add(up)
q.append(up)
down = self.minusone(cur,j)
if down not in self.visited:
self.visited.add(down)
q.append(down)
res += 1
return -1
def plusone(self, s, j):
ch = list(s)
if ch[j] == '9':
            ch[j] = '0'
else:
ch[j] = chr(ord(ch[j]) + 1)
return "".join(ch)
def minusone(self, s, j):
ch = list(s)
if ch[j] == '0':
ch[j] = '9'
else:
ch[j] = chr(ord(ch[j]) - 1)
return "".join(ch)
a = Solution()
deadends = ["0201","0101","0102","1212","2002"]
target = "0202"
print(a.openLock(deadends, target))
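# Expected output for this example: 6, e.g. via the turn sequence
# "0000" -> "1000" -> "1100" -> "1200" -> "1201" -> "1202" -> "0202".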
|
xxxxlc/leetcode
|
BFS/openLock.py
|
openLock.py
|
py
| 2,133 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
40732764303
|
temp = 0
Average = 0
Ratings = {'A+':4.5, 'A0':4.0,'B+':3.5, 'B0':3.0,'C+':2.5, 'C0':2.0,'D+':1.5, 'D0':1.0,'F':0.0}
for _ in range(20):
Subject, Grade, Rating = map(str, input().split())
if Rating == 'P':
continue
Average += int(Grade[0]) * Ratings[Rating]
temp += int(Grade[0])
print(Average/temp)
|
seriokim/Coding-Study
|
백준 단계별로 풀어보기/심화1/25206.py
|
25206.py
|
py
| 315 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13919169732
|
# License LGPL-3.0 or later (https://www.gnu.org/licenses/lgpl.html).
from odoo import _, api, fields, models
class ProjectScrumRelease(models.Model):
_inherit = "project.scrum.release"
@api.depends(
"total_planned_hours",
"total_planned_hours_edit",
"sprint_ids",
"sprint_ids.progress",
"sprint_ids.expected_hours",
"sprint_ids.state",
)
def _compute_progress(self):
""" This method used to calculate progress based on planned hours,
sprint progress, sprint expected hours etc """
for release in self:
total_planned_hours = release.total_planned_hours
if not release.sprint_ids:
total_planned_hours = release.total_planned_hours_edit
release.progress = sum(
sprint.progress * sprint.expected_hours / total_planned_hours
for sprint in release.sprint_ids
if sprint.state != "cancel" and sprint.expected_hours > 0
)
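            # e.g. (hypothetical numbers) two active sprints with progress 50 and 100 and
            # expected hours 10 and 30 out of 40 total planned hours give
            # 50*10/40 + 100*30/40 = 87.5 release progress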
@api.depends("sprint_ids", "sprint_ids.expected_hours", "sprint_ids.state")
def _compute_hours(self):
""" This method used to calculate milestone planned hours based on
sprint details and sprint detail's expected hours """
for release in self:
release.total_planned_hours = sum(
sprint.expected_hours
for sprint in release.sprint_ids
if sprint.state != "cancel"
)
@api.depends("total_planned_hours", "total_planned_hours_edit")
def _compute_weightage(self):
""" This method used to calculate weightage of release based on
milestone planned hours """
for release in self:
total_m_h = 0
weightage = 0
for rel in release.project_id.release_ids:
if not rel.sprint_ids:
total_m_h += rel.total_planned_hours_edit
else:
total_m_h += rel.total_planned_hours
if not release.sprint_ids:
if release.total_planned_hours_edit > 0 and total_m_h > 0:
weightage = release.total_planned_hours_edit / total_m_h
else:
if release.total_planned_hours > 0 and total_m_h > 0:
weightage = release.total_planned_hours / total_m_h
release.weightage = weightage
name = fields.Char("Name", required=True, size=128)
release_number = fields.Char(
"Release Number", copy=False, size=150, help="Sequence of the release number"
)
progress = fields.Float(
compute="_compute_progress",
string="Progress (0-100)",
help="Computed as avg. progress of related sprints",
)
total_planned_hours = fields.Float(
compute="_compute_hours",
string="Milestone Planned Hour",
help="Estimated time to do the sprints.",
)
total_planned_hours_edit = fields.Float(
string="Milestone Planned Hours", help="Estimated time to do the sprints.",
)
weightage = fields.Float(compute="_compute_weightage")
def _valid_field_parameter(self, field, name):
return name == 'size' or super()._valid_field_parameter(field, name)
@api.model
def create(self, vals):
""" This method used to manage release details log in
project used in release """
result = super(ProjectScrumRelease, self).create(vals)
if vals.get("project_id", ""):
if result.project_id:
msg = (
_(
""" <ul class="o_mail_thread_message_tracking">
<li>Release Added by: <span> %s </span></li><li>
Release Number: <span> %s </span></li>
Release Name: <span> %s </span></li>"""
)
% (self.env.user.name, result.release_number, result.name)
)
result.project_id.message_post(body=msg)
return result
def write(self, vals):
""" This method used to update release details in project
used in release """
if vals.get("project_id", ""):
for rec in self:
if rec.project_id:
msg = (
_(
""" <ul class="o_mail_thread_message_tracking">
<li>Release Removed by: <span> %s </span></li><li>
Release Number: <span> %s </span></li>
Release Name: <span> %s </span></li>"""
)
% (self.env.user.name, rec.release_number, rec.name)
)
rec.project_id.message_post(body=msg)
res = super(ProjectScrumRelease, self).write(vals)
if vals.get("project_id", ""):
for rec in self:
if rec.project_id:
msg = (
_(
""" <ul class="o_mail_thread_message_tracking">
<li>Release Added by: <span> %s </span></li><li>
Release Number: <span> %s </span></li>
Release Name: <span> %s </span></li>"""
)
% (self.env.user.name, rec.release_number, rec.name)
)
rec.project_id.message_post(body=msg)
return res
|
onesteinbv/ProjectManagement
|
project_scrum_agile_extended/models/project_scrum_release.py
|
project_scrum_release.py
|
py
| 5,492 |
python
|
en
|
code
| 1 |
github-code
|
6
|
22019348906
|
import os
import sys
FULL_PATH = os.path.abspath(os.path.dirname(sys.argv[0]))
sys.path.insert(0, FULL_PATH+'/lib')
from errors import error
class reRefine():
# the aim of this class is to allow later datasets within a damage
# series to be rigid body refined (using REFMAC), using the coordinate
# model from the first dataset coupled with the observed Fobs columns
# from the higher dataset mtz files
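    # A minimal usage sketch (the file name below is hypothetical): given a RIDL input
    # file that defines pdb1, mtz2, mtzlabels2 and RfreeFlag1, instantiating the class
    # runs the refinements and writes an updated RIDL input file:
    #     reRefine(inputFile='RIDLinput.txt', makeNewRIDLfile=True, numCycles=10)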
def __init__(
self, inputFile='untitled.txt', makeNewRIDLfile=True,
numCycles=10):
self.inputFile = inputFile
self.printPurpose()
self.parseInputFile()
newPDBs = []
for mtz, cols in zip(self.mtzList, self.mtzCols):
self.runREFMAC(
pdbIn=self.initialPDB, mtzIn=mtz,
mtzCols=cols, rFree=self.rFree,
numCycles=numCycles)
newPDBs.append(self.refmacPDBout)
# check the output log indicates success
with open(self.outputLogfile, 'r') as log:
for ln in log.readlines():
if 'Refmac: Error' in ln:
error(text='refmac did not run to completion. ' +
'Please refer to the log file for ' +
'refmac "{}"'.format(self.outputLogfile))
# check if required files from refinement are present
self.checkFileExists(self.refmacPDBout)
if makeNewRIDLfile:
self.createNewRIDLfile(newPDBs)
def printPurpose(self):
# print purpose of script
ln = 'Running refmac rigid body refinement to generate ' +\
'higher dose pdb coordinate files specified in RIDL ' +\
'input file "{}".\nInitial dataset coordiate model and '.format(
self.inputFile) +\
'higher dose structure factor information have been used'
print(ln)
def runREFMAC(
self, refineType='RIGID', pdbIn='untitled.pdb',
mtzIn='untitled.mtz', numCycles=1, mtzCols='FP',
rFree='FreeR_Flag', inputDir='./'):
# run n = 'numCycles' cycles of 'refineType'
# refinement in refmac to get phases
print('Running refmac ({} {} cycles)...'.format(refineType, numCycles))
print('---> mtz: "{}"'.format(mtzIn.split('/')[-1]))
# check if input files exist and stop if not
print('--> checking that all files for rigid-body refinement are present')
self.checkFileExists(str(pdbIn))
self.checkFileExists(str(mtzIn))
print('--> success')
self.jobName = 'refmac'
if refineType == 'RIGID':
bref = 'over'
numCycString = 'rigid ncycle {}'.format(numCycles)
elif refineType == 'REST':
bref = 'ISOT'
numCycString = 'ncyc {}'.format(numCycles)
else:
print('Warning: unreadable refinement type.. ' +
'selecting 0 cycles of rigid body refinement')
bref = 'over'
numCycString = 'rigid ncycle 0'
refineType = 'RIGID'
numCycles = 0
# make a refinement type id to append to file names from current job
fileInd = '_{}_{}cycles'.format(refineType, numCycles)
self.refmacPDBout = '{}_refmac{}.pdb'.format(
mtzIn.replace('.mtz', ''), fileInd)
self.refmacMTZout = '{}_refmac{}.mtz'.format(
mtzIn.replace('.mtz', ''), fileInd)
self.refmacLIBout = '{}_refmac{}.cif'.format(
mtzIn.replace('.mtz', ''), fileInd)
self.commandInput1 = 'refmac5 ' +\
'XYZIN {} '.format(pdbIn) +\
'XYZOUT {} '.format(self.refmacPDBout) +\
'HKLIN {} '.format(mtzIn) +\
'HKLOUT {} '.format(self.refmacMTZout) +\
'LIBOUT {} '.format(self.refmacLIBout)
self.commandInput2 = 'make check NONE\n' +\
'make -\n' +\
' hydrogen ALL -\n' +\
' hout NO -\n' +\
' peptide NO -\n' +\
' cispeptide YES -\n' +\
' ssbridge YES -\n' +\
' symmetry YES -\n' +\
' sugar YES -\n' +\
' connectivity NO -\n' +\
' link NO\n' +\
'refi -\n' +\
' type {} -\n'.format(refineType) +\
' resi MLKF -\n' +\
' meth CGMAT -\n' +\
' bref {}\n'.format(bref) +\
'{}\n'.format(numCycString) +\
'scal -\n' +\
' type SIMP -\n' +\
' LSSC -\n' +\
' ANISO -\n' +\
' EXPE\n' +\
'solvent YES\n' +\
'weight -\n' +\
' AUTO\n' +\
'monitor MEDIUM -\n' +\
' torsion 10.0 -\n' +\
' distance 10.0 -\n' +\
' angle 10.0 -\n' +\
' plane 10.0 -\n' +\
' chiral 10.0 -\n' +\
' bfactor 10.0 -\n' +\
' bsphere 10.0 -\n' +\
' rbond 10.0 -\n' +\
' ncsr 10.0\n' +\
'labin FP={} SIGFP=SIG{} -\n'.format(mtzCols, mtzCols) +\
' FREE={}\n'.format(rFree) +\
'labout FC=FC FWT=FWT PHIC=PHIC PHWT=PHWT DELFWT=DELFWT PHDELWT=PHDELWT FOM=FOM\n' +\
'PNAME {}\n'.format(pdbIn.strip('.pdb')) +\
'DNAME 1\n' +\
'RSIZE 80\n' +\
'EXTERNAL WEIGHT SCALE 10.0\n' +\
'EXTERNAL USE MAIN\n' +\
'EXTERNAL DMAX 4.2\n' +\
'END'
self.outputLogfile = '{}REFMAClogfile{}.txt'.format(
self.inputDir, fileInd)
# run REFMAC job
print('--> running refmac')
self.runCCP4program()
def runCCP4program(self):
# generic method to run a ccp4 program on command line
textinput = open('{}{}inputfile.txt'.format(
self.inputDir, self.jobName), 'w')
textinput.write(self.commandInput2)
textinput.close()
os.system('{} < {}{}inputfile.txt > {}'.format(
self.commandInput1, self.inputDir, self.jobName,
self.outputLogfile))
def checkFileExists(
self, filename='untitled.pdb'):
# method to check if file exists
if not os.path.isfile(filename):
error(text='file "{}" could not be located'.format(filename),
type='error')
return False
else:
return True
def parseInputFile(self):
# information of mtz list and columns
# can be extracted from an input file
print('Reading input file...')
fileIn = open(self.inputFile, 'r')
for l in fileIn.readlines():
try:
l.split()[0]
except IndexError:
continue
if l.split()[0] == 'pdb1':
self.initialPDB = l.split()[1]
if l.split()[0] == 'mtz2':
self.mtzList = (''.join(l.split()[1:])).split(',')
if l.split()[0] == 'mtzlabels2':
self.mtzCols = (''.join(l.split()[1:])).split(',')
if l.split()[0] == 'RfreeFlag1':
self.rFree = l.split()[1]
fileIn.close()
self.inputDir = '/'.join(
self.initialPDB.replace('.pdb', '').split('/')[0:-1])+'/'
# perform basic checks here
if len(self.mtzList) != len(self.mtzCols):
error(text='Inconsistent number of comma-separated entries in ' +
'input file for line "mtz2" and "mtzlabels2"')
try:
self.rFree
except AttributeError:
error('"RfreeFlag1" has not been specified in input file')
try:
self.initialPDB
except AttributeError:
error('"pdb1" has not been specified in the input file')
self.checkFileExists(self.initialPDB)
for mtz in self.mtzList:
self.checkFileExists(mtz)
def createNewRIDLfile(
self, newPDBs=[]):
# create a new updated RIDL input file which will
# use the new coordinate models for the higher dose
# datasets in the damage series
print('Writing updated RIDL input file...')
newFile = self.inputFile.replace('.txt', '-RigidBodyRefine.txt')
fileIn = open(self.inputFile, 'r')
fileOut = open(newFile, 'w')
for l in fileIn.readlines():
try:
l.split()[0]
except IndexError:
continue
if l.split()[0] == 'pdb2':
continue
if l.split()[0] == 'mtzlabels2':
ln = l+'pdb2 {}\n'.format(','.join(newPDBs))
elif l.split()[0] == 'dir':
ln = 'dir {}_rigidBodyRefine/\n'.format(l.split()[1][0:-1])
else:
ln = l
fileOut.write(ln)
# explicitly must specify the following to use each new pdb file
fileOut.write('\nuseLaterCellDims true')
fileIn.close()
fileOut.close()
print('\nNew input file has been generated: "{}"'.format(newFile))
self.newInputFile = newFile
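
# Illustrative sketch (not part of the original module): parseInputFile() above reads a
# plain-text file of whitespace-separated "key value" lines and recognises the keys
# "pdb1", "mtz2", "mtzlabels2" and "RfreeFlag1". The helper below only writes a minimal
# example of that format; all file names and column labels are hypothetical placeholders.
def _write_example_ridl_input(path='example_RIDL_input.txt'):
    lines = [
        'pdb1 ./dataset1.pdb',                  # initial (lowest-dose) coordinate model
        'mtz2 ./dataset2.mtz,./dataset3.mtz',   # comma-separated higher-dose MTZ files
        'mtzlabels2 FP_D2,FP_D3',               # one column label per MTZ file above
        'RfreeFlag1 FreeR_flag',                # Rfree column name
    ]
    with open(path, 'w') as f:
        f.write('\n'.join(lines) + '\n')
    return path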
|
GarmanGroup/RIDL
|
lib/rigidBodyRefine.py
|
rigidBodyRefine.py
|
py
| 10,059 |
python
|
en
|
code
| 3 |
github-code
|
6
|
4272453202
|
from django.shortcuts import render, get_object_or_404
from django.http import JsonResponse
from .models import Pokemon
def pokemon_list(request):
pokemon = Pokemon.objects.all()
data = {"results": list(pokemon.values(
"name",
"apiId",
"chainId",
"healtPoint",
"attack",
"defense",
"specialAttack",
"specialDefense",
"speed",
"height",
"weight",
"evolution"
))
}
return JsonResponse(data)
def pokemon_detail(request, name):
pokemon = get_object_or_404(Pokemon, name=name)
chain = Pokemon.objects.values('name', 'apiId', 'evolution').filter(
chainId=pokemon.chainId).exclude(name=pokemon.name)
data = {"Pokemon": {
"name": pokemon.name,
"apiId": pokemon.apiId,
"chainId": pokemon.chainId,
"healtPoint": pokemon.healtPoint,
"attack": pokemon.attack,
"defense": pokemon.defense,
"specialAttack": pokemon.specialAttack,
"specialDefense": pokemon.specialDefense,
"speed": pokemon.speed,
"height": pokemon.height,
"weight": pokemon.weight,
"evolution": pokemon.evolution
},
"evolution": []
}
for i in chain:
et = ""
if i["evolution"] > pokemon.evolution:
et = "Evolution"
elif i["evolution"] < pokemon.evolution:
et = "Preevolution"
else:
et = "Alternate"
related = {
"apiId": i["apiId"],
"name": i["name"],
"type": et
}
data['evolution'].append(related)
return JsonResponse(data)
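
# Illustrative sketch (not part of the original app): one possible urls.py wiring for the
# two views above. The route strings and URL names are assumptions; only the view
# functions come from this file.
from django.urls import path

urlpatterns = [
    path("pokemon/", pokemon_list, name="pokemon-list"),
    path("pokemon/<str:name>/", pokemon_detail, name="pokemon-detail"),
]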
|
hitolv4/poketest
|
pokemon/views.py
|
views.py
|
py
| 1,667 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15849581052
|
class Solution:
def hammingDistance(self, x: int, y: int) -> int:
distance = 0
while True:
rx, ry = x % 2, y % 2
if rx != ry:
distance += 1
x, y = x // 2, y // 2
if x == 0 and y == 0:
break
return distance
if __name__ == '__main__':
s = Solution()
print(s.hammingDistance(4, 1))
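
    # Quick cross-check (illustrative addition, not in the original snippet): the Hamming
    # distance equals the number of set bits in x XOR y, so bin(x ^ y).count("1") should
    # agree with the loop-based method above.
    for a, b in [(4, 1), (0, 0), (3, 1), (93, 73)]:
        assert s.hammingDistance(a, b) == bin(a ^ b).count("1")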
|
xinkai-jiang/coding_tutorial
|
leetcode/HammingDistance.py
|
HammingDistance.py
|
py
| 422 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25006136605
|
from typing import TYPE_CHECKING, List, NamedTuple, Optional
import boto3
if TYPE_CHECKING:
from mypy_boto3_ec2.type_defs import FilterTypeDef
from typing_extensions import TypedDict
from aec.util.config import Config
class Image(TypedDict, total=False):
Name: Optional[str]
ImageId: str
CreationDate: str
RootDeviceName: Optional[str]
Size: int
# optional
SnapshotId: str
class AmiMatcher(NamedTuple):
owner: str
match_string: str
amazon_base_account_id = "137112412989"
canonical_account_id = "099720109477"
ami_keywords = {
"amazon2": AmiMatcher(amazon_base_account_id, "amzn2-ami-hvm*x86_64-gp2"),
"ubuntu1604": AmiMatcher(canonical_account_id, "ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64"),
"ubuntu1804": AmiMatcher(canonical_account_id, "ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64"),
"ubuntu2004": AmiMatcher(canonical_account_id, "ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64"),
}
def fetch(config: Config, ami: str) -> Image:
ami_matcher = ami_keywords.get(ami, None)
if ami_matcher:
try:
# lookup the latest ami by name match
ami_details = describe(config, owner=ami_matcher.owner, name_match=ami_matcher.match_string)[0]
except IndexError:
raise RuntimeError(
f"Could not find ami with name matching {ami_matcher.match_string} owned by account {ami_matcher.owner}"
)
else:
try:
# lookup by ami id
ami_details = describe(config, ami=ami)[0]
except IndexError:
raise RuntimeError(f"Could not find {ami}")
return ami_details
def describe(
config: Config,
ami: Optional[str] = None,
owner: Optional[str] = None,
name_match: Optional[str] = None,
show_snapshot_id: bool = False,
) -> List[Image]:
"""List AMIs."""
ec2_client = boto3.client("ec2", region_name=config.get("region", None))
if ami:
response = ec2_client.describe_images(ImageIds=[ami])
else:
if owner:
owners_filter = [owner]
else:
describe_images_owners = config.get("describe_images_owners", None)
if not describe_images_owners:
owners_filter = ["self"]
elif isinstance(describe_images_owners, str):
owners_filter = [describe_images_owners]
else:
owners_filter: List[str] = describe_images_owners
if name_match is None:
name_match = config.get("describe_images_name_match", None)
filters: List[FilterTypeDef] = [] if name_match is None else [{"Name": "name", "Values": [f"*{name_match}*"]}]
print(f"Describing images owned by {owners_filter} with name matching {name_match if name_match else '*'}")
response = ec2_client.describe_images(Owners=owners_filter, Filters=filters)
images = []
for i in response["Images"]:
image: Image = {
"Name": i.get("Name", None),
"ImageId": i["ImageId"],
"CreationDate": i["CreationDate"],
"RootDeviceName": i["RootDeviceName"] if "RootDeviceName" in i else None,
"Size": i["BlockDeviceMappings"][0]["Ebs"]["VolumeSize"],
}
if show_snapshot_id:
image["SnapshotId"] = i["BlockDeviceMappings"][0]["Ebs"]["SnapshotId"]
images.append(image)
return sorted(images, key=lambda i: i["CreationDate"], reverse=True)
def delete(config: Config, ami: str) -> None:
"""Deregister an AMI and delete its snapshot."""
ec2_client = boto3.client("ec2", region_name=config.get("region", None))
response = describe(config, ami, show_snapshot_id=True)
ec2_client.deregister_image(ImageId=ami)
ec2_client.delete_snapshot(SnapshotId=response[0]["SnapshotId"])
def share(config: Config, ami: str, account: str) -> None:
"""Share an AMI with another account."""
ec2_client = boto3.client("ec2", region_name=config.get("region", None))
ec2_client.modify_image_attribute(
ImageId=ami,
LaunchPermission={"Add": [{"UserId": account}]},
OperationType="add",
UserIds=[account],
Value="string",
DryRun=False,
)
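
# Illustrative sketch (not part of the original module): fetch() first resolves a keyword
# such as "ubuntu2004" to an (owner, name-match) pair via ami_keywords before querying EC2.
# The helper below only exercises that local lookup step, so it runs without AWS access.
def _resolve_keyword(keyword: str) -> AmiMatcher:
    matcher = ami_keywords.get(keyword)
    if matcher is None:
        raise KeyError(f"{keyword} is not a known AMI keyword")
    return matcher

if __name__ == "__main__":
    matcher = _resolve_keyword("ubuntu2004")
    print(matcher.owner, matcher.match_string)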
|
DENE-dev/dene-dev
|
RQ1-data/exp2/552-seek-oss@aec-dc5825f8ca2f88df7f4eba38362ffbcf90bf17bb/src/aec/command/ami.py
|
ami.py
|
py
| 4,241 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13923936476
|
import os
import random
from os import listdir
from os.path import splitext
from tqdm import tqdm
import numpy as np
import cv2
from utils.FileOperator import *
# img_tf_flip(r'E:\Project\Unet-vanilla\data\img_backup',r'E:\Project\Unet-vanilla\data\mask_backup')
# img_tf_flip(r'../data/backup/img', r'../data/backup/mask')
def dir_bit_or(img_dir, mask_dir,dst_dir):
    '''
    Bitwise-OR every image in the CPU-output folder with the corresponding image in the GPU-output folder.
    :param img_dir: path to the CPU-generated images
    :param mask_dir: path to the GPU-generated images
    :param dst_dir: folder the combined images are written to
    :return:
    '''
img_ids = get_files_name(img_dir)
mask_ids = get_files_name(mask_dir)
length = len(img_ids)
for i in tqdm(range(0, length)):
img = cv2.imread(fr'{img_dir}/{img_ids[i]}.png')
t,img = cv2.threshold(img,210,255,cv2.THRESH_BINARY)
mask = cv2.imread(fr'{mask_dir}/{mask_ids[i]}.png')
dst = cv2.bitwise_or(img, mask)
pth = os.path.join(dst_dir,f'{img_ids[i]}_bitor.png')
ret = cv2.imwrite(pth, dst)
assert ret, 'save failed'
def copy_mask():
'''
    Duplicate each mask as _scratch, _stain and _dot copies.
:return:
'''
img_ids = get_files_name(r'..\data\masks-backup')
length = len(img_ids)
for i in tqdm(range(0, length)):
# print(img_ids[i])
img = cv2.imread(fr'../data/masks-backup/{img_ids[i]}.png')
ret = cv2.imwrite(fr'../data/masks/{img_ids[i]}_scratch.png', img)
assert ret, 'save failed'
ret = cv2.imwrite(fr'../data/masks/{img_ids[i]}_stain.png', img)
assert ret, 'save failed'
ret = cv2.imwrite(fr'../data/masks/{img_ids[i]}_dot.png', img)
# img = cv2.imread(fr'../data/masks-backup/background.png')
# ret = cv2.imwrite(fr'../data/temp/{i}_background.png', img)
assert ret, 'save failed'
# print(i)
print('done')
import PIL #'6.2.1'
import cv2 #'4.1.1'
def patchit(root_dir,dst_dir):
auto_make_directory(dst_dir)
file_paths = get_files_pth(root_dir)
for pth in file_paths:
img = cv2.imread(pth,0)
auto_make_directory(dst_dir)
_,img = cv2.threshold(img,127,255,cv2.THRESH_BINARY)
cv2.imwrite(os.path.join(dst_dir,os.path.basename(pth)),img, [cv2.IMWRITE_PNG_BILEVEL, 1])
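
# Illustrative sketch (not part of the original module): the core of dir_bit_or() is a
# binary threshold followed by a bitwise OR. The snippet below reproduces that step on two
# small synthetic arrays, so it runs without any image files on disk.
def _demo_threshold_or():
    img = np.array([[0, 120, 250], [30, 220, 90]], dtype=np.uint8)
    mask = np.array([[255, 0, 0], [0, 255, 0]], dtype=np.uint8)
    _, binarised = cv2.threshold(img, 210, 255, cv2.THRESH_BINARY)
    return cv2.bitwise_or(binarised, mask)

if __name__ == '__main__':
    print(_demo_threshold_or())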
|
ssocean/UNet-Binarization
|
utils/Augmentation.py
|
Augmentation.py
|
py
| 2,259 |
python
|
en
|
code
| 4 |
github-code
|
6
|
36766531397
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
import time
data = pd.read_csv('C:/Users/SwetaMankala/Desktop/Assignments/EAI6000/ma_statewide_2020_04_01.csv',low_memory= False)
data.head(10)
# Checking the shape of the data set
data.shape
data['location'].unique()
# Using the command below we can see exactly what our columns look like
data.columns
data['county_name'].unique()
data['subject_race'].unique()
data.info()
n=data['raw_row_number'].count()
x=data['subject_race'].value_counts()
print('Numerical columns:',data.select_dtypes(include=np.number).columns)
print('Categorical columns:',data.select_dtypes(include='object').columns)
df = pd.DataFrame(data)
median1 = df['subject_age'].median()
df['subject_age'].fillna(median1, inplace = True)
#Segregate the values based on the categories, remove the nulls and normalize the data column
df['race'] = pd.Series(len(df['subject_race']), index=df.index)
df['race'] = 0
#To assign null values
df.loc[(df['subject_race'] != 'hispanic') |
(df['subject_race'] != 'white') |
(df['subject_race'] != 'black') |
(df['subject_race'] != 'asian/pacific islander') |
(df['subject_race'] != 'other') |
(df['subject_race'].isnull() == True), 'race'] = np.nan
#To assign the categorical values to the dataframe 'race'
df.loc[(df['subject_race'] == 'hispanic') |
(df['subject_race'] == 'white') |
(df['subject_race'] == 'black') |
(df['subject_race'] == 'other') |
(df['subject_race'] == 'asian/pacific islander'), 'race'] = df['subject_race']
race_copy = df['race'].copy(deep = True)
# Fill NaN values.
df['race'].fillna(value = 1, inplace = True)
# Obtain values for every race.Axis=0 for rows
race_copy.dropna(axis = 0, inplace = True)
sorted_race = race_copy.value_counts(normalize = True).sort_index()
# Fill one values for individual person with randomly picked from random choice.
df['race'] = df['race'].apply(lambda x: np.random.choice([x for x in sorted_race.index],
replace = True, p = sorted_race) if (x == 1) else x).astype(str)
#Normalize=True prints the relative frequency of the values
print("\nFilled NaNs normalized:\n", df['race'].value_counts(normalize = True))
df['subject_race'] = df['race']
df['subject_race'].value_counts()
#Segregate the values based on the categories, remove the nulls and normalize the data column
df['sex'] = pd.Series(len(df['subject_sex']), index = df.index)
df['sex'] = 0
# Randomly stick sex to every user with NaN value.
df.loc[(df['subject_sex'] != 'male') |
(df['subject_sex'] != 'female') |
(df['subject_sex'].isnull() == True), 'sex'] = np.nan
df.loc[(df['subject_sex'] == 'male') |
(df['subject_sex'] == 'female'), 'sex'] = df['subject_sex']
# Create a copy to calculate proportions.
sex_copy = df['sex'].copy(deep = True)
# Fill NaN values.
df['sex'].fillna(value = 1, inplace = True)
# Obtain values for every sex.
sex_copy.dropna(axis = 0, inplace = True)
sorted_sex = sex_copy.value_counts(normalize = True).sort_index()
# Fill one values in suspector_sex_rand with randomly picked from random choice.
df['sex'] = df['sex'].apply(lambda x: np.random.choice([x for x in sorted_sex.index],
replace = True, p = sorted_sex) if (x == 1) else x).astype(str)
print("Gender proportions after filled NaNs: \n", df['sex'].value_counts(normalize = True))
df['subject_sex'] = df['sex']
df['subject_sex'].value_counts()
#Segregate the values based on the categories, remove the nulls and normalize the data column
df['outcome_v'] = pd.Series(len(df['outcome']), index = df.index)
df['outcome_v'] = 0
# Randomly stick sex to every user with NaN value.
df.loc[(df['outcome'] != 'citation') |
(df['outcome'] != 'warning') |
(df['outcome'] != 'arrest') |
(df['outcome'].isnull() == True), 'outcome_v'] = np.nan
df.loc[(df['outcome'] == 'citation') |
       (df['outcome'] == 'warning') |
       (df['outcome'] == 'arrest'), 'outcome_v'] = df['outcome']
# Create a copy to calculate proportions.
outcome_copy = df['outcome_v'].copy(deep = True)
# Fill NaN values.
df['outcome_v'].fillna(value = 1, inplace = True)
outcome_copy.dropna(axis = 0, inplace = True)
sorted_outcome = outcome_copy.value_counts(normalize = True).sort_index()
# Fill one values in suspector_sex_rand with randomly picked from random choice.
df['outcome_v'] = df['outcome_v'].apply(lambda x: np.random.choice([x for x in sorted_outcome.index],
replace = True, p = sorted_outcome) if (x == 1) else x).astype(str)
print("Outcome proportions after filled NaNs: \n", df['outcome_v'].value_counts(normalize = True))
df['outcome'] = df['outcome_v']
df['outcome'].value_counts()
#Segregate the values based on the categories, remove the nulls and normalize the data column
df['vehicle'] = pd.Series(len(df['vehicle_type']), index = df.index)
df['vehicle'] = 0
df.loc[(df['vehicle_type'] != 'Commerical') |
(df['vehicle_type'] != 'Passenger') |
(df['vehicle_type'] != 'Motorcycle') |
(df['vehicle_type'] != 'Taxi/Livery') |
(df['vehicle_type'] != 'Trailer') |
(df['vehicle_type'].isnull() == True), 'vehicle'] = np.nan
df.loc[(df['vehicle_type'] == 'Commerical') |
       (df['vehicle_type'] == 'Passenger') |
       (df['vehicle_type'] == 'Motorcycle') |
       (df['vehicle_type'] == 'Taxi/Livery') |
       (df['vehicle_type'] == 'Trailer'), 'vehicle'] = df['vehicle_type']
# Create a copy to calculate proportions.
outcome_copy = df['vehicle'].copy(deep = True)
# Fill NaN values.
df['vehicle'].fillna(value = 1, inplace = True)
outcome_copy.dropna(axis = 0, inplace = True)
sorted_outcome = outcome_copy.value_counts(normalize = True).sort_index()
# Fill one values in suspector_sex_rand with randomly picked from random choice.
df['vehicle'] = df['vehicle'].apply(lambda x: np.random.choice([x for x in sorted_outcome.index],
replace = True, p = sorted_outcome) if (x == 1) else x).astype(str)
print("Vehicle Type proportions after filled NaNs: \n", df['vehicle'].value_counts(normalize = True))
df['vehicle_type'] = df['vehicle']
df['vehicle_type'].value_counts()
print(df.isnull().sum())
#Convert the object type variables to string
df['subject_sex'] = df['subject_sex'].astype(str)
df['subject_race'] = df['subject_race'].astype(str)
df['type'] = df['type'].astype(str)
df['arrest_made'] = df['arrest_made'].astype(str)
df['citation_issued'] = df['citation_issued'].astype(str)
df['outcome'] = df['outcome'].astype(str)
df['contraband_found'] = df['contraband_found'].astype(str)
df['contraband_drugs'] = df['contraband_drugs'].astype(str)
df['warning_issued'] = df['warning_issued'].astype(str)
df['contraband_weapons'] = df['contraband_weapons'].astype(str)
df['contraband_alcohol'] = df['contraband_alcohol'].astype(str)
df['contraband_other'] = df['contraband_other'].astype(str)
df['frisk_performed'] = df['frisk_performed'].astype(str)
df['search_conducted'] = df['search_conducted'].astype(str)
df['search_basis'] = df['search_basis'].astype(str)
df['reason_for_stop'] = df['reason_for_stop'].astype(str)
df['vehicle_type'] = df['vehicle_type'].astype(str)
df['vehicle_registration_state'] = df['vehicle_registration_state'].astype(str)
df['raw_Race'] = df['raw_Race'].astype(str)
data[data.subject_sex == "male"].location.value_counts()
data[data.subject_sex == "female"].location.value_counts()
# If we want to see number of violations per gender with respect to their race
race = data.groupby(["subject_sex"]).subject_race.value_counts(normalize= True).unstack()
race
plt.figure(figsize=(12, 8))
race.black.plot(kind="bar")
plt.figure(figsize=(12, 8))
race.white.plot(kind="bar")
plt.figure(figsize=(12, 8))
race.hispanic.plot(kind="bar")
# We want to check which year had the least number of stops
data.date
data['date'] = pd.to_datetime(data.date, format="%Y-%m-%d")
data["year"] = data.date.dt.year
import math
import seaborn as sns
sns.set_style('whitegrid')
# Rounding the integer to the next hundredth value plus an offset of 100
def round(x):
return 100 + int(math.ceil(x / 100.0)) * 100
sns.catplot("subject_sex", col = "reason_for_stop", col_wrap = 3,data = data[data.reason_for_stop.notnull()],kind = "count")
# Get current axis on current figure
axis = plt.gca()
# ylim max value to be set
max_y = data['subject_sex'].value_counts().max()
axis.set_ylim([0, round(max_y)])
# Iterate through the list of axes' patches
for p in axis.patches:
axis.text(p.get_x() + p.get_width()/2., p.get_height(), '%d' % int(p.get_height()),
fontsize=12, color='red', ha='center', va='bottom')
plt.show()
from sklearn.preprocessing import LabelEncoder
label_encoder = LabelEncoder()
df['subject_race'] = label_encoder.fit_transform(df['subject_race'])
df['arrest_made'] = label_encoder.fit_transform(df['arrest_made'])
df['citation_issued'] = label_encoder.fit_transform(df['citation_issued'])
df['outcome'] = label_encoder.fit_transform(df['outcome'])
df['contraband_found'] = label_encoder.fit_transform(df['contraband_found'])
df['contraband_drugs'] = label_encoder.fit_transform(df['contraband_drugs'])
df['contraband_weapons'] = label_encoder.fit_transform(df['contraband_weapons'])
df['contraband_alcohol'] = label_encoder.fit_transform(df['contraband_alcohol'])
df['contraband_other'] = label_encoder.fit_transform(df['contraband_other'])
corr = df.corr()
corr.style.background_gradient(cmap='coolwarm')
|
anirudh0809/fundamentals_of_ai
|
linear_models/eda.py
|
eda.py
|
py
| 9,689 |
python
|
en
|
code
| 1 |
github-code
|
6
|
38390581306
|
import logging
from queue import Queue
from logging.handlers import QueueListener, QueueHandler, RotatingFileHandler
from contextlib import contextmanager
from django.conf import settings
@contextmanager
def prepare_background_logging(log_path):
logger = logging.getLogger()
logger.handlers = []
log_queue = Queue(-1)
queue_handler = QueueHandler(log_queue)
logger.addHandler(queue_handler)
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S %Z')
console_handler = logging.StreamHandler()
console_handler.setFormatter(formatter)
console_handler.setLevel(logging.DEBUG)
file_handler = RotatingFileHandler(log_path, maxBytes=1024*1024*1024, backupCount=12)
file_handler.setFormatter(formatter)
file_handler.setLevel(logging.INFO)
listener = QueueListener(log_queue, console_handler, file_handler)
listener.start()
try:
yield
except Exception as e:
logger.error(str(e))
raise e
finally:
listener.stop()
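
# Illustrative usage (not part of the original module): prepare_background_logging() is a
# context manager, so callers wrap their work in a `with` block; records are queued on the
# calling thread and written out by the listener's background thread. The temp-file path
# below is just a placeholder.
if __name__ == '__main__':
    import os
    import tempfile
    with tempfile.TemporaryDirectory() as tmp:
        with prepare_background_logging(os.path.join(tmp, 'demo.log')):
            logging.getLogger(__name__).info('logged via the background listener')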
|
Shvidkiy-Dima/checker
|
background_service/utils.py
|
utils.py
|
py
| 1,090 |
python
|
en
|
code
| 1 |
github-code
|
6
|
74387135229
|
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
#using unittest to test the file instead of nosetests
config = {
'description': 'ex47 using unittest and using a package',
'author': 'Cynthia E Ma',
'url': 'URL to get it at.',
'download_url': 'Where to download it',
'author_email': '[email protected]',
'version': '0.1',
'install_requires': [''],
'packages': ['ex47_unittest'], #change
'scripts': [], #change
'name': 'ex47_unittest' #change
}
setup(**config)
|
xnanodax/lpthw
|
ex47_unittest/setup.py
|
setup.py
|
py
| 523 |
python
|
en
|
code
| 0 |
github-code
|
6
|
75066299708
|
import argparse
def parse_args():
# description
parser = argparse.ArgumentParser(
description='Compares two configuration files and shows a difference.')
# positional arguments:
parser.add_argument('first_file')
parser.add_argument('second_file')
# optional arguments:
parser.add_argument('-f', '--format',
default="stylish",
choices=['stylish', 'plain', 'json'],
help='set format of output')
# assign an argument
args = parser.parse_args()
return args.first_file, args.second_file, args.format
|
slovohot/python-project-50
|
gendiff/logic/argparser.py
|
argparser.py
|
py
| 620 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20044482765
|
# https://www.codechef.com/problems/LAPTOPREC
def solve(l):
d = {}
for i in l:
if i not in d:
d[i] = 1
else:
d[i]+=1
mx = 0
key = 0
for i in d:
if d[i]>mx:
mx = d[i]
key = i
ct = 0
for i in d:
if d[i] == mx:
ct+=1
if ct == 1:
return key
return 'CONFUSED'
for _ in range(int(input())):
n = int(input())
l = list(map(int,input().split()))
print(solve(l))
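
# Illustrative alternative (not part of the original solution): the same "unique mode"
# logic expressed with collections.Counter. It is only defined here for comparison and is
# not called by the stdin-driven loop above.
def solve_with_counter(l):
    from collections import Counter
    counts = Counter(l)
    best = max(counts.values())
    winners = [k for k, v in counts.items() if v == best]
    return winners[0] if len(winners) == 1 else 'CONFUSED'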
|
Elevenv/Placement-Stuff
|
LaptopRecomm.py
|
LaptopRecomm.py
|
py
| 530 |
python
|
en
|
code
| 1 |
github-code
|
6
|
20605708222
|
import Tkinter
root=Tkinter.Tk()
root.title("GUI Application")
album={}
album["Artist 1"]="Song1"
album["Artist 2"]="Song2"
album["Artist 3"]="Song3"
#Function
def show_all():
#Clear list box
lb_music.delete(0,"end")
#iterate through keys
for artist in album:
lb_music.insert("end",artist)
def show_one():
artist=lb_music.get("active")
albums=album[artist]
msg= artist +" : "+ albums
lbl_output["text"]= msg
def add_one():
info=txt_input.get()
split_info=info.split(",")
artist=split_info[0]
albums=split_info[1]
album[artist]=albums
show_all()
txt_input.delete(0,"end")
#GUI
lbl_output=Tkinter.Label(root,text="Ready")
lbl_output.pack()
txt_input=Tkinter.Entry(root)
txt_input.pack()
lb_music=Tkinter.Listbox(root)
lb_music.pack()
btn_show_all=Tkinter.Button(root,text="Show All",command=show_all)
btn_show_all.pack()
btn_show_one=Tkinter.Button(root,text="Show one",command=show_one)
btn_show_one.pack()
btn_add_one=Tkinter.Button(root,text="Add One" ,command=add_one)
btn_add_one.pack()
root.mainloop()
|
rishabh-1004/Python-codes
|
guiprog.py
|
guiprog.py
|
py
| 1,037 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31110943594
|
#https://leetcode.com/problems/diameter-of-binary-tree/
def diameter(root):
    if not root:
        return 0
    best = [0]  # best node count seen so far (a list so the nested function can update it)
    def depth(node):
        if not node:
            return 0
        L = depth(node.left)
        R = depth(node.right)
        # adds both arms of a node to give the diameter
        best[0] = max(L + R + 1, best[0])
        # passes on the length of the longer arm to its parent
        return max(L, R) + 1
    depth(root)
    # diameter is measured in edges not nodes
    return best[0] - 1
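
# Quick illustrative check (assumes a minimal TreeNode class, which is not part of the
# original snippet): the tree 1-(2,3) with children 4 and 5 under node 2 has diameter 3,
# i.e. the path 4 -> 2 -> 1 -> 3 counted in edges.
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right

if __name__ == '__main__':
    root = TreeNode(1, TreeNode(2, TreeNode(4), TreeNode(5)), TreeNode(3))
    print(diameter(root))  # expected: 3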
|
sparsh-m/30days
|
d18_3.py
|
d18_3.py
|
py
| 521 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34134686621
|
from objects import Girl, Boy, Location
# coordinates 1, 2, 3: the original comment jokes that Iranians are full of sorrow, so 'one two three, forget the sorrow'
iran = Location(1, 2, 3)
# coordinates 6, 6, 6: the original comment makes a '666' joke about Turkey
turkey = Location(6, 6, 6)
# the tweet's author
tweet_owner = Girl(
dna={
'fibula_bone': 'AAGGCCT',
'tibia_bone': 'AAGGCCT',
'femur_bone': 'AAGGCCT',
'vagina': 'AAGGCCT',
'leg_muscle': 'GGGTT',
'cords_mucosal': 'GTAC',
'epiglottis': 'CCGGTA'
},
location=iran
)
# the tweet author's boyfriend
tweet_owners_bf = Boy(
dna={
'fibula_bone': 'AAGGCCT',
'tibia_bone': 'CCGGTA',
'femur_bone': 'GTAC',
'penis': 'GTAC',
'leg_muscle': 'GGGTT',
'cords_mucosal': 'GTAC',
'epiglottis': 'CCGGTA'
},
location=iran
)
# the brother of the tweet author's boyfriend
tweet_owners_bfs_brother = Boy(
dna={
'fibula_bone': 'TTAAC',
'tibia_bone': 'GGGTT',
'femur_bone': 'CCGGTA',
'penis': 'AAGGCCT',
'leg_muscle': 'GGGTT',
'cords_mucosal': 'GTAC',
'epiglottis': 'CCGGTA'
},
location=iran
)
# the 8-year-old girlfriend of the boyfriend's brother
tweet_owners_bfs_brother.partner.append(
Girl(dna={
'fibula_bone': 'TTAAC',
'tibia_bone': 'GGGTT',
'femur_bone': 'CCGGTA',
'vagina': 'AAGGCCT',
'leg_muscle': 'GGGTT',
'cords_mucosal': 'GTAC',
'epiglottis': 'CCGGTA'
}, location=iran)
)
# another girl (her ex's colleague)
tweet_owners_exs_colleague = Girl(
dna={
'fibula_bone': 'TTAAC',
'tibia_bone': 'GGGTT',
'femur_bone': 'CCGGTA',
'vagina': 'AAGGCCT',
'leg_muscle': 'GGGTT',
'cords_mucosal': 'GTAC',
'epiglottis': 'CCGGTA'
},
location=iran
)
# accusing her of cheating
tweet_owners_exs_colleague.talk('this girl is cheating', 'high')
# the ex's colleague tempts the boyfriend's brother, then sex
tweet_owners_exs_colleague.tempting_other_boys([tweet_owners_bfs_brother])
tweet_owners_exs_colleague.partner.append(tweet_owners_exs_colleague)
tweet_owners_exs_colleague.sex()
# trip to Turkey
tweet_owners_bf.location = turkey
tweet_owners_exs_colleague.location = turkey
tweet_owners_bfs_brother.location = turkey
# prints the ex's colleague's 'slut' attribute
print(tweet_owners_exs_colleague.slut)
|
mmdthurr/saghie_tataloo_project
|
main.py
|
main.py
|
py
| 2,459 |
python
|
fa
|
code
| 4 |
github-code
|
6
|
9224597934
|
from prediction.M2I.predictor import M2IPredictor
import numpy as np
import math
import logging
import copy
import random
import time
from plan.env_planner import EnvPlanner, Agent, SudoInterpolator
import interactive_sim.envs.util as utils
import plan.helper as plan_helper
S0 = 3
T = 0.25 #1.5 # reaction time when following
DELTA = 4 # the power term in IDM
PLANNING_HORIZON = 5 # in frames
PREDICTION_HTZ = 10 # prediction_htz
T_HEADWAY = 0.2
A_SPEEDUP_DESIRE = 0.3 # A
A_SLOWDOWN_DESIRE = 1.5 # B
XPT_SHRESHOLD = 0.7
MINIMAL_DISTANCE_PER_STEP = 0.05
MINIMAL_DISTANCE_TO_TRAVEL = 4
# MINIMAL_DISTANCE_TO_RESCALE = -999 #0.1
REACTION_AFTER = 200 # in frames
MINIMAL_SCALE = 0.3
MAX_DEVIATION_FOR_PREDICTION = 4
TRAFFIC_LIGHT_COLLISION_SIZE = 2
MINIMAL_SPEED_TO_TRACK_ORG_GOAL = 5
MINIMAL_DISTANCE_TO_GOAL = 15
PRINT_TIMER = False
class EgoLTPlanner(EnvPlanner):
def reset(self, *args, **kwargs):
print('ego planner reset')
def step(self, data_dic):
agents = data_dic['agent']
ego = agents['ego']
ego_pose = agents['ego']['pose']
ego_pose.append(ego_pose[-1])
data_dic['agent']['ego']['pose'] = ego_pose
return data_dic
|
Tsinghua-MARS-Lab/InterSim
|
simulator/plan/ltp/ego_ltp.py
|
ego_ltp.py
|
py
| 1,199 |
python
|
en
|
code
| 119 |
github-code
|
6
|
4330202050
|
import types
import numpy as np
from acados_template import MX, Function, cos, fmax, interpolant, mod, sin, vertcat
from casadi import *
def bicycle_model(s0: list, kapparef: list, d_left: list, d_right: list, cfg_dict: dict):
# define structs
constraint = types.SimpleNamespace()
model = types.SimpleNamespace()
model_name = "Spatialbicycle_model"
length = len(s0)
pathlength = s0[-1]
# copy loop to beginning and end
s0 = np.append(s0, [s0[length - 1] + s0[1:length]])
s0 = np.append([s0[: length - 1] - s0[length - 1]], s0)
kapparef = np.append(kapparef, kapparef[1:length])
kapparef = np.append([kapparef[: length - 1] - kapparef[length - 1]], kapparef)
d_left = np.append(d_left, d_left[1:length])
d_left = np.append([d_left[: length - 1] - d_left[length - 1]], d_left)
d_right = np.append(d_right, d_right[1:length])
d_right = np.append([d_right[: length - 1] - d_right[length - 1]], d_right)
N = cfg_dict["N"]
# compute spline interpolations
kapparef_s = interpolant("kapparef_s", "bspline", [s0], kapparef)
outer_bound_s = interpolant("outer_bound_s", "bspline", [s0], d_left)
inner_bound_s = interpolant("inner_bound_s", "bspline", [s0], d_right)
## CasADi Model
# set up states & controls
s = MX.sym("s")
n = MX.sym("n")
alpha = MX.sym("alpha")
vx = MX.sym("vx")
vy = MX.sym("vy")
omega = MX.sym("omega")
D = MX.sym("D")
delta = MX.sym("delta")
theta = MX.sym("theta")
x = vertcat(s, n, alpha, vx, vy, omega, D, delta, theta)
# controls
derD = MX.sym("derD")
derDelta = MX.sym("derDelta")
derTheta = MX.sym("derTheta")
u = vertcat(derD, derDelta, derTheta)
next_D = D + derD / N
next_delta = delta + derDelta / N
# xdot
sdot = MX.sym("sdot")
ndot = MX.sym("ndot")
alphadot = MX.sym("alphadot")
vxdot = MX.sym("vxdot")
vydot = MX.sym("vydot")
omegadot = MX.sym("omegadot")
Ddot = MX.sym("Ddot")
deltadot = MX.sym("deltadot")
thetadot = MX.sym("thetadot")
xdot = vertcat(sdot, ndot, alphadot, vxdot, vydot, omegadot, Ddot, deltadot, thetadot)
m = MX.sym("m")
C1 = MX.sym("C1")
C2 = MX.sym("C2")
CSf = MX.sym("CSf")
CSr = MX.sym("CSr")
Cr0 = MX.sym("Cr0")
Cr2 = MX.sym("Cr2")
Cr3 = MX.sym("Cr3")
Iz = MX.sym("Iz")
lr = MX.sym("lr")
lf = MX.sym("lf")
Df = MX.sym("Df")
Cf = MX.sym("Cf")
Bf = MX.sym("Bf")
Dr = MX.sym("Dr")
Cr = MX.sym("Cr")
Br = MX.sym("Br")
Imax_c = MX.sym("Imax_c")
Caccel = MX.sym("Caccel")
Cdecel = MX.sym("Deccel")
qc = MX.sym("qc")
ql = MX.sym("ql")
gamma = MX.sym("gamma")
r1 = MX.sym("r1")
r2 = MX.sym("r2")
r3 = MX.sym("r3")
# algebraic variables
z = vertcat([])
# parameters
p = vertcat(
m,
C1,
C2,
CSf,
CSr,
Cr0,
Cr2,
Cr3,
Iz,
lr,
lf,
Bf,
Cf,
Df,
Br,
Cr,
Dr,
Imax_c,
Caccel,
Cdecel,
qc,
ql,
gamma,
r1,
r2,
r3,
)
s_mod = mod(s, pathlength)
# constraint on forces
a_lat = next_D * sin(C1 * next_delta)
a_long = next_D
n_outer_bound = outer_bound_s(s_mod) + n
n_inner_bound = inner_bound_s(s_mod) - n
# Model bounds
model.n_min = -1e3
model.n_max = 1e3
constraint.n_min = cfg_dict["track_savety_margin"] # width of the track [m]
constraint.n_max = 1e3 # width of the track [m]
# state bounds
model.throttle_min = -5.0
model.throttle_max = 5.0
model.delta_min = -0.40 # minimum steering angle [rad]
model.delta_max = 0.40 # maximum steering angle [rad]
# input bounds
    model.ddelta_min = -1.0  # minimum change rate of steering angle [rad/s]
model.ddelta_max = 1.0 # maximum change rate of steering angle [rad/s]
model.dthrottle_min = -10 # -10.0 # minimum throttle change rate
model.dthrottle_max = 10 # 10.0 # maximum throttle change rate
model.dtheta_min = -3.2
model.dtheta_max = 5
# nonlinear constraint
constraint.alat_min = -100 # maximum lateral force [m/s^2]
constraint.alat_max = 100 # maximum lateral force [m/s^1]
constraint.along_min = -4 # maximum lateral force [m/s^2]
constraint.along_max = 4 # maximum lateral force [m/s^2]
constraint.vx_min = 0
constraint.vx_max = 30
constraint.vy_min = -1
constraint.vy_max = 1
accel = Function("accel", [vx, D], [(Imax_c - Cr0 * vx) * D / (model.throttle_max * Caccel)])
decel = Function(
"decel", [vx, D], [(-Imax_c - Cr0 * vx) * fabs(D) / (model.throttle_max * Cdecel)]
)
# dynamics
sdota = (vx * cos(alpha) - vy * sin(alpha)) / (1 - kapparef_s(s) * n)
Fx = MX.sym("Fx")
Fx = if_else(D[0] >= 0, m * accel(vx, D), m * decel(vx, D))[0]
# Fx = m * next_D
vx = fmax(vx[0], 0.1)
# Carron
if cfg_dict["slip_angle_approximation"]:
beta = atan2(vy, vx)
ar = -beta + lr * omega / vx
af = delta - beta - lf * omega / vx
else:
af = -atan2(vy + lf * omega, vx) + next_delta
ar = -atan2(vy - lr * omega, vx)
Fr = CSr * ar
Ff = CSf * af
if cfg_dict["use_pacejka_tiremodel"]:
Fr = Dr * sin(Cr * atan(Br * ar))
Ff = Df * sin(Cf * atan(Bf * af))
f_expl = vertcat(
sdota,
vx * sin(alpha) + vy * cos(alpha),
omega,
1 / m * (Fx - Ff * sin(next_delta) + m * vy * omega),
1 / m * (Fr + Ff * cos(next_delta) - m * vx * omega),
1 / Iz * (Ff * lf * cos(next_delta) - Fr * lr),
derD,
derDelta,
derTheta,
)
# constraint on forces
a_lat = next_D * sin(C1 * next_delta)
a_long = next_D
n_outer_bound = outer_bound_s(s_mod) + n
n_inner_bound = inner_bound_s(s_mod) - n
# Define initial conditions
model.x0 = np.array([-2, 0, 0, 0, 0, 0, 0, 0, 0])
model.cost_expr_ext_cost = (
ql * (s - theta) ** 2
+ qc * n**2
- gamma * derTheta
+ r1 * derD**2
+ r2 * derDelta**2
+ r3 * derTheta**2
)
model.cost_expr_ext_cost_e = 0
# model.cost_expr_ext_cost_e = (
# ql * (s - theta) ** 2
# + qc * n**2
# )
# define constraints struct
constraint.alat = Function("a_lat", [x, u], [a_lat])
constraint.pathlength = pathlength
constraint.expr = vertcat(a_long, a_lat, n_inner_bound, n_outer_bound)
# f_expl_func = Function(
# "f_expl_func", [s, n, alpha, vx, vy, D, omega, delta, theta, derD, derDelta, derTheta, Fx, p], [f_expl]
# )
# Define model struct
params = types.SimpleNamespace()
params.C1 = C1
params.C2 = C2
params.CSf = CSf
params.CSr = CSr
params.Cr0 = Cr0
params.Cr2 = Cr2
model.f_impl_expr = xdot - f_expl
model.f_expl_expr = f_expl
model.x = x
model.xdot = xdot
model.u = u
model.z = z
model.p = p
model.name = model_name
model.params = params
model.kappa = kapparef_s
# model.f_expl_func = f_expl_func
model.outer_bound_s = outer_bound_s
model.inner_bound_s = inner_bound_s
return model, constraint, params
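
# Illustrative sketch (not part of the original module): a minimal, made-up track and
# config for calling bicycle_model(). The numbers are placeholders chosen only so the
# spline interpolants can be built; real track data and config values would come from the
# rest of the project, and this needs casadi/acados_template installed to run.
if __name__ == "__main__":
    _s0 = np.linspace(0.0, 100.0, 200)                         # arc-length samples [m]
    _kappa = 0.05 * np.sin(np.linspace(0.0, 2.0 * np.pi, 200))  # curvature [1/m]
    _d_left = np.full(200, 1.5)                                 # distance to left border [m]
    _d_right = np.full(200, 1.5)                                # distance to right border [m]
    _cfg = {
        "N": 20,
        "track_savety_margin": 0.3,
        "slip_angle_approximation": False,
        "use_pacejka_tiremodel": True,
    }
    _model, _constraint, _params = bicycle_model(_s0, _kappa, _d_left, _d_right, _cfg)
    print(_model.name)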
|
wueestry/f110_mpcc
|
src/mpc/bicycle_model.py
|
bicycle_model.py
|
py
| 7,313 |
python
|
en
|
code
| 2 |
github-code
|
6
|
72966516669
|
# encoding=utf8
from Source import *
import logging,re,mylex
logging.basicConfig(format=' %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%a, %d %b %H:%M:%S',level=logging.DEBUG)
index=0
# token_stream="""INT IDENTIFIER '(' ')' '{' IDENTIFIER ';' IDENTIFIER '=' CONSTANT ';' WHILE '(' SIZEOF '(' INT ')' ')' '{' IDENTIFIER '=' CONSTANT ';' '}' RETURN CONSTANT ';' '}''""".split(" ")
token_stream="""INT IDENTIFIER '(' ')' '{' IDENTIFIER ';' IDENTIFIER ';' IDENTIFIER ';' IDENTIFIER ';' IDENTIFIER '=' CONSTANT ';' '}' """.split(" ")
# INT IDENTIFIER '(' ')' '{' IDENTIFIER ';' IDENTIFIER ';' IDENTIFIER ';' '}'
error = []
def main():
    # read in the token stream
s=mylex.get_s()
print(s)
print(token_stream)
    # call reader()
print(reader('translation_unit',CONTROLLER))
# t=get_terminals()
# print(t)
# tt=get_terminals()
# print(tt)
# derive_controller(1)
# print(index)
def reader(key, num_to_choose):
"""key:需要调用的产生式的名称
num_to_choose:需要选择的产生式序号,为Source.CONTROLLER的时候作为分发器。否则调用Source.c_dict['key'][num_to_choose]产生式
index:token_stream的下标,指示next将要读入的字符
token_stream:语法分析器输入的token序列"""
if (num_to_choose == CONTROLLER):
return derive_controller(key)
else:
return derive(key, num_to_choose)
def derive_controller(key):
global index
# logging.info("derive_controller called with key:------"+key+"--------at index:"+str(index)+" token:"+str(token_stream[index]))
if (c_dict.get(key) is None):
logging.error("error when parsing!No such key in dictionary.产生式出现了不可解决的异常")
error_process(key,"产生式出现了不可解决的异常")
return False
else:
derived_result = c_dict[key]
# logging.info("derive_controller::::::"+key+"->"+str(derived_result))
index_save=index
for i in range(0,len(derived_result)):
index=index_save
result=derive(key,i)
if(result ==True):
if derived_result[i]!="":
logging.info("匹配成功\t"+"<"+key+"> -> "+derived_result[i])
return result
else:
continue
# logging.error("没有在便利所有产生式后找到合适的产生式:key:"+key+"\t derive_result:"+str(derived_result))
return False
def derive(key, num_to_choose):
global index
derive_list=c_dict.get(key)
if(num_to_choose>len(derive_list)-1):
logging.error("fatal error!产生式种类不全!")
error_process(key,"fatal error!产生式种类不全!")
derive_sentence=derive_list[num_to_choose]
# logging.info("derive called with options: deriving :--------"+derive_sentence+"------------")
    # case: the production expands to a single non-terminal
if derive_sentence in c_dict.keys():
return derive_controller(derive_sentence)
else:
        # case: the production expands to a single terminal
if derive_sentence in get_terminals():
if derive_sentence=="":
                # case: the production derives the empty string
                # logging.info("the chosen production is empty")
return True
if derive_sentence==token_stream[index]:
index+=1
else:
return False
            logging.info(key+" derived a terminal "+derive_sentence)
return True
    # case: the production contains several space-separated symbols; analyse them one by one
derive_sentence_list=re.split(r'\s+',derive_sentence)
for i in range(0,len(derive_sentence_list)):
if derive_sentence_list[i] in c_dict.keys():
result=derive_controller(derive_sentence_list[i])
elif derive_sentence_list[i] in get_terminals():
            # derived a terminal?
# TODO should inc index?
if derive_sentence_list[i]=="":
result=True
else:
if derive_sentence_list[i]==token_stream[index]:
# logging.info("匹配终结符"+token_stream[index])
index+=1
result=True
else:
result=False
else:
result=False
if result==False:
# logging.info("this is not the path.选择了错误的产生式:"+key+"->"+ str(derive_sentence_list))
return False
# logging.info("成功匹配产生式"+str({"key":key,"value":derive_sentence}))
return True
def term(token):
return token_stream[index]==token
def error_process(key, error_info):
    error.append({'key': key, 'error_info': error_info, "position": index})
if __name__ == "__main__":
main()
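
# Minimal self-contained sketch (an assumption, not part of the original parser) of the
# same idea used by derive_controller()/derive(): try each alternative of a production in
# order, restore the token position when one fails (backtracking), and succeed once an
# alternative matches the whole input.
def _toy_backtracking_match(tokens):
    grammar = {'S': [['a', 'b'], ['a', 'c']]}  # S -> a b | a c

    def match(symbol, pos):
        # try each alternative in order; restore pos (backtrack) when one fails
        for alternative in grammar[symbol]:
            cur, ok = pos, True
            for part in alternative:
                if part in grammar:
                    ok, cur = match(part, cur)
                elif cur < len(tokens) and tokens[cur] == part:
                    cur += 1
                else:
                    ok = False
                if not ok:
                    break
            if ok:
                return True, cur
        return False, pos

    ok, end = match('S', 0)
    return ok and end == len(tokens)

# e.g. _toy_backtracking_match(['a', 'c']) -> True, _toy_backtracking_match(['a', 'x']) -> False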
|
zhaoguoquan94/compiler_syntax_analysor
|
systax_analysisor.py
|
systax_analysisor.py
|
py
| 4,919 |
python
|
en
|
code
| 2 |
github-code
|
6
|
40017294295
|
import mysql.connector
import matplotlib.pyplot as plt
import argparse
import os
import random
import pandas as pd
import datetime
import numpy as np
from sklearn.decomposition import PCA
from sklearn.preprocessing import MinMaxScaler
def createDatabase(dataBase="smartBottle"):
"""连接数据库,创建database"""
mydb = mysql.connector.connect(
host='localhost',
user='root',
passwd='123456'
)
mycursor = mydb.cursor()
mycursor.execute(f"CREATE DATABASE {dataBase}")
mydb.commit()
return mydb
def connectDatabase(dataBase="smartBottle"):
"""连接mysql"""
mydb = mysql.connector.connect(
host='localhost',
user='root',
passwd='123456',
database=dataBase)
return mydb
def createTable(mydb, tableName="drinkData"):
"""创建表"""
mycursor = mydb.cursor()
# mycursor.execute(f"drop table {tableName}")
# mydb.commit()
mycursor.execute(f"CREATE TABLE `{tableName}` \
(`id` int(11) NOT NULL AUTO_INCREMENT,\
`data_extract` varchar(1000),\
`label` varchar(1000),\
`saveTime` datetime,\
`user_id` int(11),\
PRIMARY KEY (`id`)\
);")
mydb.commit()
return mydb
def insertData(mydb, tableName = "drinkData"):
"""插入数据"""
mycursor = mydb.cursor()
sql = f"INSERT INTO {tableName} (user_id, data, label, saveTime) VALUES(%s, %s, %s, %s);"
with open('../data/80ml.txt', 'r') as f:
con = f.readlines()
# print(len(con)/3)
for i in range(int(len(con)/3)):
i *= 3
sql = "INSERT INTO drinkData (user_id, data_y, data_z, label) VALUES(%s, %s, %s, %s);"
y = con[i]
z = con[i+1]
label = con[i+2]
val = (1, y, z, label)
mycursor.execute(sql, val)
mydb.commit()
def readFromExcel(mydb, path):
files = os.scandir(path)
mycursor = mydb.cursor()
for f in files:
df = pd.read_excel(f.path)
start, end = int(df.loc[0, 'S']), int(df.loc[0, 'E'])
drink = f.path.split('/')[-1].split('_')[0]
if drink not in ['Stable', 'Walk']:
if start == end:
continue
drink = int(drink[:-2])
else:
drink = 0
y = df.loc[:, 'Y']
z = df.loc[:, 'Z']
y = [str(round(num/1080, 3)) for num in y]
z = [str(round(num/1080, 3)) for num in z]
label = ['0' for i in range(len(y))]
for i in range(start-1, end-1):
label[i] = str(round(drink / (end - start), 3))
data_y = ','.join(y)
data_z = ','.join(z)
label = ','.join(label)
sql = "insert into drinkData (user_id, data_y, data_z, label) VALUES(%s, %s, %s, %s)"
val = (1, data_y, data_z, label)
mycursor.execute(sql, val)
mydb.commit()
return f'success save {len(files)} data'
def readFromExcel2(path):
df = pd.read_excel(path)
y = df.loc[:, 'Y']
z = df.loc[:, 'Z']
y = [str(round(num/1080, 3)) for num in y]
z = [str(round(num/1080, 3)) for num in z]
cob = np.array([y, z])
data = featExtra(cob.T)
print(f'data {data.shape}')
return data.reshape(1, -1)[:, :70], y, z
def dataAug(mydb, length):
"""
params:
        length: data length after augmentation
        num: number of samples generated
"""
pass
def randomDeletePointDA(data, label, length):
"""data, label长度在dataAug中判断, """
dataLen = len(data)
indices = list(range(dataLen))
random.shuffle(indices)
remove_indices = indices[:dataLen-length]
new_data = [data[i] for i in range(dataLen) if i not in remove_indices]
new_label = [label[i] for i in range(dataLen) if i not in remove_indices]
return new_data, new_label
def crossDA():
pass
def featExtra(input):
"""
    Convert the input to a numpy array before calling.
    Returns a flattened numpy array.
"""
    # create the PCA object; this could be hoisted out, but keep it simple for now
pca = PCA(n_components=1)
result = pca.fit_transform(input)
return result.flatten()
# for j in range(10):
# sql = "INSERT INTO drinkData (user_id, data, label, saveTime) VALUES(%s, %s, %s, %s);"
# data =[str(random.randint(0, 100)) for i in range(100)]
# label = [str(random.choice([0, 5])) for i in range(100)]
# data = ','.join(data)
# label = ','.join(label)
# now = datetime.datetime.now()
# now = now.strftime("%Y-%m-%d %H:%M:%S")
# val = (1, data, label, now)
# mycursor.execute(sql, val)
# print(mycursor.rowcount, 'record inserted.')
# mydb.commit()
def getAllData(mydb, tableName="drinkData"):
"""读取数据"""
mycursor = mydb.cursor()
mycursor.execute(f"SELECT * FROM {tableName}")
myresult = mycursor.fetchall()
# for x in myresult:
# print(x)
return myresult
def visual(y, z, label):
y = y.split(',')
z = z.split(',')
label = label.split(',')
y = [float(i) for i in y]
z = [float(i) for i in z]
label = [float(i) for i in label]
time = [i for i in range(len(label))]
plt.figure(1)
plt.subplot(2, 1, 1)
plt.plot(time, y, label='y')
plt.plot(time, z, label='z')
plt.xlabel("time")
plt.ylabel("angle")
plt.legend()
plt.figure(1)
plt.subplot(2, 1, 2)
plt.plot(time, label)
plt.xlabel("time")
plt.ylabel("drink")
print(f'total drink {sum(label)}')
plt.show()
def visual2(data, label):
time = [i for i in range(len(label))]
plt.figure(1)
plt.subplot(2, 1, 1)
plt.plot(time, data, label='data')
plt.xlabel("time")
plt.ylabel("angle")
plt.legend()
plt.figure(1)
plt.subplot(2, 1, 2)
plt.plot(time, label)
plt.xlabel("time")
plt.ylabel("drink")
print(f'total drink {sum(label)}')
plt.show()
def list2str(arr):
new_arr = [str(round(i,3)) for i in arr]
return ','.join(new_arr)
def raw2new():
"""
delete all data in new_database,
raw_data -> dataAug(size=70) -> new_data
"""
mydb = connectDatabase("smartBottle")
result = getAllData(mydb)
    # save data from raw_data to new_data (TODO: later runs should only import the new rows)
scaler = MinMaxScaler(feature_range=(-1, 1))
mycursor = mydb.cursor()
# delete old database
# sql = "delete from drinkDataEnhan"
# mycursor.execute(sql)
# mydb.commit()
#
for line in result:
y = [float(num) for num in line[2].split(',')]
z = [float(num) for num in line[3].split(',')]
label = [float(num) for num in line[4].split(',')]
data = np.array([y, z])
data = featExtra(data.T)
#NOTE - change data to y
y = np.array(y)
data = scaler.fit_transform(y.reshape(-1, 1))
data = data.flatten().tolist()
for i in range(30):
if len(data) <= 70:
break
if len(data) != len(label):
break
new_data, new_label = randomDeletePointDA(data, label, 70)
sql = "INSERT INTO drinkDataEnhan (user_id, data_extract, label) VALUES(%s, %s, %s);"
val = (1, list2str(new_data), list2str(new_label))
mycursor.execute(sql, val)
mydb.commit()
def checkData():
mydb = connectDatabase("smartBottle")
result = getAllData(mydb, "drinkDataEnhan")
cnt = dict()
for line in result:
label = [float(num) for num in line[2].split(',')]
drink = sum(label)
k = str(int(drink/10))
if k in cnt.keys():
value = cnt[k]
cnt.update({k: value+1})
else:
cnt[k] = 1
print(f'drink label {sum(label)}')
print(f'data nums {len(result)}')
print(f'dict {cnt}')
if __name__ == "__main__":
    # CLI arguments
# parser = argparse.ArgumentParser()
# parser.add_argument('--dataBaseName', type=str, default="smartBottle",
# help='name of the database')
# config = parser.parse_args()
"""
raw_data -> dataAug -> new_data
raw_data: id, data_x, data_y, data_z, label, save_time, user_id
new_data: id, data, label, save_time, user_id
"""
raw2new()
checkData()
# createTable(mydb, "drinkDataEnhan")
# result =
# result = getAllData(mydb)
# visual(result[0][2], result[0][3], result[0][4])
|
YuTheon/NUS_AIOT_web2
|
setMysqlData.py
|
setMysqlData.py
|
py
| 7,386 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27264929540
|
#!/usr/bin/env python
# PYTHON_ARGCOMPLETE_OK
import argparse
import configparser
import json
import logging
import textwrap
import argcomplete
from las import Client, Credentials
from las.credentials import MissingCredentials, read_from_file
from .__version__ import __version__
from .util import NotProvided
from .parser import (
create_app_clients_parser,
create_assets_parser,
create_datasets_parser,
create_deployment_environments_parser,
create_documents_parser,
create_logs_parser,
create_models_parser,
create_organizations_parser,
create_payment_methods_parser,
create_plans_parser,
create_predictions_parser,
create_roles_parser,
create_secrets_parser,
create_transitions_parser,
create_users_parser,
create_workflows_parser,
)
def create_parser():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description=textwrap.dedent('''
Command Line Interface for Cradl API, see --help for more info or visit https//docs.cradl.ai. To use tab
completion make sure you have global completion activated. See argcomplete docs for more information:
https://kislyuk.github.io/argcomplete/
'''),
)
parser.add_argument('--profile', '-p')
parser.add_argument('--verbose', '-v', action='count', default=0)
parser.add_argument('--version', action='version', version=f'%(prog)s {__version__}')
subparsers = parser.add_subparsers()
create_app_clients_parser(subparsers)
create_assets_parser(subparsers)
create_datasets_parser(subparsers)
create_deployment_environments_parser(subparsers)
create_documents_parser(subparsers)
create_logs_parser(subparsers)
create_models_parser(subparsers)
create_organizations_parser(subparsers)
create_payment_methods_parser(subparsers)
create_plans_parser(subparsers)
create_predictions_parser(subparsers)
create_roles_parser(subparsers)
create_secrets_parser(subparsers)
create_transitions_parser(subparsers)
create_users_parser(subparsers)
create_workflows_parser(subparsers)
argcomplete.autocomplete(parser)
return parser
def set_verbosity(verbose):
verbosity_levels = [logging.WARNING, logging.INFO, logging.DEBUG]
verbosity = verbosity_levels[min(verbose, len(verbosity_levels) - 1)]
logging.getLogger().setLevel(verbosity)
logging.getLogger('las').setLevel(verbosity)
def main():
parser = create_parser()
args = vars(parser.parse_args())
set_verbosity(args.pop('verbose'))
profile = args.pop('profile', None)
try:
cmd = args.pop('cmd')
except:
parser.print_help()
exit(1)
try:
if profile:
credentials = Credentials(*read_from_file(section=profile))
args['las_client'] = Client(credentials)
else:
args['las_client'] = Client()
except (configparser.NoOptionError, configparser.NoSectionError, MissingCredentials) as e:
logging.exception(e)
print('Could not locate credentials.')
return
kwargs = {k: v for k, v in args.items() if v != NotProvided}
if kwargs:
result = cmd(**kwargs)
result = json.dumps(result, indent=2) if isinstance(result, dict) else result
print(result)
else:
parser.print_help()
exit(1)
if __name__ == '__main__':
main()
|
LucidtechAI/las-cli
|
lascli/__main__.py
|
__main__.py
|
py
| 3,452 |
python
|
en
|
code
| 1 |
github-code
|
6
|
73652417469
|
# You are given two string arrays creators and ids, and an integer array views, all of length n. The i-th video on the platform was uploaded by creators[i], has id ids[i], and received views[i] plays.
# A creator's popularity is the total play count over all of that creator's videos. Find the creator(s) with the highest popularity and the id of that creator's most-played video.
# If several creators tie for the highest popularity, return all of them.
# If a creator has several most-played videos, return only the lexicographically smallest id.
# Return a 2D string array answer where answer[i] = [creatori, idi] means creatori has the highest popularity and idi is their most popular video; the result may be returned in any order.
import heapq
class Solution(object):
def mostPopularCreator(self, creators, ids, views):
"""
:type creators: List[str]
:type ids: List[str]
:type views: List[int]
:rtype: List[List[str]]
"""
n = len(creators)
c = {}
d = {}
for i in range(n):
c[creators[i]] = c.get(creators[i], 0) + views[i]
if creators[i] in d:
if d[creators[i]][0] < views[i]:
d[creators[i]][0] = views[i]
d[creators[i]][1] = ids[i]
elif d[creators[i]][0] == views[i]:
d[creators[i]][1] = min(d[creators[i]][1], ids[i])
else:
d[creators[i]] = []
d[creators[i]].append(views[i])
d[creators[i]].append(ids[i])
ansName = []
maxValue = 0
for key, value in c.items():
if value > maxValue:
maxValue = value
ansName = [[key, d[key][1]]]
elif value == maxValue:
ansName.append([key, d[key][1]])
return ansName
creators = ["alice","bob","alice","chris"]
ids = ["one","two","three","four"]
views = [5,10,5,4]
a = Solution()
print(a.mostPopularCreator(creators, ids, views))
|
xxxxlc/leetcode
|
competition/单周赛/317/mostPopularCreator.py
|
mostPopularCreator.py
|
py
| 2,125 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
73479090107
|
"""This module is a wrapper for libhydrogeo (WASP 8)"""
import os
import sys
import datetime
import ctypes as ct
import constants as co
class Wasp(object):
def __init__(self, libname=co.LIB_NAME, libpath=None):
"""Initialization for class
Seach and load the library ... else exit.
Args:
libname (str): The library name (libhydrolink.so on GNU/Linux)
libpath (str): The path where we search for the file
"""
if not libpath:
# Get LD_LIBRARY_PATH
ldp = list()
if 'LD_LIBRARY_PATH' in os.environ:
ldp = os.environ['LD_LIBRARY_PATH'].split(':')
# Append current directory
ldp.append(os.getcwd())
# Is there?
for p in ldp:
fp = os.path.join(p, libname)
if os.path.exists(fp):
libpath = p
break
if not libpath:
print('Can NOT find {}'.format(co.LIB_NAME))
sys.exit(-1)
self.lpath = os.path.join(libpath, libname)
self.hydro = ct.cdll.LoadLibrary(self.lpath)
self.cfpath = None
self.hndl = None
self.dlevel = -1
def setDebug(self, dlevel=0):
"""Set the libhydrogeo debug level
Seach and load the library.
Args:
            dlevel (int): Set the level. Greater than 0 to get log output in log files.
"""
d = ct.c_int(dlevel)
self.hydro.hlsetdebug(ct.byref(d))
self.dlevel = dlevel
def getLastError(self):
"""Get last libhydrogeo error.
Returns:
str: The string with the error
"""
        m = ct.create_string_buffer(72)  # writable 72-byte buffer for the error message
self.hydro.hlgetlasterror(m)
return m.value.decode('utf-8')
def open(self, fpath, fmode=co.OPEN_WRITE):
"""Open file for reading or writing
Args:
fpath (str): The file path for open.
fmode (int): 0 = read; 1 = write.
"""
fp = ct.c_char_p(fpath.encode())
fm = ct.c_int(fmode)
fh = ct.c_int(0)
ie = ct.c_int(0)
self.hydro.hlopen(fp, ct.byref(fm), ct.byref(fh), ct.byref(ie))
if ie.value > 0:
print(self.getLastError())
self.hndl = -1
else:
# Raw fh
self.cfpath = fpath
# By ref!
self.hndl = ct.byref(fh)
def close(self):
"""Close an opened file"""
ie = ct.c_int(0)
if self.hndl:
self.hydro.hlclose(self.hndl, ct.byref(ie))
if ie.value > 0:
print(self.getLastError())
def compactFactor(self):
"""Get compression factor for the file.
Returns:
int: The compact factor
"""
cf = ct.c_int(1)
ie = ct.c_int(0)
if self.hndl:
self.hydro.hlgetcompfact(self.hndl, ct.byref(cf), ct.byref(ie))
if ie.value > 0:
print(self.getLastError())
else:
self.cf = cf.value*100
def setLang(self, lang=co.LANG_C):
"""Set programming language for the library
Fortran use base 1 arrays. C, C++, python use base 0.
Args:
lang (int): 0 = C; 1 = Fortran.
"""
fl = ct.c_int(lang)
ie = ct.c_int(0)
self.hydro.hlsetlanguage(self.hndl, ct.byref(fl), ct.byref(ie))
if ie.value > 0:
print(self.getLastError())
def setCreator(self, creator=co.CREATOR_EFDCMPI):
"""Inform the library who is the creator for this HYD file
libhydrolink recognizes EFDC, HECRAS, DYNHYD, EPDRIV1 = 3
Args:
creator (int): 1 = EFDC and EFDCMPI ; 2 = HECRAS and DYNHYD, 3=EPDRIV1.
"""
fc = ct.c_int(creator)
ie = ct.c_int(0)
self.hydro.hlsetcreator(self.hndl, ct.byref(fc), ct.byref(ie))
if ie.value > 0:
print(self.getLastError())
def setDescriptions(self, desc=list()):
"""Add descriptions to the HYD file
Args:
desc (list): List of string with descriptions
"""
if len(desc) > 0:
n = len(desc)
for i in range(n):
fd = ct.c_char_p(desc[i].encode())
# 0 for decriptions
dd = ct.c_int(0)
ie = ct.c_int(0)
self.hydro.hladddescription(
self.hndl,
ct.byref(dd),
fd,
ct.byref(ie)
)
if ie.value > 0:
print(self.getLastError())
self.desc = desc
def setAuthor(self, author):
"""Add autor to the HYD file
Args:
author (str): The autor name
"""
fd = ct.c_char_p(author.encode())
# 1 for modeller name
dd = ct.c_int(1)
ie = ct.c_int(0)
self.hydro.hladddescription(
self.hndl,
ct.byref(dd),
fd,
ct.byref(ie)
)
if ie.value > 0:
print(self.getLastError())
else:
self.author = author
def setMoment(self, dt):
"""Set the initial time and date for th hydrodynamic information in the linkage file.
Args:
dt (datetime.datetime): The date and time.
"""
ida = ct.c_int(dt.day)
imo = ct.c_int(dt.month)
iye = ct.c_int(dt.year)
iho = ct.c_int(dt.hour)
imi = ct.c_int(dt.minute)
ise = ct.c_int(dt.second)
ie = ct.c_int(0)
self.hydro.hlsetseedmoment(
self.hndl,
ct.byref(imo),
ct.byref(ida),
ct.byref(iye),
ct.byref(iho),
ct.byref(imi),
ct.byref(ise),
ct.byref(ie)
)
if ie.value > 0:
print(self.getLastError())
else:
self.moment = dt
def setNumLay(self, nlays=1):
"""Set the number of layers for the hydrodynamic information in the linkage file.
Args:
nlays (int): The number of layers in the model.
"""
fn = ct.c_int(nlays)
ie = ct.c_int(0)
self.hydro.hlsetnumlayers(self.hndl, ct.byref(fn), ct.byref(ie))
if ie.value > 0:
print(self.getLastError())
else:
self.nlays = nlays
def setNumSeg(self, nsegs):
"""Set the number of segments for the hydrodynamic information in the linkage file.
Args:
nsegs (int): The number of segments in the model.
"""
fn = ct.c_int(nsegs)
ie = ct.c_int(0)
self.hydro.hlsetnumsegments(self.hndl, ct.byref(fn), ct.byref(ie))
if ie.value > 0:
print(self.getLastError())
else:
self.nsegs = nsegs
def setSegNames(self, segsn=list()):
"""Set segments names.
Args:
segsn (list): A list with segments names.
"""
if len(segsn) > 0:
n = len(segsn)
for i in range(n):
sn = ct.c_char_p(segsn[i].encode())
se = ct.c_int(i)
ie = ct.c_int(0)
self.hydro.hlsetsegname(
self.hndl,
ct.byref(se),
sn,
ct.byref(ie)
)
if ie.value > 0:
print(self.getLastError())
self.segsn = segsn
def setTimeStep(self, ts):
"""Specifies the timestep that was used for the hydrodynamic model simulation.
This value only needs to be set once
Args:
ts (int): The time step used in the hydrodynamic models.
"""
ts = ct.c_int(ts)
ie = ct.c_int(0)
self.hydro.hlsethydtimestep(self.hndl, ct.byref(ts), ct.byref(ie))
if ie.value > 0:
print(self.getLastError())
else:
self.ts = ts
def setNumSegConst(self, n):
"""This function is used to set the number of segment constituents that will be written to the hydrodynamic linkage file
The current version of the HYDROLINK API assumes a particular order. To get to a particular constituent you must define the earlier ones. Segment constituents are: volume, depth, velocity, temperature and salinity
Args:
n (int): The number constituents to be passed to the file
"""
nn = ct.c_int(n)
ie = ct.c_int(0)
self.hydro.hlsetnumsegconsts(
self.hndl,
ct.byref(nn),
ct.byref(ie)
)
if ie.value > 0:
print(self.getLastError())
self.nsc = n
def setNumFPConst(self, n):
"""This function is used to specify the number of flow path constituents. The number of flow path constituents that are passed by the hydrodynamic model is typically a function of the dimensionality of the model
For models like EFDC the number of flow path constituents is three: 1) Flow 2) Dispersion/residual flow, 3) Direction of Flow. For simple 1 dimensional models like DYNHYD the number of flow path constituents is one, Flow.
Args:
n (int): The number constituents to be passed to the file
"""
nn = ct.c_int(n)
ie = ct.c_int(0)
self.hydro.hlsetnumfpconsts(
self.hndl,
ct.byref(nn),
ct.byref(ie)
)
if ie.value > 0:
print(self.getLastError())
self.nfpc = n
def _setSegData(self, stype, data):
"""Set segments data.
Args:
stype (int): Type information. 1=Segment Volume (m3), 2=Segment Depth (m),
3=Segment Velocity (m/sec), 4=Segment Temperature (C), 5=Segment Salinity (!)
data (list): List of floats with data
"""
n = len(data)
ad = (ct.c_double * n)()
for i in range(n):
ad[i] = data[i]
si = ct.c_int(stype)
ie = ct.c_int(0)
self.hydro.hlsetseginfo(
self.hndl,
ct.byref(si),
ct.byref(ad),
ct.byref(ie)
)
if ie.value > 0:
print(self.getLastError())
def setSegVolume(self, vols):
"""Set segments volumes.
Args:
vols (list): List of floats with segments volumes
"""
self._setSegData(co.SEG_VOL, vols)
self.vols = vols
def setSegDepth(self, deps):
"""Set segments depths.
Args:
deps (list): List of floats with segments depths
"""
self._setSegData(co.SEG_DEP, deps)
self.deps = deps
def setSegVelocity(self, vels):
"""Set segments velocities.
Args:
vels (list): List of floats with segments velocities
"""
self._setSegData(co.SEG_VEL, vels)
self.vels = vels
def setSegTemperature(self, temps):
"""Set segments velocities.
Args:
temps (list): List of floats with segments temperatures
"""
self._setSegData(co.SEG_TEM, temps)
self.temps = temps
def setSegSalinity(self, sals):
"""Set segments velocities.
Args:
sals (list): List of floats with segments salinities
"""
self._setSegData(co.SEG_SAL, sals)
self.sals = sals
def _setFlowData(self, ftype, data):
"""Set flow data.
Args:
ftype (int): Type information. 1=Advective flow, 2=Dispersive flow
data (list): List of floats with data
"""
n = len(data)
ad = (ct.c_double * n)()
for i in range(n):
ad[i] = data[i]
si = ct.c_int(ftype)
ie = ct.c_int(0)
self.hydro.hlsetflowinfo(
self.hndl,
ct.byref(si),
ct.byref(ad),
ct.byref(ie)
)
if ie.value > 0:
print(self.getLastError())
def setFlowAdvect(self, fadvs):
"""Set advective flows
Args:
fadvs (list): List of floats with advective flows
"""
self._setFlowData(co.FLOWP_ADV, fadvs)
self.fadvs = fadvs
def setFlowDisps(self, fdisps):
"""Set dispersive flows
Args:
fdisps (list): List of floats with advective flows
"""
self._setFlowData(co.FLOWP_DIS, fdisps)
self.fdisps = fdisps
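
# Illustrative usage sketch (not part of the original module): the typical call order for
# writing a linkage file with this wrapper. It requires libhydrolink.so and the constants
# module to be available, so it only runs when the file is executed directly; the output
# file name and values are hypothetical placeholders.
if __name__ == '__main__':
    w = Wasp()                                  # locates and loads the shared library
    w.setDebug(0)
    w.open('example.hyd', co.OPEN_WRITE)
    w.setLang(co.LANG_C)
    w.setCreator(co.CREATOR_EFDCMPI)
    w.setAuthor('example author')
    w.setMoment(datetime.datetime(2020, 1, 1))
    w.setNumLay(1)
    w.setNumSeg(4)
    w.close()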
|
fpacheco/hydrolink
|
wasp.py
|
wasp.py
|
py
| 12,492 |
python
|
en
|
code
| 4 |
github-code
|
6
|
72255274108
|
from __future__ import annotations
import asyncio
import datetime
import time
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import asyncpg
import discord
from discord.ext import commands
from utils import (
AvatarsPageSource,
AvatarView,
FieldPageSource,
Pager,
format_bytes,
human_timedelta,
to_bytes,
format_status,
BlankException,
)
from ._base import CogBase
if TYPE_CHECKING:
from bot import Bot
from cogs.context import Context
class UserCommands(CogBase):
@commands.command(name="avatar", aliases=("pfp", "avy", "av"))
async def avatar(
self,
ctx: Context,
*,
user: Optional[Union[discord.Member, discord.User]] = commands.Author,
):
"""Gets the avatar of a user"""
user = user or ctx.author
embed = discord.Embed(
color=self.bot.embedcolor
if user.color == discord.Color.default()
else user.color
)
embed.set_author(name=str(user), icon_url=user.display_avatar.url)
embed.set_image(url=user.display_avatar.url)
sql = """SELECT created_at FROM avatars WHERE user_id = $1 ORDER BY created_at DESC"""
latest_avatar = await self.bot.pool.fetchval(sql, user.id)
if latest_avatar:
embed.timestamp = latest_avatar
embed.set_footer(text="Avatar changed")
await ctx.send(
embed=embed,
view=AvatarView(ctx, user, embed, user.display_avatar),
check_ref=True,
)
@commands.command(name="avatars", aliases=("pfps", "avys", "avs"))
async def avatars(
self, ctx: Context, user: Union[discord.Member, discord.User] = commands.Author
):
"""Shows all of a users avatars"""
sql = """SELECT * FROM avatars WHERE user_id = $1 ORDER BY created_at DESC"""
results = await self.bot.pool.fetch(sql, user.id)
if results == []:
raise ValueError("User has no avatar history saved.")
entries: List[Tuple[str, datetime.datetime, int]] = [
(
r["avatar"],
r["created_at"],
r["id"],
)
for r in results
]
source = AvatarsPageSource(entries=entries)
source.embed.color = (
self.bot.embedcolor if user.color == discord.Color.default() else user.color
)
source.embed.title = f"Avatars for {user}"
pager = Pager(source, ctx=ctx)
await pager.start(ctx)
@commands.command(name="banner")
async def banner(self, ctx: Context, *, user: discord.User = commands.Author):
"""Shows a users banner"""
user = await ctx.bot.fetch_user(user.id)
if user.banner is None:
raise TypeError("This user has no banner.")
file = await user.banner.to_file(
filename=f'banner.{"gif" if user.banner.is_animated() else "png"}'
)
embed = discord.Embed()
embed.set_author(name=f"{str(user)}'s banner", icon_url=user.display_avatar.url)
embed.set_image(
url=f'attachment://banner.{"gif" if user.banner.is_animated() else "png"}'
)
await ctx.send(file=file, embed=embed)
async def _index_member(self, guild: discord.Guild, member: discord.Member) -> bool:
sql = """
INSERT INTO member_join_logs (member_id, guild_id, time)
SELECT $1, $2, $3
WHERE NOT EXISTS (
SELECT 1
FROM member_join_logs
WHERE member_id = $1 AND guild_id = $2 AND time = $3
);
"""
await self.bot.pool.execute(
sql,
member.id,
guild.id,
member.joined_at,
)
return True
@commands.group(name="joins", invoke_without_command=True)
async def joins(
self,
ctx: Context,
*,
user: Union[discord.Member, discord.User] = commands.Author,
):
"""Shows how many times a user joined a server
Note: If they joined before I was added then I will not have any data for them."""
guild = ctx.guild
results: Optional[int] = await self.bot.pool.fetchval(
"SELECT COUNT(member_id) FROM member_join_logs WHERE member_id = $1 AND guild_id = $2",
user.id,
guild.id,
)
if not results:
if isinstance(user, discord.Member):
results = await self._index_member(guild, user)
if results:
results = 1
else:
return await ctx.send(f"I have no join records for {user} in {guild}")
await ctx.send(
f"{user} has joined {guild} {results:,} time{'s' if results > 1 else ''}."
)
@commands.command(name="uptime")
async def uptime(self, ctx: Context, *, member: Optional[discord.Member]):
"""Shows how long a user has been online."""
bot = self.bot
me = bot.user
if me is None or bot.uptime is None:
return
if member is None or member and member.id == me.id:
return await ctx.send(
f"Hello, I have been awake for {human_timedelta(bot.uptime, suffix=False)}."
)
if "uptime" in await self.bot.redis.smembers(f"opted_out:{member.id}"):
raise BlankException(f"Sorry, {member} has opted out from uptime logging.")
results: Optional[datetime.datetime] = await bot.pool.fetchval(
"SELECT time FROM uptime_logs WHERE user_id = $1", member.id
)
message = (
f"{member} has been {format_status(member)} for {human_timedelta(results, suffix=False)}."
if results
else f"{member} has been {format_status(member)} as long as I can tell."
)
await ctx.send(message)
@commands.command(name="usernames", aliases=("names",))
async def usernames(self, ctx: Context, user: discord.User = commands.Author):
results = await self.bot.pool.fetch(
"SELECT * FROM username_logs WHERE user_id = $1 ORDER BY created_at DESC",
user.id,
)
if results == []:
await ctx.send(f"I have no username records for {user}.")
return
entries = [
(
r["username"],
f'{discord.utils.format_dt(r["created_at"], "R")} | {discord.utils.format_dt(r["created_at"], "d")} | `ID: {r["id"]}`',
)
for r in results
]
source = FieldPageSource(entries=entries)
source.embed.color = self.bot.embedcolor
source.embed.title = f"Usernames for {user}"
pager = Pager(source, ctx=ctx)
await pager.start(ctx)
@commands.command(name="discrims", aliases=("discriminators",))
async def discrims(self, ctx: Context, user: discord.User = commands.Author):
"""Shows all discriminators a user has had.
        These are the numbers after your username."""
results = await self.bot.pool.fetch(
"SELECT * FROM discrim_logs WHERE user_id = $1 ORDER BY created_at DESC",
user.id,
)
if results == []:
await ctx.send(f"I have no discriminator records for {user}")
return
entries = [
(
f'#{r["discrim"]}',
f'{discord.utils.format_dt(r["created_at"], "R")} | {discord.utils.format_dt(r["created_at"], "d")} | `ID: {r["id"]}`',
)
for r in results
]
source = FieldPageSource(entries=entries)
source.embed.color = self.bot.embedcolor
source.embed.title = f"Discriminators for {user}"
pager = Pager(source, ctx=ctx)
await pager.start(ctx)
@commands.command(name="nicknames", aliases=("nicks",))
async def nicknames(
self,
ctx: Context,
*,
user: discord.User = commands.Author,
):
"""Shows all nicknames a user has had in a guild."""
if ctx.guild is None:
return
results = await self.bot.pool.fetch(
"SELECT * FROM nickname_logs WHERE user_id = $1 AND guild_id = $2 ORDER BY created_at DESC",
user.id,
ctx.guild.id,
)
if results == []:
await ctx.send(f"I have no nickname records for {user} in {ctx.guild}")
return
entries = [
(
r["nickname"],
f'{discord.utils.format_dt(r["created_at"], "R")} | {discord.utils.format_dt(r["created_at"], "d")} | `ID: {r["id"]}`',
)
for r in results
]
source = FieldPageSource(entries=entries)
source.embed.title = f"Nicknames for {user} in {ctx.guild}"
source.embed.color = self.bot.embedcolor
pager = Pager(source, ctx=ctx)
await pager.start(ctx)
@commands.group(
name="avatarhistory",
aliases=("avyh", "pfph", "avh"),
invoke_without_command=True,
)
async def avatar_history(
self, ctx: Context, *, user: discord.User = commands.Author
):
"""Shows the avatar history of a user.
        This will only show the first 100; to view them all and in HD, run the command `avatars`."""
async with ctx.typing():
sql = """
SELECT * FROM avatars WHERE user_id = $1
ORDER BY created_at DESC LIMIT 100
"""
records: List[asyncpg.Record] = await self.bot.pool.fetch(
sql,
user.id,
)
if records == []:
await ctx.send(f"{user} has no avatar history on record.")
return
avatars = await asyncio.gather(
*[to_bytes(ctx.session, row["avatar"]) for row in records]
)
fp = await format_bytes(ctx.guild.filesize_limit, avatars)
file = discord.File(
fp,
f"{user.id}_avatar_history.png",
)
if len(records) >= 100:
first_avatar: datetime.datetime = await self.bot.pool.fetchval(
"""SELECT created_at FROM avatars WHERE user_id = $1 ORDER BY created_at ASC""",
user.id,
)
else:
first_avatar = records[-1]["created_at"]
embed = discord.Embed(timestamp=first_avatar)
embed.set_footer(text="First avatar saved")
embed.set_author(
name=f"{user}'s avatar history", icon_url=user.display_avatar.url
)
embed.set_image(url=f"attachment://{user.id}_avatar_history.png")
await ctx.send(embed=embed, file=file)
@avatar_history.command(name="server", aliases=("guild",))
async def avatar_history_guild(
self,
ctx: Context,
guild: Optional[discord.Guild] = None,
*,
member: discord.Member = commands.Author,
):
"""Shows the server avatar history of a user."""
guild = guild or ctx.guild
async with ctx.typing():
sql = """
SELECT * FROM guild_avatars WHERE member_id = $1 AND guild_id = $2
ORDER BY created_at DESC LIMIT 100
"""
fetch_start = time.perf_counter()
records: List[asyncpg.Record] = await self.bot.pool.fetch(
sql, member.id, guild.id
)
fetch_end = time.perf_counter()
if records == []:
raise ValueError(f"{member} has no server avatar history on record.")
avatars = await asyncio.gather(
*[to_bytes(ctx.session, row["avatar"]) for row in records]
)
gen_start = time.perf_counter()
fp = await format_bytes(guild.filesize_limit, avatars)
file = discord.File(
fp,
f"{member.id}_avatar_history.png",
)
gen_end = time.perf_counter()
if len(records) == 100:
sql = """SELECT created_at FROM guild_avatars WHERE member_id = $1 AND guild_id = $1 ORDER BY created_at ASC"""
first_avatar: datetime.datetime = await self.bot.pool.fetchval(
sql, member.id, guild.id
)
else:
first_avatar = records[-1]["created_at"]
embed = discord.Embed(timestamp=first_avatar)
embed.set_footer(text="First avatar saved")
embed.set_author(
name=f"{member}'s guild avatar history", icon_url=member.display_avatar.url
)
embed.description = f"`Fetching :` {round(fetch_end - fetch_start, 2)}s\n`Generating:` {round(gen_end - gen_start, 2)}s"
embed.set_image(url=f"attachment://{member.id}_avatar_history.png")
await ctx.send(embed=embed, file=file)
|
LeoCx1000/fish
|
src/cogs/discord_/user.py
|
user.py
|
py
| 12,878 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43166848463
|
'''A simple blockchain implementation.
Inspired by https://medium.com/crypto-currently/lets-build-the-tiniest-blockchain-e70965a248b'''
from __future__ import print_function
import hashlib
import datetime
class Block:
'''Blocks of data that will create the Blockchain'''
def __init__(self, index, timestamp, data, previous_hash):
self.index = index
self.timestamp = timestamp
self.data = data
self.previous_hash = previous_hash
self.hash = self.hash_block()
def hash_block(self):
'''returns a sha256 hash of the Block's index, timestamp, data,
and previous block's hash'''
sha_hash = hashlib.sha256()
        sha_hash.update(
            str(self.index).encode('utf-8')
            + str(self.timestamp).encode('utf-8')
            + str(self.data).encode('utf-8')
            + str(self.previous_hash).encode('utf-8')
        )
return sha_hash.hexdigest()
def create_genesis_block():
'''Create the first block in the chain'''
return Block(0, datetime.datetime.now(), "Genesis Block", "0")
def next_block(previous_block):
'''Create the next block in the chain'''
index = previous_block.index + 1
timestamp = datetime.datetime.now()
data = "I'm block {}".format(index)
return Block(index, timestamp, data, previous_block.hash)
def create_block_chain(num_of_blocks):
block_chain = [create_genesis_block()]
previous_block = block_chain[0]
for _ in range(0, num_of_blocks):
new_block = next_block(previous_block)
block_chain.append(new_block)
previous_block = new_block
print("Block #{} was added to the blockchain".format(new_block.index))
print("Hash: {}\n".format(new_block.hash))
create_block_chain(10)
|
William-Hill/UVI_Teaching_2018
|
blockchain/cruzan_coin.py
|
cruzan_coin.py
|
py
| 1,722 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19682814836
|
#!/usr/bin/env python
# coding: utf-8
# # ORCA utilities
#
# Utilities for input creation and output post-processing
# ## Geometry Splitter for CREST conformers
with open('crest_conformers.xyz') as ifile:
name=input('Enter name of the fragments: ')
func=input('Which functional? ')
disp=input('Which dispersion correction? ')
atn = int(input('How many atoms? '))
geo_num=int(input('How many conformations? '))
basis =input('Which basis set? ')
N=0
for line in ifile:
# print(line)
if str(atn) in line.split(): #Use occurrences of the atom number line to count the geometries
N=N+1
with open("{0}_{1}_{2}.inp".format(name, func, N), 'w') as output:
output.write('!' + func + " " + disp + " "+ basis + "\n")
output.write("\n")
output.write("%pal nprocs 8 end\n")
output.write("\n")
output.write('*xyz 0 1\n')
a=next(ifile)
for i in range(atn):
output.write(next(ifile))
output.write('*')
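# For reference, each generated "<name>_<func>_<N>.inp" file produced above has the
# following shape (the functional, dispersion correction and basis set shown here are
# assumed example values, not defaults of the script):
#
#   !B3LYP D3BJ def2-SVP
#
#   %pal nprocs 8 end
#
#   *xyz 0 1
#   <atn coordinate lines copied from crest_conformers.xyz>
#   *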
|
EduardoSchiavo/utilities
|
crest_splitter.py
|
crest_splitter.py
|
py
| 1,223 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26889276534
|
import torch
def iou_score(output, target):
smooth = 1e-5
if torch.is_tensor(output):
output = torch.sigmoid(output).data.cpu().numpy()
if torch.is_tensor(target):
target = target.data.cpu().numpy()
output_ = output > 0.5
target_ = target > 0.5
intersection = (output_ & target_).sum()
union = (output_ | target_).sum()
return (intersection + smooth) / (union + smooth)
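# Illustrative usage (assumed, not from the original repo): IoU between a random logit
# map and a random binary mask; tensors are sigmoid-activated and thresholded at 0.5
# by iou_score itself.
if __name__ == "__main__":
    pred = torch.randn(1, 1, 32, 32)                 # raw network logits
    mask = (torch.rand(1, 1, 32, 32) > 0.5).float()  # binary ground-truth mask
    print("IoU:", iou_score(pred, mask))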
|
krishnakaushik25/Medical-Image-Segmentation-DL
|
modular_code/src/ML_Pipeline/iou.py
|
iou.py
|
py
| 423 |
python
|
en
|
code
| 7 |
github-code
|
6
|
44942108516
|
model = dict(
type='BRNet',
backbone=dict(
type='PointNet2SASSG',
in_channels=4,
num_points=(2048, 1024, 512, 256),
radius=(0.2, 0.4, 0.8, 1.2),
num_samples=(64, 32, 16, 16),
sa_channels=(
(64, 64, 128),
(128, 128, 256),
(128, 128, 256),
(128, 128, 256)),
fp_channels=((256, 256), (256, 256)),
norm_cfg=dict(type='BN2d'),
sa_cfg=dict(
type='PointSAModule',
pool_mod='max',
use_xyz=True,
normalize_xyz=True)),
rpn_head=dict(
type='CAVoteHead',
vote_module_cfg=dict(
in_channels=256,
vote_per_seed=1,
gt_per_seed=3,
conv_channels=(256, 256),
conv_cfg=dict(type='Conv1d'),
norm_cfg=dict(type='BN1d'),
norm_feats=True,
vote_loss=dict(
type='ChamferDistance',
mode='l1',
reduction='none',
loss_dst_weight=10.0)),
vote_aggregation_cfg=dict(
type='PointSAModule',
num_point=256,
radius=0.3,
num_sample=16,
mlp_channels=[256, 128, 128, 128],
use_xyz=True,
normalize_xyz=True),
pred_layer_cfg=dict(
in_channels=128, shared_conv_channels=(128, 128), bias=True),
conv_cfg=dict(type='Conv1d'),
norm_cfg=dict(type='BN1d'),
objectness_loss=dict(
type='CrossEntropyLoss',
class_weight=[0.2, 0.8],
reduction='sum',
loss_weight=5.0),
dir_class_loss=dict(
type='CrossEntropyLoss', reduction='sum', loss_weight=1.0),
dir_res_loss=dict(
type='SmoothL1Loss', reduction='sum', loss_weight=10.0),
size_res_loss=dict(
type='SmoothL1Loss', reduction='sum', loss_weight=10.0, beta=0.15)),
roi_head=dict(
type='BRRoIHead',
roi_extractor=dict(
type='RepPointRoIExtractor',
rep_type='ray',
density=2,
seed_feat_dim=256,
sa_radius=0.2,
sa_num_sample=16,
num_seed_points=1024
),
bbox_head=dict(
type='BRBboxHead',
pred_layer_cfg=dict(
in_channels=256, shared_conv_channels=(128, 128), bias=True),
dir_res_loss=dict(),
size_res_loss=dict(
type='SmoothL1Loss',
beta=0.15,
reduction='sum',
loss_weight=10.0),
semantic_loss=dict(
type='CrossEntropyLoss', reduction='sum', loss_weight=1.0)
)
),
train_cfg=dict(
rpn=dict(
pos_distance_thr=0.3,
neg_distance_thr=0.3,
sample_mod='seed'),
rpn_proposal=dict(use_nms=False),
rcnn=dict(
pos_distance_thr=0.3,
neg_distance_thr=0.3,
sample_mod='seed')),
test_cfg=dict(
rpn=dict(sample_mod='seed', use_nms=False),
rcnn=dict(
sample_mod='seed',
nms_thr=0.25,
score_thr=0.05,
per_class_proposal=True))
)
|
cheng052/BRNet
|
configs/_base_/models/brnet.py
|
brnet.py
|
py
| 3,261 |
python
|
en
|
code
| 90 |
github-code
|
6
|
23876853087
|
class SLLNode:
def __init__(self, item, nextnode):
self.element = item
self.next = nextnode
class SLinkedList:
def __init__(self):
self.first = None
self.size = 0
def length(self):
return self.size
def add_first(self, item):
newnode = SLLNode(item, self.first)
self.first = newnode
self.size = self.size + 1
def get_first(self):
if self.size == 0:
return None
return self.first.element
def remove_first(self):
if self.size == 0:
return None
item = self.first.element
self.first = self.first.next
self.size = self.size - 1
return item
def add_last(self, item):
newnode = SLLNode(item, None)
if self.first == None:
self.first = newnode
else:
node = self.first
while node.next:
node = node.next
node.next = newnode
self.size = self.size + 1
def get_last(self):
if self.size == 0:
return None
node = self.first
while node.next:
node = node.next
return node.element
##################################################################
# Test code
def test_linkedlist():
mylist = SLinkedList()
mylist.add_first('b')
mylist.add_last('c')
mylist.add_first('a')
mylist.add_last('d')
mylist.add_last('e')
print('mylist =', mylist)
print('length =', mylist.length())
print('first =', mylist.get_first())
print('last = ', mylist.get_last())
print('first (removed) =', mylist.remove_first())
print('mylist now =', mylist)
print('length =', mylist.length())
mylist.remove_first()
mylist.remove_first()
mylist.remove_first()
mylist.remove_first()
print('length =', mylist.length())
print('first (None?) =', mylist.get_first())
print('last (None?) =', mylist.get_last())
print('first removed (None?) =', mylist.remove_first())
mylist.add_last('f')
print('mylist (f) =', mylist)
print('length =', mylist.length())
test_linkedlist()
|
maximised/College_work
|
Year_2/CS2516/ADTs/Singly_Linked_list.py
|
Singly_Linked_list.py
|
py
| 2,229 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39509933349
|
import argparse
from torch.nn import BatchNorm2d
from cifar.norm_layers import MyBatchNorm, BatchInstance, MyLayerNorm, MyGroupNorm, MyInstanceNorm
from cifar.dataset import get_data_loaders,get_default_device
from cifar.train import train
from cifar.model import MyResnet
# python3 train_cifar.py --normalization [ bn | in | bin | ln | gn | nn | torch_bn] --data_dir <directory_containing_data> --output_file <path to the trained model> --n [1 | 2 | 3 ]
my_parser = argparse.ArgumentParser(allow_abbrev=False)
my_parser.add_argument('--normalization', required=True, type=str, action='store',
choices=('bn', 'in', 'bin', 'ln', 'gn', 'nn', 'torch_bn'))
my_parser.add_argument('--data_dir', required=True, type=str, action='store')
my_parser.add_argument('--output_file', required=True, type=str, action='store')
my_parser.add_argument('--n', required=True, type=int, action='store', choices=(1, 2, 3))
args = my_parser.parse_args()
option_to_norm = {'bn': MyBatchNorm, 'in': MyInstanceNorm, 'bin': BatchInstance, 'ln': MyLayerNorm, 'gn': MyGroupNorm,
'nn': None, 'torch_bn': BatchNorm2d}
norm_layer = option_to_norm[args.normalization]
device = get_default_device()
if norm_layer == MyLayerNorm:
train_loader, val_loader, test_loader = get_data_loaders(args.data_dir, device, drop_last=True)
else:
train_loader, val_loader, test_loader = get_data_loaders(args.data_dir, device)
n = args.n
r = 10
resnet_model = MyResnet(n, r, norm=norm_layer)
model = resnet_model.to(device)
train(device, model, train_loader, val_loader, model_save_path=args.output_file, already_trained=False,
learning_rate=0.1, momentumValue=0.9, wieghtDecayValue=0.0001)
|
aps1310/COL_870
|
Assignment 1/2020MCS2448_2020MCS2468/train_cifar.py
|
train_cifar.py
|
py
| 1,757 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1558159854
|
from enum import IntEnum
SCALE_2X: bool = False
WORLD_SCALE: int = 32
OBJECT_SCALE: int = WORLD_SCALE // 2
SPRITE_SCALE: int = WORLD_SCALE * 2
ANIMATION_NUM_FRAMES: int = 4
RESOLUTION_X: int = WORLD_SCALE * 20
RESOLUTION_Y: int = int(RESOLUTION_X * 0.625) # 640x400 aspect ratio
SPRITE_CLOTHES_COLORS = ['#42200f', '#834222', '#9d633d']
SNOW_CLOTHES_COLORS = ['#8996c6', '#aac2ff', '#a5acc4']
GRASS_CLOTHES_COLOR = ['#0c2618', '#123924', '#266e48']
STONE_CLOTHES_COLOR = ['#4a4a4a', '#8c8c8c', '#adadad']
EMBER_CLOTHES_COLOR = ['#ad0021', '#ef6221', '#efce21']
class ObjectType(IntEnum):
FOOD = 0
DANGER = 1
BONUS = 2
WEAPON = 3
OBJECT_RADIUS: float = 0.25
OBJECT_NUM_VERSIONS: int = 6
|
cgloeckner/prehistoric_guy
|
core/constants.py
|
constants.py
|
py
| 713 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40693675853
|
import argparse
import glob
import os
import shutil
import subprocess # noqa: S404
import sys
from collections import namedtuple
from types import MappingProxyType
from typing import Iterable, List, Optional
HOST_BUILD_CTX = '/tmp/magma_orc8r_build' # noqa: S108
HOST_MAGMA_ROOT = '../../../.'
IMAGE_MAGMA_ROOT = os.path.join('src', 'magma')
GOLINT_FILE = '.golangci.yml'
TEST_RESULT_DIR = 'orc8r/cloud/test-results'
MODULES = (
'orc8r',
'lte',
'feg',
'cwf',
'dp',
)
DEPLOYMENT_TO_MODULES = MappingProxyType({
'all': MODULES,
    'orc8r': ('orc8r',),
'fwa': ('orc8r', 'lte'),
'ffwa': ('orc8r', 'lte', 'feg'),
'cwf': ('orc8r', 'lte', 'feg', 'cwf'),
})
DEPLOYMENTS = DEPLOYMENT_TO_MODULES.keys()
EXTRA_COMPOSE_FILES = (
'docker-compose.metrics.yml',
# For now, logging is left out of the build because the fluentd daemonset
# and forwarder pod shouldn't change very frequently - we can build and
# push locally when they need to be updated.
# We can integrate this into the CI pipeline if/when we see the need for it
# 'docker-compose.logging.yml',
)
MagmaModule = namedtuple('MagmaModule', ['name', 'host_path'])
def main() -> None:
"""
Run docker-compose script
"""
_check_assumptions()
args = _parse_args()
mods = _get_modules(DEPLOYMENT_TO_MODULES[args.deployment])
if not args.extras:
_create_build_context(mods)
if args.mount:
_run(['build', 'test'])
_run(['run', '--rm'] + _get_mnt_vols(mods) + ['test', 'bash'])
_down(args)
elif args.generate:
_run(['build', 'test'])
_run(['run', '--rm'] + _get_mnt_vols(mods) + ['test', 'make fullgen'])
_down(args)
elif args.lint:
_run(['build', 'test'])
_run(['run', '--rm'] + _get_mnt_vols(mods) + ['test', 'make lint'])
_down(args)
elif args.tidy:
_run(['build', 'test'])
_run(['run', '--rm'] + _get_mnt_vols(mods) + ['test', 'make tidy'])
_down(args)
elif args.precommit:
_run(['build', 'test'])
_run(['run', '--rm'] + _get_mnt_vols(mods) + ['test', 'make precommit'])
_down(args)
elif args.coverage:
_run(['up', '-d', 'postgres_test'])
_run(['build', 'test'])
_run(['run', '--rm'] + _get_mnt_vols(mods) + ['test', 'make cover'])
_down(args)
elif args.tests:
_run(['up', '-d', 'postgres_test'])
_run(['build', 'test'])
_run(['run', '--rm'] + _get_test_result_vol() + ['test', 'make test'])
_down(args)
elif args.build_service:
_run(['build', args.build_service])
else:
d_args = _get_default_file_args(args) + _get_default_build_args(args)
_run(d_args)
def _check_assumptions():
"""Check assumptions about environment."""
cwd = os.path.dirname(os.path.realpath(__file__))
if cwd != os.getcwd():
sys.exit("Must run from orc8r/cloud/docker directory")
if 'PWD' not in os.environ:
msg = (
"$PWD environment variable must be set.\n"
"Normally this is set by your shell. Try running without sudo, "
"then try explicitly setting the PWD env var."
)
sys.exit(msg)
def _get_modules(mods: Iterable[str]) -> Iterable[MagmaModule]:
"""
Read the modules config file and return all modules specified.
"""
modules = []
for m in mods:
abspath = os.path.abspath(os.path.join(HOST_MAGMA_ROOT, m))
module = MagmaModule(name=m, host_path=abspath)
modules.append(module)
return modules
def _create_build_context(modules: Iterable[MagmaModule]) -> None:
""" Clear out the build context from the previous run """
shutil.rmtree(HOST_BUILD_CTX, ignore_errors=True)
os.mkdir(HOST_BUILD_CTX)
print("Creating build context in '%s'..." % HOST_BUILD_CTX)
for m in modules:
_copy_module(m)
def _down(args: argparse.Namespace) -> None:
if not args.up:
_run(['down'])
def _run(cmd: List[str]) -> None:
""" Run the required docker compose command """
cmd = ['docker', 'compose', '--compatibility'] + cmd
print("Running '%s'..." % ' '.join(cmd))
try:
subprocess.run(cmd, check=True) # noqa: S603
except subprocess.CalledProcessError as err:
sys.exit(err.returncode)
def _get_mnt_vols(modules: Iterable[MagmaModule]) -> List[str]:
""" Return the volumes argument for docker compose commands """
vols = [
# .golangci.yml file
'-v', '%s:%s' % (
os.path.abspath(os.path.join(HOST_MAGMA_ROOT, GOLINT_FILE)),
os.path.join(os.sep, IMAGE_MAGMA_ROOT, GOLINT_FILE),
),
]
# Per-module directory mounts
for m in modules:
vols.extend(['-v', '%s:%s' % (m.host_path, _get_module_image_dst(m))])
return vols
def _get_test_result_vol() -> List[str]:
"""Return the volume argment to mount TEST_RESULT_DIR
Returns:
List[str]: -v command to mount TEST_RESULT_DIR
"""
return [
'-v', '%s:%s' % (
os.path.abspath(os.path.join(HOST_MAGMA_ROOT, TEST_RESULT_DIR)),
os.path.join(os.sep, IMAGE_MAGMA_ROOT, TEST_RESULT_DIR),
),
]
def _get_default_file_args(args: argparse.Namespace) -> List[str]:
def make_file_args(fs: Optional[Iterable[str]] = None) -> List[str]:
if fs is None:
return []
fs = ['docker-compose.yml'] + \
list(fs) + ['docker-compose.override.yml']
ret = []
for f in fs:
ret.extend(['-f', f])
return ret
if args.all:
return make_file_args(EXTRA_COMPOSE_FILES)
# Default implicitly to docker-compose.yml + docker-compose.override.yml
return make_file_args()
def _get_default_build_args(args: argparse.Namespace) -> List[str]:
mods = DEPLOYMENT_TO_MODULES[args.deployment]
ret = [
'build',
'--build-arg', 'MAGMA_MODULES=%s' % ' '.join(mods),
]
if args.nocache:
ret.append('--no-cache')
return ret
def _copy_module(module: MagmaModule) -> None:
""" Copy module directory into the build context """
build_ctx = _get_module_host_dst(module)
def copy_to_ctx(d: str) -> None:
shutil.copytree(
os.path.join(module.host_path, d),
os.path.join(build_ctx, d),
)
if module.name == 'nms':
copy_to_ctx('scripts')
else:
copy_to_ctx('cloud')
# Orc8r module also has lib/ and gateway/
if module.name == 'orc8r':
copy_to_ctx('lib')
copy_to_ctx('gateway')
# Optionally copy cloud/configs/
# Converts e.g. lte/cloud/configs/ to configs/lte/
if os.path.isdir(os.path.join(module.host_path, 'cloud', 'configs')):
shutil.copytree(
os.path.join(module.host_path, 'cloud', 'configs'),
os.path.join(HOST_BUILD_CTX, 'configs', module.name),
)
# Copy the go.mod file for caching the go downloads
# Preserves relative paths between modules
for f in glob.iglob(build_ctx + '/**/go.mod', recursive=True):
gomod = f.replace(
HOST_BUILD_CTX, os.path.join(HOST_BUILD_CTX, 'gomod'),
)
print(gomod)
os.makedirs(os.path.dirname(gomod))
shutil.copyfile(f, gomod)
def _get_module_image_dst(module: MagmaModule) -> str:
"""
Given a path to a module on the host, return the intended destination
in the final image.
Parameters:
module: Magma module
Returns:
str: destination in the final image
"""
return os.path.join(os.sep, IMAGE_MAGMA_ROOT, module.name)
def _get_module_host_dst(module: MagmaModule) -> str:
"""
Given a path to a module on the host, return the intended destination
in the build context.
Parameters:
module: Magma module
Returns:
str: destination in the build context
"""
return os.path.join(HOST_BUILD_CTX, IMAGE_MAGMA_ROOT, module.name)
def _parse_args() -> argparse.Namespace:
""" Parse the command line args """
# There are multiple ways to invoke finer-grained control over which
# images are built.
#
# (1) How many images to build
#
# all: all images
# default: images required for minimum functionality
# - excluding metrics images
# - including postgres, proxy, etc
#
# (2) Of the core orc8r images, which modules to build
#
# Defaults to all modules, but can be further specified by targeting a
# deployment type.
parser = argparse.ArgumentParser(description='Orc8r build tool')
# Run something
parser.add_argument(
'--tests', '-t',
action='store_true',
help='Run unit tests',
)
parser.add_argument(
'--mount', '-m',
action='store_true',
help='Mount the source code and create a bash shell',
)
parser.add_argument(
'--generate', '-g',
action='store_true',
help='Mount the source code and regenerate generated files',
)
parser.add_argument(
'--precommit', '-c',
action='store_true',
help='Mount the source code and run pre-commit checks',
)
parser.add_argument(
'--lint', '-l',
action='store_true',
help='Mount the source code and run the linter',
)
parser.add_argument(
'--tidy', '-i',
action='store_true',
help='Mount the source code and run go mod tidy',
)
parser.add_argument(
'--coverage', '-o',
action='store_true',
help='Generate test coverage statistics',
)
# Build something
parser.add_argument(
'--all', '-a',
action='store_true',
help='Build all containers',
)
parser.add_argument(
'--extras', '-e',
action='store_true',
help='Build extras (non-essential) images (i.e. no proxy or lte)',
)
parser.add_argument(
'--deployment', '-d',
action='store',
default='all',
help='Build deployment type: %s' % ','.join(DEPLOYMENTS),
)
parser.add_argument(
'--build-service', '-b',
help='Build particular service',
)
# How to do it
parser.add_argument(
'--nocache', '-n',
action='store_true',
help='Build the images with no Docker layer caching',
)
parser.add_argument(
'--up', '-u',
action='store_true',
help='Leave containers up after running tests',
)
args = parser.parse_args()
return args
if __name__ == '__main__':
main()
|
magma/magma
|
orc8r/cloud/docker/build.py
|
build.py
|
py
| 10,627 |
python
|
en
|
code
| 1,605 |
github-code
|
6
|
11833093627
|
import qrcode
from django.db import models
from django.utils.text import slugify
from django.utils.html import mark_safe
from cms.models import Title
from django.contrib.sites.models import Site
import uuid
class QrCodeUrlPost(models.Model):
TITLE_URLS = [(o.path, o.title) for o in Title.objects.filter(publisher_is_draft=False).exclude(path='')]
uuid_field = models.UUIDField(blank=True, editable=False)
page_url = models.CharField(blank=True, max_length=20, choices=TITLE_URLS)
url = models.CharField(blank=True, max_length=255, verbose_name="Url")
thumbnail = models.ImageField(blank=True, upload_to='qrCode')
slug = models.SlugField(max_length=255, unique=True, blank=True)
name = models.CharField(max_length=255, unique=True, verbose_name="Name")
last_updated = models.DateTimeField(auto_now=True)
start_date = models.DateField(blank=True, null=True)
activate = models.BooleanField(default=False, verbose_name="Activate")
created_at = models.DateTimeField(auto_now=True)
def save(self, *args, **kwargs):
current_site = Site.objects.get_current()
if not self.uuid_field:
self.uuid_field = uuid.uuid4()
self.slug = self.uuid_field
if self.page_url:
self.url = str(current_site.domain) + '/' + str(self.page_url)
# self.url = 'http://127.0.0.1:8000/' + str(self.page_url)
img = qrcode.make(self.url)
type(img) # qrcode.image.pil.PilImage
img_name = str(self.slug) + '.png'
img.save('./media/qrCode/' + img_name)
self.thumbnail = img_name
super().save(*args, **kwargs)
def img_preview(self):
return mark_safe(f'<img src = "/media/qrCode/{self.thumbnail}" width = "150"/>')
def link_preview(self):
return mark_safe(f'<a href="{self.url}" target=_blank>{self.uuid_field}</a>')
|
vazvieirafrederic67/qrCodePlugin
|
models.py
|
models.py
|
py
| 1,878 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40033915805
|
#import necessary modules
import numpy as np
from numpy import load
from sklearn.model_selection import StratifiedKFold
from sklearn.svm import SVC
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer, LabelEncoder, MinMaxScaler, StandardScaler
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
data = load('trained_faces_enc.npz')
encodings, labels = data['arr_0'], data['arr_1']
n_folds = 10
kf = StratifiedKFold(n_splits=n_folds, shuffle=True)
accuracies = []
precisions = []
recalls = []
f1_scores = []
for train_index, val_index in kf.split(encodings, labels):
X_train, X_val = encodings[train_index], encodings[val_index]
y_train, y_val = labels[train_index], labels[val_index]
in_encoder = Normalizer(norm='l2')
in_encoder.fit(X_train)
trainX = in_encoder.transform(X_train)
valX = in_encoder.transform(X_val)
lb = LabelEncoder()
lb.fit(y_train)
y_train = lb.transform(y_train)
y_val = lb.transform(y_val)
svm = make_pipeline(MinMaxScaler(), SVC(kernel='rbf', C=1, gamma=0.01, probability=True))
svm.fit(trainX, y_train)
    y_pred = svm.predict(valX)
accuracy = accuracy_score(y_val, y_pred)
precision = precision_score(y_val, y_pred, average='weighted')
recall = recall_score(y_val, y_pred, average='weighted')
f1 = f1_score(y_val, y_pred, average='weighted')
accuracies.append(accuracy)
precisions.append(precision)
recalls.append(recall)
f1_scores.append(f1)
avg_accuracy = np.mean(accuracies)
avg_precision = np.mean(precisions)
avg_recall = np.mean(recalls)
avg_f1_score = np.mean(f1_scores)
print("Average Accuracy:", avg_accuracy*100)
print("Average Precision:", avg_precision*100)
print("Average Recall:", avg_recall*100)
print("Average F1 Score:", avg_f1_score*100)
|
karth1ksr/Face-Recognition-with-Facenet
|
cross validation.py
|
cross validation.py
|
py
| 1,907 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12178949620
|
import time
import os
from maeda.SystemFile import File
class Csv:
def __init__(self, labels = [], directory = "data.csv"):
self.labels = labels
self.file = File(directory)
self._config()
def _config(self):
data_loc = []
if not self.file.CheckFileSize():
for i in range(0,len(self.labels)):
if i < len(self.labels)-1:
self.file.write_file_append(self.labels[i]+";")
else:
self.file.write_file_append(self.labels[i]+"\n")
def load_value(self, data = []):
for i in range(0,len(data)):
if i < len(data)-1:
self.file.write_file_append(data[i]+";")
else:
self.file.write_file_append(data[i]+"\n")
if __name__ == "__main__":
test = Csv(labels = ['Data', 'canal1', 'canal2', 'canal3'])
test.load_value(["20/8/1974", "98.0", "122.6", "5.7"])
test.load_value(["20/8/1974", "100.0", "123.4", "5.6"])
test.load_value(["20/8/1974", "101.8.0", "124.4", "5.8"])
|
equipe-maeda/MaedaLibsPi
|
maeda/Csv.py
|
Csv.py
|
py
| 1,137 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35428261847
|
"""
This script detects laughter within all audio files contained in the directory
`root_dir/audio/raw`, and save one pickle file for each audio file with
laughter timecodes in the directory `root_dir/audio/laughter`.
"""
import argparse
import os
import os.path as osp
import pickle
from laughter_detection.core.laughter_detector import LaughterDetector
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument(
"root_dir", type=str, help="Path to the root of FunnyNet dataset"
)
parser.add_argument(
"--embedding-name",
"-e",
type=str,
help="embedding model to use.",
default="byola",
)
parser.add_argument(
"--laughter-dir",
"-l",
type=str,
help="Path to the directory to save detected laughters",
default=None,
)
parser.add_argument(
"--n-clusters", "-n", type=int, help="Number of clusters", default=3
)
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_arguments()
embedding_name = args.embedding_name
root_dir = args.root_dir
laughter_dir = args.laughter_dir
n_clusters = args.n_clusters
if not laughter_dir:
laughter_dir = osp.join(root_dir, "audio", "laughter", embedding_name)
if not osp.exists(laughter_dir):
os.makedirs(laughter_dir)
raw_dir = osp.join(root_dir, "audio", "raw")
audio_filenames = sorted(os.listdir(raw_dir))
laughter_detector = LaughterDetector(
embedding_name, root_dir, num_workers=6, n_clusters=n_clusters
)
pred_timecodes = laughter_detector.detect_laughters()
for current_filename, current_timecodes in pred_timecodes.items():
laughter_filename = f"{current_filename[:-4]}.pk"
laughter_path = osp.join(laughter_dir, laughter_filename)
# Save laughter timecodes
with open(laughter_path, "wb") as f:
pickle.dump(current_timecodes, f)
|
robincourant/FunnyNet
|
laughter_detection/scripts/detect_laughters.py
|
detect_laughters.py
|
py
| 1,990 |
python
|
en
|
code
| 1 |
github-code
|
6
|
1962665868
|
import tensorflow as tf
import numpy as np
class MultiArmedBandit():
def __init__(self, input_dimension=[], output_dimension=[], layer_sizes=[], learning_rate=1e-4, model_ckpt=None):
if model_ckpt is None:
self.input, self.output = _construct_network(input_dimension, output_dimension, layer_sizes)
nrof_gpu = 0 # {0, 1}
config = tf.ConfigProto( device_count = {'GPU': nrof_gpu} )
self.sess = tf.Session( config = config )
#self.sess = tf.Session()
optimizer = tf.train.AdamOptimizer(learning_rate)
self.pt_step, self.pt_target, self.pt_loss = _setup_pre_training(self.output, output_dimension, optimizer)
self.update_step, self.action, self.target, self.loss = _setup_training(self.output, output_dimension, optimizer)
self.sigmoid_output = tf.sigmoid( self.output )
self.initialize()
else:
# Load model from checkpoint saved earlier
self.sess = tf.Session()
saver = tf.train.import_meta_graph(model_ckpt + "/model.ckpt.meta")
saver.restore(self.sess, tf.train.latest_checkpoint( model_ckpt ) )
print("Model restored from " + model_ckpt)
graph = tf.get_default_graph()
self.input = graph.get_tensor_by_name("input:0")
self.output = graph.get_tensor_by_name("output:0")
self.action = graph.get_tensor_by_name("action:0")
self.target = graph.get_tensor_by_name("target:0")
self.loss = graph.get_tensor_by_name("loss:0")
self.update_step = graph.get_operation_by_name("update_step")
self.sigmoid_output = tf.sigmoid( self.output )
def initialize(self):
self.sess.run(tf.global_variables_initializer())
def pretrain_agent(self, pt_input, pt_target):
self.sess.run(self.pt_step, feed_dict={self.input: pt_input, self.pt_target: pt_target})
def pretrain_loss(self, pt_input, pt_target):
return self.sess.run(self.pt_loss, feed_dict={self.input: pt_input, self.pt_target: pt_target})
def calculate_output(self, input_):
return self.sess.run(self.sigmoid_output, feed_dict={self.input: input_})
def calculate_loss(self, input_, action, reward):
action_selector = np.column_stack((np.arange(len(action)), np.array(action)))
return self.sess.run(self.loss, feed_dict={self.input: input_, self.action: action_selector, self.target:reward})
def update_agent(self, input_, action, reward):
action_selector = np.column_stack((np.arange(len(action)), np.array(action)))
self.sess.run(self.update_step, feed_dict={self.input: input_, self.action: action_selector, self.target: reward})
def save_model(self, dirpath):
saver = tf.train.Saver()
save_path = saver.save(self.sess, dirpath + "/model.ckpt")
print("Model saved to: %s" % save_path)
#tf.saved_model.simple_save(self.sess,
# dirpath,
# inputs={'input':self.input, 'action':self.action},
# outputs={'output':self.output, 'target':self.target})
def close_session(self):
tf.reset_default_graph()
self.sess.close()
def _weight_variable(shape, name):
return tf.Variable(tf.truncated_normal(shape, stddev=0.1), name=name)
def _bias_variable(shape, name):
return tf.Variable(tf.constant(value=0.01, shape=shape), name=name)
def _leaky_relu(x):
return tf.nn.relu(x) + 0.1 * tf.nn.relu(-x)
def _setup_pre_training(output, output_dimension, optimizer):
pretrain_target = tf.placeholder(tf.float32, [None, output_dimension], name='pt_target')
pretrain_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=output, labels=pretrain_target)
#pretrain_step = tf.train.AdamOptimizer(1e-6).minimize(pretrain_loss)
pretrain_step = optimizer.minimize(pretrain_loss)
return (pretrain_step, pretrain_target, pretrain_loss)
def _setup_training(output, output_dimension, optimizer):
target = tf.placeholder(tf.float32, [None], name='target')
action = tf.placeholder(tf.int32, [None, 2], name='action')
out = tf.gather_nd(output, action)
# loss = (tf.sigmoid(out) - target) ** 2
loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=out, labels=target, name='loss')
update_step = optimizer.minimize(loss, name='update_step')
return (update_step, action, target, loss)
def _construct_network(input_dimension, output_dimension, layer_sizes):
input_= tf.placeholder(tf.float32, [None, input_dimension], name='input')
nrof_layers = len(layer_sizes)
# List of hidden layer variables
W = []
b = []
h = []
if nrof_layers > 0:
# Define the first hidden layer
with tf.name_scope('Hidden_Layer_0'):
l = layer_sizes[0]
W.append(_weight_variable([input_dimension, l], 'W0'))
b.append(_bias_variable([l], 'b0'))
            h.append(tf.nn.relu(tf.matmul(input_, W[0]) + b[0]))  # 'h0'
# Define the subsequent hidden layers
for i in range(1, nrof_layers):
with tf.name_scope('Hidden_Layer_%d'%(i)):
l = layer_sizes[i]
W.append(_weight_variable([layer_sizes[i-1], l], 'W%d'%(i)))
b.append(_bias_variable([l], 'b%d'%(i)))
h.append(tf.nn.relu(tf.matmul(h[i-1], W[i]) + b[i]))#, name='h%d'%(i)))
# Define the output layer
W.append(_weight_variable([layer_sizes[-1], output_dimension], 'W%d'%(nrof_layers)))
b.append(_bias_variable([output_dimension], 'b%d'%(nrof_layers)))
output_ = tf.add(tf.matmul(h[-1], W[-1]), b[-1], name='output')
else:
W.append(_weight_variable([input_dimension, output_dimension], 'W0'))
b.append(_bias_variable([output_dimension], 'b0'))
output_ = tf.add( tf.matmul(input_, W[-1]), b[-1], name='output' )
return (input_, output_)
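# Illustrative usage sketch (assumed, not from the original repo; requires TensorFlow 1.x,
# like the module itself): a 10-feature context, 4 arms, one hidden layer. Names and
# values are placeholders.
if __name__ == "__main__":
    agent = MultiArmedBandit(input_dimension=10, output_dimension=4, layer_sizes=[32])
    contexts = np.random.rand(8, 10)                    # batch of 8 contexts
    scores = agent.calculate_output(contexts)           # per-arm sigmoid scores
    actions = np.argmax(scores, axis=1)                 # greedy arm selection
    rewards = np.random.randint(0, 2, size=8).astype(np.float32)
    agent.update_agent(contexts, actions, rewards)
    agent.close_session()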
|
vumaasha/contextual-mab-link-adaptation
|
src/link_adaptation_agents/multi_armed_bandit.py
|
multi_armed_bandit.py
|
py
| 6,322 |
python
|
en
|
code
| 2 |
github-code
|
6
|
25377227491
|
from django.urls import path
from . import views
from django.conf import settings
from django.conf.urls.static import static
from django.contrib.auth import views as auth_views
from .views import PostListView,PostDetailView,PostCreateView,PostUpdateView, PostDeleteView, ProfileView, AddFollower,RemoveFollower,CommentCreateView
urlpatterns = [
# path('',PostListView.as_view(), name='home'),
path('',views.index, name='home'),
path('profile/<int:pk>',ProfileView.as_view(),name = 'profile'),
path('post/<int:pk>/',PostDetailView.as_view(), name='post-detail'),
path('post/<int:pk>/update/',PostUpdateView.as_view(), name='post-update'),
path('post/<int:pk>/delete/',PostDeleteView.as_view(), name='post-delete'),
path('post/new/',PostCreateView.as_view(), name='post-create'),
path('user_registration/',views.register, name ='user_registration' ),
path('accounts/login/',auth_views.LoginView.as_view(template_name = 'accounts/login.html'), name = 'login'),
path('logout/', auth_views.LogoutView.as_view(template_name = 'accounts/logout.html'), name='logout'),
path('profile/<int:pk>/followers/add', AddFollower.as_view(), name='add-follower'),
path('profile/<int:pk>/followers/remove', RemoveFollower.as_view(), name='remove-follower'),
path('post/<int:pk>/comment/',CommentCreateView.as_view(), name='add_comment'),
path('search/', views.search_results, name='search_results')
]
if settings.DEBUG:
urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
|
Njoro410/Insta-clone
|
insta/urls.py
|
urls.py
|
py
| 1,566 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13006866992
|
import numpy as np
import pandas as pd
import cv2
from PIL import Image
import joblib
import time
import imagehash
import multiprocessing as mp
import logging
import os
from dataclasses import dataclass, field
from typing import List, Dict
from slideextract.config import ocr_feature_names, ocr_new_slide_clf, DATA_FILE_PATH
from slideextract.comparator.docDiffBuilder import docDiffBuilder, doc_diff_comparator
from slideextract.processing.ffmpeg_sample_video import sample_video
logger = logging.getLogger('baseSlide')
@dataclass
class BaseSlideExtractor:
"""
base class for slide extractor,
all slide extractor should inherit from this class
have general methods to extract images from video
compare slides based on imagebased feature and ocr feature
"""
name = "baseSlideExtractor"
data_file_path = None
def __init__(self, *args, **kwargs) -> None:
pass
def extract_slides(self, mp4_file_path: str):
        raise NotImplementedError
def extract_slides_from_file(self, mp4_file_path: str, threads: int = 0):
self.data_file_path = os.path.join(DATA_FILE_PATH, os.path.basename(mp4_file_path).split(".")[0], self.name)
output_path = os.path.join(self.data_file_path, "frames")
frames_data = sample_video(mp4_file_path, output_path=output_path, threads=threads)
return self.extract_slides_from_frames(frames_data)
def extract_slides_from_frames(self, frames_data: dict):
        raise NotImplementedError
def _generate_ocr_doc_feature(self, ocr_paragraph1: str, ocr_paragraph2: str, doc_diff_comparator: docDiffBuilder=doc_diff_comparator):
"""
        generate features based on OCR results
"""
doc1 = ocr_paragraph1
doc2 = ocr_paragraph2
doc_compare_dict = doc_diff_comparator.compare(doc1, doc2)
doc_compare_dict['frame_token_ct'] = max([len(doc1), len(doc2)])
        # need to test if the dataframe results are correct
feature_df = pd.DataFrame([doc_compare_dict])
feature_df = feature_df.rename(columns={'letter_dis':'letter_dissim'})
return feature_df[ocr_feature_names]
def _compare_ocr_results(self, ocr_slide_indices: List, ocr_paragraphs: Dict, clf_model) -> pd.DataFrame:
ocr_slide_record = []
for index in ocr_slide_indices:
if index > 0:
feature_df = \
self._generate_ocr_doc_feature(
ocr_paragraph1=ocr_paragraphs[index], ocr_paragraph2=ocr_paragraphs[index-1])
ocr_is_new_slide = ocr_new_slide_clf.predict(feature_df)[0]
ocr_slide_record.append((index, ocr_is_new_slide))
ocr_new_slide_df = pd.DataFrame(ocr_slide_record)
ocr_new_slide_df.columns = ['index', 'ocr_is_new_slide']
return ocr_new_slide_df
def _classify_if_ocr_same(self, feature_df: pd.DataFrame, clf_model) -> bool:
"""
classify if ocr results are the same
"""
return clf_model.predict(feature_df)[0]
@classmethod
    def compare_frames(cls, frames, comparators):
"""
Use the output of 1, and a list of python callable[(image1, image2), float],
return the dataframe with the following columns:
index: index of the frame
phash: percetual hash of the image with previous frame (create phash comparater)
dhash: dhash diff of the image with previous frame (create dhash comparater)
"""
data = []
prev_frame = None
for index, frame in frames.items():
row = {"index": index}
if prev_frame is not None:
for comparator, name in comparators:
row[name] = comparator(prev_frame, frame)
data.append(row)
prev_frame = frame
return pd.DataFrame(data)
|
shex1627/slideextract
|
src/slideextract/slide_extractors/baseSlideExtractor.py
|
baseSlideExtractor.py
|
py
| 4,003 |
python
|
en
|
code
| 1 |
github-code
|
6
|
26869499538
|
from flask import Flask, redirect, request, session, Response, jsonify
from flask_login import LoginManager
from flask_mail import Mail
from myapp.models.base import db
# Initialize LoginManager
login_manager = LoginManager()
mail = Mail()
def create_app():
app = Flask(__name__)
app.config.from_object('myapp.secure')
app.config.from_object('myapp.setting')
    # Register flask-login
    login_manager.init_app(app)
    # Login page
    login_manager.login_view = 'web.login'
    login_manager.login_message = 'Please log in first'
    # Register mail
    mail.init_app(app)
    # Register blueprints
    register_blueprint(app)
    # Register SQLAlchemy
db.init_app(app)
db.create_all(app=app)
app.response_class = AutoJsonifyResponse
return app
def register_blueprint(app):
    # Register the web blueprint
from myapp.controller import api
app.register_blueprint(api)
class AutoJsonifyResponse(Response):
@classmethod
def force_type(cls, response, environ=None):
if isinstance(response, (list, dict)):
response = jsonify(response)
return super(Response, cls).force_type(response, environ)
|
102244653/WebByFlask
|
myapp/__init__.py
|
__init__.py
|
py
| 1,159 |
python
|
en
|
code
| 0 |
github-code
|
6
|
33107730586
|
import math
seznam = []
for i in range(0, 1000):
    stevilka = int(input("Enter a number: "))
    seznam.append(stevilka)
    if stevilka == 0:  # is the number equal to 0?
        break
print("The smallest number is: ", min(seznam))
|
rutniklea/coding-with-python
|
Naloga35.py
|
Naloga35.py
|
py
| 240 |
python
|
sl
|
code
| 0 |
github-code
|
6
|
34105917071
|
import cv2 as cv
import numpy as np
def diferenca():
    captura = cv.VideoCapture(0)
    # 'quarto' was undefined in the original; assume it is meant to be a reference
    # (background) frame captured once at start-up and subtracted from every frame.
    ret, quarto = captura.read()
    quarto = cv.cvtColor(quarto, cv.COLOR_BGR2GRAY)
    while True:
        ret, frame = captura.read()
        frame = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
        cv.imshow("Video", np.subtract(frame, quarto))
k = cv.waitKey(30) & 0xff
if k == 27:
break
captura.release()
cv.destroyAllWindows()
if __name__ == '__main__':
diferenca()
|
gabrielga-dev/visao-computacional-2022
|
s6/main.py
|
main.py
|
py
| 419 |
python
|
en
|
code
| 0 |
github-code
|
6
|
23856904095
|
'''
Created by Han Xu
email:[email protected]
'''
import xml.etree.ElementTree as ET
def read_configuration(xml_file):
    # Parse the XML file
tree = ET.parse(xml_file)
root = tree.getroot()
    # Get the CamVid dataset path
camvid_path = root.find('camvid_path').text
    # Get the model paths
HANet_oneHAM_path = root.find('HANet_oneHAM_path').text
HANet_twoHAM_path = root.find('HANet_twoHAM_path').text
    # Get the path for saving models
save_path = root.find('save_path').text
return camvid_path, HANet_oneHAM_path, HANet_twoHAM_path, save_path
# XML file path used for testing
xml_file_path = "conf.xml"
# Read the configuration
import os
# Get the directory containing the current script
current_directory = os.path.dirname(os.path.abspath(__file__))
# Change to that directory
os.chdir(current_directory)
camvid_path, HANet_oneHAM_path, HANet_twoHAM_path, save_path = read_configuration(xml_file_path)
if __name__ == "__main__":
    # Print the values that were read
print(f"camvid_path: {camvid_path}")
print(f"HANet_oneHAM_path: {HANet_oneHAM_path}")
print(f"HANet_twoHAM_path: {HANet_twoHAM_path}")
print(f"save_path: {save_path}")
|
UnderTurrets/HeightDriven_DoubleAttentions_Net
|
conf/__init__.py
|
__init__.py
|
py
| 1,167 |
python
|
en
|
code
| 1 |
github-code
|
6
|
18991460697
|
import gzip
import shutil
import os, sys
"""
This script compresses the text files in a directory into gzip format.
"""
fpath = '/home/varsha/fl-proj/lingspam_public/lemm_stop/part9'
fl_list = os.listdir(fpath)
for fl in fl_list:
if ".txt.gz" not in fl:
print(fl)
fop = os.path.join(fpath, fl)
print(type(fop))
with open(fop, 'rb') as f_in:
fop += ".gz"
with gzip.open(fop, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
|
vaarsha/Spam-Filtering
|
compressfiles.py
|
compressfiles.py
|
py
| 475 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22666826531
|
from torchmetrics import Accuracy, F1Score, Precision, Recall, AUROC
class Configs:
def __init__(self, dataset="EMG"):
# preprocess configs
if dataset == "EMG":
self.dataset_config = EMGGestureConfig()
elif dataset == "NINA":
self.dataset_config = NinaproDB5Config()
self.model_config = ModelConfig(self.dataset_config)
self.training_config = TrainingConfig(self.dataset_config)
class EMGGestureConfig:
def __init__(self):
self.url = "https://archive.ics.uci.edu/ml/machine-learning-databases/00481/EMG_data_for_gestures-master.zip"
self.save_dir = "dataset/EMGGesture"
self.batch_size = 256
self.partition = [0.8, 0., 0.2]
self.sampling_freq = 1000
self.pass_band = 200
self.classes = [1, 2, 3, 4, 5, 6]
self.window_length = 256
self.window_padding = 32
self.window_step = 64
self.threshold = 0
self.channels = 8
self.num_classes = len(self.classes)
self.jitter_ratio = 0.1
self.scaling_ratio = 0.1
self.num_permute = 8
class NinaproDB5Config:
def __init__(self):
self.url = [
"http://ninapro.hevs.ch/download/file/fid/457",
"http://ninapro.hevs.ch/download/file/fid/458",
"http://ninapro.hevs.ch/download/file/fid/459",
"http://ninapro.hevs.ch/download/file/fid/467",
"http://ninapro.hevs.ch/download/file/fid/461",
"http://ninapro.hevs.ch/download/file/fid/462",
"http://ninapro.hevs.ch/download/file/fid/463",
"http://ninapro.hevs.ch/download/file/fid/464",
"http://ninapro.hevs.ch/download/file/fid/465",
"http://ninapro.hevs.ch/download/file/fid/466",
]
self.save_dir = "dataset/Ninapro_DB5"
self.batch_size = 256
self.partition = [0.6, 0, 0.4]
self.sampling_freq = 200
self.pass_band = None
self.classes = [0, 6, 13, 14, 15, 16]
self.window_length = 512
self.window_padding = 32
self.window_step = 64
self.threshold = 0
self.channels = 8
self.num_classes = len(self.classes)
self.jitter_ratio = 0.1
self.scaling_ratio = 0.1
self.num_permute = 8
self.frequency_masking_ratio = 0.01
self.frequency_masking_damp = 0.5
class ModelConfig:
def __init__(self, dataset_config):
# (B, C, T)
self.span = dataset_config.window_length # keeping up with window length
self.input_channels = dataset_config.channels
self.kernel_size = 8
self.stride = 1
self.final_out_channels = 128
self.num_classes = dataset_config.num_classes
self.dropout = 0.35
self.conv_output_dim = self.span // 8
self.feature_len = 128
self.hidden_dim = 100
self.timesteps = self.conv_output_dim // 4
self.loss_temperature = 0.2
self.classifier_hidden = [512, self.num_classes]
self.classifier_dropout = 0.15
class TrainingConfig:
def __init__(self, config):
self.bag_of_metrics = {
"accuracy": Accuracy(
task="multiclass",
num_classes=config.num_classes,
average="micro",
),
"f1": F1Score(
task="multiclass",
num_classes=config.num_classes,
average="macro",
),
"precision": Precision(
task="multiclass",
num_classes=config.num_classes,
average="macro",
),
"recall": Recall(
task="multiclass",
num_classes=config.num_classes,
average="macro",
),
"auroc": AUROC(
task="multiclass",
num_classes=config.num_classes,
average="macro",
),
}
self.log_save_dir = "run1"
self.experiment_name = "TSTCC"
self.mode = "pretrain_finetune"
self.seed = 42
self.pretrain_epoch = 100
self.finetune_epoch = 100
self.lr = 3e-4
self.classifier_lr = 1e-4
self.classifier_weight_decay = 3e-3
self.per_class_samples = 100
self.version = f"samples_{self.per_class_samples}_pe_{self.pretrain_epoch}_fe_{self.finetune_epoch}_seed_{self.seed}"
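# Illustrative usage (assumed): build the bundled configuration objects and read a few of
# the derived fields that the training code consumes.
if __name__ == "__main__":
    cfg = Configs(dataset="EMG")
    print(cfg.dataset_config.num_classes)    # 6 gesture classes
    print(cfg.model_config.conv_output_dim)  # window_length // 8 == 32
    print(cfg.training_config.version)       # run identifier string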
|
3rd-Musketeer/UAF-PyTorch
|
configs/TSTCC_configs.py
|
TSTCC_configs.py
|
py
| 4,479 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31236792661
|
# you can write to stdout for debugging purposes, e.g.
# print("this is a debug message")
def solution(A):
if (len(A)) < 2:
return 0
else:
MC = 0
MS = 0
for i,x in enumerate(A):
DV = (x - A[i-1]) if i > 0 else 0
MC = max(MC+DV,0)
MS = max(MS, MC)
return MS
print(solution([23171, 21011, 21123, 21366, 21013, 21367]))
|
diegoami/DA_Codility_EX
|
sum/sum1.py
|
sum1.py
|
py
| 404 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72532300669
|
# pylint: disable=redefined-outer-name
# pylint: disable=unused-argument
import json
from pathlib import Path
from typing import Any, Iterable
from unittest.mock import AsyncMock, call
from uuid import UUID, uuid4
import pytest
from models_library.projects import NodesDict, ProjectID
from models_library.projects_networks import NetworksWithAliases
from models_library.projects_nodes import Node
from pydantic import BaseModel, PositiveInt
from pytest_mock.plugin import MockerFixture
from simcore_service_director_v2.modules.projects_networks import (
_get_networks_with_aliases_for_default_network,
_send_network_configuration_to_dynamic_sidecar,
)
# UTILS
class MockedCalls(BaseModel):
detach: list[Any]
attach: list[Any]
class Example(BaseModel):
existing_networks_with_aliases: NetworksWithAliases
new_networks_with_aliases: NetworksWithAliases
expected_calls: MockedCalls
@classmethod
def using(
cls,
existing: dict[str, Any],
new: dict[str, Any],
detach: list[Any],
attach: list[Any],
) -> "Example":
return cls(
existing_networks_with_aliases=NetworksWithAliases.parse_obj(existing),
new_networks_with_aliases=NetworksWithAliases.parse_obj(new),
expected_calls=MockedCalls(detach=detach, attach=attach),
)
def _node_id(number: int) -> str:
return f"{UUID(int=number)}"
def _node_alias(number: int) -> str:
return f"node_alias_{number}"
def _network_name(number: int) -> str:
return f"network_{number}"
@pytest.fixture
def examples_factory(mock_scheduler: AsyncMock, project_id: ProjectID) -> list[Example]:
return [
# nothing exists
Example.using(
existing={},
new={
_network_name(1): {
_node_id(1): _node_alias(1),
_node_id(2): _node_alias(2),
}
},
detach=[],
attach=[
call.attach_project_network(
node_id=UUID(_node_id(2)),
project_network=_network_name(1),
network_alias=_node_alias(2),
),
call.attach_project_network(
node_id=UUID(_node_id(1)),
project_network=_network_name(1),
network_alias=_node_alias(1),
),
],
),
# with existing network, remove node 2
Example.using(
existing={
_network_name(1): {
_node_id(1): _node_alias(1),
_node_id(2): _node_alias(2),
}
},
new={
_network_name(1): {
f"{_node_id(1)}": _node_alias(1),
}
},
detach=[
call.detach_project_network(
node_id=UUID(_node_id(2)),
project_network=_network_name(1),
),
],
attach=[],
),
# remove node 2 and add node 2 with different alias
Example.using(
existing={
_network_name(1): {
_node_id(1): _node_alias(1),
_node_id(2): _node_alias(2),
}
},
new={
_network_name(1): {
_node_id(1): _node_alias(1),
_node_id(2): _node_alias(3),
}
},
detach=[
call.detach_project_network(
node_id=UUID(_node_id(2)),
project_network=_network_name(1),
),
],
attach=[
call.attach_project_network(
node_id=UUID(_node_id(2)),
project_network=_network_name(1),
network_alias=_node_alias(3),
),
],
),
# nothing happens when updates with the same content
Example.using(
existing={
_network_name(1): {
_node_id(1): _node_alias(1),
_node_id(2): _node_alias(2),
}
},
new={
_network_name(1): {
_node_id(1): _node_alias(1),
_node_id(2): _node_alias(2),
}
},
detach=[],
attach=[],
),
]
@pytest.fixture
def mock_scheduler() -> AsyncMock:
return AsyncMock()
@pytest.fixture
def mock_director_v0_client() -> AsyncMock:
return AsyncMock()
@pytest.fixture
def rabbitmq_client() -> AsyncMock:
return AsyncMock()
@pytest.fixture
def project_id() -> ProjectID:
return uuid4()
@pytest.fixture
def dy_workbench_with_networkable_labels(mocks_dir: Path) -> NodesDict:
dy_workbench_template = mocks_dir / "fake_dy_workbench_template.json"
assert dy_workbench_template.exists()
dy_workbench = json.loads(dy_workbench_template.read_text())
parsed_workbench: NodesDict = {}
for node_uuid, node_data in dy_workbench.items():
node_data["label"] = f"label_{uuid4()}"
parsed_workbench[node_uuid] = Node.parse_obj(node_data)
return parsed_workbench
@pytest.fixture
def fake_project_id() -> ProjectID:
return uuid4()
@pytest.fixture
def user_id() -> PositiveInt:
return 1
@pytest.fixture
def mock_docker_calls(mocker: MockerFixture) -> Iterable[dict[str, AsyncMock]]:
requires_dynamic_sidecar_mock = AsyncMock()
requires_dynamic_sidecar_mock.return_value = True
class_base = "simcore_service_director_v2.modules.dynamic_sidecar.scheduler._task.DynamicSidecarsScheduler"
mocked_items = {
"attach": mocker.patch(f"{class_base}.attach_project_network", AsyncMock()),
"detach": mocker.patch(f"{class_base}.detach_project_network", AsyncMock()),
"requires_dynamic_sidecar": mocker.patch(
"simcore_service_director_v2.modules.projects_networks.requires_dynamic_sidecar",
requires_dynamic_sidecar_mock,
),
}
yield mocked_items
async def test_send_network_configuration_to_dynamic_sidecar(
mock_scheduler: AsyncMock,
project_id: ProjectID,
examples_factory: list[Example],
mock_docker_calls: dict[str, AsyncMock],
) -> None:
for example in examples_factory:
await _send_network_configuration_to_dynamic_sidecar(
scheduler=mock_scheduler,
project_id=project_id,
new_networks_with_aliases=example.new_networks_with_aliases,
existing_networks_with_aliases=example.existing_networks_with_aliases,
)
mock_scheduler.assert_has_calls(example.expected_calls.attach, any_order=True)
mock_scheduler.assert_has_calls(example.expected_calls.detach, any_order=True)
async def test_get_networks_with_aliases_for_default_network_is_json_serializable(
mock_director_v0_client: AsyncMock,
fake_project_id: ProjectID,
dy_workbench_with_networkable_labels: dict[str, Any],
user_id: PositiveInt,
rabbitmq_client: AsyncMock,
mock_docker_calls: dict[str, AsyncMock],
) -> None:
assert await _get_networks_with_aliases_for_default_network(
project_id=fake_project_id,
user_id=user_id,
new_workbench=dy_workbench_with_networkable_labels,
director_v0_client=mock_director_v0_client,
rabbitmq_client=rabbitmq_client,
)
|
ITISFoundation/osparc-simcore
|
services/director-v2/tests/unit/test_modules_project_networks.py
|
test_modules_project_networks.py
|
py
| 7,545 |
python
|
en
|
code
| 35 |
github-code
|
6
|
73901138108
|
import pi_servo_hat
import time
# Function to rescale web interface controls
def scale(x, in_min, in_max, out_min, out_max):
return (x - in_min)*(out_max - out_min)/(in_max - in_min) + out_min
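# Illustrative note (not part of the original firmware): scale() maps a value
# linearly from one range onto another. For example, a web pan value of 150 in
# the 50-250 range maps onto the 0-90 servo range as:
#   scale(150, 50, 250, 0, 90) -> (150 - 50) * (90 - 0) / (250 - 50) + 0 = 45.0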
########################################################################
# Main Function:
########################################################################
# Initialize Servo Hat
servohat = pi_servo_hat.PiServoHat()
# Restart Servo Hat
servohat.restart()
pan_setting_old = 0
tilt_setting_old = 0
while True:
pipein = open("/var/www/html/FIFO_pipan", 'r')
line = pipein.readline()
line_array = line.split(' ')
if line_array[0] == "servo":
# Rescales Left/Right position values
# web controls: Left = 250; Right = 50
# library controls: Left = 90; Right = 0
pan_setting = scale(int(line_array[1]), 50, 250, 0, 90)
# Rescales Up/Down position values
# web controls: Down = 220; Up = 80
# library controls: Down = 90; Up = 0
tilt_setting = scale(int(line_array[2]), 80, 220, 0, 90)
if pan_setting != pan_setting_old:
pan_setting_old = pan_setting
servohat.move_servo_position(0, pan_setting, 90)
time.sleep(.05)
if tilt_setting != tilt_setting_old:
tilt_setting_old = tilt_setting
servohat.move_servo_position(1, tilt_setting, 90)
time.sleep(.05)
pipein.close()
|
sparkfun/sparkfun_autonomous_kit
|
piservohat_web_interface_firmware.py
|
piservohat_web_interface_firmware.py
|
py
| 1,318 |
python
|
en
|
code
| 5 |
github-code
|
6
|
74535254588
|
import seaborn as sns
from matplotlib import pyplot as plt
import tools
import numpy as np
import pandas as pd
def plot_missrate_comp():
processed_row = tools.load_pkl('outputs/feature_explore[ards@origin]/row_missrate.pkl').flatten()
processed_col = tools.load_pkl('outputs/feature_explore[ards@origin]/col_missrate.pkl').flatten()
raw_row = tools.load_pkl('outputs/feature_explore[raw@version]/row_missrate.pkl').flatten()
raw_col = tools.load_pkl('outputs/feature_explore[raw@version]/col_missrate.pkl').flatten()
row_data = np.concatenate([processed_row, raw_row], axis=0)
col_data = np.concatenate([processed_col, raw_col], axis=0)
for data, label in zip([row_data, col_data], ['row', 'col']):
df = pd.DataFrame(data, columns=['data'])
df['source'] = 'raw'
lens = len(processed_row) if label == 'row' else len(processed_col)
df.loc[:lens, 'source'] = 'processed'
sns.histplot(df, x='data', hue='source', bins=20, stat='proportion', common_norm=False, shrink=0.95, element='bars', edgecolor=None)
plt.xlabel(f'{label} missrate')
plt.savefig(f'test_plot/{label}_missrate.png')
plt.close()
if __name__ == '__main__':
plot_missrate_comp()
|
on1262/sepsisdataprocessing
|
test.py
|
test.py
|
py
| 1,239 |
python
|
en
|
code
| 2 |
github-code
|
6
|
32661755491
|
"""
A basic CNN model
/**
* @author Xinping Wang
* @email [[email protected]]
* @create date 2021-09-11 09:32:41
* @modify date 2021-09-11 09:32:41
* @desc [description]
*/
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import numpy as np
from imageio import imread
classes = ("W", "R", "N1", "N2", "N3")
class Passthrough(nn.Module):
def __init__(self, n_channels=8, n_classes=5):
super().__init__()
self.conv1 = nn.Conv1d(8, 16, 15)
self.conv2 = nn.Conv1d(16, 32, 9)
self.conv3 = nn.Conv1d(32, 64, 5)
self.conv4 = nn.Conv1d(64, 128, 3)
self.conv5 = nn.Conv1d(128, 128, 3)
self.flat = nn.Flatten(1, -1)
self.fc = nn.Linear(128*5, 5)
self.maxpool = nn.MaxPool1d(5, stride=5)
self.leakyrelu = nn.LeakyReLU(0.01)
self.dropout = nn.Dropout(p=0.1)
        self.softmax = nn.LogSoftmax(dim=1)  # log-probabilities over the class dimension
self.avgpool = nn.AdaptiveAvgPool1d(5)
def forward(self, x):
x = self.dropout(self.maxpool(self.leakyrelu(self.conv1(x))))
x = self.dropout(self.maxpool(self.leakyrelu(self.conv2(x))))
x = self.dropout(self.maxpool(self.leakyrelu(self.conv3(x))))
x = self.dropout(self.maxpool(self.leakyrelu(self.conv4(x))))
x = self.avgpool(self.conv5(x))
x = self.softmax(self.fc(self.flat(x)))
# x = nn.MaxPool1d(5,5)
# x = F.relu(self.conv2(x))
# x = F.relu(self.conv3(x))
# x = F.relu(self.conv4(x))
# x = F.relu(self.conv5(x))
# x = F.softmax(self.fc(self.flat(x)), dim=1)
return x
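# Quick shape sanity check (illustrative addition, not part of the original model).
# The 3000-sample window length is an assumption (e.g. 30 s of 8-channel data at
# 100 Hz); only the channel count (8) and class count (5) come from the model above.
if __name__ == "__main__":
    model = Passthrough()
    dummy = torch.randn(4, 8, 3000)  # (batch, channels, samples)
    out = model(dummy)
    print(out.shape)  # expected: torch.Size([4, 5])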
|
CillianWang/ENN
|
Library/Models/CNN.py
|
CNN.py
|
py
| 1,777 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9782078918
|
class Node:
def __init__(self, value = None):
self.val = value
self.next = None
# merge sort on natural runs
def Merge(L1,L2):
inv=0
if L1 is None:
return L2
if L2 is None:
return L1
if L1.val>L2.val:
L1,L2=L2,L1
inv+=1
head=L1
L1=L1.next
curr=head
curr.next=None
while L1 is not None and L2 is not None:
if L1.val>L2.val:
inv+=1
L1,L2=L2,L1
curr.next=L1
L1=L1.next
curr=curr.next
curr.next=None
if L1 is None:
curr.next=L2
else:
curr.next=L1
return head
def mergeSort(L):
heads=[]
while L is not None:
head=L
curr=L
while curr.next is not None and curr.val<curr.next.val:
curr=curr.next
heads.append(head)
L=curr.next
curr.next=None
print("idzie")
heads2=[]
inv=0
while len(heads)>1:
print("idzie do merga",len(heads))
merged=Merge(heads[0],heads[1])
# print(heads,heads2)
heads2.append(merged)
# print(heads,heads2)
heads=heads[2:]
# print(heads,heads2)
heads2.extend(heads)
# print(heads,heads2)
heads=heads2[:]
# print(heads,heads2)
heads2=[]
return heads[0],inv
arr = [1, 20, 6, 4, 5]
lista=Node(1)
lista2=Node(20)
lista3=Node(6)
lista4=Node(4)
lista5=Node(5)
lista.next=lista2
lista2.next=lista3
lista3.next=lista4
lista4.next=lista5
print(lista)
print(mergeSort(lista))
|
wiksat/AlghorithmsAndDataStructures
|
ASD/Exercises/round1/ddd.py
|
ddd.py
|
py
| 1,553 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72531899069
|
# Citations according to https://scicrunch.org/resources
"""
NOTES:
- scicrunch API ONLY recognizes RRIDs from SciCrunch registry of tools (i.e. with prefix "SCR")
- scicrunch web search handles ALL RRIDs (see below example of citations from other)
- scicrunch API does NOT uses 'RRID:' prefix in rrid request parameters
"""
import re
def split_citations(citations: list[str]) -> list[tuple[str, str]]:
def _split(citation: str) -> tuple[str, str]:
if "," not in citation:
citation = citation.replace("(", "(,")
name, rrid = re.match(r"^\((.*),\s*RRID:(.+)\)$", citation).groups()
return name, rrid
return list(map(_split, citations))
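# Illustrative examples (not part of the original module) of how split_citations
# parses a proper citation string into a (name, RRID) pair:
#   split_citations(["(Jupyter Notebook, RRID:SCR_018315)"])
#       -> [("Jupyter Notebook", "SCR_018315")]
#   split_citations(["(RRID:Addgene_44362)"])  # no name before the RRID
#       -> [("", "Addgene_44362")]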
# http://antibodyregistry.org/AB_90755
ANTIBODY_CITATIONS = split_citations(["(Millipore Cat# AB1542, RRID:AB_90755)"])
# https://www.addgene.org/44362/
PLAMID_CITATIONS = split_citations(["(RRID:Addgene_44362)"])
# MMRRC,
# catalog https://www.mmrrc.org/catalog/cellLineSDS.php?mmrrc_id=26409
# https://scicrunch.org/resolver/RRID:MMRRC_026409-UCD.json
#
# As of May 2022, changed proper_citation change from
# '(MMRRC Cat# 026409-UCD, RRID:MMRRC_026409-UCD)' to
# 'RRID:MMRRC_026409-UCD'
#
ORGANISM_CITATIONS = split_citations(["(RRID:MMRRC_026409-UCD)"])
# https://web.expasy.org/cellosaurus/CVCL_0033
# As of May 2022, name changed from 'ATCC Cat# HTB-30' to 'AddexBio Cat# C0006007/65'
CELL_LINE_CITATIONS = split_citations(["(AddexBio Cat# C0006007/65, RRID:CVCL_0033)"])
#
# WARNING: Since Sep.2021, the order of the resolved hits list returned by
# https://scicrunch.org/resolver/RRID:CVCL_0033.json changes per call and
# sometimes (BCRJ Cat# 0226, RRID:CVCL_0033) appears as first hit instead
# https://scicrunch.org/resources/Tools/search?q=SCR_018315&l=SCR_018315
TOOL_CITATIONS = split_citations(
[
"(CellProfiler Image Analysis Software, RRID:SCR_007358)",
"(Jupyter Notebook, RRID:SCR_018315)",
"(Python Programming Language, RRID:SCR_008394)",
"(GNU Octave, RRID:SCR_014398)",
"(o²S²PARC, RRID:SCR_018997)",
]
)
NOT_TOOL_CITATIONS = (
ANTIBODY_CITATIONS + PLAMID_CITATIONS + ORGANISM_CITATIONS + CELL_LINE_CITATIONS
)
|
ITISFoundation/osparc-simcore
|
packages/pytest-simcore/src/pytest_simcore/helpers/utils_scrunch_citations.py
|
utils_scrunch_citations.py
|
py
| 2,212 |
python
|
en
|
code
| 35 |
github-code
|
6
|
5042024536
|
import tensorflow as tf
import numpy as np
import os,glob,cv2
import sys,argparse
class Predictor(object):
def __init__(self, graph_path, model_path):
## Let us restore the saved model
sess = tf.Session()
# Step-1: Recreate the network graph. At this step only graph is created.
saver = tf.train.import_meta_graph(graph_path)
# Step-2: Now let's load the weights saved using the restore method.
saver.restore(sess, tf.train.latest_checkpoint(model_path))
# Accessing the default graph which we have restored
graph = tf.get_default_graph()
        # Now, let's get hold of the op that can be processed to get the output.
# In the original network y_pred is the tensor that is the prediction of the network
y_pred = graph.get_tensor_by_name("y_pred:0")
## Let's feed the images to the input placeholders
x= graph.get_tensor_by_name("x:0")
y_true = graph.get_tensor_by_name("y_true:0")
self._sess = sess
self._y_pred = y_pred
self._x = x
self._y_true = y_true
def predictClass(self, frame, num_classes):
height, width, num_channels = frame.shape
images = []
images.append(frame)
images = np.array(images, dtype=np.uint8)
images = images.astype('float32')
x_batch = images.reshape(1, height, width, num_channels)
y_test_images = np.zeros((1, num_classes))
### Creating the feed_dict that is required to be fed to calculate y_pred
feed_dict_testing = {self._x: x_batch, self._y_true: y_test_images}
result=self._sess.run(self._y_pred, feed_dict=feed_dict_testing)
result = np.argmax(result, axis=1)
# result is of this format [probabiliy_of_rose probability_of_sunflower]
return result[0]
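# Minimal usage sketch (illustrative only; the graph/model paths and image file
# below are assumptions, not files shipped with this module):
#
#   predictor = Predictor("model/model.meta", "model/")
#   frame = cv2.imread("sample.jpg")
#   class_index = predictor.predictClass(frame, num_classes=2)
#   print(class_index)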
|
roger-cores/hand_gestr_ros
|
scripts/predict.py
|
predict.py
|
py
| 1,843 |
python
|
en
|
code
| 1 |
github-code
|
6
|
72096598268
|
import pygame
import random
import rospy
import math
from geometry_msgs.msg import TwistStamped
from geometry_msgs.msg import PoseStamped
from armf import armtakeoff
rospy.init_node('make_a_circle', anonymous=True)
current_pos = PoseStamped()
def main():
pygame.init()
screen = pygame.display.set_mode((640, 480))
clock = pygame.time.Clock()
x_curser1 = 160
y_curser1 = 240
x_curser2 = 480
y_curser2 = 240
mode= ''
count = 0
radius = 10
screen.fill((0, 0, 0))
    color_curser1 = (0,255,0)
    color_curser2 = (0,255,0)
    # initialize the "active" cursor colours up front so the draw calls in the
    # main loop do not raise a NameError before the first key press
    color_curser3 = (0,0,255)
    color_curser4 = (0,0,255)
pygame.draw.circle(screen, color_curser1, (x_curser1, y_curser1), radius)
pygame.draw.circle(screen, color_curser2, (x_curser2, y_curser2), radius)
arm = armtakeoff()
arm.arm()
arm.takeoff()
while True:
screen.fill((0, 0, 0))
x_curser1 = 160
y_curser1 = 240
x_curser2 = 480
y_curser2 = 240
pressed = pygame.key.get_pressed()
alt_held = pressed[pygame.K_LALT] or pressed[pygame.K_RALT]
ctrl_held = pressed[pygame.K_LCTRL] or pressed[pygame.K_RCTRL]
for event in pygame.event.get():
            # determine if X was clicked, or Ctrl+W or Alt+F4 was used
if event.type == pygame.QUIT:
return
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_w and ctrl_held:
return
if event.key == pygame.K_F4 and alt_held:
return
if event.key == pygame.K_ESCAPE:
return
# determine if a letter key was pressed
if event.key == pygame.K_w:
mode = 'up'
elif event.key == pygame.K_s:
mode = 'down'
elif event.key == pygame.K_d:
mode = 'right'
elif event.key == pygame.K_a:
mode = 'left'
elif event.key == pygame.K_SPACE:
mode = 'jump'
elif event.key == pygame.K_q:
mode = 'yaw'
elif event.key == pygame.K_e:
mode = 'yawri8'
elif event.key == pygame.K_LCTRL:
mode = 'low'
elif event.key == pygame.K_h:
mode = 'hold'
color_curser1 = (255,0,0)
color_curser2 = (255,0,0)
color_curser3 = (0,0,255)
color_curser4 = (0,0,255)
pygame.draw.circle(screen, color_curser1, (x_curser1, y_curser1), radius)
pygame.draw.circle(screen, color_curser2, (x_curser2, y_curser2), radius)
        x_curser1,y_curser1,x_curser2,y_curser2 = curserControl(screen,x_curser1,y_curser1,x_curser2,y_curser2,mode,count,color_curser1,color_curser2,radius)
pygame.draw.circle(screen, color_curser3, (x_curser1, y_curser1), radius)
pygame.draw.circle(screen, color_curser4, (x_curser2, y_curser2), radius)
pygame.display.flip()
clock.tick(60)
def curserControl(screen,x_curser1,y_curser1,x_curser2,y_curser2,mode,count,color_curser1,color_curser2,radius):
publish_velocity=rospy.Publisher('/mavros/setpoint_velocity/cmd_vel', TwistStamped,queue_size=20)
vel=TwistStamped()
if mode == 'up':
vel.twist.linear.x= 0.8
vel.twist.linear.z= 0
publish_velocity.publish(vel)
vel.twist.linear.x= 0
y_curser1= y_curser1 -20
pygame.draw.circle(screen, color_curser1, (x_curser1, y_curser1), radius)
print("up")
elif mode == 'down':
vel.twist.linear.x= -0.8
vel.twist.linear.z= 0
publish_velocity.publish(vel)
vel.twist.linear.x= 0
y_curser1= y_curser1 +20
pygame.draw.circle(screen, color_curser1, (x_curser1, y_curser1), radius)
print("down")
elif mode == 'right':
vel.twist.linear.z= 0
vel.twist.linear.y= -0.8
publish_velocity.publish(vel)
vel.twist.linear.y= 0
x_curser1= x_curser1 +20
pygame.draw.circle(screen, color_curser1, (x_curser1, y_curser1), radius)
print("right")
elif mode == 'left':
vel.twist.linear.y= 0.8
vel.twist.linear.z= 0
publish_velocity.publish(vel)
vel.twist.linear.y= 0
x_curser1= x_curser1 -20
pygame.draw.circle(screen, color_curser1, (x_curser1, y_curser1), radius)
print("left")
elif mode == 'jump':
vel.twist.linear.z= 1
publish_velocity.publish(vel)
vel.twist.linear.z= 0
y_curser2= y_curser2 -20
pygame.draw.circle(screen, color_curser2, (x_curser2, y_curser2), radius)
print("jump")
elif mode == 'low':
vel.twist.linear.z= -0.5
publish_velocity.publish(vel)
vel.twist.linear.z= 0
y_curser2= y_curser2 +20
pygame.draw.circle(screen, color_curser2, (x_curser2, y_curser2), radius)
print("low")
elif mode == 'yaw':
vel.twist.angular.z= 0.8
vel.twist.linear.z= 0
publish_velocity.publish(vel)
vel.twist.linear.z= 0
x_curser2= x_curser2 -20
pygame.draw.circle(screen, color_curser2, (x_curser2, y_curser2), radius)
print("yawleft")
elif mode == 'yawri8':
vel.twist.angular.z= -0.8
vel.twist.linear.z= 0
publish_velocity.publish(vel)
vel.twist.linear.z= 0
x_curser2= x_curser2 +20
pygame.draw.circle(screen, color_curser2, (x_curser2, y_curser2), radius)
print("yawri8")
elif mode == 'hold':
vel.twist.angular.z= 0
publish_velocity.publish(vel)
print("hold")
return x_curser1, y_curser1 ,x_curser2, y_curser2
main()
|
DarkcrusherX/indoor_nav
|
src/transmitter.py
|
transmitter.py
|
py
| 5,866 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10623839028
|
import random
from discord import Colour
"""
These are some preset configs that are predefined
and normally don't need any changes (that's why they are not in the config file)
"""
bottest = True # decides if the bot checks other bots' messages
ignorfiles = ['image/gif', 'image/jpeg'] # Content types to ignore. Check out https://en.wikipedia.org/wiki/Media_type
checkorange = 1 # if more or equal than that checks are positive the embed will be orange
checkred = 3 # if more or equal than that checks are positive the embed will be red
helpembedcolour = Colour(random.randint(0, 16777215))
|
veni-vidi-code/VirusTotalDiscordBot
|
Cogs/settings.py
|
settings.py
|
py
| 599 |
python
|
en
|
code
| 3 |
github-code
|
6
|
39348285870
|
from nose.tools import *
import wntr
from os.path import abspath, dirname, join
testdir = dirname(abspath(__file__))
datadir = join(testdir,'..','..','..','examples','networks')
def test_isOpen():
enData = wntr.pyepanet.ENepanet()
enData.inpfile = join(datadir,'Net1.inp')
assert_equal(0, enData.isOpen())
enData.ENopen(enData.inpfile,'tmp.rpt')
assert_equal(1, enData.isOpen())
def test_ENgetcount():
enData = wntr.pyepanet.ENepanet()
enData.inpfile = join(datadir,'Net1.inp')
enData.ENopen(enData.inpfile,'tmp.rpt')
nNodes = enData.ENgetcount(wntr.pyepanet.EN_NODECOUNT)
assert_equal(11, nNodes)
nLinks = enData.ENgetcount(wntr.pyepanet.EN_LINKCOUNT)
assert_equal(13, nLinks)
|
stephenfrechette/WNTR-test
|
wntr/pyepanet/tests/test_epanet2.py
|
test_epanet2.py
|
py
| 734 |
python
|
en
|
code
| 3 |
github-code
|
6
|
4452602501
|
import os
import sys
import mock
import unittest
import pkg_resources
from pybkick.kick import kick, main as kick_main, MissingSourceCode
from pybkick.pyboard import Pyboard
class TestKick(unittest.TestCase):
"""Test that we can kick code over to the PyBoard
"""
def testBasicKick(self):
test_data_path = pkg_resources.resource_filename(__name__, 'test_data')
kick(
port='/dev/ttyACM0',
src=test_data_path,
entry_point=None
)
def testKickFromCommandLine(self):
test_data_path = pkg_resources.resource_filename(__name__, 'test_data')
fake_argv = [sys.argv[0], '--src=%s' % test_data_path, '--dst=tmp']
with mock.patch('sys.argv', fake_argv):
fake_kick = mock.Mock()
with mock.patch('pybkick.kick', fake_kick):
kick_main()
fake_kick.assert_called_once()
def testKickMissingDirectory(self):
missing_test_data_path = os.path.join(pkg_resources.resource_filename(__name__, 'test_data'), 'missing')
with self.assertRaises(MissingSourceCode):
kick(
port='/dev/ttyACM0',
src=missing_test_data_path,
dst='tmp',
entry_point=None
)
def testKickTestData(self):
test_dir = pkg_resources.resource_filename(__name__, 'test_data')
port = '/dev/ttyACM0'
kick(port=port,
src=test_dir
)
pb = Pyboard(port)
with pb.raw_repl():
for filename in ['a.txt', 'b.txt']:
self.assertTrue(pb.file_exists(filename))
pb.rm(filename)
self.assertFalse(pb.file_exists(filename))
|
salimfadhley/pybkick
|
src/pybkick_tests/test_kick.py
|
test_kick.py
|
py
| 1,878 |
python
|
en
|
code
| 5 |
github-code
|
6
|
40333388008
|
import pytumblr
class Tumblr:
def __init__(
self,
consumer_key: str,
consumer_secret: str,
oauth_token: str,
oauth_secret: str,
blog_name: str,
):
self.client = pytumblr.TumblrRestClient(
consumer_key, consumer_secret, oauth_token, oauth_secret
)
self.blog_name = blog_name
def post_photo(self, photo: str, description: str, tags: list):
self.client.create_photo(
self.blog_name, state="published", tags=tags, tweet=description, data=photo
)
|
fabiolab/photobox
|
fabiotobox/tumblr.py
|
tumblr.py
|
py
| 569 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36827766673
|
from layers.domain_layer.repositories import AccountRepository
from layers.domain_layer.repositories import TokenRepository
from libs.cutoms.singleton import Singleton
class AuthSystem(object):
__metaclass__ = Singleton
def token_to_user_id(self, access_token):
account_id = self.token_to_account_username(access_token)
return AccountRepository().get(account_id).user_id
def token_to_account_username(self, access_token):
return TokenRepository().get(access_token).account_username
|
siliu3/c-SewpPocket
|
layers/use_case_layer/systems/auth_system.py
|
auth_system.py
|
py
| 522 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8214750417
|
from flask import request
from flask_restx import Resource, Namespace, abort
from marshmallow import ValidationError
from implemented import user_service
from tools.jwt_token import JwtSchema, JwtToken
from views.users import LoginValidator
auth_ns = Namespace('auth')
@auth_ns.route('/')
class AuthView(Resource):
def post(self):
try:
data = LoginValidator().load(request.json)
user = user_service.get_by_name(data["username"])
if not user:
abort(404)
token_data = JwtSchema().load({"user_id": user.id, "role": user.role})
return JwtToken(token_data).get_tokens(), 201
except ValidationError:
abort(400)
def put(self):
try:
refresh_token = request.json["refresh_token"]
data = JwtToken.decode_token(refresh_token)
data.pop("exp", None)
token_data = JwtSchema().load(data)
user = user_service.get_one(token_data["user_id"])
if not user:
abort(404)
token_data = JwtSchema().load({"user_id": user.id, "role": user.role})
return JwtToken(token_data).get_tokens(), 201
except Exception as e:
abort(400)
|
Mariyatm/-lesson19_project_hard_source
|
views/auth.py
|
auth.py
|
py
| 1,256 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6323653516
|
import asyncio
from typing import *
from urllib.parse import urlencode
from datetime import datetime
from pprint import pformat as pf
import logging
logging.basicConfig(format='%(asctime)s %(message)s')
import jikanpy
from enum import Enum
from copy import copy, deepcopy
from pprint import pprint
import traceback
import aiohttp
import dotenv
import asyncio
from fuzzywuzzy import fuzz
from expiring_dict import ExpiringDict
from core import getLogger, stopwatch
log = getLogger(__name__)
class MALRatings(Enum):
g = "G - All Ages"
pg = "PG - Children"
pg_13 = "PG-13 - Teens 13 or older"
r = "R - 17+ (violence & profanity) "
r_plus = "R+ - Mild Nudity 17+"
rx = "Rx - Hentai 18+"
class MALTypes(Enum):
ANIME = 1
MANGA = 2
class MyAnimeListAIOClient:
"""Wrapper for MyAnimeList API Endpoint"""
client_id: str = ""
TTL = 60*60
response_cache = ExpiringDict(ttl=TTL)
def __init__(
self,
client_id: str = None,
):
"""A wrapper for the Non-user based mal api endpoints (-> no oauth needed)"""
self.log = logging.getLogger(__name__)
self.log.setLevel(logging.INFO)
self._id = client_id or self.client_id or dotenv.dotenv_values()["ID"]
if not self._id and not self.client_id:
raise RuntimeError(
"Client id has to be passed into the constructor or in the .env file under key `ID`. Consider calling `set_credentails`"
)
self._base_url = r"https://api.myanimelist.net/v2"
self._session = None
@classmethod
def set_credentials(cls, client_id: str):
""""set the client id"""
cls.client_id = client_id
@property
def session(self) -> aiohttp.ClientSession:
"""Get AioHTTP session by creating it if it doesn't already exist"""
if not self._session or self._session.closed:
self._session = aiohttp.ClientSession()
return self._session
async def _make_request(
self,
endpoint: str,
value: Optional[str] = None,
optional_query: Dict[str, str] = None,
) -> Dict[str, Any]:
query = None
if value and not value.startswith("/"):
value = "/" + value
if optional_query:
query = f"?{urlencode(optional_query)}"
url = f"{self._base_url}/{endpoint}{value or ''}{query or ''}"
async with self.session.get(url, headers=self.headers) as resp:
json = await resp.json(encoding="utf-8")
await self._session.close()
self.log.debug(f"request: {url}")
self.log.debug(f"response: {pf(json)}")
if not resp.ok:
raise RuntimeError(f"{url} returned status code {resp.status}")
return json
@property
def headers(self) -> Dict[str, str]:
if not self._id:
raise RuntimeError("Client id has to be passed into the constructor or in the .env file under key `ID`")
return {"X-MAL-CLIENT-ID": self._id}
async def fetch_anime(
self,
id: int
) -> Dict[str, Any]:
"""fetch an Anime by it's ID
Args:
-----
id : int
the mal ID of that anime
"""
fields = (
"id,title,main_picture,alternative_titles,"
"start_date,end_date,synopsis,mean,rank,popularity,"
"num_list_users,num_scoring_users,nsfw,created_at,"
"updated_at,media_type,status,genres,my_list_status,"
"num_episodes,start_season,broadcast,source,"
"average_episode_duration,rating,pictures,background,"
"related_anime,related_manga,recommendations,studios,statistics,"
"average_episode_duration,opening_themes,ending_themes"
)
resp = await self._make_request(
endpoint="anime",
value=str(id),
optional_query={"fields": fields}
)
return resp
async def _search(self):
pass
async def search_anime(self, query: str, include_nsfw=True, fallback: bool = False) -> Dict[str, Any]:
"""search for anime by name
Args:
-----
query : str
the query to search for
include_nsfw : bool
whether to include nsfw results
fallback : bool
whether or not to limit the query to 50 chars
Returns:
--------
Dict[str, Any]
the response json
"""
try:
resp = self.response_cache[query]
return deepcopy(resp)
except KeyError:
pass
fields = (
"id,title,main_picture,alternative_titles,"
"start_date,end_date,synopsis,mean,rank,popularity,"
"num_list_users,num_scoring_users,nsfw,created_at,"
"updated_at,media_type,status,genres,my_list_status,"
"num_episodes,start_season,broadcast,source,"
"average_episode_duration,rating,pictures,background,"
"related_anime,related_manga,recommendations,studios,statistics,"
"average_episode_duration,opening_themes,ending_themes"
)
a = datetime.now()
kwargs = {"nsfw": "true" if include_nsfw else "false"}
try:
resp = await self._make_request(
endpoint="anime",
optional_query={
"q": query,
"fields":fields,
"limit":"50",
**kwargs
})
except RuntimeError as e:
if fallback:
log.warning(f"Error while fetching anime - title len = {len(query)}")
log.warning(traceback.format_exc())
return None
else:
log.warning(f"fallback search for title {query}")
return await self.search_anime(query[:50], include_nsfw, True)
log.info(f"fetched {len(resp['data'])} anime in {(datetime.now() - a).total_seconds():.2f}s")
self.response_cache.ttl(query, deepcopy(resp), self.TTL)
return deepcopy(resp)
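# Minimal usage sketch (illustrative only; assumes a valid MAL client id is
# available, e.g. via the `ID` key in a .env file as described above):
#
#   async def _demo():
#       client = MyAnimeListAIOClient()
#       resp = await client.search_anime("one piece")
#       print(len(resp["data"]))
#
#   asyncio.run(_demo())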
|
zp33dy/inu
|
inu/utils/rest/my_anime_list.py
|
my_anime_list.py
|
py
| 6,129 |
python
|
en
|
code
| 1 |
github-code
|
6
|
16719892891
|
import MSGrid
import time
def printgrid(arr):
print(" ",end="")
index = len(arr[0])
for x in range(index):
print(str(x)+" ", end="")
print()
index = 0
for i in arr:
print(index,end="")
index+=1
for j in i:
print("|"+str(j), end="")
print("|\n")
while(1):
try:
n = int(input("How tall should the grid be? >> "))
m = int(input("How wide should the grid be? >> "))
if(n<=0 or m<=0): raise IndexError
break
except IndexError:
print("must be positive.")
except ValueError:
print("must be valid integers.")
numMines = int((n*m)/6)
grid = MSGrid.gridInitialize(n, m, numMines)
remainingBlocks = n*m
mask = MSGrid.maskInitialize(n, m)
def sweep(y, x):
if( y < 0 or x < 0 or y >= n or x >= m):
return 0
if mask[y][x] != ' ':
return 0
if(grid[y][x] == 9):
print("BOOM! YOU DIED")
printgrid(grid)
input("exit >>")
exit(0)
mask[y][x] = grid[y][x]
if(grid[y][x] > 0):
return 1
retval = 1
for horz1 in [-1, 0, 1]:
retval += sweep(y-1, x+horz1)
for horz2 in [-1, 1]:
retval += sweep(y, x+horz2)
for horz3 in [-1, 0, 1]:
retval += sweep(y+1, x+horz3)
return retval
print("top left corner is 0,0. Enter coordinates to check for mines.")
start = time.time()
while(1):
print("time taken so far:", time.time()-start)
try:
printgrid(mask)
x = int(input("Enter an x coordinate >> "))
y = int(input("\nEnter a y coordinate >> "))
if x >= m or y >= n: raise IndexError
flagFlag = input("type F to toggle a flag or anything else to sweep the block: >>")
if(flagFlag == "F" or flagFlag == "f"):
if type(mask[y][x]) is not int:
if mask[y][x] == 'F':
mask[y][x] = ' '
print("flag toggled off.")
else:
mask[y][x] = 'F'
print("flag toggled on.")
continue
except IndexError:
print("Input index too large. Try again.\n")
continue
except ValueError:
print("Input must be a valid integer. Try again.\n")
continue
remainingBlocks -= sweep(y, x)
if(remainingBlocks == numMines):
print("all mines found!")
end = time.time()
timeTaken = int(end - start)
print("total time taken:", timeTaken,"seconds")
name = input("Enter your name to be entered into the scoreboard. >>")
f = open("scores.txt", "a")
f.write(name + ","+str(x)+"x"+str(y)+": "+str(timeTaken)+" seconds\n")
f = open("scores.txt", "r")
print("scores:")
for line in f: print(line, end="")
exit(0)
|
lazarchitect/Minesweeper
|
MSDriver.py
|
MSDriver.py
|
py
| 2,392 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13523037639
|
from flask import Flask
import os.path
app = Flask(__name__)
@app.route("/")
def hello():
if os.path.exists('/volume/test'):
return "Hello from pvc!"
return "Hello World!"
if __name__ == "__main__":
app.run()
|
prgcont/workshop-OKD
|
cz/lekce-2/demoapp/app.py
|
app.py
|
py
| 236 |
python
|
en
|
code
| 1 |
github-code
|
6
|
34382591529
|
from django.contrib import admin
app_name = 'admin'
# Register your models here.
#当前目录下models
from myapp.models import Grades, Students
#创建班级的时候同时创建2个学生
class StudentInfo(admin.TabularInline):
model = Students
extra = 2
@admin.register(Grades)
class GradesAdmin(admin.ModelAdmin):
inlines = [StudentInfo,]
list_display = ('pk', 'gname', 'gdate','ggirlnum','gboynum','isDelete',)
list_filter = ('gname',)
search_fields = ('gname',)
list_per_page = 5
    # attributes shown on the add/change page
fields = ('ggirlnum', 'gboynum', 'gname', 'gdate',)
#fieldsets = []
@admin.register(Students)
class StudentsAdmin(admin.ModelAdmin):
def gender(self):
if self.sgender:
return '男'
else:
return '女'
    # column header name
gender.short_description = '性别'
list_display = ('pk', 'sname', 'sage',gender,'scontend', 'sgrade', 'isDelete',)
list_per_page = 5
    #actions_on_top = False
#admin.site.register(Grades, GradesAdmin)
#admin.site.register(Students,StudentsAdmin)
|
pyslin/project01
|
myapp/admin.py
|
admin.py
|
py
| 1,071 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11152523627
|
from simple.PIL import Image
image = Image.open('strawberries.png')
for pixel in image:
avg = (pixel.red + pixel.green + pixel.blue) / 3
if pixel.red < pixel.blue + 40:
pixel.red = avg
pixel.green = avg
pixel.blue = avg
if pixel.red < pixel.green + 40:
pixel.red = avg
pixel.green = avg
pixel.blue = avg
image.save('output.png')
|
groklearning/simple-packages
|
examples/PIL/colour_splash.py
|
colour_splash.py
|
py
| 393 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70263663549
|
import random
import math
num_teams = 32
country_list = {"england" : ["English", 4], "france" : ["French", 4],
"spain" : ["Spanish", 4], "germany" : ["German", 4],
"italy" : ["Italian", 4], "portugal" : ["Portuguese", 3],
"russia" : ["Russian", 2], "dutch" : ["Dutch", 1],
"turkey" : ["Turkish", 1], "scotland" : ["Scottish", 1],
"greece" : ["Greek", 1], "poland" : ["Polish", 1],
"belgium" : ["Belgian", 1]}
group_and_teams = {"GroupA" : {}, "groupB" : {}, "groupC" : {}, "groupD" : {},
"groupE" : {}, "groupF" : {}, "groupG" : {}, "groupH" : {}}
print("\tUEFA Champions League Draw Simulator\n")
print("Enter teams to make your very own UEFA Champions League.")
input("Press the enter key to begin.")
x = 0
for country in country_list:
x += country_list[country][1]
x = math.ceil(x / len(group_and_teams))
list_teams = {}
for country in country_list:
for x in range(country_list[country][1]):
list_teams[input("Please enter an {} team: ".format(country_list[country][0]))] = country
def find_open_slot(new_team):
good = True
group_chosen = False
rand = random.sample(list(group_and_teams), 1)[0]
while group_chosen == False:
if len(group_and_teams[rand]) > 0:
for listed_team in group_and_teams[rand]:
                if new_team == listed_team or list_teams[new_team] == group_and_teams[rand][listed_team]:
good = False
else:
group_chosen = True
return rand
if good == False:
rand = random.sample(list(group_and_teams), 1)[0]
else:
group_chosen = True
return rand
for team in list_teams:
group = find_open_slot(team)
teams = group_and_teams[group]
teams[team] = list_teams[team]
print("\nAssembling groups...\n")
input("Groups complete. Press the enter key to view.")
for group in group_and_teams:
for teams in group_and_teams[group]:
print("{}: team: {}, Country: {}".format(group,teams, group_and_teams[group][teams]))
input("\n\nPress the enter key to exit.")
|
fabriziocominetti/practice
|
Python/ChampionsLeague_draw-simulator/drawSimulator-ChampionsLeague2.py
|
drawSimulator-ChampionsLeague2.py
|
py
| 2,172 |
python
|
en
|
code
| 1 |
github-code
|
6
|
25225882706
|
"""If a number is equal to the sum of its factors,
the number is called the perfect number, for example, 6, because 6=1+2+3.
Program and print all the perfect numbers within 1000."""
for i in range(1, 1001):
s = 0
for j in range(1, i):
if i % j == 0:
s += j
if s == i:
print('\n', i, " ", end="")
print("it's factors are ", end="")
for j in range(1,i):
if i % j == 0:
print(j, end = " ")
|
HawkingLaugh/Data-Processing-Using-Python
|
Week1/Exerciese/7. Perfect number.py
|
7. Perfect number.py
|
py
| 474 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41563404071
|
import socket
import threading
bind_ip = '0.0.0.0'
bind_port = 9999
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((bind_ip,bind_port))
server.listen(5)
print("[*] Listening on %s: %d" % (bind_ip,bind_port))
def handle_client(client_socket):
    # display whatever the client sends
request = client_socket.recv(1024)
resposta = request.decode()
print('[*] Received: %s ' % resposta)
mensagem = "ACK!"
msgByte = mensagem.encode()
    # send a packet back to the client
client_socket.send(msgByte)
client_socket.close()
while True:
client, addr = server.accept()
print("[*] Accepted connection from %s:%d " % (addr[0],addr[1]))
cliente_handler = threading.Thread(target=handle_client, args=(client,))
cliente_handler.start()
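# Illustrative client for manual testing (not part of the original script; the
# host and port simply mirror the bind values used above):
#
#   import socket
#   client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   client.connect(("127.0.0.1", 9999))
#   client.send("hello server".encode())
#   print(client.recv(4096).decode())  # expects "ACK!"
#   client.close()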
|
jamalghatit/EstudoBlackHatPython
|
ServidorTCP.py
|
ServidorTCP.py
|
py
| 791 |
python
|
en
|
code
| 0 |
github-code
|
6
|
33689496961
|
from string import ascii_lowercase
from behave import given, then
from toolium.utils.dataset import map_param
from common.utils import assert_arrays_equal, payload_to_table_format, replace_param
@given("there is {chore_types:d} chore type")
@given("there are {chore_types:d} chore types")
def step_create_chore_types(context, chore_types):
for i in range(chore_types):
raw_data = {
"id": f"ct-{ascii_lowercase[i]}",
"name": f"ct-{ascii_lowercase[i]}",
"description": f"description{i+1}",
}
context.execute_steps(
f"""
Given I use the admin API key
When I send a request to the Api resource "createChoreType" with body params
{payload_to_table_format(raw_data)}
Then the response status code is "200"
And I clear the token
"""
)
@then('the response contains the chores "{ids}"')
def step_response_contains_chores(context, ids):
ids = replace_param(ids)
if not ids:
ids = []
elif isinstance(ids, str):
ids = list(map(int, ids.replace(" ", "").split(",")))
elif isinstance(ids, int):
ids = [ids]
original = map_param("[CONF:examples.simple_chore_types]")
res_json = context.res.json()
for field_name in ("completed_at", "created_at"):
for item in res_json:
del item[field_name]
expected = [original[x - 1] for x in ids]
assert_arrays_equal(expected, res_json)
|
sralloza/chore-management-api
|
test/steps/aliases/chore_types.py
|
chore_types.py
|
py
| 1,503 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9582892535
|
import os
from pathlib import Path
def get_last_n_files_in_dir(dir_path, n, recurse=False, *args, **kwargs):
method_str = "rglob" if recurse else "glob"
p = Path(dir_path)
fluid_glob = getattr(p, method_str)
l = [(i, i.stat().st_mtime) for i in fluid_glob("*.*")]
    l.sort(key=lambda x: x[1], **kwargs)  # sort by modification time
l_ = l[:n]
return [i[0] for i in l_]
def delete_last_n_files_in_dir(dir_path, *args, **kwargs):
fpaths = get_last_n_files_in_dir(dir_path, *args, **kwargs)
for p in fpaths:
os.remove(p)
|
royassis/djangoRestML
|
myapi/helpers.py
|
helpers.py
|
py
| 536 |
python
|
en
|
code
| 0 |
github-code
|
6
|