seq_id (string, 7–11 chars) | text (string, 156–1.7M chars) | repo_name (string, 7–125 chars) | sub_path (string, 4–132 chars) | file_name (string, 4–77 chars) | file_ext (string, 6 classes) | file_size_in_byte (int64, 156–1.7M) | program_lang (string, 1 class) | lang (string, 38 classes) | doc_type (string, 1 class) | stars (int64, 0–24.2k, may be null) | dataset (string, 1 class) | pt (string, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|---|---|
37842291992
|
# ----------------------------------------------------------------------------------------------------------------------
# Implementation of k-Means Machine learning algorithm, tested using synthetic data created in script
#
# Sean Taylor Thomas
# 9/2021
# [email protected]
# ----------------------------------------------------------------------------------------------------------------------
import math
import random
import sys
import matplotlib.pyplot as plt
random.seed(1)
# Generating a random synthetic dataset
dataset = []
dimensions = 2
num_elements = 1000
for x in range(num_elements):
rand1 = random.randint(0, 250)
rand2 = random.randint(0, 250)
    if not rand2 == rand1 * 2 + 45:  # skip points that fall exactly on the line y = 2x + 45
dataset.append([rand1, rand2])
def compute_centroid(element, centroids):
""" return the index of the closest centroid to given element"""
which_centroid = 0
min_dist = sys.maxsize
for centroid in centroids:
dist = 0 # temp dist
for i in range(dimensions):
dist += (element[i] - centroid[i]) ** 2
if dist < min_dist: # new min distance
which_centroid = centroids.index(centroid) # index of closest centroid
min_dist = dist
return which_centroid # returns index of closest centroid
def compute_cluster_mean(cluster):
"""computes literal average of given cluster"""
    mean_element = [0] * dimensions  # start from zeros so the first element is not counted twice
for dim in range(dimensions):
for element in cluster:
mean_element[dim] += element[dim] # Sum of elements' "dim" dimension
# Computing Average for each dimension (dividing by num elements)
mean_element[dim] /= len(cluster)
return mean_element # return average
max_iterations = 200
# Choosing initial centroids from dataset at random
k = 5
centroids = random.sample(dataset, k=k)  # sample without replacement so the initial centroids are distinct
iterations = 0 # num iterations of loop
isSame = 0 # boolean testing if previous clusters are the same as current
while iterations < max_iterations and not isSame:
iterations += 1
# Initializing List, named clusters, to hold and separate k clusters
clusters = []
iterator = 0
for x in range(k):
clusters.append(list()) # List representing each of k clusters
iterator += 1
# Calculate distance from each element in dataset to each cluster seed
# And choose which of k clusters is closest to this element
for element in dataset:
closest_centroid_index = compute_centroid(element, centroids) # index of centroid closest to element
clusters[closest_centroid_index].append(element) # grouping each point into a cluster
same_centroids = 0 # variable to check if all clusters change
# Finding new centroid for each cluster, k-means
for cluster_k in clusters:
average_of_cluster = compute_cluster_mean(cluster_k) # literal average, not necessarily an element in cluster
new_centroid = cluster_k[compute_centroid(average_of_cluster, cluster_k)] # find new centroid
        # add one for each centroid that hasn't changed
if new_centroid == centroids[clusters.index(cluster_k)]:
same_centroids += 1
centroids[clusters.index(cluster_k)] = new_centroid
if same_centroids == k:
isSame = 1
# Plotting elements as clusters (stars) -- 11 different clusters supported
clr = ["blue", "red", "green", "purple", "orange", "black", "brown", "cyan", "white", "yellow", "magenta"]
color_indx = 0
for cluster in clusters:
x = []
y = []
for i in cluster:
x.append(i[0])
y.append(i[1])
plt.scatter(x, y, label="Cluster " + str(color_indx), color=clr[color_indx%11], marker="*",
s=30)
color_indx += 1
# Plotting the Centroids (Large Stars)
color_indx = 0
for centroid in centroids:
x = []
y = []
x.append(centroid[0])
y.append(centroid[1])
plt.scatter(x, y, label="Centroid " + str(color_indx), color=clr[color_indx%11], marker="*",
s=450)
color_indx += 1
plt.ylabel('y-axis')
plt.title("K-Means Clustering")
plt.legend()
plt.show()
# calculating WCSS
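# (WCSS = within-cluster sum of squares: for each cluster, the squared distance from
#  every member to its centroid, summed over all clusters; lower values mean tighter clusters.)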
total_cluster_sum =0
for cluster_k in range(len(clusters)):
WCSS = 0
for element in clusters[cluster_k]:
for dim in range(dimensions):
WCSS += abs(element[dim] - centroids[cluster_k][dim]) ** 2
total_cluster_sum += WCSS
print("Average WCSS:", total_cluster_sum/k)
print("Number of Iterations: ", iterations)
# Plotting elements as clusters (stars) -- 11 different clusters supported
clr = ["blue", "red", "green", "purple", "orange", "black", "brown", "cyan","white","yellow","magenta"]
color_indx = 0
for cluster in clusters:
x = []
y = []
for i in cluster:
x.append(i[0])
y.append(i[1])
plt.scatter(x, y,label="Cluster "+str(color_indx), color=clr[color_indx%11], marker="*",
s=30)
color_indx += 1
# Plotting the Centroids (Large Stars)
color_indx=0
for centroid in centroids:
x = []
y = []
x.append(centroid[0])
y.append(centroid[1])
plt.scatter(x, y, label="Centroid "+str(color_indx), color=clr[color_indx%11], marker="*",
s=450)
color_indx += 1
plt.ylabel('y-axis')
plt.title("K-Means Clustering")
plt.legend()
plt.show()
|
STaylorT/Machine-Learning
|
K-Means.py
|
K-Means.py
|
py
| 5,400 |
python
|
en
|
code
| 0 |
github-code
|
6
|
33386199461
|
"""
File input and output functions
"""
import ujson as json
from dev_funcs import printline, Recorded_Time
from comms import Appointment
#class to store data imported from local json config file
class FileIO:
def __init__(self):
#build all of the variables from data.json file
self.load_local_vars()
#print the device data after import
self.print_dev_data()
def load_local_vars(self):
#read in unparsed json data
unparsed_data = self.read_in_file()
#parse json data into dict objects
pdata = json.loads(unparsed_data)
#assign parsed json data to local variables
self.dev_id = pdata["device_info"]["dev_id"]
self.server_pass = pdata["device_info"]["server_pass"]
self.firm_version = pdata["device_info"]["firm_version"]
self.wifi_networks = pdata["wifi_params"]
self.appointments = pdata["appointments"]
self.last_known_time = pdata["device_info"]["last_known_time"]
self.quiet_hours = pdata["device_info"]["quiet_hours"]
#function to print basic device info
def print_dev_data(self):
#construct a string with all the device info to be displayed
ts = "Device " + str(self.dev_id) + " | Firmware version: " + \
str(self.firm_version)
#print constructed string
print(ts)
#function to update time in json file with current time
#takes a Recorded_Time instance (preferred) or a string (not as good)
#no formatting, if time is rewritten incorrectly it could cause a failure
def update_last_known_time(self, current_time):
#make new string to store the new time
#new_time = ""
#check if current_time is a Recorded_Time object
if isinstance(current_time, Recorded_Time):
#get the time as a datetime formatted string
new_time = current_time.get_datetime_string()
else:
#otherwise write new_time with current_time object or string
#this is where failure could happen, use cautiously
new_time = current_time
#read in data from file
read_in_data = json.loads(self.read_in_file())
#rewrite last_known_time
read_in_data["device_info"]["last_known_time"] = new_time
#dump the json data to the file saver func, reload local vars from json file
self.write_to_file(json.dumps(read_in_data))
self.load_local_vars()
def update_quiet_hours(self, start=None, end=None):
#define new quiet hours json
quiet_hours = {
"start_time": start,
"end_time": end
}
#read in data from file
read_in_data = json.loads(self.read_in_file())
#rewrite old unmodified quiet hours entry (preserves all data)
read_in_data["device_info"]["quiet_hours"] = quiet_hours
#dump the json data to the file saver func, reload local vars from json file
self.write_to_file(json.dumps(read_in_data))
self.load_local_vars()
#function takes an Appointment object and adds appointment to appointments object
def add_appointment(self, new_appt):
#read in data from file
read_in_data = json.loads(self.read_in_file())
#isolate the appointment data
appointments = read_in_data["appointments"]
#create new JSON of new appt to add
appt_to_add = {
"appointment_id": int(new_appt.appointment_id),
"appointment_date_time": new_appt.appointment_date_time,
"answers" : [],
"cancelled" : False
}
#append new appointment onto appointment JSON obj
appointments.append(appt_to_add)
#rewrite old unmodified appointment entry (preserves all data)
read_in_data["appointments"] = appointments
#dump the json data to the file saver func, reload local vars from json file
self.write_to_file(json.dumps(read_in_data))
self.load_local_vars()
#function to remove an appointment from the json file
#takes an appointment id as an arg, does not return anything
def remove_appointment(self, appointment_id):
#read in data from file
read_in_data = json.loads(self.read_in_file())
#isolate the appointment data
appointments = read_in_data["appointments"]
        #make empty list of appointments that can be filled by loop
remaining_appts = []
#search through appointments for matching id
for appt in appointments:
if appt["appointment_id"] != appointment_id:
remaining_appts.append(appt)
#rewrite old unmodified appointment entry (preserves all other data)
read_in_data["appointments"] = remaining_appts
#dump the json data to the file saver func, reload local vars from json file
self.write_to_file(json.dumps(read_in_data))
self.load_local_vars()
    #function to get appointment data stored in data.json
#returns None (if no appts) or an array of Appointment objects
def get_appointments(self, appt_id=None):
if appt_id:
for appt in self.appointments:
if appt["appointment_id"] == appt_id:
return Appointment(appt["appointment_id"],appt["answers"],appt["appointment_date_time"], appt["cancelled"])
return None
else:
#create new array for resulting objects
appts_arr = []
#go through appointments json
for appt in self.appointments:
#create new appointment with json data
new_appt = Appointment(appt["appointment_id"],appt["answers"],appt["appointment_date_time"], appt["cancelled"])
#add newly created Appointment obj to list to return
appts_arr.append(new_appt)
#return the array
return appts_arr
def get_cancelled_appointments(self):
#create new array for resulting objects
appts_arr = []
#go through appointments json
for appt in self.appointments:
            if appt["cancelled"]:  # appt is a parsed JSON dict, so use key access rather than attribute access
#create new appointment with json data
new_appt = Appointment(appt["appointment_id"],appt["answers"],appt["appointment_date_time"], appt["cancelled"])
#add newly created Appointment obj to list to return
appts_arr.append(new_appt)
#return the array
return appts_arr
def get_unsent_appointment_answers(self):
appts_arr = []
#go through appointments json
for appt in self.appointments:
for answer in appt["answers"]:
if answer["sent"] == False:
#create new appointment with json data
new_appt = Appointment(appt["appointment_id"],appt["answers"],appt["appointment_date_time"], appt["cancelled"])
highest_answer = 0
for i in appt["answers"]:
if i["number"] > highest_answer:
highest_answer = i["number"]
#add newly created Appointment obj to list to return
appts_arr.append([new_appt,highest_answer])
#return the array
return appts_arr
#function adds an appointment answer to the specified appt
#takes an appt id (int), an answer (True,False,None), and a Recorded_Time object
def new_appointment_answer(self, appointment_id, answer, currtime, answer_number):
#read in data from file
read_in_data = json.loads(self.read_in_file())
#isolate the appointment data
appointments = read_in_data["appointments"]
#search through appointments for matching id
for appt in appointments:
if appt["appointment_id"] == appointment_id:
currtime.update_time()
new_answer = {
"answer": answer,
"time_answered": currtime.get_datetime_string(),
"number": answer_number,
"sent": False
}
appt["answers"].append(new_answer)
#rewrite old unmodified appointment entry (preserves all other data)
read_in_data["appointments"] = appointments
#dump the json data to the file saver func, reload local vars from json file
self.write_to_file(json.dumps(read_in_data))
self.load_local_vars()
def cancel_appointment(self, appointment_id):
#read in data from file
read_in_data = json.loads(self.read_in_file())
#isolate the appointment data
appointments = read_in_data["appointments"]
#search through appointments for matching id
for appt in appointments:
if appt["appointment_id"] == appointment_id:
appt["cancelled"] = True
#rewrite old unmodified appointment entry (preserves all other data)
read_in_data["appointments"] = appointments
#dump the json data to the file saver func, reload local vars from json file
self.write_to_file(json.dumps(read_in_data))
self.load_local_vars()
def remove_appointment_answer(self, appointment_id):
#read in data from file
read_in_data = json.loads(self.read_in_file())
#isolate the appointment data
appointments = read_in_data["appointments"]
#search through appointments for matching id
for appt in appointments:
if appt["appointment_id"] == appointment_id:
appt["answers"] = []
#rewrite old unmodified appointment entry (preserves all other data)
read_in_data["appointments"] = appointments
#dump the json data to the file saver func, reload local vars from json file
self.write_to_file(json.dumps(read_in_data))
self.load_local_vars()
#updates answer status (change sent status from false to true)
def update_appointment_answer_status(self, appointment_id, status, number):
#read in data from file
read_in_data = json.loads(self.read_in_file())
#isolate the appointment data
appointments = read_in_data["appointments"]
#search through appointments for matching id
for appt in appointments:
if appt["appointment_id"] == appointment_id:
for answer in appt["answers"]:
if number == answer["number"]:
answer["sent"] = status
#rewrite old unmodified appointment entry (preserves all other data)
read_in_data["appointments"] = appointments
#dump the json data to the file saver func, reload local vars from json file
self.write_to_file(json.dumps(read_in_data))
self.load_local_vars()
#function takes an ssid, password, adds wifi network to wifi params
def add_wifi_network(self, ssid, password):
#read in data from file
read_in_data = json.loads(self.read_in_file())
#isolate the "wifi_params" section of json data
wifi_networks = read_in_data["wifi_params"]
#create new JSON of new wifi network to add
network_to_add ={
"ssid": ssid,
"password" : password
}
#append new network onto wifi_networks JSON obj
wifi_networks.append(network_to_add)
#rewrite old unmodified wifi_params entry (preserves all other data)
read_in_data["wifi_params"] = wifi_networks
#dump the json data to the file saver func, reload local vars from json file
self.write_to_file(json.dumps(read_in_data))
self.load_local_vars()
#function to remove a wifi network entry from the json file
#takes a wifi ssid an arg, does not return anything
def remove_wifi_network(self, ssid):
#read in data from file
read_in_data = json.loads(self.read_in_file())
#isolate the "wifi_params" section of json data
wifi_networks = read_in_data["wifi_params"]
        #make empty list of remaining networks that can be filled by loop
remaining_networks = []
#search through wifi_networks for matching ssid
for wifi_network in wifi_networks:
if wifi_network["ssid"] != ssid:
remaining_networks.append(wifi_network)
#rewrite old unmodified appointment entry (preserves all data)
read_in_data["wifi_params"] = remaining_networks
#dump the json data to the file saver func, reload local vars from json file
self.write_to_file(json.dumps(read_in_data))
self.load_local_vars()
#function reads in data.json file and returns unmodified string
def read_in_file(self):
#create file object pointing to json config file
loc_file = open('data.json', 'r')
#read in unparsed json data, close file
unparsed_data = loc_file.read()
loc_file.close()
#return resulting unparsed data
return unparsed_data
#function to rewrite json file
#WILL OVERWRITE ALL JSON DATA, READ DATA, MODIFY, THEN WRITE
def write_to_file(self, new_file_text):
#create file object pointing to json config file
loc_file = open('data.json', 'w')
#write data to file
loc_file.write(new_file_text)
#close file
loc_file.close()
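# Illustrative sketch of the data.json layout this class expects, inferred from the accessors
# above (field values are placeholders, not taken from the original repository):
#
# {
#   "device_info": {"dev_id": 1, "server_pass": "...", "firm_version": "1.0",
#                   "last_known_time": "...", "quiet_hours": {"start_time": null, "end_time": null}},
#   "wifi_params": [{"ssid": "...", "password": "..."}],
#   "appointments": [{"appointment_id": 1, "appointment_date_time": "...",
#                     "answers": [{"answer": true, "time_answered": "...", "number": 1, "sent": false}],
#                     "cancelled": false}]
# }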
|
TotalJTM/DoccoLink-Device-Firmware-V1
|
file_funcs.py
|
file_funcs.py
|
py
| 11,601 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72489693947
|
"""
The smallest square (or matrix) large enough to contain the given coordinates has size `x + y - 1`.
The biggest number in a matrix of size n, with the given rules, is `n + (n-1) + (n-2) + ... + 1`.
Given the biggest number, we can just subtract `y - 1` to "move" back to the correct id.
"""
def solution(x, y):
matrix_size = x + y - 1
greatest_in_matrix = sum(range(1, matrix_size+1))
id = greatest_in_matrix - y + 1
return str(id)
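# Quick sanity check of the formula above (illustrative, not part of the original solution):
# solution(3, 2) -> matrix_size = 4, greatest_in_matrix = 10, id = 10 - 2 + 1 = 9,
# which agrees with the closed form (x + y - 1) * (x + y - 2) / 2 + x = 9.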
|
curzel-it/foobar
|
2.2 Bunny Worker Locations/main.py
|
main.py
|
py
| 432 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24293870265
|
def count_substring(string, sub_string):
sublen = len(sub_string)
count = 0
for i in range(len(string)-sublen+1):
temp = string[i:i+sublen]
if temp == sub_string:
count += 1
return count
print(count_substring("ABCDCDCD", "CD"))
|
Tanmoy0077/Python-Experiments
|
Count_substr.py
|
Count_substr.py
|
py
| 272 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11323411187
|
import random
import sys
from pyfiglet import Figlet
import requests
import json
import os
from dotenv import load_dotenv
# Setting up TMDB API Key
load_dotenv()
API_KEY = os.getenv('TMDB_API_KEY')
# Retrieve top rated movies in TheMovieDB
pages = {'results': []}
for i in range(5):
page = requests.get(f'https://api.themoviedb.org/3/movie/top_rated?api_key={API_KEY}&language=en-US&page={i+1}').json()
pages['results'].extend(page['results'])
# Create a list that will contain the names of the movies to be guessed by the player
list_of_movies = []
for result in pages['results']:
if result['original_language'] == 'en' and len(result['title']) < 40:
list_of_movies.append(result['title'].strip())
# Setting up header font
figlet = Figlet()
fonts = figlet.getFonts()
figlet.setFont(font='ogre')
def main():
print(figlet.renderText('Welcome to\n Movie\n Hangman!'))
while True:
user_input = input('Press s to start a new game or e to exit: ').strip()
try:
start = start_new_game(user_input)
except ValueError:
print('Invalid input')
continue
else:
if start:
movie_to_guess = get_movie(list_of_movies)
game(movie_to_guess)
else:
sys.exit()
# Checks user input on the main screen to start a new game, exit the program or ask for input again if it was not valid
def start_new_game(play):
if play.lower() == "s":
print("Good luck!")
return True
elif play.lower() == "e":
print("Ok. Goodbye!")
return False
else:
raise ValueError('Invalid input')
# Selects a random movie from the list of available movies
def get_movie(list_of_movies):
return random.choice(list_of_movies)
# Returns a list containing a '_' for each letter in the movie to guess
def hide_movie(movie):
hidden_movie = ['_' if letter.isalpha() else letter for letter in movie]
return hidden_movie
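# e.g. hide_movie("Up 2") would give ['_', '_', ' ', '2'] (illustrative example)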
# Starts up a game of Hangman.
def game(title):
hidden_movie = hide_movie(title) # a list containing a '_' for each letter in the movie to guess
movie = title # name of the movie to be guessed as a string
number_of_guesses = 8 # number of tries that the player has left.
print(f'Your movie contains {hidden_movie.count("_")} letters.')
print(' '.join(hidden_movie))
# The following block will run while the player has guesses left. It will be interrupted if the player
# guesses the correct word before running out of guesses.
while number_of_guesses > 0:
# As long as there are any '_' remaining in hidden_movie , the player will be asked to make a guess.
if '_' in hidden_movie:
print(f"You have {number_of_guesses} {'guess' if number_of_guesses == 1 else 'guesses'} left")
user_guess = input('Enter a letter:').lower().strip()
result = play_round(user_guess, movie, hidden_movie)
if result is None:
print(' '.join(hidden_movie))
continue
elif result:
# If the player's guess was correct, any '_' in hidden_movie will be replaced with the correct letter
indices = [i for i, x in enumerate(movie) if x.lower() == user_guess]
for index in indices:
hidden_movie[index] = movie[index]
print(' '.join(hidden_movie))
else:
number_of_guesses -= 1
print(' '.join(hidden_movie))
# If there aren't any '_' left in hidden_movie it means that all the letters have been
# discovered and the player has won.
else:
print('You win!')
break
# If the player doesn't have any guesses left, a message including the correct word is shown.
if number_of_guesses == 0:
print(f"You Lose! The movie was {movie}")
def play_round(guess, title, hidden_title):
if len(guess) != 1 or not guess.isalpha() :
print('Invalid input. Please enter a letter')
return None
elif guess in hidden_title or guess.upper() in hidden_title:
print('You already guessed this letter. Try a different one')
return None
elif guess in title.lower():
print('Correct!')
return True
elif guess not in title.lower():
print('Wrong! Try again!')
return False
if __name__ == '__main__':
main()
|
MaCeleste/Movie-Hangman
|
project.py
|
project.py
|
py
| 4,490 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27529865063
|
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
def load_data(filename):
    # Read in the data file
data = np.loadtxt(filename)
return data
def plot_signal_waveform(data, fs):
    # Plot the signal waveform
    duration = len(data) / fs  # duration in seconds
time = np.linspace(0, duration, len(data))
plt.subplot(3,1,1)
plt.plot(time, data)
plt.xlabel("Time (s)")
plt.ylabel("Amplitude")
plt.title("Original Signal")
def plot_stft_spectrogram(data, fs, window, nperseg, noverlap):
    # Compute the STFT
    f, t, Zxx = signal.stft(data, fs=fs, window=window, nperseg=nperseg, noverlap=noverlap)
    # Plot the time-frequency spectrogram
plt.subplot(3,1,2)
plt.pcolormesh(t, f, np.abs(Zxx), cmap='YlOrBr')
plt.title('STFT Magnitude')
plt.ylabel('Frequency [Hz]')
plt.xlabel('Time [sec]')
def plot_fft_magnitude(data, fs):
    # Compute the FFT
    fft_data = np.fft.fft(data)
    freqs = np.fft.fftfreq(len(fft_data), 1/fs)
    # Plot the FFT magnitude spectrum
plt.subplot(3,1,3)
plt.plot(freqs, np.abs(fft_data))
plt.title('FFT Magnitude')
plt.ylabel('Magnitude')
plt.xlabel('Frequency [Hz]')
if __name__ == '__main__':
filename = 'Software/data/1.csv'
data = load_data(filename)
fs = 1000
    window = signal.windows.hann(128)  # window function
    nperseg = 128  # STFT segment length
    noverlap = nperseg//2  # STFT overlap length
plot_signal_waveform(data, fs)
plot_stft_spectrogram(data, fs, window, nperseg, noverlap)
plot_fft_magnitude(data, fs)
    # Adjust the layout
    plt.tight_layout()
    # Show the figure
plt.show()
|
huigang39/TENG
|
Software/dl/signal_analysis.py
|
signal_analysis.py
|
py
| 1,602 |
python
|
en
|
code
| 2 |
github-code
|
6
|
16824394057
|
# Noah van der Vleuten (s1018323)
# Jozef Coldenhoff (s1017656)
import queue
from pacman import agents, gamestate, search, util
import ass2
class CornersSearchRepresentation(search.SearchRepresentation):
def __init__(self, gstate):
super().__init__(gstate)
self.walls = gstate.walls
self.start_position = gstate.pacman
left, bottom = 1, 1
right, top = gstate.shape - 2 * util.Vector.unit
self.corners = frozenset([util.Vector(left, bottom),
util.Vector(left, top),
util.Vector(right, bottom),
util.Vector(right, top)])
@property
def start(self):
return self.start_position, (False, False, False, False)
def is_goal(self, state):
position, corners_tuple = state
super().is_goal(position)
corners_bool_list = list(corners_tuple)
for boolean in corners_bool_list:
if not boolean:
return False
return True
def successors(self, state):
position, corners_tuple = state
successors = []
for move in util.Move.no_stop:
new_vector = position + move.vector
if not self.walls[new_vector]:
corners_bool_list = list(corners_tuple)
corners_list = list(self.corners)
if new_vector in corners_list:
index_position = corners_list.index(new_vector)
if new_vector in self.corners:
corners_bool_list[index_position] = True
successor = ((new_vector, tuple(corners_bool_list)), [move], 1)
successors.append(successor)
return successors
def pathcost(self, path):
return search.standard_pathcost(path, self.start_position, self.walls)
def corners_heuristic(state, representation):
"""
Calculates the Manhattan distance to the closest unvisited corner,
plus the Manhattan distance to the other unvisited corners.
This heuristic is admissible because the cost of the manhattan distance of these corners relative to each other is
never greater than the actual path cost of getting there.
:param state: this is the state of the game containing the position and visited corners of Pacman.
:param representation: (search.PositionSearchRepresentation) the search representation being passed in.
:returns: (number) the numerical result of the heuristic.
"""
# List of corner coordinates.
corners = list(representation.corners)
position, corners_visited = state
result = 0
future_corners_visited = list(corners_visited)
future_position = position
for i in range(len(corners)):
distance_to_corners = [0, 0, 0, 0]
for num_corner, corner in enumerate(future_corners_visited):
distance_to_corners[num_corner] = util.manhattan(future_position, corners[num_corner])
num_closest = 0
for num_corner, distance_corner in enumerate(distance_to_corners):
if future_corners_visited[num_closest]:
num_closest = num_corner
if not future_corners_visited[num_corner] and distance_corner < distance_to_corners[num_closest]:
num_closest = num_corner
if not future_corners_visited[num_closest]:
result += distance_to_corners[num_closest]
future_position = corners[num_closest]
future_corners_visited[num_closest] = True
else:
break
return result
def dots_heuristic(state, representation):
"""
Calculates the Manhattan distance from this state to all the pellets from this state,
then sorts them from high to low to find the 3 pellets that are furthest away.
We add the manhattan distance of the third furthest away pellet plus the distance of the 3rd to the 2nd plus the
distance from the 2nd to the furthest away pellet to the heuristic.
This heuristic is always admissible because the manhattan distance to all these points will never overestimate the
actual cost of going there.
:param state: this is the state of the game containing the position and visited corners of Pacman.
:param representation: (search.PositionSearchRepresentation) the search representation being passed in.
:returns: (number) the numerical result of the heuristic.
"""
position = state[0]
distance_list = [(util.manhattan(position, x), x) for x in state.dots]
heuristic = 0
distance_list.sort(reverse=True)
if len(distance_list) > 2:
heuristic += distance_list[2][0]
heuristic += util.manhattan(distance_list[2][1], distance_list[1][1])
heuristic += util.manhattan(distance_list[1][1], distance_list[0][1])
return heuristic
class ClosestDotSearchAgent(agents.SearchAgent):
def prepare(self, gstate):
self.actions = []
pacman = gstate.pacman
while gstate.dots:
next_segment = self.path_to_closest_dot(gstate)
self.actions += next_segment
for move in next_segment:
if move not in gstate.legal_moves_vector(gstate.agents[self.id]):
raise Exception('path_to_closest_dot returned an illegal move: {}, {}'.format(move, gstate))
gstate.apply_move(self.id, move)
print(f'[ClosestDotSearchAgent] path found with length {len(self.actions)}'
f' and pathcost {search.standard_pathcost(self.actions, pacman, gstate.walls)}')
@staticmethod
def path_to_closest_dot(gstate):
return ass2.breadthfirst(AnyDotSearchRepresentation(gstate))
class AnyDotSearchRepresentation(search.PositionSearchRepresentation):
def __init__(self, gstate):
super().__init__(gstate)
self.dots = gstate.dots
def is_goal(self, state):
return self.dots[state] is True
class ApproximateSearchAgent(agents.SearchAgent):
def prepare(self, gstate):
pass
def move(self, gstate):
if self.actions:
return self.actions.pop(0)
else:
self.actions = approx_search(search.AllDotSearchRepresentation(gstate))
return self.actions.pop(0)
def approx_search(representation: search.PositionSearchRepresentation) -> list:
"""
Search function that finds the closest node and returns the list of moves to that node,
also makes sure that Pacman finished the right part of the maze before beginning to work on the left part.
"""
frontier = queue.PriorityQueue()
frontier.put((0, (representation.start, [], 0)))
dots = list(representation.start[1])
# Finds all the nodes that are to the right of the middle.
right_dots = [x for x in dots if x[0] < representation.walls.shape[0] / 2]
# Finds all the nodes that are to the left of the middle.
left_dots = [x for x in dots if x[0] > representation.walls.shape[0] / 2]
explored = set()
while not frontier.empty():
_, successor = frontier.get()
state, path, cost = successor
if state in explored:
continue
explored.add(state)
# Returns if a path to the closest node in the right part of the map is found.
if state[0] in left_dots and not right_dots:
return path
# Returns if a path to the closest node in the left part of the map is found.
elif state[0] in right_dots:
return path
for successorState, actions, actionCost in representation.successors(state):
if successorState not in explored:
new_cost = cost + actionCost + right_heuristic(successorState, dots)
frontier.put((new_cost, (successorState, path + actions, cost + actionCost)))
return None
def right_heuristic(state, dots):
"""
Heuristic that weights the path by taking the node that is furthest right from Pacman.
"""
heuristic = 0
# Finds all the dots that are to the right of Pacman.
right_dots = [x for x in dots if x[0]< state[0][0]]
# Sorts the list by comparing the x coordinates.
right_dots.sort(key= lambda x: x[0])
# Returns the distance to the most right node.
if right_dots:
heuristic = util.manhattan(right_dots[0], state[0])
return heuristic
|
NoahVl/PacmanEvolution
|
PacmanEvolution/ass3.py
|
ass3.py
|
py
| 8,377 |
python
|
en
|
code
| 2 |
github-code
|
6
|
7577852611
|
from django import forms
from models import Attachment
class AttachmentForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user', None)
self.actived = kwargs.pop('actived', False)
super(AttachmentForm, self).__init__(*args, **kwargs)
def save(self):
attachment = super(AttachmentForm, self).save(commit=False)
attachment.user = self.user
attachment.actived = self.actived
attachment.save()
return attachment
class Meta:
model = Attachment
fields = ('file',)
|
vicalloy/django-lb-attachments
|
attachments/forms.py
|
forms.py
|
py
| 616 |
python
|
en
|
code
| 7 |
github-code
|
6
|
7131301851
|
# List Comprehensions
# quick ways to create lists in Python
my_list = []
for char in 'hello':
my_list.append(char)
print(my_list)
# there is a quicker way
# my_list = [param for param in iterable]
my_list = [char for char in 'hello']
print(my_list)
# first param can be an expression
my_list2 = [num * 2 for num in range(0, 100)]
print(my_list2)
# Can add a conditional at the end also
my_list3 = [num ** 2 for num in range(0, 100) if num % 2 == 0]
print(my_list3)
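# A conditional expression can also appear in the output position (illustrative addition)
my_list4 = ['even' if num % 2 == 0 else 'odd' for num in range(5)]
print(my_list4)  # ['even', 'odd', 'even', 'odd', 'even']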
|
leerobertsprojects/Python-Mastery
|
Advanced Python Concepts/Functional Programming/List Comprehensions.py
|
List Comprehensions.py
|
py
| 481 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25069790009
|
class Solution:
def moveZeroes(self, nums):
"""
Do not return anything, modify nums in-place instead.
"""
        # Iterate over a snapshot of the list: popping from the list while enumerating it
        # skips elements and moves zeroes incorrectly.
        for value in list(nums):
            if value == 0:
                nums.remove(value)
                nums.append(value)
print(nums)
if __name__=="__main__":
nums=[0, 0, 1, 0, 1, 1, 1]
print(nums)
a=Solution()
k=a.moveZeroes(nums)
|
ankitarm/Leetcode
|
Python/283.MoveZeros.py
|
283.MoveZeros.py
|
py
| 424 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38316017126
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
#imports routes
from .routes import home_blueprint
# from .database.model import *
def create_app():
app = Flask(__name__)
#load config file
app.config.from_object("project.config.Config")
#routes
app.register_blueprint(home_blueprint, url_prefix='/api/v1/home')
#init database
db.init_app(app)
return app
|
vipin733/flask_boilerplate
|
services/web/project/__init__.py
|
__init__.py
|
py
| 427 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18478316296
|
from block import Block
from transaction import Transaction
class ConverterToObj():
@staticmethod
def chain_to_obj(blockchain):
"""
Receives a blockchain of dictionaries and converts the blocks
        into Block objects and the transactions into Transaction objects.
Returns an updated blockchain of objects
"""
updated_blockchain = []
for block in blockchain:
converted_tx = [Transaction(
tx['sender'], tx['receiver'], tx['signature'], tx['amount']) for tx in block['transactions']]
updated_block = Block(
block['index'], block['previous_hash'], converted_tx, block['proof'], block['timestamp'])
updated_blockchain.append(updated_block)
return updated_blockchain
@staticmethod
def transaction_dict_to_obj(transactions):
"""
Converts a set of transactions dictionaries to Transaction object
Arguments:
- An Array of transactions
"""
updated_transactions = []
for tx in transactions:
updated_transaction = Transaction(
tx['sender'], tx['receiver'], tx['signature'], tx['amount'])
updated_transactions.append(updated_transaction)
return updated_transactions
|
salvescoding/bockchain_cryptocurrency
|
app/helpers/converter_to_obj.py
|
converter_to_obj.py
|
py
| 1,249 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6297668116
|
try:
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
except ImportError:
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from libs.lib import newIcon, labelValidator
BB = QDialogButtonBox
class AdjustWindowLevelDialog(QDialog):
def __init__(self, text="Adjust window/level", parent=None):
super(AdjustWindowLevelDialog, self).__init__(parent)
self.windowEdit = QLineEdit()
self.windowEdit.setText(text)
self.windowEdit.setValidator(labelValidator())
self.windowEdit.editingFinished.connect(self.postProcess)
self.levelEdit = QLineEdit()
self.levelEdit.setText(text)
self.levelEdit.setValidator(labelValidator())
self.levelEdit.editingFinished.connect(self.postProcess)
layout = QVBoxLayout()
layout.addWidget(self.windowEdit)
layout.addWidget(self.levelEdit)
self.buttonBox = bb = BB(BB.Ok | BB.Cancel, Qt.Horizontal, self)
bb.button(BB.Ok).setIcon(newIcon('done'))
bb.button(BB.Cancel).setIcon(newIcon('undo'))
bb.accepted.connect(self.validate)
bb.rejected.connect(self.reject)
layout.addWidget(bb)
self.setLayout(layout)
def validate(self):
try:
if self.windowEdit.text().trimmed() and self.levelEdit.text().trimmed():
try:
_ = int(self.windowEdit.text())
_ = int(self.levelEdit.text())
self.accept()
except ValueError:
self.reject()
except AttributeError:
# PyQt5: AttributeError: 'str' object has no attribute 'trimmed'
if self.windowEdit.text().strip() and self.levelEdit.text().strip():
try:
_ = int(self.windowEdit.text())
_ = int(self.levelEdit.text())
self.accept()
except ValueError:
self.reject()
def postProcess(self):
try:
self.windowEdit.setText(self.windowEdit.text().trimmed())
self.levelEdit.setText(self.levelEdit.text().trimmed())
except AttributeError:
# PyQt5: AttributeError: 'str' object has no attribute 'trimmed'
self.windowEdit.setText(self.windowEdit.text().strip())
self.levelEdit.setText(self.levelEdit.text().strip())
def popUp(self, w_width=1000, w_level=200, move=True):
self.windowEdit.setText(str(w_width))
self.windowEdit.setSelection(0, len(str(w_width)))
self.windowEdit.setFocus(Qt.PopupFocusReason)
self.levelEdit.setText(str(w_level))
if move:
self.move(QCursor.pos())
if self.exec_():
return int(self.windowEdit.text()), int(self.levelEdit.text())
else:
return None
|
RT-Rakesh/label-img
|
libs/adjustWindowLevelDialog.py
|
adjustWindowLevelDialog.py
|
py
| 2,895 |
python
|
en
|
code
| null |
github-code
|
6
|
8287227022
|
# encoding: utf-8
from django.test import TestCase
from django.db import IntegrityError
from subscription.models import Subscription
class SubscriptionModelTest(TestCase):
def test_create_new_subscription(self):
s = Subscription.objects.create(
name='Henrique Bastos',
cpf='05633165780',
email='[email protected]',
phone='21-9618-6180'
)
self.assertEquals(s.id, 1)
class SubscriptionModelUniqueTest(TestCase):
fixtures = ['subscription.json']
def test_cpf_must_be_unique(self):
s = Subscription(
name='Henrique Bastos',
cpf='05633165780',
email='[email protected]',
phone='21-9618-6180'
)
self.assertRaises(IntegrityError, s.save)
def test_email_must_be_unique(self):
s = Subscription(
name='Henrique Bastos',
cpf='38067528772',
email='[email protected]',
phone='21-9618-6180')
self.assertRaises(IntegrityError, s.save)
|
rosenclever/Eventex
|
subscription/tests/test_models.py
|
test_models.py
|
py
| 1,055 |
python
|
en
|
code
| 2 |
github-code
|
6
|
18552105619
|
# # Write a program that displays the graph of a quadratic function with given coefficients.
# # When creating the graph, choose the displayed X-axis range so that it includes:
# # the x-coordinate of the vertex and the roots, with a margin of about 10%
# # (for example: if the roots are x1=2 and x2=10, the X axis should cover points from 1.8 to 11).
# # If the parabola has no roots, or has a double root, the graph should include the vertex of the parabola and a margin of about 20%
# # (for example, if the vertex coordinate is x0=5, the X axis should cover points from 4 to 6).
import math
import matplotlib.pyplot as plot
import numpy as np
def liczenie_delty(a,b,c):
delta = (b*b) - 4*(a*c)
print('delta =',delta)
return delta
def wykres(delta,a,b,c):
    if delta == 0:
        print('The equation has one solution')
        x0 = (-b-(math.sqrt(delta)))/(2*a)
        print('x0 =', x0)
    elif delta > 0:
        print('The equation has two solutions')
        x1 = (-b-(math.sqrt(delta)))/(2*a)
        x2 = (-b+(math.sqrt(delta)))/(2*a)
        print('x1 =', x1)
        print('x2 =', x2)
        x0 = None
    else:
        print('The equation has no solutions')
        # no real roots: fall back to the vertex so the plotting branch below still has a point to centre on
        x0 = (-b)/(2*a)
print("f(x)={0}x^2+{1}x+{2}".format(a,b,c))
p = (-b)/(2*a)
q = (-delta)/(4*a)
print('p',p,'q',q)
if x0 is None:
if x1>x2:
x = np.linspace(x1+(0.1*x1), x2-(0.1*x1), 1000)
y = a * x ** 2 + b * x + c
fig, ax = plot.subplots()
ax.set_title("Wykres funkcji kwadratowej")
plot.grid(True)
ax.plot(x, y)
ax.hlines(y=0, xmin=min(x), xmax=max(x), colors='r', linestyles='--', lw=1)
            plot.scatter(p, q, color='red', label='Vertex')
            if x1 is not None:
                plot.scatter(x1, 0, color='green', label='Root')
            if x2 is not None:
                plot.scatter(x2, 0, color='green', label='Root')
plot.show()
else:
x = np.linspace(x1-(0.1*x1), x2+(0.1*x1), 1000)
y = a * x ** 2 + b * x + c
fig, ax = plot.subplots()
ax.set_title("Wykres funkcji kwadratowej")
plot.grid(True)
ax.plot(x, y)
ax.hlines(y=0, xmin=min(x), xmax=max(x), colors='r', linestyles='--', lw=1)
            plot.scatter(p, q, color='red', label='Vertex')
            if x1 is not None:
                plot.scatter(x1, 0, color='green', label='Root')
            if x2 is not None:
                plot.scatter(x2, 0, color='green', label='Root')
plot.show()
else:
x = np.linspace(x0-(0.2*x0), x0+(0.2*x0), 1000)
y = a * x ** 2 + b * x + c
fig, ax = plot.subplots()
ax.set_title("Wykres funkcji kwadratowej")
plot.grid(True)
ax.plot(x, y)
ax.hlines(y=0, xmin=min(x), xmax=max(x), colors='r', linestyles='--', lw=1)
        plot.scatter(p, q, color='red', label='Vertex')
plot.show()
print('Enter the number a:')
a=input()
while a == '0':
    print('a must be an integer and must not be zero. Enter the number a again:')
a=input()
a = int(a)
print('Enter the number b:')
b=input()
b = int(b)
print('Enter the number c:')
c=input()
c = int(c)
delta = liczenie_delty(a,b,c)
wykres(delta,a, b, c)
|
TomaszWs/Python-training
|
UG-training/wykres-funkcji.py
|
wykres-funkcji.py
|
py
| 3,391 |
python
|
pl
|
code
| 0 |
github-code
|
6
|
9352031238
|
"""Cleaning Functions
These functions define standard text processing functions for cleaning.
"""
from html import unescape
import re
import emoji
def clean_text(text):
"""Cleans single data entry of text.
Args:
text (str): input text for cleaning.
Returns:
str: output cleaned text.
"""
# convert HTML codes
text = unescape(text)
# replace mentions, URLs and emojis with special token
text = re.sub(r"@[A-Za-z0-9_-]+",'[USER]',text)
text = re.sub(r"http\S+",'[URL]',text)
text = ''.join(' [EMOJI] ' if (char in emoji.UNICODE_EMOJI) else char for char in text).strip()
# in Samory dataset there are mentions e.g. MENTION3851 --> convert to USER tokens
text = re.sub("MENTION[0-9]*", '[USER]', text)
# remove newline and tab characters
text = text.replace('\n',' ')
text = text.replace('\t',' ')
# remove leading ">" (reddit artifact)
text = text.lstrip('>')
# collapse whitespace into single whitespace
text = re.sub(r'\s+', ' ', text)
# remove leading and trailing whitespaces
text = text.strip()
return text
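# Illustrative example of the pipeline above (assumed input/output, not taken from the original repo):
#   clean_text("@someone check https://t.co/xyz &amp; MENTION1234\n")
#   -> "[USER] check [URL] & [USER]"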
def drop_nans(input_df, subset_col='text', verbose = False):
"""Removes posts with NaN values in given column.
Args:
input_df (pd.DataFrame): input dataframe.
subset_col (str, optional): column for NaN removal. Defaults to 'text'.
verbose (bool, optional): whether to print number of dropped values. Defaults to False.
Returns:
pd.DataFrame: output dataframe with modifications.
"""
# Get original len
orig_len = len(input_df)
# remove NANs in place
input_df.dropna(subset=[subset_col], inplace = True)
# Get new len
new_len = len(input_df)
if verbose is True:
print(f"""\nOrig len: {orig_len},
Num of dropped values: {orig_len - new_len},
New len: {new_len}""")
return input_df
def drop_duplicates(input_df, subset_col = 'clean_text', verbose = False):
"""Removes duplicate values in given column. Should be run *after* text cleaning.
Args:
input_df (pd.DataFrame): input dataframe.
subset_col (str, optional): column for de-duplication. Defaults to 'clean_text'.
verbose (bool, optional): whether to print number of dropped values. Defaults to False.
Returns:
pd.DataFrame: output dataframe with modifications.
"""
# Get original len
orig_len = len(input_df)
# remove duplicates in place
input_df.drop_duplicates(subset=[subset_col], inplace = True)
# Get new len
new_len = len(input_df)
if verbose is True:
print(f"""\nOrig len: {orig_len},
Num of dropped values: {orig_len - new_len},
New len: {new_len}""")
return input_df
def drop_empty_text(input_df, subset_col = 'clean_text', verbose = False):
"""Removes rows with empty text. Should be run *after* text cleaning.
Args:
input_df (pd.DataFrame): input dataframe.
subset_col (str, optional): column for empty text removal. Defaults to 'clean_text'.
verbose (bool, optional): whether to print number of dropped values. Defaults to False.
Returns:
pd.DataFrame: output dataframe with modifications.
"""
# Get original len
orig_len = len(input_df)
# drop rows with empty text
input_df = input_df[input_df[subset_col].values!=""]
# Get new len
new_len = len(input_df)
if verbose is True:
print(f"""\nOrig len: {orig_len},
Num of dropped values: {orig_len - new_len},
New len: {new_len}""")
return input_df
def drop_url_emoji(input_df, subset_col = 'clean_text', verbose = False):
"""Removes rows with only [URL] or [EMOJI] tokens. Should be run *after* text cleaning.
Args:
input_df (pd.DataFrame): input dataframe.
subset_col (str, optional): column for text removal. Defaults to 'clean_text'.
verbose (bool, optional): whether to print number of dropped values. Defaults to False.
Returns:
pd.DataFrame: output dataframe with modifications.
"""
# Get original len
orig_len = len(input_df)
# drop rows with text that is just [URL] or [EMOJI]
input_df = input_df[(input_df[subset_col]!="[URL]") & (input_df[subset_col]!="[EMOJI]")]
# Get new len
new_len = len(input_df)
if verbose is True:
print(f"""\nOrig len: {orig_len},
Num of dropped values: {orig_len - new_len},
New len: {new_len}""")
return input_df
|
HannahKirk/ActiveTransformers-for-AbusiveLanguage
|
scripts/0_data_prep/cleaning_functions.py
|
cleaning_functions.py
|
py
| 4,543 |
python
|
en
|
code
| 3 |
github-code
|
6
|
15018763785
|
from tkinter import Widget
import customtkinter as ctk
from customtkinter import ThemeManager
from View.GUI.Windows.GraphWindow.ButtonBar import ButtonBar
from View.GUI.Windows.GraphWindow.GraphCanvas import GraphCanvas
from View.GUI.Windows.WindowInterface import WindowInterface, Position
class GraphWindow(WindowInterface, ctk.CTkFrame):
@staticmethod
def get_title() -> str:
return "Graph"
@staticmethod
def get_start_position() -> Position:
return Position.Center
@staticmethod
def get_importance():
return 5
def __init__(self, parent, controller, network, move_to_center=True):
WindowInterface.__init__(self, parent, controller, network)
bg_color = ThemeManager.theme["color_scale"]["outer"]
fg_color = ThemeManager.theme["color_scale"]["inner"]
ctk.CTkFrame.__init__(self, parent, fg_color=fg_color, bg_color=bg_color)
self.columnconfigure(0, weight=1)
self.rowconfigure(0, weight=1)
self.graph_canvas = GraphCanvas(self, controller, network, move_to_center=move_to_center)
self.graph_canvas.grid(column=0, row=0, sticky="news", padx=3, pady=3)
self.button_bar = ButtonBar(self, self.graph_canvas)
self.button_bar.grid(column=0, row=0, sticky="n", pady=5)
self.graph_canvas.button_bar = self.button_bar
self.graph_canvas.initial_setup()
def clone(self, new_parent: Widget) -> 'WindowInterface':
new_window = GraphWindow(new_parent, self.controller, self.network, move_to_center=False)
new_window.graph_canvas.zoom_to(self.graph_canvas.scale_factor)
old_x_middle = self.graph_canvas.canvasx(self.graph_canvas.winfo_width() / 2)
old_y_middle = self.graph_canvas.canvasy(self.graph_canvas.winfo_height() / 2)
old_x_model, old_y_model = self.graph_canvas.coords_canvas_to_model(old_x_middle, old_y_middle)
# estimate screen mid as canvas is not yet drawn with correct width / height
estimated_mid_x = int(new_window.graph_canvas.canvasx(new_parent.winfo_width() / 2))
estimated_mid_y = int(new_window.graph_canvas.canvasy(new_parent.winfo_height() / 2))
new_window.graph_canvas.move_canvas_to(old_x_model, old_y_model, estimated_mid_x, estimated_mid_y)
return new_window
|
Moni5656/npba
|
View/GUI/Windows/GraphWindow/GraphWindow.py
|
GraphWindow.py
|
py
| 2,319 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19938577350
|
import struct
class MD4:
@staticmethod
def digest(input_file):
# input_file = input_data
def F(x, y, z):
return (x & y) | (~x & z)
def G(x, y, z):
return (x & y) | (x & z) | (y & z)
def H(x, y, z):
return x ^ y ^ z
def left_rotate(val, n):
lbits, rbits = (val << n) & mask, val >> (width - n)
return lbits | rbits
def bytes():
# return final hash as bytes
return struct.pack("<4L", *words)
width = 32
mask = 0xFFFFFFFF
words = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476]
length = len(input_file) * 8
input_file += b"\x80"
input_file += b"\x00" * (-(len(input_file) + 8) % 64)
# 448 bits + padding = 512 bits
input_file += struct.pack("<Q", length)
# Split message into 512-bit chunks.
message_chunks = []
for i in range(0, len(input_file), 64):
message_chunks.append(input_file[i: i + 64])
for chunk in message_chunks:
# fragments of an original message
X = list(struct.unpack("<16I", chunk))
# copy of initial words
h = words.copy()
# Round 1.
Xi = [3, 7, 11, 19]
for n in range(16):
a, b, c, d = map(lambda x: x % 4, range(-n, -n + 4))
K, S = n, Xi[n % 4]
to_rotate = h[a] + F(h[b], h[c], h[d]) + X[K]
h[a] = left_rotate(to_rotate & mask, S)
# Round 2.
Xi = [3, 5, 9, 13]
for n in range(16):
a, b, c, d = map(lambda x: x % 4, range(-n, -n + 4))
K, S = n % 4 * 4 + n // 4, Xi[n % 4]
to_rotate = h[a] + G(h[b], h[c], h[d]) + X[K] + 0x5A827999
h[a] = left_rotate(to_rotate & mask, S)
# Round 3.
Xi = [3, 9, 11, 15]
Ki = [0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15]
for n in range(16):
a, b, c, d = map(lambda x: x % 4, range(-n, -n + 4))
K, S = Ki[n], Xi[n % 4]
to_rotate = h[a] + H(h[b], h[c], h[d]) + X[K] + 0x6ED9EBA1
h[a] = left_rotate(to_rotate & mask, S)
# Create the final message
words = [((v + n) & mask) for v, n in zip(words, h)]
# return hash
return "".join(f"{value:02x}" for value in bytes())
|
dzakrzew/io-ns
|
generators/python/hash_functions/md4.py
|
md4.py
|
py
| 2,484 |
python
|
en
|
code
| 0 |
github-code
|
6
|
787431703
|
"""
Tests for `nameko_cachetools` module.
"""
import time
import pytest
from mock import Mock, patch
import random
import eventlet
from nameko.rpc import rpc
from nameko.standalone.rpc import ServiceRpcProxy
from nameko_cachetools import CachedRpcProxy, CacheFirstRpcProxy
from nameko.testing.services import (entrypoint_hook, entrypoint_waiter,
get_extension)
@pytest.fixture
def container(container_factory, rabbit_config):
class Service(object):
name = "service"
cached_service = CachedRpcProxy('some_other_service', failover_timeout=1)
cache_first_service = CacheFirstRpcProxy('some_other_service')
@rpc
def cached(self, *args, **kwargs):
return self.cached_service.some_method(*args, **kwargs)
@rpc
def cache_first(self, *args, **kwargs):
return self.cache_first_service.some_method(*args, **kwargs)
container = container_factory(Service, rabbit_config)
container.start()
return container
def test_cached_response(container):
cached_rpc = get_extension(container, CachedRpcProxy)
def fake_some_method(*args, **kwargs):
return 'hi'
with patch('nameko.rpc.MethodProxy.__call__', fake_some_method):
with entrypoint_hook(container, 'cached') as hook:
assert hook('test') == 'hi'
def broken_some_method(*args, **kwargs):
raise Exception('hmm')
with patch('nameko.rpc.MethodProxy.__call__', broken_some_method):
with entrypoint_hook(container, 'cached') as hook:
assert hook('test') == 'hi'
with patch('nameko.rpc.MethodProxy.__call__', broken_some_method):
with entrypoint_hook(container, 'cached') as hook:
with pytest.raises(Exception):
hook('unknown')
cached_rpc.cache = {}
with patch('nameko.rpc.MethodProxy.__call__', broken_some_method):
with entrypoint_hook(container, 'cached') as hook:
with pytest.raises(Exception):
hook('test')
def test_cached_response_on_timeout(container):
cached_rpc = get_extension(container, CachedRpcProxy)
def fake_some_method(*args, **kwargs):
return 'hi'
with patch('nameko.rpc.MethodProxy.__call__', fake_some_method):
with entrypoint_hook(container, 'cached') as hook:
assert hook() == 'hi'
def slow_response(*args, **kwargs):
eventlet.sleep(3)
return 'hi'
start = time.time()
with patch('nameko.rpc.MethodProxy.__call__', slow_response):
with entrypoint_hook(container, 'cached') as hook:
assert hook() == 'hi'
assert time.time() - start < 2
cached_rpc.cache = {}
start = time.time()
with patch('nameko.rpc.MethodProxy.__call__', slow_response):
with entrypoint_hook(container, 'cached') as hook:
assert hook() == 'hi'
assert time.time() - start >= 3
def test_cached_rich_args_rich_response(container):
response = {}
request = {}
for i in range(400):
response[random.randint(1, 1000)] = ['a', (2, 3), {'b': 4.3}]
request[random.randint(1, 1000)] = ['b', [4, 6], {'c': 8.9}]
def fake_some_method(*args, **kwargs):
return response
with patch('nameko.rpc.MethodProxy.__call__', fake_some_method):
with entrypoint_hook(container, 'cached') as hook:
assert hook(request) == response
def broken_some_method(*args, **kwargs):
raise Exception('hmm')
with patch('nameko.rpc.MethodProxy.__call__', broken_some_method):
with entrypoint_hook(container, 'cached') as hook:
assert hook(request) == response
def test_cache_first(container):
mock = Mock()
with patch('nameko.rpc.MethodProxy.__call__', mock):
with entrypoint_hook(container, 'cache_first') as hook:
hook('ho')
mock.assert_called_once_with('ho')
mock.reset_mock()
with patch('nameko.rpc.MethodProxy.__call__', mock):
with entrypoint_hook(container, 'cache_first') as hook:
hook('ho')
mock.assert_not_called()
cache_first_rpc = get_extension(container, CacheFirstRpcProxy)
cache_first_rpc.cache = {}
with patch('nameko.rpc.MethodProxy.__call__', mock):
with entrypoint_hook(container, 'cache_first') as hook:
hook('ho')
mock.assert_called_once_with('ho')
|
santiycr/nameko-cachetools
|
test/test_nameko_cachetools.py
|
test_nameko_cachetools.py
|
py
| 4,402 |
python
|
en
|
code
| 9 |
github-code
|
6
|
73477691067
|
from tinygrad.tensor import Tensor
import numpy
import os
# Format Details:
# A KINNE parameter set is stored as a set of files named "snoop_bin_*.bin",
# where the * is a number starting at 0.
# Each file is simply raw little-endian floats,
# as readable by: numpy.fromfile(path, "<f4")
# and as writable by: t.data.astype("<f4", "C").tofile(path)
# This format is intended to be extremely simple to get into literally anything.
# It is not intended to be structural or efficient - reloading a network when
# unnecessary is inefficient anyway.
# Ultimately, the idea behind this is as a format that, while it will always
# require code to implement, requires as little code as possible, and therefore
# works as a suitable interchange for any situation.
# To add to the usability of the format, some informal metadata is provided,
# in "meta.txt", which provides human-readable shape information.
# This is intended to help with debugging other implementations of the network,
# by providing concrete human-readable information on tensor shapes.
# It is NOT meant to be read by machines.
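# As a rough illustration of the on-disk convention described above (the path and the
# values are made up for this example, not taken from this repository):
#
#   import numpy
#   numpy.asarray([1.0, 2.0, 3.0, 4.0], dtype="<f4").tofile("models/abc/snoop_bin_0.bin")
#   loaded = numpy.fromfile("models/abc/snoop_bin_0.bin", "<f4").reshape(2, 2)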
class KinneDir:
"""
A KinneDir is an intermediate object used to save or load a model.
"""
def __init__(self, base: str, save: bool):
"""
Opens a new KINNE directory with the given base path.
If save is true, the directory is created if possible.
(This does not create parents.)
Save being true or false determines if tensors are loaded or saved.
The base path is of the form "models/abc" - no trailing slash.
It is important that if you wish to save in the current directory,
you use ".", not the empty string.
"""
if save:
try:
os.mkdir(base)
except:
# Silence the exception - the directory may (and if reading, does) already exist.
pass
self.base = base + "/snoop_bin_"
self.next_part_index = 0
self.save = save
if save:
self.metadata = open(base + "/meta.txt", "w")
def parameter(self, t: Tensor):
"""
parameter loads or saves a parameter, given as a tensor.
"""
path = f"{self.base}{self.next_part_index}.bin"
if self.save:
t.data.astype("<f4", "C").tofile(path)
self.metadata.write(f"{self.next_part_index}: {t.shape}\n")
else:
t.assign(Tensor(numpy.fromfile(path, "<f4")).reshape(shape=t.shape))
self.next_part_index += 1
def parameters(self, params):
"""
parameters loads or saves a sequence of parameters.
It's intended for easily attaching to an existing model,
assuming that your parameters list orders are consistent.
(In other words, usage with tinygrad.utils.get_parameters isn't advised -
it's too 'implicit'.)
"""
for t in params:
self.parameter(t)
def close(self):
if self.save:
self.metadata.close()
|
fpaboim/tinysparse
|
extra/kinne.py
|
kinne.py
|
py
| 2,836 |
python
|
en
|
code
| 9 |
github-code
|
6
|
10649216487
|
"""
ProjectManager
Description:
"""
import pygame,sys
pygame.init()
# Defining Image Width
get_width = int(input("Image Width: (px)"))
get_height = int(input("Image Height: (px)"))
get_name = str(input("Project Name: "))
win_size = (get_width,get_height)
# Creating Project Script
file = get_name + '.txt'
with open(file,'w') as f:
f.write("class " + get_name + ":\n")
f.write(" def __init__(self,bg_color,pos=(0,0)):\n")
f.write(" self.pos = list(pos)\n")
f.write(" self.img = pygame.Surface(" + str([win_size[0],win_size[1]]) + ")\n")
f.write(" self.img.fill(bg_color)\n\n")
f.write(" # Drawing Code Goes Here")
# Editing Current Shape
currentPolygon = False
# Window
w,h = (win_size[0],win_size[1])
win = pygame.display.set_mode([w,h])
# Variables
image_panel = []
pt_list = []
color_list = []
# Idea for Saving Data?
save_data = {
"item1": "color_data"
}
# Color Tuples
BACKGROUND = (255,255,255)
color = (0,0,0)
# Shaping Functions
def update_polygons(point_list):
global image_panel
for i in range(len(point_list)):
pygame.draw.circle(win,(255,0,0),(point_list[i][0],point_list[i][1]),4)
pygame.draw.polygon(win, color,point_list)
def polygon_tool():
global pt_list,currentPolygon
if not currentPolygon:
image_panel.append(pt_list)
color_list.append(color)
pt_list = []
print("Current Tool: None")
else:
print("Current Tool: Polygon Shape Tool")
def undo_move():
global image_panel,pt_list,color
try: image_panel.pop(-1)
except: pass
win.fill(BACKGROUND)
for i in range(len(image_panel)):
pygame.draw.polygon(win, color,image_panel[i])
def save_image(image_panel):
with open (file, 'a') as f:
for i in range(len(image_panel)):
f.write('\n pygame.draw.polygon(self.img,' + str(color_list[i]) + "," + str(image_panel[i]) + ')')
print("Image Saved! You can now close the application...")
# Window Loop
while True:
x, y = pygame.mouse.get_pos()
key = pygame.key.get_pressed()
for e in pygame.event.get():
if e.type == pygame.QUIT:
sys.exit()
if e.type == pygame.MOUSEBUTTONDOWN:
if currentPolygon:
pt_list += [(x,y)]
if e.type == pygame.KEYUP:
if key[pygame.K_p]:
print("Current Tool: Pen Tool")
if key[pygame.K_r]:
currentPolygon = not currentPolygon
polygon_tool()
if key[pygame.K_f]:
print("Current Tool: Bucket Fill Tool")
if key[pygame.K_LEFT]: # Undo Move
undo_move()
if key[pygame.K_RIGHT]: # Redo Move
pass
if key[pygame.K_c]: # Change Color
                new_color = input("Enter New Color: (tuple) | ")
                color = tuple(literal_eval(new_color))  # e.g. "(255, 0, 0)"
if key[pygame.K_s]: # Saving
print("Saving Image...")
save_image(image_panel)
update_polygons(pt_list)
pygame.display.flip()
| LandenTy/GeometricEngine | CustomTexturer/main.py | main.py | py | 3,267 | python | en | code | 0 | github-code | 6 | 71319102588 |
def check(a, b):
if a > 0 and b > 0:
return True
else:
return False
while True:
    m = int(input('Enter the number of experts: '))
    n = int(input('Enter the number of goals: '))
    if not check(m, n):
        print('Invalid values entered. Please try again!')
continue
else:
break
mat = []
for i in range(m):
mat.append([])
for j in range(n):
mat[i].append(0)
print('Fill in the initial preference matrix: ')
for i in range(m):
    print('Expert #', i+1)
    for j in range(n):
        while True:
            print("\tGoal #", j+1, ": ", end=' ')
            val = int(input())
            if val < 0 or val > n:
                print('Invalid value entered. Please try again!')
continue
else:
mat[i][j] = val
break
print('\n' * 100)
print('Initial preference matrix: ')
for i in range(m):
print()
for j in range(n):
print(mat[i][j], end = ' ')
print('\n\nModified preference matrix:')
for i in range(m):
print()
for j in range(n):
print(n-mat[i][j], end = ' ')
list_of_sums = []
print('\n\nTotal preference scores:')
for j in range(n):
sum_of_marks = 0
for i in range(m):
sum_of_marks += (n-mat[i][j])
print(sum_of_marks, end = ' ')
list_of_sums.append(sum_of_marks)
omega = []
for i in range(n):
omega.append(0)
print('\n\nResulting goal weights:')
for i in range(n):
omega[i] = list_of_sums[i]/sum(list_of_sums)
print(round(omega[i], 2), end = ' ')
max_omega = omega[0]
solution = 1
for i in range(n):
if omega[i] > max_omega:
max_omega = omega[i]
solution = i + 1
print('\n\nANSWER: The most advantageous alternative is #', solution)
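# Worked example (hypothetical input, not from the assignment): with m = 2 experts,
# n = 3 goals and rankings mat = [[1, 2, 3], [2, 1, 3]], the modified matrix n - mat[i][j]
# is [[2, 1, 0], [1, 2, 0]], the per-goal sums are [3, 3, 0], and the weights become
# omega = [0.5, 0.5, 0.0], so the answer is alternative #1 (the first maximum found).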
| jewdash/SAandPIS | САиПИС_4/код.py | код.py | py | 2,194 | python | ru | code | 1 | github-code | 6 | 36132885755 |
from random import choice
from time import sleep
from colorama import init, Fore
init()
deck_preset = ("A", *range(2, 11), "J", "Q", "K")
deck = [item for item in deck_preset for i in range(4)]
del deck_preset
class Card:
special_names = ["A", "J", "Q", "K"]
def __init__(self, name):
if name == "A":
self.name = str(name)
self.value = 11
elif name in Card.special_names:
self.name = str(name)
self.value = 10
else:
self.value = name
self.name = str(name)
def __repr__(self):
if self.name in Card.special_names:
return f"{self.name}({self.value})"
else:
return f"{self.name}"
def calculate_scores(player):
return sum([card.value for card in player])
def validate_score(player):
if calculate_scores(player) > 21:
return True
def print_cards(player, method="spread", hide_last=False):
if method == "spread":
if hide_last:
return ', '.join([str(card) for card in player[:-1]])
return ', '.join([str(card) for card in player])
elif method == "sum":
if hide_last:
return str(sum([card.value for card in player[:-1]]))
return str(calculate_scores(player))
def print_scores(player, dealer, hide_dealer=True):
print(f"\nYour cards: {Fore.CYAN + print_cards(player) + Fore.WHITE} "
f"[{Fore.MAGENTA + str(calculate_scores(player)) + Fore.WHITE}]")
if hide_dealer:
print(f"Dealer cards: {Fore.CYAN + print_cards(dealer, 'spread', hide_dealer) + Fore.WHITE}, (?)"
f"[{Fore.MAGENTA + print_cards(dealer, 'sum', hide_dealer) + Fore.WHITE}]")
else:
print(f"Dealer cards: {Fore.CYAN + print_cards(dealer, 'spread', hide_dealer) + Fore.WHITE} "
f"[{Fore.MAGENTA + print_cards(dealer, 'sum', hide_dealer) + Fore.WHITE}]")
def draw_cards(n=1):
cards = []
for i in range(n):
card = choice(deck)
deck.remove(card)
cards.append(Card(card))
return cards
def change_aces(player):
score = calculate_scores(player)
a_index = [player.index(card) for card in player if card.name == "A" and card.value == 11]
if score > 21 and a_index:
for index in a_index:
player[index].value = 1
a_index.pop(0)
score = calculate_scores(player)
if score <= 21:
break
def check_scores(player1, player2, check_draw=False):
player1_score = calculate_scores(player1)
player2_score = calculate_scores(player2)
if check_draw:
if player1_score == player2_score:
return True
else:
if player1_score == 21:
return True
return False
def compare_scores(player, dealer):
player_score = calculate_scores(player)
dealer_score = calculate_scores(dealer)
if dealer_score < player_score:
return True
if check_scores(player, dealer) and check_scores(dealer, player):
print(Fore.YELLOW + "\n----------Draw!----------")
quit()
elif check_scores(player, dealer, True):
if calculate_scores(dealer) > 18:
print(Fore.YELLOW + "\n----------Draw!----------")
quit()
else:
return True
elif 21 >= player_score > dealer_score:
print(Fore.GREEN + "\n----------You win!----------")
quit()
elif 21 >= dealer_score > player_score:
print(Fore.RED + "\n----------Dealer wins!----------")
quit()
else:
print(Fore.BLUE + "Unexpected situation:", player_score, dealer_score)
quit()
def end_game(player, dealer):
change_aces(player)
change_aces(dealer)
print_scores(player, dealer, False)
while compare_scores(player, dealer):
dealer.extend(draw_cards())
change_aces(dealer)
sleep(1)
print_scores(player, dealer, False)
if validate_score(dealer):
print(Fore.GREEN + "\n----------You win!----------")
quit()
def game():
in_game = True
player = draw_cards(2)
change_aces(player)
dealer = draw_cards(2)
print_scores(player, dealer)
while in_game:
button_draw = Fore.GREEN + "'d'" + Fore.WHITE
button_stand = Fore.GREEN + "'s'" + Fore.WHITE
print(f"Type {button_draw} to draw a card or {button_stand} to stand: ", end='')
user_choice = input().lower().strip()
if user_choice[0] == "d":
player.extend(draw_cards())
change_aces(player)
print_scores(player, dealer)
if validate_score(player):
print(Fore.RED + "\n----------Dealer wins!----------")
quit()
elif user_choice[0] == "s":
end_game(player, dealer)
else:
print(Fore.YELLOW + "\n----------Invalid choice.----------" + Fore.WHITE)
print("""
.------. _ _ _ _ _
|A_ _ |. | | | | | | (_) | |
|( \/ ).-----. | |__ | | __ _ ___| | ___ __ _ ___| | __
| \ /|K /\ | | '_ \| |/ _` |/ __| |/ / |/ _` |/ __| |/ /
| \/ | / \ | | |_) | | (_| | (__| <| | (_| | (__| <
`-----| \ / | |_.__/|_|\__,_|\___|_|\_\ |\__,_|\___|_|\_\\
| \/ K| _/ |
`------' |__/
""")
game()
| Rikaisan/100-days-of-code | python-files/11_blackjack.py | 11_blackjack.py | py | 5,613 | python | en | code | 1 | github-code | 6 | 33153414975 |
import dgl
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import dgl.function as fn
from dgl.nn.pytorch import edge_softmax
class GCNLayer(nn.Module):
def __init__(self,
in_feats,
out_feats,
activation,
dropout,
bias=True):
super(GCNLayer, self).__init__()
self.weight = nn.Parameter(torch.Tensor(in_feats, out_feats))
if bias:
self.bias = nn.Parameter(torch.Tensor(out_feats))
else:
self.bias = None
self.activation = activation
if dropout:
self.dropout = nn.Dropout(p=dropout)
else:
self.dropout = 0.
self.reset_parameters()
def reset_parameters(self):
'''uniform init.
'''
stdv = 1. / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, g, h):
g = g.local_var()
if self.dropout:
h = self.dropout(h)
h = torch.mm(h, self.weight)
# normalization by square root of src degree
h = h * g.ndata['norm']
g.ndata['h'] = h
g.update_all(fn.copy_src(src='h', out='m'),
fn.sum(msg='m', out='h'))
h = g.ndata.pop('h')
# normalization by square root of dst degree
h = h * g.ndata['norm']
# bias
if self.bias is not None:
h = h + self.bias
if self.activation:
h = self.activation(h)
return h
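# A small usage sketch for GCNLayer (hedged: assumes the caller precomputes the
# symmetric-normalization factor in g.ndata['norm'], which this layer expects):
#
#   g = dgl.graph(([0, 1, 2], [1, 2, 0]))                  # toy 3-node graph
#   g.ndata['norm'] = torch.pow(g.in_degrees().float().clamp(min=1), -0.5).unsqueeze(1)
#   layer = GCNLayer(in_feats=8, out_feats=4, activation=F.relu, dropout=0.0)
#   out = layer(g, torch.randn(3, 8))                      # -> shape (3, 4)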
class GATLayer(nn.Module):
r"""Apply `Graph Attention Network <https://arxiv.org/pdf/1710.10903.pdf>`__
over an input signal.
.. math::
h_i^{(l+1)} = \sum_{j\in \mathcal{N}(i)} \alpha_{i,j} W^{(l)} h_j^{(l)}
    where :math:`\alpha_{ij}` is the attention score between node :math:`i` and
node :math:`j`:
.. math::
\alpha_{ij}^{l} & = \mathrm{softmax_i} (e_{ij}^{l})
e_{ij}^{l} & = \mathrm{LeakyReLU}\left(\vec{a}^T [W h_{i} \| W h_{j}]\right)
Parameters
----------
in_feats : int
Input feature size.
out_feats : int
Output feature size.
num_heads : int
Number of heads in Multi-Head Attention.
feat_drop : float, optional
Dropout rate on feature, defaults: ``0``.
attn_drop : float, optional
Dropout rate on attention weight, defaults: ``0``.
negative_slope : float, optional
LeakyReLU angle of negative slope.
residual : bool, optional
If True, use residual connection.
activation : callable activation function/layer or None, optional.
If not None, applies an activation function to the updated node features.
Default: ``None``.
"""
def __init__(self,
in_feats,
out_feats,
num_heads,
feat_drop=0.,
attn_drop=0.,
negative_slope=0.2,
residual=False,
activation=None):
super(GATLayer, self).__init__()
self._num_heads = num_heads
self._in_feats = in_feats
self._out_feats = out_feats
self.fc = nn.Linear(in_feats, out_feats * num_heads, bias=False)
self.attn_l = nn.Parameter(torch.FloatTensor(size=(1, num_heads, out_feats)))
self.attn_r = nn.Parameter(torch.FloatTensor(size=(1, num_heads, out_feats)))
self.feat_drop = nn.Dropout(feat_drop)
self.attn_drop = nn.Dropout(attn_drop)
self.leaky_relu = nn.LeakyReLU(negative_slope)
if residual:
if in_feats != out_feats:
self.res_fc = nn.Linear(in_feats, num_heads * out_feats, bias=False)
else:
self.res_fc = lambda x:x
else:
self.register_buffer('res_fc', None)
self.reset_parameters()
self.activation = activation
def reset_parameters(self):
"""Reinitialize learnable parameters."""
gain = nn.init.calculate_gain('relu')
nn.init.xavier_normal_(self.fc.weight, gain=gain)
nn.init.xavier_normal_(self.attn_l, gain=gain)
nn.init.xavier_normal_(self.attn_r, gain=gain)
if isinstance(self.res_fc, nn.Linear):
nn.init.xavier_normal_(self.res_fc.weight, gain=gain)
def forward(self, graph, feat):
r"""Compute graph attention network layer.
Parameters
----------
graph : DGLGraph
The graph.
feat : torch.Tensor
The input feature of shape :math:`(N, D_{in})` where :math:`D_{in}`
is size of input feature, :math:`N` is the number of nodes.
Returns
-------
torch.Tensor
The output feature of shape :math:`(N, H, D_{out})` where :math:`H`
is the number of heads, and :math:`D_{out}` is size of output feature.
"""
graph = graph.local_var()
h = self.feat_drop(feat)
feat = self.fc(h).view(-1, self._num_heads, self._out_feats)
el = (feat * self.attn_l).sum(dim=-1).unsqueeze(-1)
er = (feat * self.attn_r).sum(dim=-1).unsqueeze(-1)
graph.ndata.update({'ft': feat, 'el': el, 'er': er})
# compute edge attention
graph.apply_edges(fn.u_add_v('el', 'er', 'e'))
e = self.leaky_relu(graph.edata.pop('e'))
# compute softmax
graph.edata['a'] = self.attn_drop(edge_softmax(graph, e))
# message passing
graph.update_all(fn.u_mul_e('ft', 'a', 'm'),
fn.sum('m', 'ft'))
rst = graph.ndata['ft']
# residual
if self.res_fc is not None:
resval = self.res_fc(h).view(h.shape[0], -1, self._out_feats)
rst = rst + resval
# activation
if self.activation:
rst = self.activation(rst)
return rst
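# A small usage sketch for GATLayer (hedged: assumes a DGL build where dgl.graph and
# edge_softmax behave as used above; the output shape follows the docstring):
#
#   g = dgl.graph(([0, 1, 2], [1, 2, 0]))          # toy 3-node graph
#   layer = GATLayer(in_feats=8, out_feats=4, num_heads=2)
#   out = layer(g, torch.randn(3, 8))              # -> shape (3, 2, 4) == (N, H, D_out)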
def adaptive_message_func(edges):
'''
send data for computing metrics and update.
'''
return {'feat':edges.src['h'],'logits': edges.src['logits']}
def adaptive_attn_message_func(edges):
return {'feat': edges.src['ft']* edges.data['a'],
'logits': edges.src['logits'],
'a': edges.data['a']}
def adaptive_attn_reduce_func(nodes):
# (n_nodes, n_edges, n_classes)
_, pred = torch.max(nodes.mailbox['logits'], dim=2)
_, center_pred = torch.max(nodes.data['logits'], dim=1)
n_degree = nodes.data['degree']
# case 1
# ratio of common predictions
a = nodes.mailbox['a'].squeeze(3) #(n_node, n_neighbor, n_head, 1)
n_head = a.size(2)
idxs = torch.eq(pred, center_pred.unsqueeze(1)).unsqueeze(2).expand_as(a)
f1 = torch.div(torch.sum(a*idxs, dim=1), n_degree.unsqueeze(1)) # (n_node, n_head)
f1 = f1.detach()
# case 2
# entropy of neighborhood predictions
uniq = torch.unique(pred)
# (n_unique)
cnts_p = torch.zeros((pred.size(0), n_head, uniq.size(0),)).cuda()
for i,val in enumerate(uniq):
idxs = torch.eq(pred, val).unsqueeze(2).expand_as(a)
tmp = torch.div(torch.sum(a*idxs, dim=1),n_degree.unsqueeze(1)) # (n_nodes, n_head)
cnts_p[:,:, i] = tmp
cnts_p = torch.clamp(cnts_p, min=1e-5)
f2 = (-1)* torch.sum(cnts_p * torch.log(cnts_p),dim=2)
f2 = f2.detach()
neighbor_agg = torch.sum(nodes.mailbox['feat'], dim=1) #(n_node, n_head, n_feat)
return {
'f1': f1,
'f2':f2,
'agg': neighbor_agg,
}
def adaptive_reduce_func(nodes):
'''
compute metrics and determine if we need to do neighborhood aggregation.
'''
# (n_nodes, n_edges, n_classes)
_, pred = torch.max(nodes.mailbox['logits'], dim=2)
_, center_pred = torch.max(nodes.data['logits'], dim=1)
n_degree = nodes.data['degree']
# case 1
# ratio of common predictions
f1 = torch.sum(torch.eq(pred,center_pred.unsqueeze(1)), dim=1)/n_degree
f1 = f1.detach()
# case 2
# entropy of neighborhood predictions
uniq = torch.unique(pred)
# (n_unique)
cnts_p = torch.zeros((pred.size(0), uniq.size(0),)).cuda()
for i,val in enumerate(uniq):
tmp = torch.sum(torch.eq(pred, val), dim=1)/n_degree
cnts_p[:, i] = tmp
cnts_p = torch.clamp(cnts_p, min=1e-5)
f2 = (-1)* torch.sum(cnts_p * torch.log(cnts_p),dim=1)
f2 = f2.detach()
return {
'f1': f1,
'f2':f2,
}
class GatedAttnLayer(nn.Module):
def __init__(self, g, in_feats, out_feats, activation, dropout, num_heads,
attn_drop=0.,
negative_slope=0.2,lidx=1):
super(GatedAttnLayer, self).__init__()
self._num_heads = num_heads
self._in_feats = in_feats
self._out_feats = out_feats
if in_feats != out_feats:
self.fc = nn.Linear(in_feats, out_feats * num_heads, bias=False) # for first layer
self.feat_drop = nn.Dropout(dropout)
self.attn_drop = nn.Dropout(attn_drop)
self.leaky_relu = nn.LeakyReLU(negative_slope)
self.activation = activation
self.tau_1 = nn.Parameter(torch.zeros((1,)))
self.tau_2 = nn.Parameter(torch.zeros((1,)))
self.ln_1 = nn.LayerNorm((g.number_of_nodes(), num_heads),elementwise_affine=False)
self.ln_2 = nn.LayerNorm((g.number_of_nodes(),num_heads), elementwise_affine=False)
self.reset_parameters(lidx)
def reset_parameters(self, lidx, how='layerwise'):
gain = nn.init.calculate_gain('relu')
if how == 'normal':
nn.init.normal_(self.tau_1)
nn.init.normal_(self.tau_2)
else:
nn.init.constant_(self.tau_1, 1/(lidx+1))
nn.init.constant_(self.tau_2, 1/(lidx+1))
return
def forward(self, g, h, logits, old_z, attn_l, attn_r, shared_tau=True, tau_1=None, tau_2=None):
g = g.local_var()
if self.feat_drop:
h = self.feat_drop(h)
if hasattr(self, 'fc'):
feat = self.fc(h).view(-1, self._num_heads, self._out_feats)
else:
feat = h
g.ndata['h'] = feat # (n_node, n_feat)
g.ndata['logits'] = logits
el = (feat * attn_l).sum(dim=-1).unsqueeze(-1)
er = (feat * attn_r).sum(dim=-1).unsqueeze(-1)
g.ndata.update({'ft': feat, 'el': el, 'er': er})
# compute edge attention
g.apply_edges(fn.u_add_v('el', 'er', 'e'))
e = self.leaky_relu(g.edata.pop('e'))
# compute softmax
g.edata['a'] = self.attn_drop(edge_softmax(g, e))
g.update_all(message_func=adaptive_attn_message_func, reduce_func=adaptive_attn_reduce_func)
f1 = g.ndata.pop('f1')
f2 = g.ndata.pop('f2')
norm_f1 = self.ln_1(f1)
norm_f2 = self.ln_2(f2)
if shared_tau:
z = F.sigmoid((-1)*(norm_f1-tau_1)) * F.sigmoid((-1)*(norm_f2-tau_2))
else:
# tau for each layer
z = F.sigmoid((-1)*(norm_f1-self.tau_1)) * F.sigmoid((-1)*(norm_f2-self.tau_2))
gate = torch.min(old_z, z)
agg = g.ndata.pop('agg')
normagg = agg * g.ndata['norm'].unsqueeze(1) # normalization by tgt degree
if self.activation:
normagg = self.activation(normagg)
new_h = feat + gate.unsqueeze(2)*normagg
return new_h,z
class GatedLayer(nn.Module):
def __init__(self,g,in_feats, out_feats, activation, dropout, lidx=1):
super(GatedLayer, self).__init__()
self.weight_neighbors= nn.Linear(in_feats, out_feats)
self.activation = activation
self.dropout = nn.Dropout(p=dropout)
self.tau_1 = nn.Parameter(torch.zeros((1,)))
self.tau_2 = nn.Parameter(torch.zeros((1,)))
self.ln_1 = nn.LayerNorm((g.number_of_nodes()),elementwise_affine=False)
self.ln_2 = nn.LayerNorm((g.number_of_nodes()), elementwise_affine=False)
self.reset_parameters(lidx)
def reset_parameters(self,lidx, how='layerwise'):
# initialize params
if how == 'normal':
nn.init.normal_(self.tau_1)
nn.init.normal_(self.tau_2)
else:
nn.init.constant_(self.tau_1, 1/(lidx+1))
nn.init.constant_(self.tau_2, 1/(lidx+1))
return
def forward(self, g, h, logits, old_z, shared_tau=True, tau_1=None, tau_2=None):
# operates on a node
g = g.local_var()
if self.dropout:
h = self.dropout(h)
g.ndata['h'] = h
g.ndata['logits'] = logits
g.update_all(message_func=fn.copy_u('logits','logits'), reduce_func=adaptive_reduce_func)
f1 = g.ndata.pop('f1')
f2 = g.ndata.pop('f2')
norm_f1 = self.ln_1(f1)
norm_f2 = self.ln_2(f2)
if shared_tau:
z = F.sigmoid((-1)*(norm_f1-tau_1)) * F.sigmoid((-1)*(norm_f2-tau_2))
else:
# tau for each layer
z = F.sigmoid((-1)*(norm_f1-self.tau_1)) * F.sigmoid((-1)*(norm_f2-self.tau_2))
gate = torch.min(old_z, z)
g.update_all(message_func=fn.copy_u('h','feat'), reduce_func=fn.sum(msg='feat', out='agg'))
agg = g.ndata.pop('agg')
normagg = agg * g.ndata['norm'] # normalization by tgt degree
if self.activation:
normagg = self.activation(normagg)
new_h = h + gate.unsqueeze(1)*normagg
return new_h,z
class GatedAPPNPConv(nn.Module):
r"""Approximate Personalized Propagation of Neural Predictions
layer from paper `Predict then Propagate: Graph Neural Networks
meet Personalized PageRank <https://arxiv.org/pdf/1810.05997.pdf>`__.
.. math::
H^{0} & = X
H^{t+1} & = (1-\alpha)\left(\hat{D}^{-1/2}
\hat{A} \hat{D}^{-1/2} H^{t}\right) + \alpha H^{0}
Parameters
----------
k : int
Number of iterations :math:`K`.
alpha : float
The teleport probability :math:`\alpha`.
edge_drop : float, optional
Dropout rate on edges that controls the
messages received by each node. Default: ``0``.
"""
def __init__(self,
g, k,
n_hidden, n_classes,
edge_drop=0., lidx=1):
super(GatedAPPNPConv, self).__init__()
self._k = k
self.edge_drop = nn.Dropout(edge_drop)
self.tau_1 = nn.Parameter(torch.zeros((1,)))
self.tau_2 = nn.Parameter(torch.zeros((1,)))
self.ln_1 = nn.LayerNorm((g.number_of_nodes()),elementwise_affine=False)
self.ln_2 = nn.LayerNorm((g.number_of_nodes()), elementwise_affine=False)
self.weight_y = nn.Linear(n_hidden, n_classes)
self.reset_parameters(lidx)
def reset_parameters(self,lidx, how='layerwise'):
# initialize params
if how == 'normal':
nn.init.normal_(self.tau_1)
nn.init.normal_(self.tau_2)
else:
nn.init.constant_(self.tau_1, 1/(lidx+1))
nn.init.constant_(self.tau_2, 1/(lidx+1))
return
def forward(self, graph, feat, logits):
r"""Compute APPNP layer.
Parameters
----------
graph : DGLGraph
The graph.
feat : torch.Tensor
The input feature of shape :math:`(N, *)` :math:`N` is the
number of nodes, and :math:`*` could be of any shape.
Returns
-------
torch.Tensor
The output feature of shape :math:`(N, *)` where :math:`*`
should be the same as input shape.
"""
graph = graph.local_var()
norm = torch.pow(graph.in_degrees().float().clamp(min=1), -0.5)
shp = norm.shape + (1,) * (feat.dim() - 1)
norm = torch.reshape(norm, shp).to(feat.device)
feat_0 = feat
z = torch.FloatTensor([1.0,]).cuda()
for lidx in range(self._k):
# normalization by src node
old_z = z
feat = feat * norm
graph.ndata['h'] = feat
old_feat = feat
if lidx != 0:
logits = self.weight_y(feat)
graph.ndata['logits'] = logits
graph.update_all(message_func=fn.copy_u('logits','logits'), reduce_func=adaptive_reduce_func)
f1 = graph.ndata.pop('f1')
f2 = graph.ndata.pop('f2')
norm_f1 = self.ln_1(f1)
norm_f2 = self.ln_2(f2)
z = F.sigmoid((-1)*(norm_f1-self.tau_1)) * F.sigmoid((-1)*(norm_f2-self.tau_2))
gate = torch.min(old_z, z)
graph.edata['w'] = self.edge_drop(
torch.ones(graph.number_of_edges(), 1).to(feat.device))
graph.update_all(fn.u_mul_e('h', 'w', 'm'),
fn.sum('m', 'h'))
feat = graph.ndata.pop('h')
# normalization by dst node
feat = feat * norm
feat = z.unsqueeze(1)* feat + old_feat # raw features
return feat
class GraphTopoAttention(nn.Module):
def __init__(self,
g,
in_dim,
topo_dim,
out_dim,
num_heads,
feat_drop,
attn_drop,
residual=False,
concat=True,
last_layer=False):
super(GraphTopoAttention, self).__init__()
self.g = g
self.num_heads = num_heads
if feat_drop:
self.feat_drop = nn.Dropout(feat_drop)
else:
self.feat_drop = lambda x : x
if attn_drop:
self.attn_drop = nn.Dropout(attn_drop)
else:
self.attn_drop = lambda x : x
# weight matrix Wl for leverage property
if last_layer:
self.fl = nn.Linear(in_dim+topo_dim, out_dim, bias=False)
else:
self.fl = nn.Linear(in_dim, num_heads*out_dim, bias=False)
# weight matrix Wc for aggregation context
self.fc = nn.Parameter(torch.Tensor(size=(in_dim+topo_dim, num_heads*out_dim)))
# weight matrix Wq for neighbors' querying
self.fq = nn.Parameter(torch.Tensor(size=(in_dim, num_heads*out_dim)))
nn.init.xavier_normal_(self.fl.weight.data)
nn.init.constant_(self.fc.data, 10e-3)
nn.init.constant_(self.fq.data, 10e-3)
self.attn_activation = nn.ELU()
self.softmax = edge_softmax
self.residual = residual
if residual:
if in_dim != out_dim:
self.res_fl = nn.Linear(in_dim, num_heads * out_dim, bias=False)
nn.init.xavier_normal_(self.res_fl.weight.data)
else:
self.res_fl = None
self.concat = concat
self.last_layer = last_layer
def forward(self, inputs, topo=None):
# prepare
h = self.feat_drop(inputs) # NxD
if topo:
t = self.feat_drop(topo) #N*T
if not self.last_layer:
ft = self.fl(h).reshape((h.shape[0], self.num_heads, -1)) # NxHxD'
if topo:
ft_c = torch.matmul(torch.cat((h, t), 1), self.fc).reshape((h.shape[0], self.num_heads, -1)) # NxHxD'
else:
ft_c = torch.matmul(h, self.fc).reshape((h.shape[0], self.num_heads, -1)) # NxHxD'
ft_q = torch.matmul(h, self.fq).reshape((h.shape[0], self.num_heads, -1)) # NxHxD'
self.g.ndata.update({'ft' : ft, 'ft_c' : ft_c, 'ft_q' : ft_q})
self.g.apply_edges(self.edge_attention)
self.edge_softmax()
l_s = int(0.713*self.g.edata['a_drop'].shape[0])
topk, _ = torch.topk(self.g.edata['a_drop'], l_s, largest=False, dim=0)
thd = torch.squeeze(topk[-1])
self.g.edata['a_drop'] = self.g.edata['a_drop'].squeeze()
self.g.edata['a_drop'] = torch.where(self.g.edata['a_drop']-thd<0, self.g.edata['a_drop'].new([0.0]), self.g.edata['a_drop'])
attn_ratio = torch.div((self.g.edata['a_drop'].sum(0).squeeze()+topk.sum(0).squeeze()), self.g.edata['a_drop'].sum(0).squeeze())
self.g.edata['a_drop'] = self.g.edata['a_drop'] * attn_ratio
self.g.edata['a_drop'] = self.g.edata['a_drop'].unsqueeze(-1)
self.g.update_all(fn.src_mul_edge('ft', 'a_drop', 'ft'), fn.sum('ft', 'ft'))
ret = self.g.ndata['ft']
if self.residual:
if self.res_fl is not None:
resval = self.res_fl(h).reshape((h.shape[0], self.num_heads, -1)) # NxHxD'
else:
resval = torch.unsqueeze(h, 1) # Nx1xD'
ret = resval + ret
ret = torch.cat((ret.flatten(1), ft.mean(1).squeeze()), 1) if self.concat else ret.flatten(1)
else:
if topo:
ret = self.fl(torch.cat((h, t), 1))
else:
ret = self.fl(h)
return ret
def edge_attention(self, edges):
c = edges.dst['ft_c']
q = edges.src['ft_q'] - c
a = (q * c).sum(-1).unsqueeze(-1)
return {'a': self.attn_activation(a)}
def edge_softmax(self):
attention = self.softmax(self.g, self.g.edata.pop('a'))
self.g.edata['a_drop'] = self.attn_drop(attention)
| raspberryice/ala-gcn | layers.py | layers.py | py | 21,424 | python | en | code | 21 | github-code | 6 | 42600676142 |
import multiprocessing
import operator
from functools import partial
import numpy as np
from core import mathlib
from core.interact import interact as io
from core.leras import nn
from facelib import FaceType, XSegNet
from models import ModelBase
from samplelib import *
class XSegModel(ModelBase):
def __init__(self, *args, **kwargs):
super().__init__(*args, force_model_class_name='XSeg', **kwargs)
    # Override of the parent method; initializes the model options
    #override
    def on_initialize_options(self):
        # Check whether the existing model should be overridden
        ask_override = self.ask_override()
        # If this is not the first run and the user chose to override, reset the model weights and train from scratch
        if not self.is_first_run() and ask_override:
            if io.input_bool("Restart training from scratch?", False, help_message="Resets the model weights and trains from the beginning."):
                self.set_iter(0)
        # Set the default options and load any previously saved option values
default_face_type = self.options['face_type'] = self.load_or_def_option('face_type', 'wf')
default_pretrain = self.options['pretrain'] = self.load_or_def_option('pretrain', False)
        # On the first run, ask the user to choose the face type
        if self.is_first_run():
            self.options['face_type'] = io.input_str("Face type", default_face_type,
                                                     ['h', 'mf', 'f', 'wf', 'head'],
                                                     help_message="Half / mid face / full face / whole face / head. Choose the same type as your deepfake model.").lower()
        # On the first run, or when overriding, ask for the batch size and whether to enable pretraining mode
        if self.is_first_run() or ask_override:
            self.ask_batch_size(4, range=[2, 16])
            self.options['pretrain'] = io.input_bool("Enable pretraining mode", default_pretrain)
        # If not exporting, and pretraining mode is enabled but no pretraining data path is set, raise an exception
        if not self.is_exporting and (self.options['pretrain'] and self.get_pretraining_data_path() is None):
            raise Exception("pretraining_data_path is not defined")
        # Check whether pretraining mode was just disabled
        self.pretrain_just_disabled = (default_pretrain == True and self.options['pretrain'] == False)
    # Override of the parent method; sets up the model itself at initialization time
#override
def on_initialize(self):
device_config = nn.getCurrentDeviceConfig()
self.model_data_format = "NCHW" if self.is_exporting or (
len(device_config.devices) != 0 and not self.is_debug()) else "NHWC"
nn.initialize(data_format=self.model_data_format)
tf = nn.tf
device_config = nn.getCurrentDeviceConfig()
devices = device_config.devices
self.resolution = resolution = 256
        # Map the user's choice to a face type ('h', 'mf', 'f', 'wf' or 'head')
        self.face_type = {'h': FaceType.HALF,
                          'mf': FaceType.MID_FULL,
                          'f': FaceType.FULL,
                          'wf': FaceType.WHOLE_FACE,
                          'head': FaceType.HEAD}[self.options['face_type']]
        # Decide whether the model should be placed on the CPU
        place_model_on_cpu = len(devices) == 0
        models_opt_device = '/CPU:0' if place_model_on_cpu else nn.tf_default_device_name
        # Define the shapes of the input images and masks
        bgr_shape = nn.get4Dshape(resolution, resolution, 3)
        mask_shape = nn.get4Dshape(resolution, resolution, 1)
        # Initialize the model class
self.model = XSegNet(name='XSeg',
resolution=resolution,
load_weights=not self.is_first_run(),
weights_file_root=self.get_model_root_path(),
training=True,
place_model_on_cpu=place_model_on_cpu,
optimizer=nn.RMSprop(lr=0.0001, lr_dropout=0.3, name='opt'),
data_format=nn.data_format)
        # Set pretraining mode (if needed)
        self.pretrain = self.options['pretrain']
        if self.pretrain_just_disabled:
            self.set_iter(0)
        if self.is_training:
            # Adjust the batch size so it divides evenly across the available GPUs
            gpu_count = max(1, len(devices))
            bs_per_gpu = max(1, self.get_batch_size() // gpu_count)
            self.set_batch_size(gpu_count * bs_per_gpu)
            # Compute the loss on each GPU
gpu_pred_list = []
gpu_losses = []
gpu_loss_gvs = []
for gpu_id in range(gpu_count):
with tf.device(f'/{devices[gpu_id].tf_dev_type}:{gpu_id}' if len(devices) != 0 else f'/CPU:0'):
with tf.device(f'/CPU:0'):
                        # Slice on the CPU, otherwise the whole batch would be transferred to the GPU first
batch_slice = slice(gpu_id * bs_per_gpu, (gpu_id + 1) * bs_per_gpu)
gpu_input_t = self.model.input_t[batch_slice, :, :, :]
gpu_target_t = self.model.target_t[batch_slice, :, :, :]
                    # Run the model tensors through the network
                    gpu_pred_logits_t, gpu_pred_t = self.model.flow(gpu_input_t, pretrain=self.pretrain)
                    gpu_pred_list.append(gpu_pred_t)
                    if self.pretrain:
                        # Structural (DSSIM) loss
                        gpu_loss = tf.reduce_mean(
                            5 * nn.dssim(gpu_target_t, gpu_pred_t, max_val=1.0, filter_size=int(resolution / 11.6)),
                            axis=[1])
                        gpu_loss += tf.reduce_mean(
                            5 * nn.dssim(gpu_target_t, gpu_pred_t, max_val=1.0, filter_size=int(resolution / 23.2)),
                            axis=[1])
                        # Pixel loss
gpu_loss += tf.reduce_mean(10 * tf.square(gpu_target_t - gpu_pred_t), axis=[1, 2, 3])
else:
gpu_loss = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(labels=gpu_target_t, logits=gpu_pred_logits_t),
axis=[1, 2, 3])
gpu_losses += [gpu_loss]
gpu_loss_gvs += [nn.gradients(gpu_loss, self.model.get_weights())]
            # Average the losses and gradients, and create the optimizer update op
with tf.device(models_opt_device):
pred = tf.concat(gpu_pred_list, 0)
loss = tf.concat(gpu_losses, 0)
loss_gv_op = self.model.opt.get_update_op(nn.average_gv_list(gpu_loss_gvs))
            # Initialize the training and view functions
if self.pretrain:
def train(input_np, target_np):
l, _ = nn.tf_sess.run([loss, loss_gv_op], feed_dict={self.model.input_t: input_np, self.model.target_t: target_np})
return l
else:
def train(input_np, target_np):
l, _ = nn.tf_sess.run([loss, loss_gv_op], feed_dict={self.model.input_t: input_np, self.model.target_t: target_np})
return l
self.train = train
def view(input_np):
return nn.tf_sess.run([pred], feed_dict={self.model.input_t: input_np})
self.view = view
            # Initialize the sample generators
cpu_count = min(multiprocessing.cpu_count(), 8)
src_dst_generators_count = cpu_count // 2
src_generators_count = cpu_count // 2
dst_generators_count = cpu_count // 2
if self.pretrain:
pretrain_gen = SampleGeneratorFace(self.get_pretraining_data_path(), debug=self.is_debug(), batch_size=self.get_batch_size(),
sample_process_options=SampleProcessor.Options(random_flip=True),
output_sample_types=[{'sample_type': SampleProcessor.SampleType.FACE_IMAGE, 'warp': True, 'transform': True, 'channel_type': SampleProcessor.ChannelType.BGR, 'face_type': self.face_type, 'data_format': nn.data_format, 'resolution': resolution},
{'sample_type': SampleProcessor.SampleType.FACE_IMAGE, 'warp': True, 'transform': True, 'channel_type': SampleProcessor.ChannelType.G, 'face_type': self.face_type, 'data_format': nn.data_format, 'resolution': resolution},
],
uniform_yaw_distribution=False,
generators_count=cpu_count)
self.set_training_data_generators([pretrain_gen])
else:
srcdst_generator = SampleGeneratorFaceXSeg([self.training_data_src_path, self.training_data_dst_path],
debug=self.is_debug(),
batch_size=self.get_batch_size(),
resolution=resolution,
face_type=self.face_type,
generators_count=src_dst_generators_count,
data_format=nn.data_format)
src_generator = SampleGeneratorFace(self.training_data_src_path, debug=self.is_debug(), batch_size=self.get_batch_size(),
sample_process_options=SampleProcessor.Options(random_flip=False),
output_sample_types=[{'sample_type': SampleProcessor.SampleType.FACE_IMAGE, 'warp': False, 'transform': False, 'channel_type': SampleProcessor.ChannelType.BGR, 'border_replicate': False, 'face_type': self.face_type, 'data_format': nn.data_format, 'resolution': resolution},
],
generators_count=src_generators_count,
raise_on_no_data=False)
dst_generator = SampleGeneratorFace(self.training_data_dst_path, debug=self.is_debug(), batch_size=self.get_batch_size(),
sample_process_options=SampleProcessor.Options(random_flip=False),
output_sample_types=[{'sample_type': SampleProcessor.SampleType.FACE_IMAGE, 'warp': False, 'transform': False, 'channel_type': SampleProcessor.ChannelType.BGR, 'border_replicate': False, 'face_type': self.face_type, 'data_format': nn.data_format, 'resolution': resolution},
],
generators_count=dst_generators_count,
raise_on_no_data=False)
self.set_training_data_generators([srcdst_generator, src_generator, dst_generator])
    # Override of the parent method; returns the list of model filenames
#override
def get_model_filename_list(self):
return self.model.model_filename_list
    # Override of the parent method; saves the model weights when the model is saved
#override
def onSave(self):
self.model.save_weights()
    # Override of the parent method; runs one training iteration
#override
def onTrainOneIter(self):
image_np, target_np = self.generate_next_samples()[0]
loss = self.train(image_np, target_np)
return (('loss', np.mean(loss)), )
    # Override of the parent method; builds the preview images
#override
def onGetPreview(self, samples, for_history=False):
n_samples = min(4, self.get_batch_size(), 800 // self.resolution)
if self.pretrain:
srcdst_samples, = samples
image_np, mask_np = srcdst_samples
else:
srcdst_samples, src_samples, dst_samples = samples
image_np, mask_np = srcdst_samples
I, M, IM, = [ np.clip(nn.to_data_format(x, "NHWC", self.model_data_format), 0.0, 1.0) for x in ([image_np, mask_np] + self.view(image_np)) ]
M, IM, = [ np.repeat(x, (3,), -1) for x in [M, IM] ]
green_bg = np.tile(np.array([0, 1, 0], dtype=np.float32)[None, None, ...], (self.resolution, self.resolution, 1))
result = []
st = []
for i in range(n_samples):
if self.pretrain:
ar = I[i], IM[i]
else:
ar = I[i] * M[i] + 0.5 * I[i] * (1 - M[i]) + 0.5 * green_bg * (1 - M[i]), IM[i], I[i] * IM[i] + 0.5 * I[i] * (1 - IM[i]) + 0.5 * green_bg * (1 - IM[i])
st.append(np.concatenate(ar, axis=1))
result += [('XSeg training faces', np.concatenate(st, axis=0)), ]
if not self.pretrain and len(src_samples) != 0:
src_np, = src_samples
D, DM, = [ np.clip(nn.to_data_format(x, "NHWC", self.model_data_format), 0.0, 1.0) for x in ([src_np] + self.view(src_np)) ]
DM, = [ np.repeat(x, (3,), -1) for x in [DM] ]
st = []
for i in range(n_samples):
ar = D[i], DM[i], D[i] * DM[i] + 0.5 * D[i] * (1 - DM[i]) + 0.5 * green_bg * (1 - DM[i])
st.append(np.concatenate(ar, axis=1))
result += [('XSeg src faces', np.concatenate(st, axis=0)), ]
if not self.pretrain and len(dst_samples) != 0:
dst_np, = dst_samples
D, DM, = [ np.clip(nn.to_data_format(x, "NHWC", self.model_data_format), 0.0, 1.0) for x in ([dst_np] + self.view(dst_np)) ]
DM, = [ np.repeat(x, (3,), -1) for x in [DM] ]
st = []
for i in range(n_samples):
ar = D[i], DM[i], D[i] * DM[i] + 0.5 * D[i] * (1 - DM[i]) + 0.5 * green_bg * (1 - DM[i])
st.append(np.concatenate(ar, axis=1))
result += [('XSeg dst faces', np.concatenate(st, axis=0)), ]
return result
    # Export the model to ONNX format
def export_dfm(self):
output_path = self.get_strpath_storage_for_file(f'model.onnx')
io.log_info(f'Dumping .onnx to {output_path}')
tf = nn.tf
with tf.device(nn.tf_default_device_name):
input_t = tf.placeholder(nn.floatx, (None, self.resolution, self.resolution, 3), name='in_face')
input_t = tf.transpose(input_t, (0, 3, 1, 2))
_, pred_t = self.model.flow(input_t)
pred_t = tf.transpose(pred_t, (0, 2, 3, 1))
tf.identity(pred_t, name='out_mask')
output_graph_def = tf.graph_util.convert_variables_to_constants(
nn.tf_sess,
tf.get_default_graph().as_graph_def(),
['out_mask']
)
import tf2onnx
with tf.device("/CPU:0"):
model_proto, _ = tf2onnx.convert._convert_common(
output_graph_def,
name='XSeg',
input_names=['in_face:0'],
output_names=['out_mask:0'],
opset=13,
output_path=output_path)
Model = XSegModel
| ccvvx1/Python_Df | models/Model_XSeg/Model.py | Model.py | py | 15,453 | python | en | code | 0 | github-code | 6 | 27519489621 |
from django.shortcuts import (render, get_object_or_404,
get_list_or_404, redirect, HttpResponse)
from .models import Total
from .serializer import TotalSerializer, Serializer # , UserSerializer
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from django.contrib.auth.models import User
from rest_framework.authtoken.models import Token
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, logout, login
from .forms import RegisterForm, LoginForm, ProfileForm
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.core.mail import send_mail
from django.conf import settings
from django.contrib.auth.decorators import login_required
# from total.decorators import add_get_request
# from django.views.decorators.http import require_http_methods
# Create your views here.
def home(request):
return render(request, 'index.html', {})
def contact(request):
if request.method == 'POST':
name = request.POST['name']
email = request.POST['email']
message = request.POST['message']
print(name, email, message)
send_mail(subject='API message', message=message,
from_email=email, recipient_list=['[email protected]'])
messages.success(request, 'Message sent Successfully')
return redirect('home')
else:
return render(request, 'index.html', {})
class endpoints(APIView):
def get(self, request):
return Response([
{"endpoint": 'description',
"api/v2/": 'endpoints home'},
{"register": 'Page to register user'},
{"login": 'Login Page, to get token after login'},
{"login/username=<username>&password=<password>/":
'''a GET reqest to this endpoint with a registered users
username & pasword return the token for the user'''},
{"api/v2/all/token": 'return all data from the beginning of corona virus till today'},
{"api/v2/today/token": 'return the data for today'},
{"api/v2/dates/2020-10-1:2020-11-10:2020-12-10/token":
'return the data for the three dates seperated by :'},
{"api/v2/from/2020-22-10/token": 'return the datas starting from 2020-22-10', },
{"api/v2/yesterday/token": 'return the data for yesterday'},
{"api/v2/date/2020-10-20/token": 'return the data for the specify date'},
])
def login_user(request):
next = request.GET.get('next')
if request.method == 'POST':
username = request.POST['username']
password = request.POST['password']
user = authenticate(request, username=username, password=password)
if user is not None:
login(request, user)
return render(request, 'login.html', {})
def logout_user(request):
logout(request)
return redirect('home')
@login_required
def profile(request):
user = request.user
return render(request, 'profile.html', {'user': user})
def register_user(request):
new_user = None
if request.method == 'POST':
form = RegisterForm(request.POST)
if form.is_valid():
username = form.cleaned_data['username']
password = form.cleaned_data['password1']
new_user = form.save(commit=False)
new_user.set_password(password)
new_user.save()
Token.objects.create(user=new_user)
messages.info(request, 'registration successfull, Login First')
return redirect('login')
# return render(request, 'register_success.html',
# {'new_user': new_user})
else:
form = RegisterForm()
return render(request, 'register.html', {'form': form})
class LoginView(APIView):
permission_classes = ()
def post(self, request, username, password):
username = username
password = password
# username = request.data.get('username')
# password = request.data.get('password')
user = authenticate(username=username, password=password)
if user:
return Response({"token": user.auth_token.key})
else:
return Response({"error": "wrong credentials"})
class TotalListView(APIView):
'''
    This will return all data from the start of the coronavirus outbreak up to today
'''
# permission_classes = (IsAuthenticated,)
def get(self, request, token):
try:
user = get_object_or_404(User, auth_token=token)
except Exception as DoesNotExist:
user = None
print(user)
if user:
obj = Total.objects.all()
# lookup_field = 'hello'
data = TotalSerializer(obj, many=True).data
return Response(data)
else:
return Response({'error': 'Invalid Token'})
class GetDateView(APIView):
def get(self, request, day, token):
try:
user = get_object_or_404(User, auth_token=token)
except Exception as DoesNotExist:
user = None
if user:
obj = get_object_or_404(Total, day=day)
data = Serializer(obj).data
return Response(data)
else:
return Response({'error': 'Invalid Token'})
class GetFromDate(APIView):
def get(self, request, day, token):
try:
user = get_object_or_404(User, auth_token=token)
except Exception as DoesNotExist:
user = None
if user:
q1 = get_object_or_404(Total, day=day).pk
q = Total.objects.filter(id__gte=q1)
# obj = get_list_or_404(q)
data = Serializer(q, many=True).data
return Response(data)
else:
return Response({'error': 'Invalid Token'})
class GetFirstOccurence(APIView):
'''
    Will return the day of the first occurrence of Covid19 in Nigeria
'''
def get(self, request, token):
try:
user = get_object_or_404(User, auth_token=token)
except Exception as DoesNotExist:
user = None
if user:
obj = Total.objects.all().filter(confirmed=1)
data = Serializer(obj[0]).data
# print(obj)
return Response(data)
else:
return Response({'error': 'Invalid Token'})
class GetToday(APIView):
def get(self, request, token):
try:
user = get_object_or_404(User, auth_token=token)
except Exception as DoesNotExist:
user = None
if user:
query = Total.objects.all()
obj = query[0]
data = Serializer(obj).data
return Response(data)
else:
return Response({'error': 'Invalid Token'})
class GetYesterday(APIView):
def get(self, request, token):
try:
user = get_object_or_404(User, auth_token=token)
except Exception as DoesNotExist:
user = None
if user:
query = Total.objects.all().order_by('id')
obj = query[len(query) - 2]
data = Serializer(obj).data
return Response(data)
else:
return Response({'error': 'Invalid Token'})
class GetSepDate(APIView):
def get(self, request, days, token):
try:
user = get_object_or_404(User, auth_token=token)
except Exception as DoesNotExist:
user = None
if user:
d1 = days.split(':')[0]
d2 = days.split(':')[1]
d3 = days.split(':')[2]
print(d1, d2, d3, days)
obj = Total.objects.filter(day__in=[d1, d2, d3])
print(obj,)
data = Serializer(obj, many=True).data
return Response(data)
else:
return Response({'error': 'Invalid Token'})
@login_required
def edit_profile(request):
if request.method == 'POST':
form = ProfileForm(data=request.POST, instance=request.user)
if form.is_valid():
form.save()
return redirect('profile')
else:
messages.warning(request, 'Error occured')
else:
form = ProfileForm(instance=request.user)
return render(request, 'edit_profile.html', {'form': form})
| Afeez1131/CovidNg-2021 | total/views.py | views.py | py | 8,441 | python | en | code | 0 | github-code | 6 | 4880169931 |
#!/usr/bin/env bash
"""true" '''\'
set -e
eval "$(${CONDA_EXE:-conda} shell.bash hook)"
conda deactivate
conda activate audio-lessons
exec python "$0" "$@"
exit $?
''"""
import re
from re import Match
from chrutils import ced2mco
def main() -> None:
in_file: str = "data/cll2-v1-vocab-list-ced.txt"
out_file: str = "data/cll2-v1-vocab-list-mco.txt"
with open(in_file, "r") as r:
with open(out_file, "w") as w:
for line in r:
if not line.strip():
continue
# line = line.replace("\u00a0", " ")
if "[" in line:
matches: list[str] = re.findall("\\[.*?]", line)
if not matches:
continue
for match in matches:
mco_text = ced2mco(match)
line = line.replace(match, mco_text)
if "(" in line:
matches: list[str] = re.findall("\\(.*?\\)", line)
if not matches:
continue
for match in matches:
mco_text = ced2mco(match)
line = line.replace(match, mco_text)
w.write(line)
pass
if __name__ == '__main__':
main()
| CherokeeLanguage/audio-lessons-generator-python | fix_cll2_v1_vocab_list.py | fix_cll2_v1_vocab_list.py | py | 1,306 | python | en | code | 2 | github-code | 6 | 5809207089 |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.9.1+dev
# kernelspec:
# display_name: Python [conda env:biovectors]
# language: python
# name: conda-env-biovectors-py
# ---
# # Statistical Test for Multi-Model Variation
# After confirming that aligning multiple word2vec models is a success [03_multi_model_alignment_check.ipynb](03_multi_model_alignment_check.ipynb), the next step is to construct a metric that accounts for intra and inter year variation.
#
# Typically, the way to compare words is to use cosine distance, which measures how far apart two vectors are by looking at the angle between them.
# A more common name for this would be [cosine similarity](https://en.wikipedia.org/wiki/Cosine_similarity); the difference is that cosine distance (1 - cosine similarity) shifts the range from [-1, 1] to [0, 2].
#
# Regarding this project, I'm using cosine distance to see how a word changes across time.
# I based this comparison off of two metrics defined by authors in [this paper](http://arxiv.org/abs/1606.02821).
# - Global distance is defined as the cosine distance between words in year with their second year counterparts
# - Local distance is defined as the cosine distance of a word's similarity to its neighbors across time (no longer used)
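#
# As a quick sketch, the distance used throughout is simply (hypothetical vectors, not taken from the data):
#
#     import numpy as np
#     a, b = np.array([1.0, 0.0]), np.array([0.0, 1.0])
#     cos_dist = 1 - np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))  # 1.0 for orthogonal vectors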
# +
# %load_ext autoreload
# %autoreload 2
from collections import Counter
import csv
import copy
import itertools
import math
from pathlib import Path
import random
import re
from gensim.models import Word2Vec, KeyedVectors
from joblib import Parallel, delayed
import numpy as np
import pandas as pd
import plotnine as p9
import plydata as ply
import plydata.tidy as ply_tdy
import scipy.stats as stats
import tqdm
from biovectors_modules.word2vec_analysis_helper import align_word2vec_models
# -
# Method only used for this notebook
def return_global_plot(year_model, tok="are", limits=(0, 1), inter_or_intra="intra"):
g = (
p9.ggplot(
year_model >> ply.query(f"tok=='{tok}'"),
p9.aes(x="year", y="global_distance"),
)
+ p9.geom_boxplot()
+ p9.labs(
title=f"{inter_or_intra.capitalize()} Year global Distance for Token: '{tok}'"
)
+ p9.coord_flip()
+ p9.scale_y_continuous(limits=limits)
+ p9.theme_seaborn(style="white")
)
return g
# # Grab a listing of all word models
word_models = list(Path("output/models").rglob("*model"))
word_models = sorted(word_models, key=lambda x: x.stem)
word_model_filter = list(filter(lambda x: "2021" not in x.stem, word_models))
alignment_base_model = Word2Vec.load(str(word_model_filter[-1]))
temp_output_path = Path("output/aligned_vectors_tmp")
for model_file in tqdm.tqdm(word_model_filter):
if not Path(f"{str(temp_output_path)}/{model_file.stem}.kv").exists():
word_model = Word2Vec.load(str(model_file))
aligned_model = align_word2vec_models(alignment_base_model.wv, word_model.wv)
aligned_model.save(f"{str(temp_output_path)}/{model_file.stem}.kv")
# # Inter and Intra Variation calculation
# Refer to the following scripts in order to perform inter and intra word2vec calculations:
# 1. [pmacs_cluster_running_inter_model_variation.py](pmacs_cluster_running_inter_model_variation.py)
# 2. [pmacs_cluster_running_intra_model_variation.py](pmacs_cluster_running_intra_model_variation.py)
# # Are word2vec models unstable?
# Due to the nature of negative sampling, word2vec models generate weights arbitrarily.
# This is undesired, as a token in the year 2000 cannot be compared with a token in 2001.
# A solution is to use orthogonal Procrustes to align word2vec models; however, variation could still remain in these word models.
# To measure this variation I trained 10 unique word2vec models on abstracts for each given year and then calculated global and local distances between every word2vec model pair (10 choose 2).
# From there I analyzed variation within each year (termed intra-year variation).
# ## Intra Model Calculations
intra_year_models = []
for idx, file in enumerate(Path("output/intra_models").rglob("*.tsv.xz")):
intra_year_model_df = pd.read_csv(
str(file), sep="\t", na_filter=False
) >> ply_tdy.extract("year_pair", into="year", regex=r"(\d+)_", convert=True)
intra_year_models.append(intra_year_model_df)
if Path(
f"output/averaged_intra_models/average_{str(Path(file.stem).stem)}.tsv"
).exists():
continue
averaged_intra_year_models = dict()
for idx, row in tqdm.tqdm(
intra_year_model_df.iterrows(), desc=f"intra_df: {str(file)}"
):
if (row["tok"], int(row["year"])) not in averaged_intra_year_models:
averaged_intra_year_models[(row["tok"], int(row["year"]))] = dict(
global_distance=[], local_distance=[]
)
averaged_intra_year_models[(row["tok"], int(row["year"]))][
"global_distance"
].append(row["global_distance"])
averaged_intra_year_models[(row["tok"], int(row["year"]))][
"local_distance"
].append(row["local_distance"])
with open(
f"output/averaged_intra_models/average_{str(Path(file.stem).stem)}.tsv", "w"
) as outfile:
fieldnames = [
"average_global_distance",
"average_local_distance",
"var_global_distance",
"var_local_distance",
"tok",
"year",
]
writer = csv.DictWriter(outfile, fieldnames=fieldnames, delimiter="\t")
writer.writeheader()
for tok, year in tqdm.tqdm(
averaged_intra_year_models, desc=f"summary_intra_writer: {str(file.stem)}"
):
writer.writerow(
{
"average_global_distance": np.mean(
averaged_intra_year_models[(tok, year)]["global_distance"]
),
"var_global_distance": np.var(
averaged_intra_year_models[(tok, year)]["global_distance"]
),
"average_local_distance": np.mean(
averaged_intra_year_models[(tok, year)]["local_distance"]
),
"var_local_distance": np.var(
averaged_intra_year_models[(tok, year)]["local_distance"]
),
"tok": tok,
"year": year,
}
)
intra_year_models = pd.concat(intra_year_models)
intra_year_models.year = pd.Categorical(intra_year_models.year.tolist())
intra_year_models.head()
return_global_plot(intra_year_models, limits=(0, 0.1))
return_global_plot(intra_year_models, "privacy", limits=(0, 0.5))
return_global_plot(intra_year_models, "rna", limits=(0, 0.5))
# ## Inter Model Calculations
for idx, file in enumerate(Path("output/inter_models/on_years").rglob("*.tsv.xz")):
average_file_name = f"output/averaged_inter_models/average_{str(Path(file).stem)}"
if Path(average_file_name).exists():
continue
inter_year_model_df = pd.read_csv(
str(file), sep="\t", na_filter=False
) >> ply_tdy.extract(
"year_pair", into=["year1", "year2"], regex=r"(\d+)_\d-(\d+)_\d", convert=True
)
averaged_inter_year_models = dict()
for idx, row in tqdm.tqdm(
inter_year_model_df.iterrows(), desc=f"inter_df {str(Path(file).stem)}"
):
if (
row["tok"],
int(row["year1"]),
int(row["year2"]),
) not in averaged_inter_year_models:
averaged_inter_year_models[
(row["tok"], int(row["year1"]), int(row["year2"]))
] = dict(global_distance=[], local_distance=[])
averaged_inter_year_models[(row["tok"], int(row["year1"]), int(row["year2"]))][
"global_distance"
].append(row["global_distance"])
with open(average_file_name, "w") as outfile:
fieldnames = [
"average_global_distance",
"var_global_distance",
"tok",
"year1",
"year2",
]
writer = csv.DictWriter(outfile, fieldnames=fieldnames, delimiter="\t")
writer.writeheader()
for tok, year1, year2 in tqdm.tqdm(
averaged_inter_year_models, desc="summary_inter_writer"
):
writer.writerow(
{
"average_global_distance": np.mean(
averaged_inter_year_models[(tok, year1, year2)][
"global_distance"
]
),
"var_global_distance": np.var(
averaged_inter_year_models[(tok, year1, year2)][
"global_distance"
]
),
"tok": tok,
"year1": year1,
"year2": year2,
}
)
# # Custom Statistic that accounts for Inter and Intra Variation
# I needed to figure out a metric to take in inter-year (between years) and intra-year(within year variation).
# Turns out population genetics developed a statistic that accounts for genetic variation between populations and with in populations (termed $Q_{st}$).
# This metric is calculated via this equation: $$Q_{st}= \frac{Variation_{between}}{Variation_{between} + 2*Variation_{within}}$$
#
# Translating this equation into my field, a population corresponds to a group of word2vec models trained on abstracts for a given year.
# Each "year" has its own variation (intra) along with variation across years (inter), so the idea here is to try and capture this instability.
#
# Using the equation above as inspiration, I devise a custom equation below.
#
# First have to define the distance mapping function:
# Let distance be cosine distance: $$ distance(w_{x}, w_{y}) = cos\_dist(w_{x}, w_{y})$$ where $$ 0 \leq cos\_dist(w_{x}, w_{y}) \leq 2$$
#
# Values close to 2 signify completely opposite word contexts, while values close to 0 signify same word context.
#
# Every publication year has ten models. I took the average distance of every model combination for a given year to calculate the intra year variation for each given word.
# E.g. year 2000 has 10 choose 2 options so for every combination pair I calculated the distance above and then averaged over all years.
# For inter-year distances I took the Cartesian product of all models between years and then performed the same averaging approach as above.
# Now, assuming each distance has been averaged, we get the following equation:
#
# $$\hat{Distance} = \frac{Distance_{inter\_year(x,y)}}{Distance_{inter\_year(x,y)} + Distance_{intra\_year(x)} + Distance_{intra\_year(y)}}$$
#
# Where x and y are a particular year and $x \neq y$.
# If $x = y$ then this estimate would be 1.
#
# However, I can't use this metric for Bayesian changepoint detection, as it would be completely dominated by
# the frequency ratio metric.
# In other words, the above metric is bounded between 0 and 1, while the frequency ratio is bounded between 0 and infinity.
# Therefore, the change metric heavily depends on frequency to work. This is bad, as there are words that have undergone a semantic change but have yet to show the change in frequency needed to detect it (e.g. "increase").
#
# To account for this I'm using the following metric instead:
# $$\hat{Distance} = \frac{Distance_{inter\_year(x,y)}}{Distance_{intra\_year(x)} + Distance_{intra\_year(y)}}$$
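#
# A toy sketch of the two metrics above (hypothetical numbers, not computed from the data):
#
#     inter = 0.30                                       # averaged inter-year distance for a token
#     intra_x, intra_y = 0.05, 0.07                      # averaged intra-year distances for years x and y
#     qst_style = inter / (inter + intra_x + intra_y)    # ~0.71, bounded between 0 and 1
#     ratio_metric = inter / (intra_x + intra_y)         # 2.5, unbounded like the frequency ratio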
intra_year_averaged = pd.concat(
[
pd.read_csv(str(file), sep="\t", na_filter=False)
for file in Path("output/averaged_intra_models").rglob("*.tsv")
]
)
intra_year_averaged.head()
tok_intra_year = dict()
for idx, row in tqdm.tqdm(intra_year_averaged.iterrows()):
tok_intra_year[(row["tok"], row["year"])] = {
"global": row["average_global_distance"],
"local": row["average_local_distance"],
}
inter_model_files = list(Path("output/averaged_inter_models").rglob("*tsv"))
unique_years = set(
list(map(lambda x: int(re.search(r"(\d+)", x.stem).groups()[0]), inter_model_files))
)
len(unique_years)
for year in unique_years:
if Path(
f"output/combined_inter_intra_distances/saved_{year}-{year+1}_distance.tsv"
).exists():
print(f"{year}-{year+1} exists!")
continue
inter_year_models_averaged = pd.concat(
[
pd.read_csv(str(file), sep="\t", na_filter=False)
for file in filter(
lambda x: int(re.search(r"(\d+)", x.stem).group(0)) == year,
Path("output/averaged_inter_models").rglob(f"*{year}*.tsv"),
)
]
)
data = []
already_seen = set()
for idx, row in tqdm.tqdm(inter_year_models_averaged.iterrows()):
# Inter year variation
global_inter_top = row["average_global_distance"]
# local_inter_top = row["average_local_distance"]
if (row["tok"], int(row["year1"])) not in tok_intra_year or (
row["tok"],
int(row["year2"]),
) not in tok_intra_year:
continue
# global intra year variation
global_intra_bottom = (
tok_intra_year[(row["tok"], int(row["year1"]))]["global"]
+ tok_intra_year[(row["tok"], int(row["year2"]))]["global"]
)
global_distance_qst = global_inter_top / (
global_inter_top + global_intra_bottom
)
data.append(
{
"tok": row["tok"],
"original_global_distance": global_inter_top,
"global_distance_qst": global_distance_qst,
"ratio_metric": global_inter_top / global_intra_bottom,
"year_1": row["year1"],
"year_2": row["year2"],
}
)
(
pd.DataFrame.from_records(data)
>> ply.call(
".to_csv",
f"output/combined_inter_intra_distances/saved_{year}-{year+1}_distance.tsv",
sep="\t",
index=False,
)
)
| greenelab/biovectors | multi_model_experiment/04_novel_distance_calculations.py | 04_novel_distance_calculations.py | py | 14,275 | python | en | code | 3 | github-code | 6 | 33680361650 |
from django.urls import path, include
from rest_framework import routers
from .views import (
IndexView,
DetailView,
ResultsView,
vote,
QuestionViewSet,
ChoiceViewSet,
)
router = routers.DefaultRouter()
router.register(r"questions", QuestionViewSet)
router.register(r"choices", ChoiceViewSet)
app_name = "polls"
urlpatterns = [
path("", IndexView.as_view(), name="index"),
path("<int:pk>/", DetailView.as_view(), name="detail"),
path("<int:pk>/results", ResultsView.as_view(), name="results"),
path("<int:question_id>/vote", vote, name="vote"),
path("api/", include(router.urls)),
]
| orvisevans/django-vue-site | backend/polls/urls.py | urls.py | py | 630 | python | en | code | 0 | github-code | 6 | 74793669627 |
class HeaderRepository:
def __init__(self):
self._type_dict = {
"CONNECT": "0x1",
"CONNACK": "0x2",
"PUBLISH": "0x3",
"PUBREC": "0x4",
"PUBREL": "0x5",
"PUBCOMP": "0x6",
"SUBSCRIBE": "0x7",
"SUBACK": "0x8",
"UNSUBSCRIBE": "0x9",
"UNSUBACK": "0xA",
"PINGREQ": "0xB",
"PINGRESP": "0xC",
"DISCONNECT": "0xD",
"AUTH": "0xE"
}
self._flags_dict = {
"CONNECT": "0x0",
"CONNACK": "0x0",
"PUBLISH": "0x0",
"PUBREC": "0x0",
"PUBREL": "0x2",
"PUBCOMP": "0x0",
"SUBSCRIBE": "0x2",
"SUBACK": "0x0",
"UNSUBSCRIBE": "0x2",
"UNSUBACK": "0x0",
"PINGREQ": "0x0",
"PINGRESP": "0x0",
"DISCONNECT": "0x0",
"AUTH": "0x0"
}
self._reversed_type_dict = {
1: "CONNECT",
2: "CONNACK",
3: "PUBLISH",
4: "PUBREC",
5: "PUBREL",
6: "PUBCOMP",
7: "SUBSCRIBE",
8: "SUBACK",
9: "UNSUBSCRIBE",
10: "UNSUBACK",
11: "PINGREQ",
12: "PINGRESP",
13: "DISCONNECT",
14: "AUTH"
}
def get_flag(self, header_type: str) -> str:
return self._flags_dict[header_type]
def get_type(self, header_type: str) -> str:
return self._type_dict[header_type]
def get_type_from(self, integer: int) -> str:
return self._reversed_type_dict[integer]
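# A minimal usage sketch of the lookups above:
#
#   repo = HeaderRepository()
#   repo.get_type("PUBLISH")      # -> "0x3"
#   repo.get_flag("SUBSCRIBE")    # -> "0x2"
#   repo.get_type_from(12)        # -> "PINGRESP"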
| BigKahuna7385/mqttBroker | Utils/HeaderRepository.py | HeaderRepository.py | py | 1,684 | python | en | code | 0 | github-code | 6 | 26804363991 |
input = __import__("sys").stdin.readline
num_wiz, num_duels = [int(data) for data in input().split()]
graph = [[] for _ in range(num_wiz + 1)]
for _ in range(num_duels):
adj_vertex, vertex = [int(data) for data in input().split()]
graph[vertex].append(adj_vertex)
visited = set()
endpoint = [0] * (num_wiz + 1)
if len(graph[1]) == 0:
endpoint[1] = 1
queue = [1]
while queue:
vertex = queue.pop(0)
for adj_vertex in graph[vertex]:
edge = (vertex, adj_vertex)
if not edge in visited:
queue.append(adj_vertex)
visited.add(edge)
endpoint[adj_vertex] = 1
print("".join(str(data) for data in endpoint[1:]))
| Stevan-Zhuang/DMOJ | COCI/COCI '18 Contest 4 #2 Wand.py | COCI '18 Contest 4 #2 Wand.py | py | 678 | python | en | code | 1 | github-code | 6 | 1245326187 |
import pandas as pd
import pyranges as pr
import re
def extract_dna_id(filename):
pattern = "genomics\/3_vcf\/.*\/(.*)\/.*"
dna_id = re.search(pattern, filename).group(1)
return dna_id
df_anno = pd.read_csv(snakemake.input['sample_anno'], sep='\t')
_df_anno = df_anno[
~df_anno['DNA_VCF_FILE'].isna()
]
_df_anno['VCF_ID'] = _df_anno.apply(lambda x: extract_dna_id(x['DNA_VCF_FILE']), axis=1)
df_anno = df_anno.set_index('RNA_ID').join(_df_anno.set_index('RNA_ID')['VCF_ID']).reset_index()
df_anno.to_csv(snakemake.output['sample_anno_updated'], sep='\t')
|
gagneurlab/AbSplice_analysis
|
workflow/scripts/als/junction_annotation/correct_vcf_id_DROP_anno.py
|
correct_vcf_id_DROP_anno.py
|
py
| 574 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32645650527
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains functions related with Maya tag functionality for ueGear.
"""
from __future__ import print_function, division, absolute_import
import maya.cmds as cmds
import maya.api.OpenMaya as OpenMaya
from mgear.uegear import utils, log
logger = log.uegear_logger
TAG_ASSET_GUID_ATTR_NAME = "ueGearAssetGuid"
TAG_ASSET_TYPE_ATTR_NAME = "ueGearAssetType"
TAG_ASSET_NAME_ATTR_NAME = "ueGearAssetName"
TAG_ASSET_PATH_ATTR_NAME = "ueGearAssetPath"
TAG_ACTOR_NAME_ATTR_NAME = "ueGearActorName"
ALL_TAGS_ATTR_NAMES = [
TAG_ASSET_GUID_ATTR_NAME,
TAG_ASSET_TYPE_ATTR_NAME,
TAG_ASSET_NAME_ATTR_NAME,
TAG_ASSET_PATH_ATTR_NAME,
TAG_ACTOR_NAME_ATTR_NAME,
]
class TagTypes(object):
"""
Class that holds all available tag types.
"""
Skeleton = "skeleton"
StaticMesh = "staticmesh"
SkeletalMesh = "skeletalmesh"
Camera = "camera"
Alembic = "alembic"
MetahumanBody = "metahumanbody"
MetahumanFace = "metahumanface"
Sequence = "sequence"
def auto_tag(node=None, remove=False):
"""
    Automatically tags the given nodes (or the currently selected nodes) so the ueGear exporter can identify how to export them.
:param str or list(str) or None node: node/s to tag.
:param bool remove: if True tag will be removed.
"""
nodes = utils.force_list(node or cmds.ls(sl=True, long=True))
for node in nodes:
found_skin_clusters = utils.get_skin_clusters_for_node(node)
if found_skin_clusters and cmds.objectType(node) == "joint":
remove_tag(node) if remove else apply_tag(
node, attribute_value=TagTypes.SkeletalMesh
)
else:
shapes = cmds.listRelatives(node, fullPath=True, shapes=True)
if not shapes:
continue
first_shape = utils.get_first_in_list(shapes)
if not first_shape:
continue
object_type = cmds.objectType(first_shape)
if object_type == "mesh":
found_skin_clusters = utils.get_skin_clusters_for_node(
first_shape
)
if found_skin_clusters:
remove_tag(node) if remove else apply_tag(
node, attribute_value=TagTypes.Skeleton
)
else:
remove_tag(node) if remove else apply_tag(
node, attribute_value=TagTypes.StaticMesh
)
elif object_type == "camera":
remove_tag(node) if remove else apply_tag(
node, attribute_value=TagTypes.Camera
)
def apply_tag(
node=None, attribute_name=TAG_ASSET_TYPE_ATTR_NAME, attribute_value=""
):
"""
Creates a new tag attribute with given value into given node/s (or selected nodes).
:param str or list(str) or None node: nodes to apply tag to.
:param str attribute_name: tag attribute value to use. By default, TAG_ASSET_TYPE_ATTR_NAME will be used.
:param str attribute_value: value to set tag to.
"""
nodes = utils.force_list(node or cmds.ls(sl=True))
attribute_value = str(attribute_value)
for node in nodes:
if not cmds.attributeQuery(attribute_name, node=node, exists=True):
cmds.addAttr(node, longName=attribute_name, dataType="string")
cmds.setAttr(
"{}.{}".format(node, attribute_name),
attribute_value,
type="string",
)
if attribute_value:
logger.info(
'Tagged "{}.{}" as {}.'.format(
node, attribute_name, attribute_value
)
)
else:
logger.info(
'Tagged "{}.{}" as empty.'.format(node, attribute_name)
)
def remove_tag(node=None, attribute_name=TAG_ASSET_TYPE_ATTR_NAME):
"""
Removes tag attribute from the given node.
:param str or list(str) or None node: nodes to remove tag from.
:param str attribute_name: tag attribute value to remove. By default, TAG_ASSET_TYPE_ATTR_NAME will be used.
"""
nodes = utils.force_list(node or cmds.ls(sl=True))
for node in nodes:
if not cmds.attributeQuery(attribute_name, node=node, exists=True):
continue
cmds.deleteAttr("{}.{}".format(node, attribute_name))
logger.info(
'Removed attribute {} from "{}"'.format(attribute_name, node)
)
def remove_all_tags(node=None):
"""
Removes all ueGear tags from the given node.
:param str or list(str) or None node: nodes to remove tags from.
"""
nodes = utils.force_list(node or cmds.ls(sl=True))
for attribute_name in ALL_TAGS_ATTR_NAMES:
remove_tag(nodes, attribute_name=attribute_name)
def apply_alembic_tag(node=None, remove=False):
"""
Applies alembic tag to given node/s (or selected nodes).
:param str or list(str) or None node: node/s to tag.
:param bool remove: if True tag will be removed.
"""
remove_tag(node=node) if remove else apply_tag(
node=node, attribute_value=TagTypes.Alembic
)
def find_tagged_nodes(
tag_name=TAG_ASSET_TYPE_ATTR_NAME, nodes=None, tag_value=None
):
"""
    Returns a list with all nodes that are tagged with the given tag name and have a value set.
    :param str tag_name: name of the tag to search. By default, TAG_ASSET_TYPE_ATTR_NAME will be used.
:param str or list(str) or None nodes: list of nodes to find tags of, if not given all nodes in the scene will be
checked.
:param str tag_value: if given only tag with given value will be returned.
:return: list of found tagged nodes.
:rtype: list(str)
"""
found_tagged_nodes = list()
nodes = utils.force_list(nodes or cmds.ls())
for node in nodes:
if not cmds.attributeQuery(tag_name, node=node, exists=True):
continue
found_tag_value = cmds.getAttr("{}.{}".format(node, tag_name))
if not found_tag_value or (
tag_value is not None and found_tag_value != tag_value
):
continue
found_tagged_nodes.append(node)
return found_tagged_nodes
def find_tagged_selected_nodes(tag_name=TAG_ASSET_TYPE_ATTR_NAME):
    """
    Returns a list with all selected nodes that are tagged with the given tag name and have a value set.
    :param str tag_name: name of the tag to search. By default, TAG_ASSET_TYPE_ATTR_NAME will be used.
    :return: list of found tagged nodes.
    :rtype: list(str)
    """
    return find_tagged_nodes(tag_name=tag_name, nodes=cmds.ls(sl=True))
def find_tagged_node_attributes(tag_name=TAG_ASSET_TYPE_ATTR_NAME, nodes=None):
"""
    Returns a list with all node attributes that are tagged with the given tag name and have a value set.
    :param str tag_name: name of the tag to search. By default, TAG_ASSET_TYPE_ATTR_NAME will be used.
:param str or list(str) or None nodes: list of nodes to find tags of, if not given all nodes in the scene will be
checked.
:return: list of found tagged nodes.
:rtype: list(str)
"""
found_tagged_node_attributes = list()
nodes = utils.force_list(nodes or cmds.ls(long=True))
for node in nodes:
if not cmds.attributeQuery(tag_name, node=node, exists=True):
continue
if not cmds.getAttr("{}.{}".format(node, tag_name)):
continue
found_tagged_node_attributes.append("{}.{}".format(node, tag_name))
return found_tagged_node_attributes
def find_tagged_selected_node_attributes(tag_name=TAG_ASSET_TYPE_ATTR_NAME):
    """
    Returns a list with all selected node attributes that are tagged with the given tag name and have a value set.
    :param str tag_name: name of the tag to search. By default, TAG_ASSET_TYPE_ATTR_NAME will be used.
    :return: list of found tagged nodes.
    :rtype: list(str)
    """
    return find_tagged_node_attributes(tag_name=tag_name, nodes=cmds.ls(sl=True))
def tag_values(tag_name=TAG_ASSET_TYPE_ATTR_NAME, nodes=None):
"""
Returns a list with all node attribute values that are tagged with the given tag name.
    :param str tag_name: name of the tag to search the value of.
:param str or list(str) nodes: list of nodes to find tags of, if not given all nodes in the scene will be checked.
:return: list of tagged node values.
:rtype: list(object)
"""
found_tag_values = list()
nodes = utils.force_list(nodes or cmds.ls(long=True))
for node in nodes:
if not cmds.attributeQuery(tag_name, node=node, exists=True):
found_tag_values.append(None)
continue
found_tag_values.append(cmds.getAttr("{}.{}".format(node, tag_name)))
return found_tag_values
def tag_match(dag_path, tag_value, tag):
"""
Validates if the object specified by its dag path, has the same tag and value
assigned to it.
:param OpenMaya.DagPath dag_path: The object you want to validate has the
following tag and data assigned.
:param str tag_value: value assigned to the tag.
:param str tag: tag to correlate with.
:return: True if the object has matching tag and the values are the same.
:rtype: bool
"""
dag_node = OpenMaya.MFnDagNode(dag_path)
attr = dag_node.attribute(tag)
plug = dag_node.findPlug(attr, False)
plug_value = plug.asString()
return plug_value == tag_value
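# Usage sketch (illustrative; "pCube1" is a made-up node name and a Maya scene is assumed):
#   apply_tag("pCube1", attribute_value=TagTypes.StaticMesh)
#   find_tagged_nodes(tag_value=TagTypes.StaticMesh)   # -> ["pCube1", ...]
#   remove_all_tags("pCube1")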
|
mgear-dev/mgear4
|
release/scripts/mgear/uegear/tag.py
|
tag.py
|
py
| 9,387 |
python
|
en
|
code
| 209 |
github-code
|
6
|
8267999016
|
from __future__ import annotations
from unittest import mock
from kombu.utils.objects import cached_property
class test_cached_property:
def test_deleting(self):
class X:
xx = False
@cached_property
def foo(self):
return 42
@foo.deleter
def foo(self, value):
self.xx = value
x = X()
del x.foo
assert not x.xx
x.__dict__['foo'] = 'here'
del x.foo
assert x.xx == 'here'
def test_when_access_from_class(self):
class X:
xx = None
@cached_property
def foo(self):
return 42
@foo.setter
def foo(self, value):
self.xx = 10
desc = X.__dict__['foo']
assert X.foo is desc
assert desc.__get__(None) is desc
assert desc.__set__(None, 1) is desc
assert desc.__delete__(None) is desc
assert desc.setter(1)
x = X()
x.foo = 30
assert x.xx == 10
del x.foo
def test_locks_on_access(self):
class X:
@cached_property
def foo(self):
return 42
x = X()
# Getting the value acquires the lock, and may do so recursively
# on Python < 3.12 because the superclass acquires it.
with mock.patch.object(X.foo, 'lock') as mock_lock:
assert x.foo == 42
mock_lock.__enter__.assert_called()
mock_lock.__exit__.assert_called()
# Setting a value also acquires the lock.
with mock.patch.object(X.foo, 'lock') as mock_lock:
x.foo = 314
assert x.foo == 314
mock_lock.__enter__.assert_called_once()
mock_lock.__exit__.assert_called_once()
# .. as does clearing the cached value to recompute it.
with mock.patch.object(X.foo, 'lock') as mock_lock:
del x.foo
assert x.foo == 42
mock_lock.__enter__.assert_called_once()
mock_lock.__exit__.assert_called_once()
|
celery/kombu
|
t/unit/utils/test_objects.py
|
test_objects.py
|
py
| 2,091 |
python
|
en
|
code
| 2,643 |
github-code
|
6
|
1584185600
|
import cv2
def draw_boxes(im, boxes, class_names=None, scores=None, colors=None):
scores = [None] * len(boxes) if scores is None else scores
colors = [None] * len(boxes) if colors is None else colors
class_names = [None] * len(boxes) if class_names is None else class_names
for params in zip(boxes, class_names, scores, colors):
_draw_box(im, *params)
return im
def _draw_box(im, box, class_name=None, score=None, color=None):
x1, y1, x2, y2 = box
color = color if color is not None else (0, 255, 0)
msg = class_name.capitalize() if class_name else None
if msg is not None and score is not None:
msg += f' [{int(score * 100)}]'
cv2.rectangle(im, (x1, y1), (x2, y2), color=color, thickness=2)
if msg is not None:
cv2.rectangle(im, (x1 - 1, y1 - 20), (x2 + 1, y1), color, -1)
        cv2.putText(im, msg, (x1 + 10, y1 - 8), cv2.FONT_HERSHEY_SIMPLEX,
                    .5, (0, 0, 0), 2, cv2.LINE_AA)
return im
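# Usage sketch (illustrative; the file name, box and score below are made-up values):
#   im = cv2.imread("frame.jpg")
#   im = draw_boxes(im,
#                   boxes=[(30, 40, 180, 220)],            # (x1, y1, x2, y2) in pixels
#                   class_names=["person"], scores=[0.92],
#                   colors=[(0, 255, 0)])
#   cv2.imwrite("frame_out.jpg", im)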
|
Guillem96/ssd-pytorch
|
ssd/viz.py
|
viz.py
|
py
| 991 |
python
|
en
|
code
| 0 |
github-code
|
6
|
46058474656
|
# -*- coding: utf-8 -*-
from django.db import models
from django.utils.translation import gettext_lazy as _
from django.utils import timezone
from django.db import IntegrityError, transaction
from .managers import TopicNotificationQuerySet
from spirit.core.conf import settings
class TopicNotification(models.Model):
UNDEFINED, MENTION, COMMENT = range(3)
ACTION_CHOICES = (
(UNDEFINED, _("Undefined")),
(MENTION, _("Mention")),
(COMMENT, _("Comment")))
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
related_name='st_topic_notifications',
on_delete=models.CASCADE)
topic = models.ForeignKey(
'spirit_topic.Topic',
on_delete=models.CASCADE)
comment = models.ForeignKey(
'spirit_comment.Comment',
on_delete=models.CASCADE)
date = models.DateTimeField(default=timezone.now)
action = models.IntegerField(choices=ACTION_CHOICES, default=UNDEFINED)
is_read = models.BooleanField(default=False)
is_active = models.BooleanField(default=False)
objects = TopicNotificationQuerySet.as_manager()
class Meta:
unique_together = ('user', 'topic')
ordering = ['-date', '-pk']
verbose_name = _("topic notification")
verbose_name_plural = _("topics notification")
def get_absolute_url(self):
if self.topic_id != self.comment.topic_id:
# Out of sync
return self.topic.get_absolute_url()
return self.comment.get_absolute_url()
@property
def text_action(self):
return self.ACTION_CHOICES[self.action][1]
@property
def is_mention(self):
return self.action == self.MENTION
@property
def is_comment(self):
return self.action == self.COMMENT
@classmethod
def mark_as_read(cls, user, topic):
if not user.is_authenticated:
return
(cls.objects
.filter(user=user, topic=topic)
.update(is_read=True))
@classmethod
def create_maybe(cls, user, comment, is_read=True, action=COMMENT):
# Create a dummy notification
return cls.objects.get_or_create(
user=user,
topic=comment.topic,
defaults={
'comment': comment,
'action': action,
'is_read': is_read,
'is_active': True})
@classmethod
def notify_new_comment(cls, comment):
(cls.objects
.filter(topic=comment.topic, is_active=True, is_read=True)
.exclude(user=comment.user)
.update(
comment=comment,
is_read=False,
action=cls.COMMENT,
date=timezone.now()))
@classmethod
def notify_new_mentions(cls, comment, mentions):
if not mentions:
return
# TODO: refactor
for user in mentions.values():
try:
with transaction.atomic():
cls.objects.create(
user=user,
topic=comment.topic,
comment=comment,
action=cls.MENTION,
is_active=True)
except IntegrityError:
pass
(cls.objects
.filter(
user__in=tuple(mentions.values()),
topic=comment.topic,
is_read=True)
.update(
comment=comment,
is_read=False,
action=cls.MENTION,
date=timezone.now()))
@classmethod
def bulk_create(cls, users, comment):
return cls.objects.bulk_create([
cls(user=user,
topic=comment.topic,
comment=comment,
action=cls.COMMENT,
is_active=True)
for user in users])
# XXX add tests
# XXX fix with migration (see issue #237)
@classmethod
def sync(cls, comment, topic):
# Notifications can go out of sync
# when the comment is no longer
# within the topic (i.e moved).
# User is subscribed to the topic,
# not the comment, so we either update
# it to a newer comment or set it as undefined
if comment.topic_id == topic.pk:
return
next_comment = (
topic.comment_set
.filter(date__gt=comment.date)
.order_by('date')
.first())
if next_comment is None:
(cls.objects
.filter(comment=comment, topic=topic)
.update(is_read=True, action=cls.UNDEFINED))
return
(cls.objects
.filter(comment=comment, topic=topic)
.update(comment=next_comment, action=cls.COMMENT))
|
nitely/Spirit
|
spirit/topic/notification/models.py
|
models.py
|
py
| 4,758 |
python
|
en
|
code
| 1,153 |
github-code
|
6
|
5558606800
|
import os
from dotenv import load_dotenv
from configparser import ConfigParser
conf = ConfigParser()
conf.read('model.conf')
load_dotenv('.env')
def _getenv(key, default): return type(default)(os.getenv(key)) if os.getenv(key) else default
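# e.g. with MANAGER_PORT=8000 set in the environment, _getenv('MANAGER_PORT', 5005) returns the
# int 8000 (the default's type is used to cast the env string); if the variable is unset or empty,
# the default 5005 is returned unchanged.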
SERVER_IP = _getenv('SERVER_IP', '0.0.0.0') # Service IP
SERVER_PORT = _getenv('SERVER_PORT', '6002') # Service port
REGISTER = _getenv('REGISTER', 0) # register to the management service
MANAGER_IP = _getenv('MANAGER_IP', '127.0.0.1') # Management server address
MANAGER_PORT = _getenv('MANAGER_PORT', 5005) # Management server port
MANAGER_INTERFACE_REGISTER = _getenv('MANAGER_INTERFACE_REGISTER', '/model/register')
MANAGER_INTERFACE_CANCEL = _getenv('MANAGER_INTERFACE_CANCEL', '/model/cancel')
MODEL_TYPE = _getenv('MODEL_TYPE', conf.get('model', 'model_type', fallback='')) # Service type
MODEL_VERSION = _getenv('MODEL_VERSION', 1) # Service version number
ENGINE_FILE_PATH = _getenv('ENGINE_FILE_PATH', conf.get('model', 'engine_file_path', fallback=''))
CLASS_NUM = _getenv('CLASS_NUM', int(conf.get('model', 'class_num', fallback='0')))
CLASS_NAMES = [name.strip() for name in _getenv('CLASS_NAMES', conf.get('model', 'class_names')).split(',')]
KEY = _getenv('KEY', 'LONGYUAN')
|
rahmanmahbub073/PythonBased_FastAPI_mL_dL_Repo
|
UnwantedImageDetection_server/config.py
|
config.py
|
py
| 1,241 |
python
|
en
|
code
| 1 |
github-code
|
6
|
23338785771
|
import tensorflow as tf
import pandas as pd
from sklearn.metrics import multilabel_confusion_matrix, confusion_matrix, precision_score, recall_score, f1_score
def calculate_output(model, actual_classes, session, feed_dict):
actuals = tf.argmax(actual_classes, 1)
predictions = tf.argmax(model, 1)
actuals = session.run(actuals, feed_dict)
predictions = session.run(predictions, feed_dict)
return actuals, predictions
def tf_confusion_metrics(model, actual_classes, session, feed_dict):
import numpy as np
cat = 5
actuals, predictions = calculate_output(model, actual_classes, session, feed_dict)
lbls = [*range(cat)]
mcm = multilabel_confusion_matrix(actuals, predictions, labels=lbls)
tp = mcm[:, 1, 1]
tn = mcm[:, 0, 0]
fn = mcm[:, 1, 0]
fp = mcm[:, 0, 1]
cm = confusion_matrix(actuals, predictions, labels=lbls, sample_weight=None)
tp = np.mean(tp)
tn = np.mean(tn)
fp = np.mean(fp)
fn = np.mean(fn)
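    # tp/tn/fp/fn are one-vs-rest counts from multilabel_confusion_matrix, averaged over the
    # `cat` classes; the rates below are then computed from these class-averaged counts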
try:
tpr = float(tp)/(float(tp) + float(fn))
accuracy = (float(tp) + float(tn))/(float(tp) + float(fp) + float(fn) + float(tn))
recall = tpr
if((fp+tp)!=0):
precision = float(tp)/(float(tp) + float(fp))
f1_score = (2 * (precision * recall)) / (precision + recall)
else:
precision=0
f1_score=0
fp_rate=float(fp)/(float(fp)+float(tn))
fn_rate=float(fn)/(float(fn)+float(tp))
# return precision, recall, f1_score, accuracy, fp_rate, fn_rate
PR = str(round(precision * 100, 2))
RC = str(round(recall * 100, 2))
F1 = str(round(f1_score * 100, 2))
ACC = str(round(accuracy * 100, 2))
FPR = str(round(fp_rate * 100, 2))
FNR = str(round(fn_rate * 100, 2))
data_pd=[['PR',PR],['RC', RC],['F1', F1],['ACC', ACC],['FPR', FPR], ['FNR', FNR],['tp', tp],['tn', tn],['fp', fp], ['fn', fn]]
df = pd.DataFrame(data_pd, columns=['Measure', 'Percentage'])
except Exception as e:
print(e)
data_pd = [['PR', 'Err'], ['RC', 'Err'], ['F1', 'Err'], ['ACC', 'Err'], ['FPR', 'Err'], ['FNR', 'Err']]
df = pd.DataFrame(data_pd, columns=['Measure', 'Percentage'])
return df
def tf_confusion_metrics_2(model, actual_classes, session, feed_dict):
actuals, predictions = calculate_output(model, actual_classes, session, feed_dict)
cm = tf.confusion_matrix(actuals, predictions)
print("Confusion Matrix")
return session.run(cm, feed_dict)
def Macro_calculate_measures_tf(y_true, y_pred, session, feed_dict):
y_true, y_pred = calculate_output(y_pred, y_true, session, feed_dict)
    pr = precision_score(y_true, y_pred, average='macro')
    rc = recall_score(y_true, y_pred, average='macro')
    f1 = f1_score(y_true, y_pred, average='macro')
    print("pr, rc, f1:", pr, rc, f1)
return pr, rc, f1
|
Sam-Mah/PLDNN
|
tensorflow_confusion_metrics.py
|
tensorflow_confusion_metrics.py
|
py
| 2,817 |
python
|
en
|
code
| 3 |
github-code
|
6
|
38514794793
|
import gc
import os
from pathlib import Path
from typing import Any, Dict, cast
import mlflow
import numpy as np
import onnx
import torch
import transformers
from pytorch_lightning import Trainer, seed_everything
from pytorch_lightning.callbacks import ModelCheckpoint
from transformers.modeling_utils import PreTrainedModel
from transformers.onnx import FeaturesManager, export, validate_model_outputs
from crypto_sentiment_demo_app.models.train.base import IModelTrain, TrainRegistry
from .dataset import build_dataloaders, split_train_val
from .pipeline import MetricTracker, SentimentPipeline
transformers.logging.set_verbosity_error()
os.environ["TOKENIZERS_PARALLELISM"] = "false"
@TrainRegistry.register("bert")
class Bert(IModelTrain):
"""Bert model. Wrapper for hugging face models.
:param cfg: model config
"""
def __init__(self, cfg: Dict[str, Any]):
"""Init model."""
super().__init__(cfg)
self.model_cfg = self.cfg["model"]
self.class_names = self.cfg["data"]["class_names"]
if self.model_cfg["device"] == "gpu" and not torch.cuda.is_available():
self.device = torch.device("cpu")
else:
self.device = torch.device(self.model_cfg["device"])
def fit(self, X: np.ndarray, y: np.ndarray) -> None:
"""Fit model.
:param X: train data
:param y: train labels
"""
seed_everything(self.model_cfg["seed"])
train_data, val_data, train_labels, val_labels = split_train_val(X, y)
train_dataloader, val_dataloader = build_dataloaders(
self.model_cfg, train_data, train_labels, val_data, val_labels
)
self.model = SentimentPipeline(self.model_cfg)
metric_tracker = MetricTracker()
checkpoint_path = Path(self.model_cfg["checkpoint_path"]).parent
checkpoint_filename = Path(self.model_cfg["checkpoint_path"]).stem
checkpoint_callback = ModelCheckpoint(
save_top_k=1,
monitor="val_acc",
mode="max",
dirpath=checkpoint_path,
filename=checkpoint_filename,
)
gpus = 1 if self.device.type == "cuda" and torch.cuda.is_available() else 0
self.trainer = Trainer(
max_epochs=self.model_cfg["epochs"],
gpus=gpus,
callbacks=[metric_tracker, checkpoint_callback],
num_sanity_val_steps=0,
enable_checkpointing=True,
logger=False,
)
self.trainer.fit(
self.model,
train_dataloaders=train_dataloader,
val_dataloaders=val_dataloader,
)
def save(self) -> None:
"""Save model."""
save_dir = Path(self.model_cfg["path_to_model"]).parent
filename = Path(self.model_cfg["path_to_model"]).stem
pt_path = save_dir / f"{filename}.pt"
onnx_path = save_dir / f"{filename}.onnx"
self._onnx_export(onnx_path)
onnx_model = onnx.load_model(onnx_path)
mlflow.onnx.log_model(onnx_model=onnx_model, artifact_path="bert", registered_model_name="bert")
del onnx_model
gc.collect()
self.model = SentimentPipeline.load_from_checkpoint(self.model_cfg["checkpoint_path"], cfg=self.model_cfg)
cast(PreTrainedModel, self.model.model).eval()
cast(PreTrainedModel, self.model.tokenizer).save_pretrained(pt_path)
cast(PreTrainedModel, self.model.model).save_pretrained(pt_path)
def load(self) -> None:
"""Load model checkpoint."""
self.model = SentimentPipeline.load_from_checkpoint(self.model_cfg["checkpoint_path"], cfg=self.model_cfg)
def _onnx_export(self, path: Path):
model_kind, model_onnx_config = FeaturesManager.check_supported_model_or_raise(
self.model.model, feature="sequence-classification"
)
onnx_config = model_onnx_config(self.model.model.config)
onnx_inputs, onnx_outputs = export(
self.model.tokenizer, self.model.model, onnx_config, onnx_config.default_onnx_opset, path
)
validate_model_outputs(
onnx_config, self.model.tokenizer, self.model.model, path, onnx_outputs, onnx_config.atol_for_validation
)
def enable_mlflow_logging(self) -> None:
mlflow.set_experiment("bert")
mlflow.pytorch.autolog()
|
crypto-sentiment/crypto_sentiment_demo_app
|
crypto_sentiment_demo_app/models/train/bert/model.py
|
model.py
|
py
| 4,376 |
python
|
en
|
code
| 25 |
github-code
|
6
|
28178733296
|
#!/usr/bin/env python3
user_input = str(input("Please enter a phrase (only characters A-Z): "))
phrase = user_input.split()
result = " "
for i in phrase:
result += str(i[0]).upper()
print (result)
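# Example run (illustrative): entering "portable network graphics" prints "PNG".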
|
R4qun3/Beginner-projects
|
Acronym.py
|
Acronym.py
|
py
| 211 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26185607454
|
"""Rotate Image
You are given an n x n 2D matrix representing an image, rotate the image by 90 degrees (clockwise).
You have to rotate the image in-place, which means you have to modify the input 2D matrix directly. DO NOT allocate another 2D matrix and do the rotation.
Input: matrix = [[1,2,3],[4,5,6],[7,8,9]]
Output: [[7,4,1],[8,5,2],[9,6,3]]
Input: matrix = [[5,1,9,11],[2,4,8,10],[13,3,6,7],[15,14,12,16]]
Output: [[15,13,2,5],[14,3,4,1],[12,6,8,9],[16,7,10,11]]
"""
from typing import List
import unittest
def rotate(matrix: List[List[int]]) -> None:
"""
Do not return anything, modify matrix in-place instead.
"""
n = len(matrix)
for row in range((n+1)//2):
for col in range(n//2):
temp = matrix[col][n-1-row]
matrix[col][n-1-row] = matrix[row][col]
matrix[row][col] = matrix[n-1-col][row]
matrix[n-1-col][row] = matrix[n-1-row][n-1-col]
matrix[n-1-row][n-1-col] = temp
class TestProblems(unittest.TestCase):
    def test_rotate_image(self):
        # rotate() works in place and returns None, so rotate first and then compare the matrix
        matrix = [[1,2,3],[4,5,6],[7,8,9]]
        rotate(matrix)
        expected = [[7,4,1],[8,5,2],[9,6,3]]
        self.assertCountEqual(matrix, expected)
        matrix_1 = [[5,1,9,11],[2,4,8,10],[13,3,6,7],[15,14,12,16]]
        rotate(matrix_1)
        expected_1 = [[15,13,2,5],[14,3,4,1],[12,6,8,9],[16,7,10,11]]
        self.assertCountEqual(matrix_1, expected_1)
if __name__ == '__main__':
unittest.main()
|
01o91939/leetcode
|
rotateImage.py
|
rotateImage.py
|
py
| 1,441 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27716822705
|
import frappe
from frappe.model.document import Document
class Sales(Document):
def before_save(self):
total_amount = 0
for item in self.item:
item.amount = item.product_price * item.quantity
total_amount += item.amount
product = frappe.get_doc('Product', item.product_name)
# Decrease the product quantity from Product DocType
product.quantity -= item.quantity
product.save()
self.total_due = total_amount - self.receive_amount
self.total_amount = total_amount - self.discount
customer = frappe.get_doc('Customer', self.customer)
# Get the previous values and save it to Customer DocType
previous_receive = customer.total_receive
previous_dues = customer.total_due
previous_amount = customer.total_amount
if customer.total_receive == 0 or customer.total_due == 0:
customer.total_receive = self.receive_amount
customer.total_due = self.total_due
            customer.total_amount = self.total_amount
else:
customer.total_receive = previous_receive + self.receive_amount
customer.total_due = previous_dues + self.total_due
customer.total_amount = previous_amount + self.total_amount
customer.save()
|
mehedi432/pos
|
pos/pos/doctype/sales/sales.py
|
sales.py
|
py
| 1,184 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5361671905
|
import bottle
import json
import random
from . import DatabaseManager
from .product import Product
import recommender.vector.arithmetic
import recommender.rocchio.algorithm
@bottle.route('/product/get/<doc_id:int>')
def product_get(doc_id):
d = product_manager.get_product(doc_id).as_dictionary()
result = {'result': d}
return result
@bottle.route('/product/remove/<doc_id:int>', method='DELETE')
def product_remove(doc_id):
try:
product_manager.remove_document(doc_id)
except:
return {'result': False}
return {'result': True}
@bottle.route('/product/all')
def product_get_all():
l = [ p.as_dictionary() for p in product_manager.get_all_products()]
result = {'result': l}
#bottle.response.content_type = 'application/json'
return result
@bottle.route('/product/random/<count:int>')
def product_random(count):
products = product_manager.get_all_products()
rands = []
while len(rands) < count:
index = random_generator.randint(0, len(products)-1)
rands.append(products[index].as_dictionary())
products.remove(products[index])
    result = {'result': rands}
return result
@bottle.route('/product/insert', method='POST')
def product_insert():
"""
curl -X POST -d "product={'image_name':'img.jpg','terms':{'a':1,'b':3}}"
"""
try:
product_json = bottle.request.forms.get('product')
product_dict = json.loads(product_json)
p = Product()
p.image_name = product_dict['image_name']
p.terms = product_dict['terms']
product_manager.add_document(p)
except:
return {'result': False}
return {'result': True}
@bottle.route('/vector/default/<doc_id:int>')
def vector_default(doc_id):
d = (product_vector_manager
.get_vector_for_document_id(doc_id)
.as_dictionary()
)
result = {'result': d}
return result
@bottle.route('/vector/df')
def vector_df():
d = (
product_vector_manager
.get_document_frequency_vector()
.as_dictionary()
)
result = {'result': d}
return result
@bottle.route('/vector/idf')
def vector_idf():
d = (
product_vector_manager
.get_inverse_document_frequency_vector()
.as_dictionary()
)
result = {'result': d}
return result
@bottle.route('/vector/tf/<doc_id:int>')
def vector_tf(doc_id):
d = (
product_vector_manager
.get_term_frequency_vector(doc_id)
.as_dictionary()
)
result = {'result': d}
return result
@bottle.route('/vector/tfidf/<doc_id:int>')
def vector_tfidf(doc_id):
d = (
product_vector_manager
.get_tfidf_vector(doc_id)
.as_dictionary()
)
result = {'result': d}
return result
@bottle.route('/vector/user/<user_id:int>')
def vector_user_by_id(user_id):
d = (
user_vector_manager
.get_user_vector_for_id(user_id)
.as_dictionary()
)
result = {'result': d}
return result
@bottle.route('/vector/user/<user_name>')
def vector_user_by_name(user_name):
d = (
user_vector_manager
.get_user_vector_for_name(user_name)
.as_dictionary()
)
result = {'result': d}
return result
@bottle.route('/user/all')
def get_all_users():
user_list = user_vector_manager.get_all_users_by_name()
result = {'result': user_list}
return result
@bottle.route('/user/create/<user_name>')
def create_user_by_name(user_name):
user_vector_manager.create_user(user_name)
return {'result': True}
@bottle.route('/user/exists/<user_name>')
def exists_user_by_name(user_name):
d = {}
d['exists'] = user_vector_manager.has_user_with_name(user_name)
result = {'result': d}
return result
@bottle.route('/user/remove/<user_name>', method='DELETE')
def remove_user_by_name(user_name):
try:
user_id = user_vector_manager.get_user_id_for_name(user_name)
user_vector_manager.remove_user(user_id)
except:
return {'result': False}
return {'result': True}
@bottle.route('/user/createifnotexist/<user_name>')
def create_user_if_not_exists(user_name):
if not user_vector_manager.has_user_with_name(user_name):
create_user_by_name(user_name)
return {'result': True}
@bottle.route('/user/setpreference/<user_name>/<product_id:int>')
def add_preference_to_user(user_name, product_id):
user_id = user_vector_manager.get_user_id_for_name(user_name)
user_vector_manager.set_user_preference(user_id, product_id, True)
return {'result': True}
@bottle.route('/user/setnopreference/<user_name>/<product_id:int>')
def add_no_preference_to_user(user_name, product_id):
user_id = user_vector_manager.get_user_id_for_name(user_name)
user_vector_manager.set_user_preference(user_id, product_id, False)
return {'result': True}
@bottle.route('/user/update/<user_name>')
def get_user_update(user_name):
user_id = user_vector_manager.get_user_id_for_name(user_name)
weights = recommender.rocchio.default_weights()
update_user(user_id, weights)
return {'result': True}
@bottle.route('/user/update/<user_name>/<alpha:int>/<beta:int>/<gamma:int>')
def get_user_update_weighted(user_name, alpha, beta, gamma):
user_id = user_vector_manager.get_user_id_for_name(user_name)
if alpha < 0:
alpha = 0
elif alpha > 100:
        alpha = 100
if beta < 0:
beta = 0
elif beta > 100:
beta = 100
if gamma < 0:
gamma = 0
elif gamma > 100:
gamma = 100
weights = alpha / 100, beta / 100, gamma / 100
update_user(user_id, weights)
return {'result': True}
@bottle.route('/user/relevant/<user_name>')
def get_user_preference(user_name):
user_id = user_vector_manager.get_user_id_for_name(user_name)
relevant_vectors = user_vector_manager.get_relevant_document_vector_list(user_id)
relevant_products = [
product_manager.get_product(v.document_id).as_dictionary()
for v in relevant_vectors
]
result = {'result': relevant_products}
return result
@bottle.route('/user/nonrelevant/<user_name>')
def get_user_no_preference(user_name):
user_id = user_vector_manager.get_user_id_for_name(user_name)
non_relevant_vectors = user_vector_manager.get_non_relevant_document_vector_list(user_id)
non_relevant_products = [
product_manager.get_product(v.document_id).as_dictionary()
for v in non_relevant_vectors
]
result = {'result': non_relevant_products}
return result
@bottle.route('/recommendations/<user_name>/<k:int>')
def get_recommendation(user_name, k):
vector = user_vector_manager.get_user_vector_for_name(user_name)
others = product_vector_manager.get_all_vectors()
#distance_function = recommender.vector.arithmetic.hamming_distance
#distance_function = recommender.vector.arithmetic.euclidean_distance
recommendations = vector_arithmetic.k_nearest_neighbours(k, vector, others)
products = [
product_manager.get_product(vector.document_id).as_dictionary()
for _, vector in recommendations
]
result = {'result': products}
return result
database_manager = None
product_manager = None
product_vector_manager = None
document_manager = None
user_vector_manager = None
term_manager = None
random_generator = None
vector_arithmetic = recommender.vector.arithmetic
def run(database_path, host, port):
_init(database_path)
bottle.run(host=host, port=port, debug=True)
def _init(database_path):
global database_manager
global product_manager
global product_vector_manager
global document_manager
global user_vector_manager
global term_manager
global random_generator
database_manager = DatabaseManager(database_path)
product_manager = database_manager.get_product_manager()
product_vector_manager = database_manager.get_product_vector_manager()
document_manager = database_manager.get_document_manager()
user_vector_manager = database_manager.get_user_vector_manager()
term_manager = database_manager.get_term_manager()
random_generator = random.Random()
def update_user(user_id, weights):
user_vector = user_vector_manager.get_user_vector_for_id(user_id)
relevant = user_vector_manager.get_relevant_document_vector_list(user_id)
non_relevant = user_vector_manager.get_non_relevant_document_vector_list(user_id)
uvector = recommender.rocchio.algorithm.calculate(user_vector, relevant, non_relevant, weights)
    user_vector_manager.update_user_vector(user_id, uvector)
|
dustywind/bachelor-thesis
|
impl/recommender/webapi.py
|
webapi.py
|
py
| 8,641 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22558981666
|
import math
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
# Initial data
k1 = 1
m = 1
h = 0.05
x0 = 10
vx0 = 0
w1 = math.sqrt(k1/m)
A1 = math.sqrt((vx0*vx0)/(w1*w1) + (x0*x0))
iloscPunktow = 1000
# XY axis limits
setXl = 0
setXr = 55
setYl = 49.95
setYr = 50.04
if(vx0 <= 0):
fi1 = math.acos(x0/A1) * 180/math.pi
else:
fi1 = -math.acos(x0/A1) * 180/math.pi
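# With these initial conditions the analytic solution used below is
# x(t) = A1*cos(w1*t + fi1) and vx(t) = -A1*w1*sin(w1*t + fi1)
# (fi1 is stored in degrees and converted back to radians where it is used).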
# Print the initial data
print("\nk1 = {0}\nm = {1}\nh = {2}\nx0 = {3}\nvx0 = {4}\nw1 = {5}\nA1 = {6}\nfi1 = {7}"
.format(k1, m, h, x0, vx0, w1, A1, fi1))
print("\nIlosć punktów = {0}".format(iloscPunktow))
# Time
time = []
for i in range(0, iloscPunktow+1):
time.append(round((i*h), 2))
#print(time[i])
# exact solution: lists and initial values
dokladneX = []
dokladneVX = []
dokladneX.append(x0)
dokladneVX.append(vx0)
dokladneE = []
# Euler method: lists and initial values
eulerX = []
eulerVX = []
eulerX.append(x0)
eulerVX.append(vx0)
eulerE = []
# midpoint method: lists and initial values
posredniaX = []
posredniaVX = []
posredniaX.append(x0)
posredniaVX.append(vx0)
posredniaE = []
# Verlet method: lists and initial values
verletX = []
verletVX = []
verletX.append(x0)
verletVX.append(vx0)
verletE = []
# Beeman method: lists and initial values
beemanX = []
beemanVX = []
beemanX.append(x0)
beemanVX.append(vx0)
beemanE = []
# fill the lists
for i in range(1, iloscPunktow+1):
    # exact solution
dokX = A1 * math.cos(w1 * time[i] + fi1 * math.pi / 180)
dokVX = -A1 * w1 * math.sin(w1 * time[i] + fi1 * math.pi/180)
dokladneX.append(dokX)
dokladneVX.append(dokVX)
    # Euler
eulX = eulerX[i - 1] + eulerVX[i - 1] * h
eulVX = eulerVX[i - 1] - (w1 * w1) * eulerX[i - 1] * h
eulerX.append(eulX)
eulerVX.append(eulVX)
    # midpoint
posX = posredniaX[i-1] + posredniaVX[i-1] * h - 0.5 * (w1 * w1) * posredniaX[i-1] * (h * h)
posVX = posredniaVX[i-1] - (w1 * w1) * posredniaX[i-1] * h
posredniaX.append(posX)
posredniaVX.append(posVX)
    # Verlet
verX = verletX[i - 1] + verletVX[i - 1] * h - 0.5 * (w1 * w1) * verletX[i - 1] * (h * h)
verletX.append(verX)
verVX = verletVX[i - 1] - 0.5 * (w1 * w1) * (verletX[i - 1] + verletX[i]) * h
verletVX.append(verVX)
    # Beeman
    # first point taken from the Verlet scheme
if(i == 1):
beemanX.append(verletX[1])
beemanVX.append(verletVX[1])
else:
bemX = beemanX[i - 1] + beemanVX[i - 1] * h + (w1 * w1) * (beemanX[i - 2] - 4 * beemanX[i - 1]) * (h * h)/6
beemanX.append(bemX)
bemVX = beemanVX[i - 1] + (w1 * w1) * (beemanX[i - 2] - 5 * beemanX[i - 1] - 2 * beemanX[i]) * h/6
beemanVX.append(bemVX)
# energy
for i in range(0, iloscPunktow+1):
dokE = 0.5 * k1 * (A1*A1)
dokladneE.append(dokE)
eulE = m * (eulerVX[i] * eulerVX[i])/2 + k1 * (eulerX[i] * eulerX[i]/2)
eulerE.append(eulE)
posE = m * (posredniaVX[i] * posredniaVX[i])/2 + k1 * (posredniaX[i] * posredniaX[i]/2)
posredniaE.append(posE)
verE = m * (verletVX[i] * verletVX[i])/2 + k1 * (verletX[i] * verletX[i]/2)
verletE.append(verE)
bemE = m * (beemanVX[i] * beemanVX[i])/2 + k1 * (beemanX[i] * beemanX[i]/2)
beemanE.append(bemE)
# Animation
xdata = []
ydata = []
xdata2 = []
ydata2 = []
xdata3 = []
ydata3 = []
font1 = {'family': 'serif', 'color': 'blue', 'size': 20}
font2 = {'family': 'serif', 'color': 'darkred', 'size': 15}
fig, ax = plt.subplots()
ax.set_xlim(setXl, setXr)
ax.set_ylim(setYl, setYr)
plt.title("Energia całkowita oscylatora", fontdict=font1)
plt.xlabel("t", fontdict = font2)
plt.ylabel("E", fontdict = font2)
line, = ax.plot(0, 0, '.') # blue
line2, = ax.plot(0, 0, 'r.') # red
line3, = ax.plot(0, 0, 'g.') # green
line.set_label('exact solution')
line2.set_label('Verlet method')
line3.set_label('Beeman method')
ax.legend()
def animation_frame(i):
xdata.append(time[i])
ydata.append(dokladneE[i])
xdata2.append(time[i])
ydata2.append(verletE[i])
xdata3.append(time[i])
ydata3.append(beemanE[i])
line.set_xdata(xdata)
line.set_ydata(ydata)
line2.set_xdata(xdata2)
line2.set_ydata(ydata2)
line3.set_xdata(xdata3)
line3.set_ydata(ydata3)
return line, line2, line3,
animation = FuncAnimation(fig, func = animation_frame, frames = np.arange(0, iloscPunktow + 1, 1), interval = 5)
plt.show()
|
OskarLewandowski/My_Learning
|
Python/Oscylator-energia.py
|
Oscylator-energia.py
|
py
| 4,595 |
python
|
pl
|
code
| 0 |
github-code
|
6
|
45635574383
|
from typing import Dict, List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from .conv_tasnet import TCN, GatedTCN
from .lobe.activation import get_activation
from .lobe.norm import get_norm
from .lobe.rnn import FSMN, ConditionFSMN
class Unet(nn.Module):
"""
Generic_Args:
input_type: Real or RI(real+image)
input_dim: input feature dimension
activation_type: activation function
norm_type: normalization function
dropout: if not 0, add dropout in down-CNN layers
Unet_Args:
channels: controlled input/output channel for Unet
kernel_t: kernel size in time axis for each down cnn layer
kernel_f: kernel size in freq axis for each down/up cnn layer
stride_t: stride size in time axis for each down cnn layer
stride_f: stride size in freq axis for each down/up cnn layer
dilation_t: dilation size in time axis for each down cnn layer
dilation_f: dilation size in freq axis for each down/up cnn layer
delay: add lookahead frames in each down cnn layers, if 0 means causal cnn operation
transpose_t_size: the kernel size of ConvTranspose2d's time axis for up cnn layer
        skip_conv: if True, pass each skip connection through a 1x1 conv and add it, instead of concatenating encoder features
"""
def __init__(
self,
input_type: str = "RI",
input_dim: int = 512,
activation_type: str = "PReLU",
norm_type: str = "bN2d",
dropout: float = 0.05,
channels: Tuple = (1, 1, 8, 8, 16, 16),
transpose_t_size: int = 2,
skip_conv: bool = False,
kernel_t: Tuple = (5, 1, 9, 1, 1),
stride_t: Tuple = (1, 1, 1, 1, 1),
dilation_t: Tuple = (1, 1, 1, 1, 1),
kernel_f: Tuple = (1, 5, 1, 5, 1),
stride_f: Tuple = (1, 4, 1, 4, 1),
dilation_f: Tuple = (1, 1, 1, 1, 1),
delay: Tuple = (0, 0, 1, 0, 0),
multi_output: int = 1,
):
super().__init__()
assert (
len(kernel_t)
== len(kernel_f)
== len(stride_t)
== len(stride_f)
== len(dilation_t)
== len(dilation_f)
)
self.input_type = input_type
self.input_dim = input_dim
self.multi_output = multi_output
self.activation_type = activation_type
self.norm_type = norm_type
self.dropout = dropout
self.skip_conv = skip_conv
# Structure information
self.kernel_t = kernel_t
self.kernel_f = kernel_f
self.stride_t = stride_t
self.stride_f = stride_f
self.dilation_t = dilation_t
self.dilation_f = dilation_f
self.transpose_t_size = transpose_t_size
active_cls = get_activation(activation_type.lower())
norm_cls = get_norm(norm_type)
self.n_cnn = len(kernel_t)
self.channels = list(channels)
self.kernel = list(
zip(kernel_f, kernel_t)
) # each layer's kernel size (freq, time)
self.delay = delay # how much delay for each layer
self.dilation = list(zip(dilation_f, dilation_t))
self.stride = list(zip(stride_f, stride_t))
self.t_kernel = transpose_t_size
# Check relationship between feature-type and input-channel
if input_type.lower() == "ri":
self.num_freq = input_dim // 2
self.channels[0] = self.channels[0] * 2 # will expand RI channel
elif input_type.lower() == "real":
self.num_freq = input_dim
else:
raise TypeError("Input feature type should be RI-concate, RI-stack or Real")
# CNN-down, downsample in frequency axis
self.cnn_down = nn.ModuleList()
for i in range(self.n_cnn):
encode = []
freq_pad = (
self.kernel[i][0] // 2,
self.kernel[i][0] // 2,
) # center padding in frequency axis
time_pad = (self.kernel[i][1] - self.delay[i] - 1, self.delay[i])
encode += [
nn.ZeroPad2d(time_pad + freq_pad), # (left, right, top, down)
nn.Conv2d(
self.channels[i],
self.channels[i + 1],
kernel_size=self.kernel[i],
stride=self.stride[i],
dilation=self.dilation[i],
),
norm_cls(self.channels[i + 1]),
active_cls(),
nn.Dropout(self.dropout),
]
self.cnn_down.append(nn.Sequential(*encode))
# CNN-up, upsample in frequency axis
self.cnn_up = nn.ModuleList()
skip_double = 2 if not skip_conv else 1
skip_double = [skip_double] * self.n_cnn
for i in reversed(range(self.n_cnn)):
s, _ = self.stride[i]
k = self.kernel[i][0]
p = k // 2
op = s - k + 2 * p
encode = []
if i != 0:
encode += [
nn.ConvTranspose2d(
self.channels[i + 1] * skip_double[i],
self.channels[i],
kernel_size=(k, self.t_kernel),
stride=self.stride[i],
dilation=self.dilation[i],
padding=(p, 0),
output_padding=(op, 0),
),
norm_cls(self.channels[i]),
active_cls(),
]
else:
# linear output
encode += [
nn.ConvTranspose2d(
self.channels[i + 1] * skip_double[i],
self.channels[i] * self.multi_output,
kernel_size=(k, self.t_kernel),
stride=self.stride[i],
dilation=self.dilation[i],
padding=(p, 0),
output_padding=(op, 0),
)
]
self.cnn_up.append(nn.Sequential(*encode))
if skip_conv:
self.skip_cnn = nn.ModuleList()
for i in reversed(range(self.n_cnn)):
encode = []
encode += [
nn.Conv2d(
self.channels[i + 1],
self.channels[i + 1],
kernel_size=(1, 1),
stride=1,
),
active_cls(),
]
self.skip_cnn.append(nn.Sequential(*encode))
def shape_info(self):
# input_shape = [N, ch, C, T]
# conv-transpose output size is:
# (freq): (input_shape[2] -1) * stride[0] - 2*padding[0] + dilation[0] * (kernel_size[0]-1) + output_padding[0] + 1
# (time): (input_shape[2] -1) * stride[1] - 2*padding[1] + dilation[1] * (kernel_size[1]-1) + output_padding[1] + 1
down_shape = [self.num_freq]
for i in range(self.n_cnn):
stride, _ = self.stride[i]
            if down_shape[-1] % stride == 0:
_f = down_shape[-1] // stride
else:
_f = down_shape[-1] // stride
_f += 1
down_shape.append(_f)
up_shape = [_f]
for i in range(self.n_cnn):
stride, _ = self.stride[-i - 1]
kernel_size = self.kernel[-i - 1][0]
padding = kernel_size // 2
output_padding = stride - kernel_size + 2 * padding
_f = (
(up_shape[-1] - 1) * stride
- 2 * padding
+ self.dilation[-i - 1][0] * (kernel_size - 1)
+ output_padding
+ 1
)
up_shape.append(_f)
return down_shape, up_shape
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""
Args:
x: input tensor shape [N, C, T]
Returns:
output tensor has shape [N, C, T]
"""
if self.input_type.lower() == "ri":
_re, _im = torch.chunk(x, 2, dim=-2)
x = torch.stack([_re, _im], dim=1) # [N, C, T] -> [N, 2, C, T]
else:
if x.dim() == 3:
x = x.unsqueeze(1) # [N, 1, C, T]
skip = [x.clone()]
# forward CNN-down layers
for cnn_layer in self.cnn_down:
x = cnn_layer(x) # [N, ch, C, T]
skip.append(x)
# forward CNN-up layers
for i, cnn_layer in enumerate(self.cnn_up):
if self.skip_conv:
x += self.skip_cnn[i](skip[-i - 1])
else:
x = torch.cat([x, skip[-i - 1]], dim=1)
x = cnn_layer(x)
if self.t_kernel != 1:
x = x[
..., : -(self.t_kernel - 1)
] # transpose-conv with t-kernel size would increase (t-1) length
if self.multi_output != 1:
batch, ch, fdim, tdim = x.shape
x = x.reshape(batch, self.multi_output, -1, fdim, tdim)
if self.input_type.lower() == "ri":
_re = x[:, :, 0, :, :]
_im = x[:, :, 1, :, :]
x = torch.cat([_re, _im], dim=2)
else:
x = x.squeeze(2) # [N, M, 1, C, T] -> [N, C, T]
else:
if self.input_type.lower() == "ri":
_re = x[:, 0, :, :]
_im = x[:, 1, :, :]
x = torch.cat([_re, _im], dim=1)
else:
x = x.squeeze(1) # [N, 1, C, T] -> [N, C, T]
return x
@property
def get_args(self) -> Dict:
return {
"input_type": self.input_type,
"input_dim": self.input_dim,
"activation_type": self.activation_type,
"norm_type": self.norm_type,
"dropout": self.dropout,
"channels": self.channels,
"transpose_t_size": self.transpose_t_size,
"skip_conv": self.skip_conv,
"kernel_t": self.kernel_t,
"stride_t": self.stride_t,
"dilation_t": self.dilation_t,
"kernel_f": self.kernel_f,
"stride_f": self.stride_f,
"dilation_f": self.dilation_f,
"delay": self.delay,
"multi_output": self.multi_output,
}
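# Usage sketch (illustrative, not taken from the repo; transpose_t_size=1 keeps the time axis
# aligned with the skip connections, and the bundled get_norm/get_activation are assumed to
# return standard BatchNorm2d/PReLU factories):
#   net = Unet(input_type="RI", input_dim=512, transpose_t_size=1)
#   spec = torch.randn(2, 512, 100)   # [N, C, T] with real and imaginary halves stacked along C
#   out = net(spec)                    # -> [N, 512, 100]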
class UnetTcn(Unet):
"""
Improve temporal modeling ability by inserting a TCN inside an Unet model.
Args:
embed_dim: Embedding feature dimension.
embed_norm: If True, applies the 2-norm on the input embedding.
"""
def __init__(
self,
embed_dim: int = 0,
embed_norm: bool = False,
input_type: str = "RI",
input_dim: int = 512,
activation_type: str = "PReLU",
norm_type: str = "bN2d",
dropout: float = 0.05,
channels: Tuple = (1, 1, 8, 8, 16, 16),
transpose_t_size: int = 2,
transpose_delay: bool = False,
skip_conv: bool = False,
kernel_t: Tuple = (5, 1, 9, 1, 1),
stride_t: Tuple = (1, 1, 1, 1, 1),
dilation_t: Tuple = (1, 1, 1, 1, 1),
kernel_f: Tuple = (1, 5, 1, 5, 1),
stride_f: Tuple = (1, 4, 1, 4, 1),
dilation_f: Tuple = (1, 1, 1, 1, 1),
delay: Tuple = (0, 0, 1, 0, 0),
tcn_layer: str = "normal",
tcn_kernel: int = 3,
tcn_dim: int = 256,
tcn_dilated_basic: int = 2,
per_tcn_stack: int = 5,
repeat_tcn: int = 4,
tcn_with_embed: List = [1, 0, 0, 0, 0],
tcn_use_film: bool = False,
tcn_norm: str = "gLN",
dconv_norm: str = "gGN",
causal: bool = False,
):
super().__init__(
input_type,
input_dim,
activation_type,
norm_type,
dropout,
channels,
transpose_t_size,
skip_conv,
kernel_t,
stride_t,
dilation_t,
kernel_f,
stride_f,
dilation_f,
delay,
)
self.embed_dim = embed_dim
self.embed_norm = embed_norm
self.tcn_layer = tcn_layer
self.tcn_dim = tcn_dim
self.tcn_kernel = tcn_kernel
self.per_tcn_stack = per_tcn_stack
self.repeat_tcn = repeat_tcn
self.tcn_dilated_basic = tcn_dilated_basic
self.tcn_with_embed = tcn_with_embed
self.tcn_norm = tcn_norm
self.dconv_norm = dconv_norm
self.tcn_use_film = tcn_use_film
self.causal = causal
self.transpose_delay = transpose_delay
# TCN module's
temporal_input_dim = self.num_freq
for stride, _ in self.stride:
if temporal_input_dim % stride == 0:
temporal_input_dim //= stride
else:
temporal_input_dim //= stride
temporal_input_dim += 1
temporal_input_dim *= self.channels[-1] # extend by channel size
if self.tcn_layer.lower() == "normal":
tcn_cls = TCN
elif self.tcn_layer.lower() == "gated":
print("GatedTCN would ignore dconv_norm configuration.")
tcn_cls = GatedTCN
else:
raise NameError
assert per_tcn_stack == len(tcn_with_embed)
self.tcn_list = nn.ModuleList()
for _ in range(repeat_tcn):
_tcn = []
for i in range(per_tcn_stack):
if tcn_with_embed[i]:
if self.tcn_layer.lower() == "normal":
_tcn.append(
tcn_cls(
temporal_input_dim,
tcn_dim,
kernel=tcn_kernel,
dilation=tcn_dilated_basic ** i,
emb_dim=embed_dim,
causal=causal,
tcn_norm=tcn_norm,
dconv_norm=dconv_norm,
)
)
else:
_tcn.append(
tcn_cls(
temporal_input_dim,
tcn_dim,
kernel=tcn_kernel,
dilation=tcn_dilated_basic ** i,
emb_dim=embed_dim,
causal=causal,
tcn_norm=tcn_norm,
use_film=tcn_use_film,
)
)
else:
if self.tcn_layer.lower() == "normal":
_tcn.append(
tcn_cls(
temporal_input_dim,
tcn_dim,
kernel=tcn_kernel,
dilation=tcn_dilated_basic ** i,
emb_dim=0,
causal=causal,
tcn_norm=tcn_norm,
dconv_norm=dconv_norm,
)
)
else:
_tcn.append(
tcn_cls(
temporal_input_dim,
tcn_dim,
kernel=tcn_kernel,
dilation=tcn_dilated_basic ** i,
emb_dim=0,
causal=causal,
tcn_norm=tcn_norm,
use_film=False,
)
)
self.tcn_list.append(nn.ModuleList(_tcn))
def forward(
self, x: torch.Tensor, dvec: Optional[torch.Tensor] = None
) -> torch.Tensor:
"""
Args:
x: input tensor shape [N, C, T]
dvec: conditional tensor shape [N, C]
Returns:
output tensor has shape [N, C, T]
"""
# normalize
if self.embed_norm and dvec is not None:
dvec = F.normalize(dvec, p=2, dim=1)
if self.input_type.lower() == "ri":
_re, _im = torch.chunk(x, 2, dim=-2)
x = torch.stack([_re, _im], dim=1) # [N, C, T] -> [N, 2, C, T]
else:
if x.dim() == 3:
x = x.unsqueeze(1) # [N, 1, C, T]
skip = [x.clone()]
# forward CNN-down layers
for cnn_layer in self.cnn_down:
x = cnn_layer(x) # [N, ch, C, T]
skip.append(x)
# forward TCN block
N_ori, ch, C_ori, T = x.shape
x = x.reshape(N_ori, ch * C_ori, T)
for r in range(self.repeat_tcn):
for i in range(len(self.tcn_list[r])):
if self.tcn_with_embed[i]:
x = self.tcn_list[r][i](x, dvec)
else:
x = self.tcn_list[r][i](x)
x = x.reshape(N_ori, ch, C_ori, T)
# forward CNN-up layers
for i, cnn_layer in enumerate(self.cnn_up):
if self.skip_conv:
x += self.skip_cnn[i](skip[-i - 1])
else:
x = torch.cat([x, skip[-i - 1]], dim=1)
x = cnn_layer(x)
if self.t_kernel != 1:
if self.transpose_delay:
x = x[
..., (self.t_kernel - 1) :
] # transpose-conv with t-kernel size would increase (t-1) length
else:
x = x[
..., : -(self.t_kernel - 1)
] # transpose-conv with t-kernel size would increase (t-1) length
if self.input_type.lower() == "ri":
_re = x[:, 0, :, :]
_im = x[:, 1, :, :]
x = torch.cat([_re, _im], dim=1)
else:
x = x.squeeze(1) # [N, 1, C, T] -> [N, C, T]
return x
@property
def get_args(self) -> Dict:
return {
"input_type": self.input_type,
"input_dim": self.input_dim,
"activation_type": self.activation_type,
"norm_type": self.norm_type,
"dropout": self.dropout,
"channels": self.channels,
"transpose_t_size": self.transpose_t_size,
"transpose_delay": self.transpose_delay,
"skip_conv": self.skip_conv,
"kernel_t": self.kernel_t,
"stride_t": self.stride_t,
"dilation_t": self.dilation_t,
"kernel_f": self.kernel_f,
"stride_f": self.stride_f,
"dilation_f": self.dilation_f,
"delay": self.delay,
"embed_dim": self.embed_dim,
"embed_norm": self.embed_norm,
"tcn_norm": self.tcn_norm,
"dconv_norm": self.dconv_norm,
"tcn_layer": self.tcn_layer,
"tcn_dim": self.tcn_dim,
"tcn_kernel": self.tcn_kernel,
"tcn_dilated_basic": self.tcn_dilated_basic,
"repeat_tcn": self.repeat_tcn,
"per_tcn_stack": self.per_tcn_stack,
"tcn_with_embed": self.tcn_with_embed,
"tcn_use_film": self.tcn_use_film,
"causal": self.causal,
}
class UnetFsmn(Unet):
"""
Improve temporal modeling ability by inserting a FSMN inside an Unet model.
Args:
embed_dim: Embedding feature dimension.
embed_norm: If True, applies the 2-norm on the input embedding.
"""
def __init__(
self,
embed_dim: int = 0,
embed_norm: bool = False,
input_type: str = "RI",
input_dim: int = 512,
activation_type: str = "PReLU",
norm_type: str = "bN2d",
dropout: float = 0.05,
channels: Tuple = (1, 1, 8, 8, 16, 16),
transpose_t_size: int = 2,
transpose_delay: bool = False,
skip_conv: bool = False,
kernel_t: Tuple = (5, 1, 9, 1, 1),
stride_t: Tuple = (1, 1, 1, 1, 1),
dilation_t: Tuple = (1, 1, 1, 1, 1),
kernel_f: Tuple = (1, 5, 1, 5, 1),
stride_f: Tuple = (1, 4, 1, 4, 1),
dilation_f: Tuple = (1, 1, 1, 1, 1),
delay: Tuple = (0, 0, 1, 0, 0),
fsmn_l_context: int = 3,
fsmn_r_context: int = 0,
fsmn_dim: int = 256,
num_fsmn: int = 8,
fsmn_with_embed: List = [1, 1, 1, 1, 1, 1, 1, 1],
fsmn_norm: str = "gLN",
use_film: bool = True,
):
super().__init__(
input_type,
input_dim,
activation_type,
norm_type,
dropout,
channels,
transpose_t_size,
skip_conv,
kernel_t,
stride_t,
dilation_t,
kernel_f,
stride_f,
dilation_f,
delay,
)
self.transpose_delay = transpose_delay
self.embed_dim = embed_dim
self.embed_norm = embed_norm
self.fsmn_l_context = fsmn_l_context
self.fsmn_r_context = fsmn_r_context
self.fsmn_dim = fsmn_dim
self.num_fsmn = num_fsmn
self.fsmn_with_embed = fsmn_with_embed
self.fsmn_norm = fsmn_norm
self.use_film = use_film
# FSMN module's
temporal_input_dim = self.num_freq
for stride, _ in self.stride:
if temporal_input_dim % stride == 0:
temporal_input_dim //= stride
else:
temporal_input_dim //= stride
temporal_input_dim += 1
temporal_input_dim *= self.channels[-1] # extend by channel size
assert num_fsmn == len(fsmn_with_embed)
self.fsmn_list = nn.ModuleList()
for i in range(num_fsmn):
if fsmn_with_embed[i]:
self.fsmn_list.append(
ConditionFSMN(
temporal_input_dim,
temporal_input_dim,
fsmn_dim,
embed_dim,
fsmn_l_context,
fsmn_r_context,
norm_type=fsmn_norm,
use_film=use_film,
)
)
else:
self.fsmn_list.append(
FSMN(
temporal_input_dim,
temporal_input_dim,
fsmn_dim,
fsmn_l_context,
fsmn_r_context,
norm_type=fsmn_norm,
)
)
def forward(
self, x: torch.Tensor, dvec: Optional[torch.Tensor] = None
) -> torch.Tensor:
"""
Args:
x: input tensor shape [N, C, T]
dvec: conditional tensor shape [N, C]
Returns:
output tensor has shape [N, C, T]
"""
# normalize
if self.embed_norm and dvec is not None:
dvec = F.normalize(dvec, p=2, dim=1)
if self.input_type.lower() == "ri":
_re, _im = torch.chunk(x, 2, dim=-2)
x = torch.stack([_re, _im], dim=1) # [N, C, T] -> [N, 2, C, T]
else:
if x.dim() == 3:
x = x.unsqueeze(1) # [N, 1, C, T]
skip = [x.clone()]
# forward CNN-down layers
for cnn_layer in self.cnn_down:
x = cnn_layer(x) # [N, ch, C, T]
skip.append(x)
# forward FSMN block
N_ori, ch, C_ori, T = x.shape
x = x.reshape(N_ori, ch * C_ori, T)
memory = None
for i in range(len(self.fsmn_list)):
if self.fsmn_with_embed[i]:
x, memory = self.fsmn_list[i](x, dvec, memory)
else:
x, memory = self.fsmn_list[i](x, memory)
x = x.reshape(N_ori, ch, C_ori, T)
# forward CNN-up layers
for i, cnn_layer in enumerate(self.cnn_up):
if self.skip_conv:
x += self.skip_cnn[i](skip[-i - 1])
else:
x = torch.cat([x, skip[-i - 1]], dim=1)
x = cnn_layer(x)
if self.t_kernel != 1:
if self.transpose_delay:
x = x[
..., (self.t_kernel - 1) :
] # transpose-conv with t-kernel size would increase (t-1) length
else:
x = x[
..., : -(self.t_kernel - 1)
] # transpose-conv with t-kernel size would increase (t-1) length
if self.input_type.lower() == "ri":
_re = x[:, 0, :, :]
_im = x[:, 1, :, :]
x = torch.cat([_re, _im], dim=1)
else:
x = x.squeeze(1) # [N, 1, C, T] -> [N, C, T]
return x
@property
def get_args(self) -> Dict:
return {
"input_type": self.input_type,
"input_dim": self.input_dim,
"activation_type": self.activation_type,
"norm_type": self.norm_type,
"dropout": self.dropout,
"channels": self.channels,
"transpose_t_size": self.transpose_t_size,
"transpose_delay": self.transpose_delay,
"skip_conv": self.skip_conv,
"kernel_t": self.kernel_t,
"stride_t": self.stride_t,
"dilation_t": self.dilation_t,
"kernel_f": self.kernel_f,
"stride_f": self.stride_f,
"dilation_f": self.dilation_f,
"delay": self.delay,
"embed_dim": self.embed_dim,
"embed_norm": self.embed_norm,
"fsmn_l_context": self.fsmn_l_context,
"fsmn_r_context": self.fsmn_r_context,
"fsmn_dim": self.fsmn_dim,
"num_fsmn": self.num_fsmn,
"fsmn_with_embed": self.fsmn_with_embed,
"fsmn_norm": self.fsmn_norm,
"use_film": self.use_film,
}
|
mcw519/PureSound
|
puresound/nnet/unet.py
|
unet.py
|
py
| 26,136 |
python
|
en
|
code
| 4 |
github-code
|
6
|
39858982363
|
import os
import sys
if not "DEVITO_OPENMP" in os.environ or os.environ["DEVITO_OPENMP"] != "1":
print("*** WARNING: Devito OpenMP environment variable has not been set ***", file=sys.stderr)
import numpy as np
from sympy import Matrix, Eq, solve
import progressbar
from devito import TimeData, Operator, t, x, y, z, logger as devito_logger, parameters as devito_parameters
from . import sim
devito_logger.set_log_level('WARNING')
def vector_laplacian(u):
return Matrix([u[0].dx2 + u[0].dy2 + u[0].dz2,
u[1].dx2 + u[1].dy2 + u[1].dz2,
u[2].dx2 + u[2].dy2 + u[2].dz2])
def vector_gradient(u):
return u[0].dx**2 + u[0].dy**2 + u[0].dz**2 + u[1].dx**2 + u[1].dy**2 + u[1].dz**2 + u[2].dx**2 + u[2].dy**2 + u[2].dz**2
def curl(u):
return Matrix([u[2].dy - u[1].dz,
u[0].dz - u[2].dx,
u[1].dx - u[0].dy])
expression_cache = {}
class Sim(sim.Sim):
framework_name = "Devito"
@property
def data_shape(self):
# Devito doesn't like numpy types for the grid dimensions, and it needs to be a tuple, so shape needs to be converted
return tuple(int(i) for i in self.grid_params.n)
def data_matrix(self, settings):
return Matrix([TimeData(name='m_x', **settings),
TimeData(name='m_y', **settings),
TimeData(name='m_z', **settings)])
def generate_step_kernel(self):
settings = {"shape":self.buffer_dims, "space_order":2}
m = self.data_matrix(settings)
c = 2 / (self.mu0 * self.sim_params.Ms)
zeeman = Matrix(self.sim_params.H)
exchange = self.sim_params.A * c * vector_laplacian(m)
e = Matrix(self.sim_params.e)
anisotropy = self.sim_params.K * c * m.dot(e) * e
dmi = self.sim_params.D * c * curl(m)
heff = zeeman + exchange + anisotropy + dmi
crossHeff = m.cross(heff)
dmdt_rhs = -self.gamma0 / (1 + self.sim_params.alpha**2) * (crossHeff + self.sim_params.alpha * m.cross(crossHeff))
dmdt_lhs = Matrix([TimeData(name='dmdt_x', **settings),
TimeData(name='dmdt_y', **settings),
TimeData(name='dmdt_z', **settings)])
dmdt_correction = self.correction * dmdt_lhs.dot(dmdt_lhs)**0.5 * (1 - m.dot(m)) * m
string_llg = str(dmdt_rhs) + str(dmdt_correction)
if string_llg in expression_cache:
update = expression_cache[string_llg]
else:
update = []
if self.correction > 0:
# if using correction solve in 2 steps; calculate dmdt, then calculate m[t+1] = dmdt + correction
for i, dmdti in enumerate(dmdt_lhs):
update.append(Eq(dmdti, dmdt_rhs[i]))
llg_eqn = Matrix([mi.dt for mi in m]) - (dmdt_lhs + dmdt_correction)
else:
# if not using correction; m[t+1] = dmdt
llg_eqn = Matrix([mi.dt for mi in m]) - dmdt_rhs
print("Solving LLG Sympy expressions ...", file=sys.stderr)
with progressbar.ProgressBar(max_value=len(m)) as bar:
for i, mi in enumerate(m):
update.append(Eq(mi.forward, solve(llg_eqn[i], mi.forward)[0]))
bar.update(i + 1)
expression_cache[string_llg] = update
bcs = []
nx, ny, nz = self.buffer_dims
if self.periodic_boundary:
for mi in m:
bcs += [Eq(mi.indexed[t, x, y, 0], mi.indexed[t, x, y, nz - 2])]
bcs += [Eq(mi.indexed[t, x, y, nz - 1], mi.indexed[t, x, y, 1])]
bcs += [Eq(mi.indexed[t, x, 0, z], mi.indexed[t, x, ny - 2, z])]
bcs += [Eq(mi.indexed[t, x, ny - 1, z], mi.indexed[t, x, 1, z])]
bcs += [Eq(mi.indexed[t, 0, y, z], mi.indexed[t, nx - 2, y, z])]
bcs += [Eq(mi.indexed[t, nx - 1, y, z], mi.indexed[t, 1, y, z])]
else:
for mi in m:
bcs += [Eq(mi.indexed[t, x, y, 0], 0.)]
bcs += [Eq(mi.indexed[t, x, y, nz - 1], 0.)]
bcs += [Eq(mi.indexed[t, x, 0, z], 0.)]
bcs += [Eq(mi.indexed[t, x, ny - 1, z], 0.)]
bcs += [Eq(mi.indexed[t, 0, y, z], 0.)]
bcs += [Eq(mi.indexed[t, nx - 1, y, z], 0.)]
dx, dy, dz = self.grid_params.d
dt = self.time_params.d
subs = {x.spacing: dx, y.spacing: dy, z.spacing: dz, t.spacing: dt}
op = Operator(bcs + update, subs=subs)
        # Call op to trigger compilation
op(time=1)
def step(f, t):
for i, mi in enumerate(m):
mi.data[(0, ) + self.buffer_slice] = f[i]
op(time=self.save_every + 1)
for i, mi in enumerate(m):
t[i] = mi.data[(self.save_every % 2, ) + self.buffer_slice]
return step
"""
def energy_expr(self, m):
dV = self.grid_params.prod_d
e = Matrix(self.sim_params.e)
H = Matrix(self.sim_params.H)
Kc = dV * -self.sim_params.K
Ac = dV * self.sim_params.A
Dc = dV * -self.sim_params.D
Hc = dV * -self.mu0 * self.sim_params.Ms
return {"Zeeman":Hc * m.dot(H),
"Exchange":Ac * vector_gradient(m),
"Anisotropy":Kc * (m.dot(e))**2,
"DMI":Dc * m.dot(curl(m))}
def generate_energy_kernel(self):
settings = {"shape":self.buffer_dims, "space_order":2}
m = self.data_matrix(settings)
energy_expr = self.energy_expr(m)
E = TimeData(name='E', **settings)
eqn = Eq(E, sum(energy_expr.values()))
dx, dy, dz = self.grid_params.d
subs = {x.spacing: dx, y.spacing: dy, z.spacing: dz}
# turn dle off because some eqns are 1st and some are 2nd order, requiring different bounds.
op = Operator(eqn, subs=subs, dle=False)
        # Call op to trigger compilation
op()
def energy(d):
for i, mi in enumerate(m):
mi.data[0] = d[i]
op(time=1)
return E.data[0]
return energy
def generate_detailed_energy_kernel(self, terms):
def energy(d):
settings = {"shape":self.buffer_dims, "space_order":2, "time_dim":len(d), "save":True}
m = self.data_matrix(settings)
energy_expr = self.energy_expr(m)
names = [k for k in terms if k in energy_expr]
symbols = []
eqns = []
for key in names:
symbol = TimeData(name='E_{}'.format(key), **settings)
symbols.append(symbol)
eqns.append(Eq(symbol, energy_expr[key]))
dx, dy, dz = self.grid_params.d
subs = {x.spacing: dx, y.spacing: dy, z.spacing: dz}
# turn dle off because some eqns are 1st and some are 2nd order, requiring different bounds.
op = Operator(eqns, subs=subs, dle=False)
for i, mi in enumerate(m):
for j, dj in enumerate(d):
mi.data[j] = dj[i]
op()
ret = {}
for i, name in enumerate(names):
ret[name] = []
for dj in symbols[i].data:
ret[name].append(dj)
return ret
return energy
"""
|
gamdow/ACG-feasibility
|
wrapper_pkg/devito.py
|
devito.py
|
py
| 7,301 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8337543538
|
#! /usr/bin/env python
import sys
import csv
import screed
import random
import argparse
import sourmash
import sequtils # local import
def main():
parser = argparse.ArgumentParser()
parser.add_argument('genome')
parser.add_argument('-e', '--error-rate', type=float, default=.01)
parser.add_argument('-r', '--read-length', type=int, default=100,
help="Length of reads to generate")
parser.add_argument("-S", "--seed", dest="seed", help="Random seed", type=int,
default=1)
parser.add_argument("-k", "--ksize", default=31, help="k-mer size")
parser.add_argument("-o", "--output", required=True,
help="CSV output of detection curve")
args = parser.parse_args()
READLEN=args.read_length
ERROR_RATE=args.error_rate
NUM_FRACMINHASH = 5
random.seed(args.seed) # make this reproducible, please.
records = list(screed.open(args.genome))
assert len(records) == 1
record = records[0]
genome = record.sequence
len_genome = len(genome)
total_mh = sourmash.MinHash(0, args.ksize, scaled=1)
total_mh.add_sequence(genome)
all_hashes = set(total_mh.hashes)
# make NUM_FRACMINHASH minhashes each with different mmh seeds
all_hashes_list = []
scaled_mh_list = []
for i in range(NUM_FRACMINHASH):
smh = sourmash.MinHash(0, args.ksize, scaled=1000, seed=i + 42)
all_hashes_i = smh.copy_and_clear()
all_hashes_i.add_sequence(genome)
scaled_mh_list.append(smh)
all_hashes_list.append(all_hashes_i)
print('genome size:', len_genome, file=sys.stderr)
print('readlen:', READLEN, file=sys.stderr)
print('error rate:', ERROR_RATE, file=sys.stderr)
print('num k-mers:', len(total_mh))
reads_mut = 0
total_mut = 0
print(f"Read in template genome {0} of length {1} from {2}".format(record["name"], len_genome, args.genome), file=sys.stderr)
print(f"Generating reads of length {READLEN} with an error rate of 1 in {ERROR_RATE}", file=sys.stderr)
it = sequtils.generate_mutated_reads(genome, READLEN, ERROR_RATE)
it = iter(it)
fp = open(args.output, 'w', newline="")
csv_w = csv.writer(fp)
headers = ['num_reads', 'coverage', 'n_detected', 'f_detected']
for i in range(NUM_FRACMINHASH):
headers.append(f"smash_count_{i}")
csv_w.writerow(headers)
csv_w.writerow([0, 0, 0, 0] + [0]*NUM_FRACMINHASH)
n_reads = 0
total_bp_in_reads = 0
f01 = len(all_hashes) * 0.1
remaining_hashes = set(all_hashes)
while len(remaining_hashes) > f01:
start, read, read_mutations = next(it)
if read_mutations:
reads_mut += 1
total_mut += read_mutations
n_reads += 1
total_bp_in_reads += len(read)
# first, track _all_ hashes for actual k-mer detection
mh = total_mh.copy_and_clear()
mh.add_sequence(read)
remaining_hashes -= set(mh.hashes)
n_detected = len(all_hashes) - len(remaining_hashes)
f_detected = n_detected / len(all_hashes)
coverage = total_bp_in_reads / len_genome
# now, track sourmash detection & intersect with legit hashes:
smash_detection = []
for smh, all_hashes_i in zip(scaled_mh_list, all_hashes_list):
smh.add_sequence(read)
smh_hashes = set(smh.hashes)
smh_hashes.intersection_update(all_hashes_i.hashes)
smash_detection.append(len(smh_hashes))
csv_w.writerow([n_reads, f"{coverage:.4f}", n_detected, f"{f_detected:.4f}"] + smash_detection)
sys.stdout.write(u'\r\033[K')
sys.stdout.write(f"...{n_reads} reads, {len(all_hashes)} missing k-mers, {total_bp_in_reads / len_genome:.2f} coverage")
sys.stdout.flush()
fp.close()
if __name__ == '__main__':
sys.exit(main())
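# Added sketch (not used by the experiment): the fraction of a DNA sequence's k-mers that a
# FracMinHash sketch retains at a given `scaled` value, built from the same sourmash calls as
# the main loop above.
def _fracminhash_retention(seq, ksize=31, scaled=1000):
    full = sourmash.MinHash(0, ksize, scaled=1)        # keeps every k-mer hash
    full.add_sequence(seq)
    frac = sourmash.MinHash(0, ksize, scaled=scaled)   # keeps roughly 1/scaled of the hashes
    frac.add_sequence(seq)
    return len(frac.hashes) / max(len(full.hashes), 1)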
|
ctb/2022-sourmash-sens-spec
|
scripts/make-detection-curve.py
|
make-detection-curve.py
|
py
| 3,926 |
python
|
en
|
code
| 2 |
github-code
|
6
|
2115320611
|
# suhaarslan.com
from random import randbytes
class Client:
def keyControl(self, x):
        # keep only the first self.__byte (here 128) bytes of the supplied key
return x[:self.__byte]
    # Control Functions
def __init__(self, a, b):
        # a, b: public keys supplied as strings; only the first 128 bytes are used
self.__byte = 128
self.public_key_1 = bytes(self.keyControl(a.encode()))
self.public_key_2 = bytes(self.keyControl(b.encode()))
self.__createPrivateKey()
def __createPrivateKey(self):
        # private key: 128 random bytes
self.__private_key = randbytes(self.__byte)
def getPrivateKey(self):
return self.__private_key
# Key Functions
def Make(self, onc = 0):
if onc == 0:
self.local = hex((int(self.public_key_1.hex(), 16)^int(self.__private_key.hex(), 16))&int(self.public_key_2.hex(), 16)-1)
return self.local
else:
self.l_onc = hex((int(onc, 16)^int(self.__private_key.hex(), 16))&int(self.public_key_2.hex(), 16)-1)
return self.l_onc
# Calcs
k1 = """Ua){jk2#N^=yShan.]}:+#'TZL6s!F!WG8A=&-ML{gJ(B>5$xC=X/]H'[6gyNn6*B`4:UB,~)et[">$9:d#9F6nQjcp,!pm5FPP(=VGTXe6U=Ypta&JjrRfE}"/j~g"/"""
k2 = """rt$}Lu9Gdsu:^&>8[2>waMC}g+q[=g~KJ=ymp5"`=:&M-XUDQ&SB3Yc_B-V/5b@_kt(:[=r`98C(r2rE@wA#c_T8k+D>EMqrG5$\_xUaDx)Tr4_J"b{vud+X<9'N<:sB"""
alpha = Client(k1, k2)
alphaCalc = alpha.Make()
beta = Client(k1, k2)
betaCalc = beta.Make()
print(alpha.Make(onc=betaCalc))
print(beta.Make(onc=alphaCalc))
|
programmer-666/Cryptography
|
Asymetric/Diffie-Helman.py
|
Diffie-Helman.py
|
py
| 1,463 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31463758726
|
import logging
import pathlib
import sqlite3
logger = logging.getLogger(__name__)
def is_database_exists(db_path):
return pathlib.Path(db_path).exists()
def open_connection(db_path):
if is_database_exists(db_path):
logger.debug(f"Connecting to {db_path}")
try:
return sqlite3.connect(db_path)
except Exception:
logger.exception(f"Failed to connect to {db_path}")
raise
else:
raise RuntimeError(f"Databse {db_path} doesn't exist")
def close_connection(connection):
assert connection is not None
logger.debug("Closing connection")
connection.close()
def create_database(db_path):
logger.info(f"Creating empty database at {db_path}")
if not is_database_exists(db_path):
try:
connection = sqlite3.connect(db_path)
except Exception:
logging.exception("Failed to create database")
raise
else:
close_connection(connection)
else:
raise RuntimeError(f"Database {db_path} already exists")
class DatabaseIO:
def __init__(self, db_path):
self._path = db_path
self._connection = None
def __enter__(self):
self._connection = open_connection(self._path)
return self
def __exit__(self, exc_type, exc_val, exc_traceback):
del exc_type
del exc_val
del exc_traceback
close_connection(self._connection)
self._connection = None
return False
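if __name__ == "__main__":
    # Usage sketch (added): create a throwaway database and run a trivial statement through the
    # context manager. Reaching into the private `_connection` attribute is for this demo only.
    import os
    import tempfile
    logging.basicConfig(level=logging.DEBUG)
    demo_path = os.path.join(tempfile.mkdtemp(), "demo.sqlite3")
    create_database(demo_path)
    with DatabaseIO(demo_path) as db:
        db._connection.execute("CREATE TABLE demo (id INTEGER PRIMARY KEY, name TEXT)")
        db._connection.execute("INSERT INTO demo (name) VALUES (?)", ("reveal",))
        print(db._connection.execute("SELECT count(*) FROM demo").fetchall())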
|
nemeshnorbert/reveal
|
src/db/utils.py
|
utils.py
|
py
| 1,509 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28448639940
|
# __author__ = 'heyin'
# __date__ = '2019/2/14 16:03'
# Google Translate gRPC server-side code
import sys
sys.path.append('../')
import json
import grpc
import time
from concurrent import futures
from rpc_server.fanyi import fanyi_pb2, fanyi_pb2_grpc
from rpc_conf import HOST, PORT, ONE_DAY_IN_SECONDS
from core import google
js = google.Py4Js()
class Translate(fanyi_pb2_grpc.TranslateServicer):
def DoTranslate(self, request, context):
args = request.text
args = json.loads(args)
src = args.get('src')
dest = args.get('dest')
cookies = args.get('cookies')
        # the lines below perform the actual Google translation
ret = google.translate(js, args.get('content'), src, dest, cookies)
return fanyi_pb2.Data(text=ret)
def serve():
grpcServer = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
fanyi_pb2_grpc.add_TranslateServicer_to_server(Translate(), grpcServer)
grpcServer.add_insecure_port(HOST + ':' + PORT)
grpcServer.start()
try:
while True:
time.sleep(ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
grpcServer.stop(0)
if __name__ == '__main__':
serve()
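def _client_demo(content="hello world", src="en", dest="zh-CN"):
    # Client-side sketch (added, assumptions hedged): grpc codegen conventionally generates a
    # `TranslateStub` for the `TranslateServicer` above, and the request is assumed to use the
    # same `Data(text=...)` message the server returns; adjust if fanyi.proto defines a
    # different request type.
    channel = grpc.insecure_channel(HOST + ':' + PORT)
    stub = fanyi_pb2_grpc.TranslateStub(channel)
    payload = json.dumps({'content': content, 'src': src, 'dest': dest, 'cookies': None})
    response = stub.DoTranslate(fanyi_pb2.Data(text=payload))
    return response.text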
|
hy89/google-translate
|
rpc_server/server.py
|
server.py
|
py
| 1,172 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30086424921
|
import logging
import paddle.fluid as fluid
import paddle.fluid.dygraph.nn as nn
from utils import build_norm_layer, build_conv_layer, Sequential
class BasicBlock(fluid.dygraph.Layer):
expansion = 1
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
conv_cfg=None,
norm_cfg=dict(type='BN')):
super(BasicBlock, self).__init__()
self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)
self.conv1 = build_conv_layer(
conv_cfg,
inplanes,
planes,
3,
stride=stride,
padding=dilation,
dilation=dilation,
bias_attr=False)
self.add_sublayer(self.norm1_name, norm1)
self.conv2 = build_conv_layer(
conv_cfg, planes, planes, 3, padding=1, bias_attr=False)
self.add_sublayer(self.norm2_name, norm2)
self.relu = fluid.layers.relu
self.downsample = downsample
self.stride = stride
self.dilation = dilation
@property
def norm1(self):
return getattr(self, self.norm1_name)
@property
def norm2(self):
return getattr(self, self.norm2_name)
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.norm2(out)
if self.downsample is not None:
identity = self.downsample(x)
out = fluid.layers.elementwise_add(out, identity)
out = self.relu(out)
return out
class Bottleneck(fluid.dygraph.Layer):
expansion = 4
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
conv_cfg=None,
norm_cfg=dict(type='BN')):
"""Bottleneck block for ResNet.
the stride-two layer is the 3x3 conv layer,.
"""
super(Bottleneck, self).__init__()
self.inplanes = inplanes
self.planes = planes
self.stride = stride
self.dilation = dilation
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.conv1_stride = 1
self.conv2_stride = stride
self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)
self.norm3_name, norm3 = build_norm_layer(
norm_cfg, planes * self.expansion, postfix=3)
self.conv1 = build_conv_layer(
conv_cfg,
inplanes,
planes,
1,
stride=1,
bias_attr=False)
self.add_sublayer(self.norm1_name, norm1)
self.conv2 = build_conv_layer(
conv_cfg,
planes,
planes,
3,
stride=stride,
padding=dilation,
dilation=dilation,
bias_attr=False)
self.add_sublayer(self.norm2_name, norm2)
self.conv3 = build_conv_layer(
conv_cfg,
planes,
planes * self.expansion,
1,
bias_attr=False)
self.add_sublayer(self.norm3_name, norm3)
self.relu = fluid.layers.relu
self.downsample = downsample
@property
def norm1(self):
return getattr(self, self.norm1_name)
@property
def norm2(self):
return getattr(self, self.norm2_name)
@property
def norm3(self):
return getattr(self, self.norm3_name)
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.norm2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.norm3(out)
if self.downsample is not None:
identity = self.downsample(x)
out = fluid.layers.elementwise_add(out, identity)
out = self.relu(out)
return out
def make_res_layer(block,
inplanes,
planes,
blocks,
stride=1,
dilation=1,
conv_cfg=None,
norm_cfg=dict(type='BN')):
downsample = None
if stride != 1 or inplanes != planes * block.expansion:
downsample = Sequential(
build_conv_layer(
conv_cfg,
inplanes,
planes * block.expansion,
1,
stride=stride,
bias_attr=False),
build_norm_layer(norm_cfg, planes * block.expansion)[1]
)
layers = []
layers.append(
block(
inplanes,
planes,
stride,
dilation,
downsample,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg))
inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(
block(
inplanes,
planes,
1,
dilation,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg))
return Sequential(*layers)
class ResNet(fluid.dygraph.Layer):
"""ResNet backbone.
Args:
depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
num_stages (int): Resnet stages, normally 4.
"""
arch_settings = {
18: (BasicBlock, (2, 2, 2, 2)),
34: (BasicBlock, (3, 4, 6, 3)),
50: (Bottleneck, (3, 4, 6, 3)),
101: (Bottleneck, (3, 4, 23, 3)),
152: (Bottleneck, (3, 8, 36, 3))
}
def __init__(self,
depth,
num_stages=4,
strides=(1, 2, 2, 2),
dilations=(1, 1, 1, 1),
out_indices=(0, 1, 2, 3),
frozen_stages=-1,
conv_cfg=None,
norm_cfg=dict(type='BN'),
norm_eval=True,
zero_init_residual=True):
super(ResNet, self).__init__()
if depth not in self.arch_settings:
raise KeyError('invalid depth {} for resnet'.format(depth))
self.depth = depth
self.num_stages = num_stages
assert num_stages >= 1 and num_stages <= 4
self.strides = strides
self.dilations = dilations
assert len(strides) == len(dilations) == num_stages
self.out_indices = out_indices
assert max(out_indices) < num_stages
self.frozen_stages = frozen_stages
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.norm_eval = norm_eval
self.zero_init_residual = zero_init_residual
self.block, stage_blocks = self.arch_settings[depth]
self.stage_blocks = stage_blocks[:num_stages]
self.inplanes = 64
self._make_stem_layer()
self.res_layers = []
for i, num_blocks in enumerate(self.stage_blocks):
stride = strides[i]
dilation = dilations[i]
planes = 64 * 2**i
res_layer = make_res_layer(
self.block,
self.inplanes,
planes,
num_blocks,
stride=stride,
dilation=dilation,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg)
self.inplanes = planes * self.block.expansion
layer_name = 'layer{}'.format(i + 1)
self.add_sublayer(layer_name, res_layer)
self.res_layers.append(layer_name)
self._freeze_stages()
self.feat_dim = self.block.expansion * 64 * 2**(
len(self.stage_blocks) - 1)
@property
def norm1(self):
return getattr(self, self.norm1_name)
def _make_stem_layer(self):
self.conv1 = build_conv_layer(
self.conv_cfg,
3,
64,
7,
stride=2,
padding=3,
bias_attr=False)
self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1)
self.add_sublayer(self.norm1_name, norm1)
self.relu = fluid.layers.relu
self.maxpool = nn.Pool2D(pool_size=3, pool_stride=2, pool_padding=1)
def _freeze_stages(self):
if self.frozen_stages >= 0:
self.norm1.eval()
for layer in [self.conv1, self.norm1]:
layer.eval()
for param in layer.parameters():
param.stop_gradient = True
for i in range(1, self.frozen_stages + 1):
layer = getattr(self, 'layer{}'.format(i))
layer.eval()
for param in layer.parameters():
param.stop_gradient = True
def init_weights(self, pretrained=None):
logger = logging.getLogger()
if isinstance(pretrained, str):
logger.info('Loading pretrained model from {}'.format(pretrained))
self.set_dict(fluid.dygraph.load_dygraph(pretrained)[0])
elif pretrained is None:
logger.warning('No pretrained model for Resnet')
else:
raise TypeError('pretrained must be a str or None')
def forward(self, x):
outs = []
x = self.conv1(x)
x = self.norm1(x)
x = self.relu(x)
outs.append(x) # add for encoder
x = self.maxpool(x)
for i, layer_name in enumerate(self.res_layers):
res_layer = getattr(self, layer_name)
x = res_layer(x)
if i in self.out_indices:
outs.append(x)
return tuple(outs)
def train(self):
super(ResNet, self).train()
self._freeze_stages()
if self.norm_eval:
for layer in self.sublayers():
# trick: eval have effect on BatchNorm only
if isinstance(layer, nn.BatchNorm):
layer.eval()
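if __name__ == '__main__':
    # Smoke-test sketch (added): build a ResNet-18 backbone and run a dummy forward pass.
    # Assumes the local `utils` module (build_conv_layer / build_norm_layer / Sequential) is
    # importable and that a Paddle 1.x dygraph environment is available.
    import numpy as np
    with fluid.dygraph.guard():
        model = ResNet(depth=18, frozen_stages=-1, norm_eval=False)
        dummy = fluid.dygraph.to_variable(np.random.rand(1, 3, 224, 224).astype('float32'))
        feats = model(dummy)
        print([f.shape for f in feats])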
|
VIS-VAR/LGSC-for-FAS
|
models/resnet.py
|
resnet.py
|
py
| 10,152 |
python
|
en
|
code
| 223 |
github-code
|
6
|
72947597948
|
from verlib import NormalizedVersion as Ver
import numpy as np
__author__ = "Balaji Sriram"
__version__ = "0.0.1"
__copyright__ = "Copyright 2018"
__license__ = "GPL"
__maintainer__ = "Balaji Sriram"
__email__ = "[email protected]"
__status__ = "Production"
class Criterion(object):
def __init__(self, name='Unknown'):
self.name = name
self.ver = Ver('0.0.1')
def __repr__(self):
return "Criterion object"
def check_criterion(self, **kwargs):
return False
class NumTrialsDoneCriterion(Criterion):
"""
NUMTRIALDONECRITERION - graduate after 'n' trials are done. Note: because it works on compiled_record and because
compiled_records are created after checking for graduation, current trial's data will not be available before
checking for graduation.
"""
def __init__(self, num_trials=100, num_trials_mode='global', name='Unknown'):
super(NumTrialsDoneCriterion, self).__init__(name)
self.ver = Ver('0.0.1')
self.num_trials = num_trials
self.num_trials_mode = num_trials_mode
def __repr__(self):
return "NumTrialsDoneCriterion object, n:%d mode:%s", (self.num_trials, self.num_trials_mode)
def check_criterion(self, compiled_record, trial_record, **kwargs):
# trial_number = np.append(np.asarray(compiled_record['trial_number']),np.asarray(trial_record['trial_number']))
# current_step = np.append(np.asarray(compiled_record['current_step']),np.asarray(trial_record['current_step']))
# protocol_name = np.append(np.asarray(compiled_record['protocol_name']),np.asarray(trial_record['protocol_name']))
# protocol_ver = np.append(np.asarray(compiled_record['protocol_version_number']),np.asarray(trial_record['protocol_version_number']))
trial_number = np.asarray(compiled_record['trial_number'])
current_step = np.asarray(compiled_record['current_step'])
protocol_name = np.asarray(compiled_record['protocol_name'])
protocol_ver = np.asarray(compiled_record['protocol_version_number'])
# filter out trial_numbers for current protocol_name and protocol_ver
current_step = current_step[np.bitwise_and(protocol_name==protocol_name[-1],protocol_ver==protocol_ver[-1])]
trial_number = trial_number[np.bitwise_and(protocol_name==protocol_name[-1],protocol_ver==protocol_ver[-1])]
# print('current_step:',current_step)
# print('current_step[-1]:',trial_record['current_step'])
# print('current_step==current_step[-1]',current_step==trial_record['current_step'])
# filter out trial_numbers where step==current_step
# print('trial_number::',trial_number)
temp = trial_number[current_step==trial_record['current_step']]
# print('temp_pre:',temp)
# print(np.asarray([-1]))
# print(np.asarray([trial_record['trial_number']]))
temp = np.append(np.append(np.asarray([-1]),temp),np.asarray([trial_record['trial_number']]))
# print('temp::',temp)
if self.num_trials_mode == 'consecutive':
jumps = np.array(np.where(np.diff(temp)!=1)) # jumps in trial number
try:
tr_for_current_sequence = temp[jumps[0,-1]+1]
nT = trial_record['trial_number'] - tr_for_current_sequence +1
except IndexError:
nT = 0
else: # 'global'
nT = np.sum(current_step==current_step[-1])
# print('nT::',nT)
if nT >= self.num_trials:
graduate = True
else:
graduate = False
print("NUMTRIALSDONECRITERION:CHECK_CRITERION::graduate=%s, nT=%d" % (graduate, nT))
return graduate
class PerformanceCriterion(Criterion):
def __init__(self, pct_correct=0.8, num_trials=200, num_trials_mode='global', name='Unknown'):
super(PerformanceCriterion, self).__init__(name)
self.ver = Ver('0.0.1')
self.pct_correct = pct_correct
self.num_trials = num_trials
self.num_trials_mode = num_trials_mode
def __repr__(self):
return "PerformanceCriterion object, (%s in %s trials, mode:%s)", (self.pct_correct, self.num_trials, self.num_trials_mode)
def check_criterion(self, compiled_record, trial_record, **kwargs):
trial_number = np.asarray(compiled_record['trial_number'])
current_step = np.asarray(compiled_record['current_step'])
correct = np.asarray(compiled_record['correct'])
protocol_name = np.asarray(compiled_record['protocol_name'])
protocol_ver = np.asarray(compiled_record['protocol_version_number'])
# filter out trial_numbers for current protocol_name and protocol_ver
current_step = current_step[np.bitwise_and(protocol_name==protocol_name[-1],protocol_ver==protocol_ver[-1])]
trial_number = trial_number[np.bitwise_and(protocol_name==protocol_name[-1],protocol_ver==protocol_ver[-1])]
correct = correct[np.bitwise_and(protocol_name==protocol_name[-1],protocol_ver==protocol_ver[-1])]
if self.num_trials_mode == 'consecutive':
jumps = np.where(np.diff(trial_number)!=1) # jumps in trial number
            if jumps[0].size == 0:
                which_trials = trial_number
            else:
                which_trials = trial_number[jumps[0][-1]:] # from the last jump
else:
which_trials = trial_number
if np.size(which_trials)<self.num_trials:
graduate = False # dont graduate if the number of trials less than num required
else:
which_trials = which_trials[-self.num_trials:]
filter = np.isin(trial_number,which_trials)
correct = correct[filter]
perf = np.sum(correct)/np.size(correct)
if perf >self.pct_correct:
graduate = True
else:
graduate = False
return graduate
class RateCriterion(Criterion):
def __init__(self, trials_per_minute=10, consecutive_minutes=5, name='Unknown'):
        super(RateCriterion, self).__init__(name)
self.ver = Ver('0.0.1')
self.trials_per_minute = trials_per_minute
self.consecutive_minutes = consecutive_minutes
def __repr__(self):
return "RateCriterion object, (%s trials/minute for %s minutes)", (self.trials_per_minute, self.consecutive_minutes)
def check_criterion(self, compiled_record, trial_record, station, **kwargs):
        graduate = False
        raise NotImplementedError()
        return graduate
class RepeatIndefinitely(Criterion):
def __init__(self, name='Unknown'):
self.ver = Ver('0.0.1')
super(RepeatIndefinitely, self).__init__(name)
def __repr__(self):
return "RepeatIndefinitely object"
def check_criterion(self, **kwargs):
return False
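if __name__ == '__main__':
    # Minimal sketch (added): thirty consecutive trials on the same step of the same protocol
    # should satisfy a 25-trial NumTrialsDoneCriterion. The record fields mirror the keys that
    # the check_criterion methods above read from compiled_record and trial_record.
    crit = NumTrialsDoneCriterion(num_trials=25, num_trials_mode='consecutive', name='demo')
    compiled_record = {
        'trial_number': list(range(1, 31)),
        'current_step': [2] * 30,
        'protocol_name': ['demo_protocol'] * 30,
        'protocol_version_number': [1] * 30,
    }
    trial_record = {'trial_number': 31, 'current_step': 2}
    print(crit.check_criterion(compiled_record, trial_record)) # expected: True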
|
balajisriram/bcore
|
bcore/classes/Criterion.py
|
Criterion.py
|
py
| 6,860 |
python
|
en
|
code
| 1 |
github-code
|
6
|
15156725753
|
import os
import math
import numpy as np
from tqdm import tqdm
import pickle
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import functional as F
from models import l2norm
## Memory
class Memory(nn.Module):
def __init__(self, mem_size=500000, feat_dim=256, margin=1, topk=1000, update_rate=0.1):
super(Memory, self).__init__()
self.mem_size = mem_size
self.feat_dim = feat_dim
self.Mem = nn.Parameter(torch.zeros(mem_size, feat_dim))
self.Ages = nn.Parameter(torch.zeros(mem_size, 1))
self.topk = topk
self.margin = margin
self.update_rate = update_rate
# At this time, we don't train mem by gradient descent
self.Mem.requires_grad = False
self.Ages.requires_grad = False
def update_mem(self, x, labels):
with torch.no_grad():
self.Mem[labels] = l2norm(self.update_rate * x.data + (1 - self.update_rate) * self.Mem[labels])
def update_mem_with_ages(self, x, labels):
with torch.no_grad():
self.Ages[labels] += 1.
self.Mem[labels] = l2norm(x.data + self.Mem[labels] * self.Ages[labels])
def search_l2(self, x, topk):
batch_size = x.size(0)
distmat = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(batch_size, self.mem_size) + \
torch.pow(self.Mem, 2).sum(dim=1, keepdim=True).expand(self.mem_size, batch_size).t()
distmat.addmm_(x, self.Mem.t(), beta=1, alpha=-2)
distances, indices = torch.topk(distmat, topk, largest=False)
return distances, indices
def compute_l2loss(self, x, labels):
""" L2 Distance
Args:
x: feature matrix with shape (batch_size, feat_dim).
labels: ground truth labels with shape (batch_size).
"""
batch_size = x.size(0)
distmat = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(batch_size, self.mem_size) + \
torch.pow(self.Mem, 2).sum(dim=1, keepdim=True).expand(self.mem_size, batch_size).t()
distmat.addmm_(x, self.Mem.t(), beta=1, alpha=-2)
classes = torch.arange(self.mem_size).long()
if labels.is_cuda:
classes = classes.cuda()
labels = labels.unsqueeze(1).expand(batch_size, self.mem_size)
mask = labels.eq(classes.expand(batch_size, self.mem_size))
dist1 = distmat * mask.float()
min_loss = dist1.clamp(min=1e-12, max=1e+12).sum(1)
dist2 = distmat * (1.0 - mask.float())
max_loss = torch.topk(dist2, self.topk, dim=1, largest=False)[0].sum(1) / (self.topk - 1)
loss = F.relu(min_loss - max_loss + self.margin)
return loss.mean(), min_loss.mean(), max_loss.mean()
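if __name__ == '__main__':
    # Smoke-test sketch (added): random normalized features against a small memory bank.
    # Assumes `l2norm` from the local `models` module L2-normalizes along dim 1, as its use in
    # update_mem above suggests; topk is reduced so it fits the smaller memory size.
    mem = Memory(mem_size=100, feat_dim=16, topk=10)
    feats = l2norm(torch.randn(4, 16))
    labels = torch.tensor([3, 17, 42, 99])
    loss, pos, neg = mem.compute_l2loss(feats, labels)
    mem.update_mem(feats, labels)
    print(loss.item(), pos.item(), neg.item())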
|
toanhvu/learning-to-remember-beauty-products
|
memory.py
|
memory.py
|
py
| 2,793 |
python
|
en
|
code
| 1 |
github-code
|
6
|
42073775086
|
"""
Project 2A:
Write a program that takes as inputs the hourly wage, total regular hours, and total overtime hours and displays an employee's total weekly pay.
Overtime pay equals the total overtime hours multiplied by 1.5 times the hourly wage.
An employee's total weekly pay equals the hourly wage multiplied by the total number of regular hours plus any overtime pay.
"""
""" This generally looks good! I would split the weekly_pay function into three different ones: get_hourly_wage, get_regular_hours, and get_overtime_hours in order to make the program flow a bit more readable. The the actual program would look something like
hourly_wage = get_hourly_wage()
regular_hours = get_regular_hours()
overtime_hours = get_overtime_hours()
weekly_pay = hourly_wage * (regular_hours + 1.5 * overtime_hours)
print(f"\nTotal Weekly Pay: ${weekly_pay}")
input('')
Also, you end up doing basically the same thing (get an input, check that it's a number, check that it's non-negative) quite a few times, both in this program and in the others. It might be a good idea to write a general function to abstract this process. Something like
def get_non_negative_number(prompt: str) -> float:
    while True:
        input_result = input(prompt)
        if not is_valid_number(input_result) or float(input_result) < 0:
            print("Not valid! (or something)")
            continue
        return float(input_result)
"""
def is_valid_number(num: str):
try:
float(num)
return True
except ValueError:
return False
def Weekly_Pay():
while True:
hourly_wage = input("Please enter your hourly wage: ")
if not is_valid_number(hourly_wage):
print("Invalid character(s) detected.")
elif float(hourly_wage) < 0:
print("Your hourly wage must be a positive number.")
else:
hourly_wage = float(hourly_wage)
break
while True:
total_regular_hours = input("Please enter your total regular hours: ")
if not is_valid_number(total_regular_hours):
print("Invalid character(s) detected.")
elif float(total_regular_hours) < 0:
print("Your total regular hours must be a positive number.")
else:
total_regular_hours = float(total_regular_hours)
break
while True:
total_overtime_hours = input("Please enter your total overtime hours: ")
if not is_valid_number(total_overtime_hours):
print("Invalid character(s) detected.")
elif float(total_overtime_hours) < 0:
print("Your total overtime hours must be a positive number.")
else:
total_overtime_hours = float(total_overtime_hours)
break
overtime_pay = total_overtime_hours * (1.5 * hourly_wage)
total_weekly_pay = (hourly_wage * total_regular_hours) + overtime_pay
return total_weekly_pay
weekly_pay = Weekly_Pay()
weekly_pay = "{:,}".format(round(weekly_pay,2))
print(f"\nTotal Weekly Pay: ${weekly_pay}")
input('')
|
KennethHorsman/Eric-s-Revisions
|
2A-revised.py
|
2A-revised.py
|
py
| 3,105 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72000465789
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import datetime
import locale
import os
from tqdm import tqdm
from collections import *
from typing import Optional,List,Tuple
from trident.backend.common import *
from trident.backend.pytorch_ops import *
from trident.backend.pytorch_backend import to_tensor, get_device, load,fix_layer,set_device
from trident.data.utils import download_model_from_google_drive,download_file_from_google_drive
from trident.layers.pytorch_layers import *
from trident import context
from trident.context import make_dir_if_need,split_path,sanitize_path
ctx=context._context()
__all__ = ['Word2Vec','ChineseWord2Vec']
_trident_dir = get_trident_dir()
dirname = os.path.join(_trident_dir, 'models')
if not os.path.exists(dirname):
try:
os.makedirs(dirname)
except OSError:
# Except permission denied and potential race conditions
# in multi-threaded environments.
pass
download_path= os.path.join(_trident_dir, 'download','vocabs_tw.txt')
make_dir_if_need(download_path)
class Word2Vec(Embedding):
"""中文詞向量
繼承Embedding Layer
"""
def __init__(self, pretrained=False, embedding_dim: Optional[int] = None, num_embeddings: Optional[int] = None, vocabs: Optional[List[str]] = None,
padding_idx: Optional[int] = None,
max_norm: Optional[float] = None, norm_type: float = 2., scale_grad_by_freq: bool = False,
sparse: bool = False, _weight: Optional[Tensor] = None, filter_index=-1, keep_output: bool = False, name: Optional[str] = None) -> None:
"""
        Py Word2vec structure
"""
super().__init__(num_embeddings=num_embeddings, embedding_dim=embedding_dim, max_norm=max_norm, norm_type=norm_type, scale_grad_by_freq=scale_grad_by_freq, sparse=sparse,
_weight=_weight,padding_idx=padding_idx, keep_output=keep_output, name=name)
self.pretrained=pretrained
self.filter_index=filter_index
self.locale =ctx.locale
print('locale:', self.locale)
self._vocabs = OrderedDict()
if vocabs is not None:
for k in range(len(vocabs)):
self._vocabs[vocabs[k]] = k
download_file_from_google_drive(file_id='16yDlJJ4-O9pHF-ZbXy7XPZZk6vo3aw4e', dirname=os.path.join(_trident_dir, 'download'), filename='vocabs_tw.txt')
@property
def vocabs(self):
        # vocabulary table
return self._vocabs
def word2idx(self, word: str):
        # word -> index (handles traditional/simplified Chinese conversion based on locale)
if self.locale != 'zh_cn' and word in self.tw2cn:
word = self.tw2cn[word]
if word in self._vocabs:
return self._vocabs[word]
else:
return None
def idx2word(self, index: int):
        # index -> word (handles traditional/simplified Chinese conversion based on locale)
if index < len(self._vocabs):
word = self._vocabs.key_list[index]
if self.locale != 'zh_cn' and word in self.cn2tw:
word = self.cn2tw[word]
return word
else:
return None
@classmethod
def load(cls):
        # load the pretrained model from Google Drive
st = datetime.datetime.now()
set_device('cpu')
dirname = os.path.join(get_trident_dir(), 'models')
download_model_from_google_drive('13XZPWh8QhEsC8EdIp1niLtZz0ipatSGC', dirname, 'word2vec_chinese.pth')
recovery_model = load(os.path.join(dirname, 'word2vec_chinese.pth'))
recovery_weight=recovery_model.state_dict()['weight']
shp=int_shape(recovery_weight)
v = cls(pretrained=True,num_embeddings=shp[0], embedding_dim=shp[-1],_weight=recovery_weight,name='word2vec_chinese')
v._vocabs=copy.deepcopy(recovery_model._vocabs)
v.tw2cn =copy.deepcopy(recovery_model.tw2cn)
v.cn2tw = copy.deepcopy(recovery_model.cn2tw)
del recovery_model
v.locale =ctx.locale
v.to(get_device())
et = datetime.datetime.now()
print('total loading time:{0}'.format(et - st))
return v
def find_similar(self, reprt: (str, Tensor), n: int = 10, ignore_indexes=None):
        # find the nearest words in the embedding space, given a word or a vector
reprt_idx = None
if ignore_indexes is None:
ignore_indexes = []
if isinstance(reprt, str):
reprt_idx = self.word2idx(reprt)
ignore_indexes.append(reprt_idx)
reprt = self.weight[reprt_idx].expand_dims(0) if reprt in self._vocabs else None
if is_tensor(reprt):
correlate = element_cosine_distance(reprt, self.weight)[0]
sorted_idxes = argsort(correlate, descending=True)
sorted_idxes = sorted_idxes[:n + len(ignore_indexes)]
sorted_idxes = to_tensor([idx for idx in sorted_idxes if idx.item() not in ignore_indexes]).long()
probs = to_list(correlate[sorted_idxes])[:n]
words = [self.idx2word(idx.item()) for idx in sorted_idxes][:n]
return OrderedDict(zip(words, probs))
else:
raise ValueError('Valid reprt should be a word or a tensor .')
def analogy(self, reprt1: (str, Tensor, list), reprt2: (str, Tensor, list), reprt3: (str, Tensor, list), n: int = 10):
        # analogy (man is to woman as king is to queen)
reprt1_idx = None
reprt2_idx = None
reprt3_idx = None
reprt1_arr = None
reprt2_arr = None
reprt3_arr = None
exclude_list = []
if isinstance(reprt1, str):
reprt1_idx = self.word2idx(reprt1)
exclude_list.append(reprt1_idx)
reprt1_arr = self.weight[reprt1_idx].expand_dims(0) if reprt1_idx is not None else None
elif isinstance(reprt1, Tensor):
reprt1_arr = reprt1
elif isinstance(reprt1, list):
if isinstance(reprt1[0], str):
reprt1_arr = self.get_words_centroid(*reprt1)
for item in reprt1:
exclude_list.append(self.word2idx(item))
if isinstance(reprt2, str):
reprt2_idx = self.word2idx(reprt2)
exclude_list.append(reprt2_idx)
reprt2_arr = self.weight[reprt2_idx].expand_dims(0) if reprt2_idx is not None else None
elif isinstance(reprt2, Tensor):
reprt2_arr = reprt2
elif isinstance(reprt2, list):
if isinstance(reprt2[0], str):
reprt2_arr = self.get_words_centroid(*reprt2)
for item in reprt2:
exclude_list.append(self.word2idx(item))
if isinstance(reprt3, str):
reprt3_idx = self.word2idx(reprt3)
exclude_list.append(reprt3_idx)
reprt3_arr = self.weight[reprt3_idx].expand_dims(0) if reprt3_idx is not None else None
elif isinstance(reprt3, Tensor):
reprt3_arr = reprt3
elif isinstance(reprt3, list):
if isinstance(reprt3[0], str):
reprt3_arr = self.get_words_centroid(*reprt3)
for item in reprt3:
exclude_list.append(self.word2idx(item))
if reprt1_arr is not None and reprt2_arr is not None and reprt3_arr is not None:
reprt4 = reprt2_arr - reprt1_arr + reprt3_arr
return self.find_similar(reprt4, n=n, ignore_indexes=exclude_list)
else:
not_find = []
if reprt1_arr is None:
not_find.append(reprt1)
if reprt2_arr is None:
not_find.append(reprt2)
if reprt3_arr is None:
not_find.append(reprt3)
raise ValueError(' ,'.join(not_find) + ' was not in vocabs.')
def get_words_centroid(self, *args):
        # mean (centroid) vector of several words
centroid = 0
for arg in args:
reprt_idx = self.word2idx(arg)
if reprt_idx is not None:
centroid += self.weight[reprt_idx].expand_dims(0) if reprt_idx is not None else None
return centroid / len(args)
def get_words_vector(self, word):
        # vector of a single word
reprt_idx = self.word2idx(word)
if reprt_idx is not None:
return self.weight[reprt_idx].expand_dims(0) if reprt_idx is not None else None
return None
def get_enumerators(self, *args, negative_case=None, n=10, exclude_samples=True):
        # words that are closest overall to the given examples but farthest from the negative examples (negative_case)
positive_correlate = 0
negative_correlate = 0
exclude_list = []
for arg in args:
positive_correlate += element_cosine_distance(self.get_words_vector(arg), self.weight)[0]
correlate = positive_correlate
if negative_case is None:
pass
else:
if isinstance(negative_case, str):
negative_case = [negative_case]
if isinstance(negative_case, (list, tuple)):
for arg in negative_case:
negative_correlate += element_cosine_distance(self.get_words_vector(arg), self.weight)[0]
correlate = positive_correlate - negative_correlate
sorted_idxes = argsort(correlate, descending=True)
sorted_idxes = sorted_idxes[:n + len(exclude_list)]
sorted_idxes = to_tensor([idx for idx in sorted_idxes if idx.item() not in exclude_list]).long()
probs = to_list(correlate[sorted_idxes])[:n]
words = [self.idx2word(idx.item()) for idx in sorted_idxes][:n]
return OrderedDict(zip(words, probs))
def ChineseWord2Vec(pretrained=True, freeze_features=True, **kwargs):
if pretrained==True:
model=Word2Vec.load()
if freeze_features:
model.trainable=False
return model
else:
return Word2Vec()
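if __name__ == '__main__':
    # Usage sketch (added): the first call downloads the pretrained weights and vocabulary from
    # Google Drive, so network access is required; the query words are illustrative only.
    w2v = ChineseWord2Vec(pretrained=True)
    print(w2v.find_similar('音樂', n=5))
    print(w2v.analogy('男人', '女人', '國王', n=3))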
|
AllanYiin/trident
|
trident/models/pytorch_embedded.py
|
pytorch_embedded.py
|
py
| 9,944 |
python
|
en
|
code
| 74 |
github-code
|
6
|
32203126633
|
import datetime
import random
import yaml
from requests import get
def compute_median(lst):
"""
Вычисление медианты списка
:param lst: входящий список значений
:return: медиана
"""
quotient, remainder = divmod(len(lst), 2)
return lst[quotient] if remainder else sum(sorted(lst)[quotient - 1:quotient + 1]) / 2
def compute_avg(lst):
"""
Вычисление среднего арифметические значения списка
:param lst: входящий список значений
:return: среднее арифметические значение
"""
return sum(lst) / len(lst)
def usage_type(avg, median):
"""
Вычисление типа использования
:param avg: среднее занечение метрик
:param median: медианное значение метрик
:return: возврат значения типа использования
"""
if (avg < 1.25 * median) and (avg > 0.75 * median):
return "стабильна"
elif avg > 1.25 * median:
return "скачки"
else:
return "снижения"
def intensity(median):
"""
Вычисление интенсивности использования
:param median: медианное значение метрик
:return: возврат значения интенсивности
"""
if (0 < median) and (median <= 30):
return "низкая"
if (30 < median) and (median <= 60):
return "умеренная"
if (60 < median) and (median <= 90):
return "высокая"
return "запредельная"
def decision(usage, intens):
"""
Принятие решения о дальнецшем использовании ресурса
:param usage: тип использования
:param intens: интенсивности использованя
:return: возврат решения
"""
if intens == "низкая":
return "отказаться"
if intens == "запредельная":
return "усилить"
if intens == "умеренная" and usage in ("стабильна", "скачки"):
return "отсавить"
if intens == "высокая" and usage in ("снижения", "стабильна"):
return "отсавить"
if usage == "снижения" and intens == "умеренная":
return "отказаться"
if usage == "скачки" and intens == "высокая":
return "усилить"
def obj_creator(data):
"""
Генератор обьекта заданной структуры из сырых данных
:param data: сырые данные
:return: Обект заданной тсруктуры
"""
final_data = {}
for msg in data:
team_name, project, resource, due, resource_metric = msg
a = {"time": due, "value": int(resource_metric)}
final_data.setdefault(team_name, {}).setdefault(project, {}).setdefault(resource, []).append(a)
return final_data
def get_data_from_http(url):
"""
Генерация списка метрик по HTTP
:param url: адресс веб сервера источника метрик
:return: лист метрик
"""
rnd_seed = random.randint(1, 3)
team_raw = get(url + f"/monitoring/infrastructure/using/summary/{rnd_seed}").text.split("$")
final_list = []
for team_raw_data in team_raw:
team_name, team_data = team_raw_data.split("|")
team_data = team_data.split(";")
for team_data_split_data in team_data:
project, resource, due, resource_metric = team_data_split_data[1:-1].split(",")
yr, mt, dy = due[0:10].split("-")
date = datetime.date(year=int(yr), month=int(mt), day=int(dy))
final_list.append((team_name, project, resource, date, int(resource_metric)))
return final_list
if __name__ == '__main__':
print("start")
full_msg = get_data_from_http("http://127.0.0.1:21122/")
final_data = obj_creator(full_msg)
yaml_price = get("http://127.0.0.1:21122/monitoring/infrastructure/using/prices").text
price_full = yaml.safe_load(yaml_price)["values"]
print("Ресурс|Значение|среднее|медиана|использование|интенсивность|решение|дата последний метрики|цена")
for name, prj in final_data.items():
print(f"команда {name}")
for prj_name, res_values in prj.items():
summ = 0
for res, values in res_values.items():
value_list = []
time = []
for value in values:
value_list.append(value["value"])
time.append(value["time"])
last_time = time[-1] + datetime.timedelta(14)
median = compute_median(value_list)
avg = compute_avg(value_list)
usage = usage_type(avg, median)
intens = intensity(median)
final_decision = decision(usage, intens)
cost = price_full[prj_name]
summ += int(cost[res])
print(f"{prj_name} | {res} | {avg} | {median} | {usage} | {intens} | {final_decision} | {last_time} | {cost[res]}")
print(f"Цена за ресурс = {summ}")
|
zombym/devops-tasks
|
5.5.1.py
|
5.5.1.py
|
py
| 5,538 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
25359325465
|
# -*- coding: utf-8 -*-
from __future__ import division
import scrapy
from scrapy import Request
# from street_food.items import StreetFoodItem, StreetFoodDatTimeItem
from street_food.items import StreetFoodDatTimeItem
from street_food.spiders import tools
import json
from urllib import urlopen
# import random
from street_food.tools import basic_tools
class GetFoodOffTheGrid(scrapy.Spider):
name = "offthegrid"
allowed_domains = ["offthegridmarkets.com", "offthegrid.com"]
start_urls = [
'https://offthegrid.com/otg-api/passthrough/markets.json/?latitude=37.7749295&longitude=-122.41941550000001&sort-order=distance-asc'
]
custom_settings = {
"ITEM_PIPELINES": {
"street_food.pipelines.ApiUploader": 10,
}
}
def parse(self, response):
''' Parse list of markets '''
markets = json.loads(response.text)
market_url = "https://offthegrid.com/otg-api/passthrough/markets/{}.json/"
# Get list of markets in San Francisco.
        for market in markets["Markets"]:
market = market['Market']
market_id = market['id']
yield Request(market_url.format(market_id),
callback=self.parse_market)
def parse_market(self, response):
''' Parse a market '''
# load Maize Vendors.
maizeresp = urlopen('http://yumbli.herokuapp.com/api/v1/allkitchens/?format=json')
vendors = json.loads(maizeresp.read().decode('utf8'))
maizevendors = {}
for v in vendors:
maizevendors[v['name'].lower()] = v['id']
item = StreetFoodDatTimeItem()
market = json.loads(response.text)
market_detail = market["MarketDetail"]["Market"]["Market"]
market_events = market["MarketDetail"]["Events"]
# Market Address.
market_address = market_detail["address"].strip()
market_city = market_detail["city"].strip()
full_address = "{} {}".format(market_address, market_city)
# Market location.
market_latitude = market_detail['latitude']
market_longitude = market_detail['longitude']
# geolocation = "{} {}".format(market_latitude, market_longitude)
# Add data to item.
item['address'] = full_address
# Parse market events.
for event in market_events:
start_datetime, end_datetime = tools.get_start_end_datetime(event['Event'])
item['start_datetime'] = start_datetime
item['end_datetime'] = end_datetime
# Parse vendors of event.
for vendor in event['Vendors']:
vendor_name = vendor['name']
item['VendorName'] = vendor_name
# randlongpos = random.randint(-150, 150) / 1000000
# randlatpos = random.randint(-200, 200) / 1000000
# item['latitude'] = abs(float(market_latitude)) + randlatpos
# abs then *-1 b/c off the grid has some wrong values
# item['longitude'] = abs(float(market_longitude))*-1 + randlongpos
item['latitude'] = basic_tools.mix_location(market_latitude)
item['longitude'] = basic_tools.mix_location(market_longitude)
if vendor_name and vendor_name.lower() in maizevendors.keys():
item['maize_status'] = 'found'
item['maize_id'] = maizevendors[vendor_name.lower()]
else:
item['maize_status'] = 'not found'
item['maize_id'] = 'n/a'
yield item
|
kirimaks/street-food-scraper
|
street_food/street_food/spiders/offthegrid.py
|
offthegrid.py
|
py
| 3,638 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36961545751
|
#!/usr/bin/env python
# coding: utf-8
# ## App Creation
#
# First, import all necessary libraries:
# In[1]:
#App Libraries
import json
import dash
from dash import html, dcc, Input, Output, State, dash_table
import dash_bootstrap_components as dbc
#Distributions
from scipy.stats import gamma
from scipy.stats import lognorm
from scipy.stats import weibull_min
#Calculation libraries
import math
import pandas as pd
import numpy as np
import ast
import statsmodels.api as sm
import matplotlib.pyplot as plt
import scipy.stats as stats
from scipy.optimize import minimize
from scipy.integrate import odeint
from scipy.optimize import fsolve
#from sympy import symbols, Eq, solve
#Plot libraries
import plotly.express as px
import plotly.graph_objs as go
import plotly.figure_factory as ff
from plotly.subplots import make_subplots
# In[2]:
#==================================================================#
# CREATE GENERATION INTERVAL DATA #
#==================================================================#
def create_gi(pop_mean, sd, m):
'''
    pop_mean: population mean of the generation interval
    sd: population standard deviation of the generation interval
    m: number of observations to draw from each distribution
'''
#Set seed for consistency:
np.random.seed(1234)
#=========GAMMA============
gamma_shape = (pop_mean**2)/(sd**2)
gamma_scale = (sd**2)/(pop_mean)
gi_gamma_obs = np.random.gamma(gamma_shape, gamma_scale, m)
#=========LOGNORMAL============
log_mean = pop_mean
log_sd = sd
log_var = log_sd**2
norm_mean = np.log(log_mean)-0.5*np.log((log_sd/log_mean)**2+1) #scale=e^norm_mean
norm_var = np.log((log_sd/log_mean)**2+1)
norm_sd = np.sqrt(norm_var) # equivalent to the shape
gi_lognorm_obs = lognorm.rvs(s=norm_sd, scale=math.exp(norm_mean), size=m)
#=========WEIBULL============
weibull_mean = pop_mean
weibull_std = sd
def G(k):
return math.gamma(1+2/k)/(math.gamma(1+1/k)**2)
def f(k,b):
return G(k)-b #function solves for k
b = (weibull_std**2)/(weibull_mean**2)+1
init = 1 # The starting estimate for the root of f(x) = 0.
weibull_shape = fsolve(f,init,args=(b))[0]
weibull_scale = weibull_mean/math.gamma(1+1/weibull_shape)
gi_weibull_obs = weibull_min.rvs(weibull_shape,scale=weibull_scale, size=m)
return gi_gamma_obs, gi_lognorm_obs, gi_weibull_obs
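#==================================================================#
#          SANITY CHECK FOR SIMULATED GI DATA (added sketch)       #
#==================================================================#
def check_gi_moments(pop_mean=5.0, sd=1.9, m=100000):
    '''
    Illustrative helper, not called by the app: the sample mean and standard deviation of each
    simulated generation-interval data set should be close to the requested pop_mean and sd.
    '''
    for name, obs in zip(["gamma", "lognorm", "weibull"], create_gi(pop_mean, sd, m)):
        print(name, round(float(np.mean(obs)), 3), round(float(np.std(obs)), 3))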
#==================================================================#
# VISUALIZE GENERATION INTERVAL DATA #
#==================================================================#
def gi_visualize(gi_gamma, gi_lognorm, gi_weibull):
color=["skyblue","darkorange","green"]
fig = make_subplots(rows=2, cols=2,)
fig.append_trace(go.Histogram(x=gi_gamma, histnorm='percent', name='Gamma',
marker_color=color[0], opacity=1,),row=1,col=1)
fig.append_trace(go.Histogram(x=gi_lognorm, histnorm='percent', name='Lognorm',
marker_color=color[1], opacity=1), row=1,col=2)
fig.append_trace(go.Histogram(x=gi_weibull, histnorm='percent', name='Weibull',
marker_color=color[2], opacity=1), row=2,col=1)
group_labels = ['Gamma Curve', 'Lognormal Curve', 'Weibull Curve']
hist_data = [gi_gamma, gi_lognorm, gi_weibull]
distplfig = ff.create_distplot(hist_data, group_labels, colors=color,
bin_size=.2, show_hist=False, show_rug=False)
for k in range(len(distplfig.data)):
fig.append_trace(distplfig.data[k],
row=2, col=2
)
fig.update_layout(barmode='overlay')
return(fig)
#==================================================================#
# OBJECTIVE FUNCTION #
#==================================================================#
def objective(w_lamb,tau):
'''
Objective: To maximize the log likelihood of W(u) (ie min(-W(u)))
Inputs:
w_lamb= weights [w_1, w_2,...,w_n] and lambda in one list
tau = set of times since infection [tau_1, tau_2,...,tau_m]
Outputs:
objective: value (-W(u))
'''
w=w_lamb[:-1]
lamb=w_lamb[-1]
n=len(w)
objective = 0
for tau_i in tau: #FOR EACH TIME SINCE EXPOSURE
wlog_val = w[0]
for j in range(1,n): #CALCULATE TERMS WITHIN LOG
wlog_val = wlog_val + (w[j]*(((lamb*tau_i)**j)/math.factorial(j)))
objective = objective + (math.log(wlog_val) - (lamb*tau_i) + math.log(lamb))
return(-1 *objective)
#==================================================================#
# CONSTRAINT FUNCTION #
#==================================================================#
def constraint(w_lamb):
'''
Constraint 1: Weights must sum to 1
Inputs:
w: list of weights
Outputs:
constraint1: value of 1 - sum of the weights
'''
w=w_lamb[:-1]
n = len(w)
constraint1 = 1
for j in range(n):
constraint1 = constraint1 - w[j]
return constraint1
#==================================================================#
# CALCULATE WEIGHTS, HOLDING PERIOD, RATES #
#==================================================================#
def solver(tau, R_0, n, dist_type):
'''
The following function returns a list of weights given the 5 inputs.
Inputs:
tau: list of generation intervals times (in days)
R_0: basic reproduction number
n: number of infectious comparments
Output:
w_val: list of weights (based on minimization)
lambda: lambda value (based on minimization)
b_val: list of betas (based on minimization)
'''
wl_0 = np.zeros(n+1)
if dist_type == "gamma" or dist_type == "lognorm":
shape = (np.mean(tau)**2)/np.var(tau) #shape of the disribution
w2 = shape - math.trunc(shape) #expected weight of "second" compartment
w1 = 1 - w2 #expected weight of "first" compartment
        comps = [math.trunc(shape)-1, math.trunc(shape)] #indices of the two compartments that receive the nonzero initial weights
weights = [w1, w2]
for c, w in zip(comps,weights):
wl_0[c] = w
wl_0[-1]= np.mean(tau)/np.var(tau)
#elif dist_type == "lognorm":
# for i in range(n):
# wl_0[i] = 1/n
# log_mean = np.mean(tau)
# log_std = np.std(tau)
# norm_mean = np.log(log_mean)-0.5*np.log((log_std/log_mean)**2+1)
# wl_0[-1]= norm_mean
elif dist_type == "weibull":
for i in range(n):
wl_0[i] = 1/n
wl_0[-1]= np.std(tau)
b = (0, 1)
bnds=()
for i in range(n):
bnds = bnds + (b,)
b_lamb = (0.00000000001, None)
bnds = bnds + (b_lamb,)
#specify constraints
con1 = {'type': 'eq', 'fun': constraint}
cons = ([con1])
#optimize
    solution = minimize(objective, wl_0, method='SLSQP', args=(tau,), bounds=bnds, constraints=cons)
#get weights
w_val = solution.x[:-1]
lamb = solution.x[-1]
b_val = [weight*lamb*R_0 for weight in w_val]
return(w_val, lamb, b_val)
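#==================================================================#
#          SOLVER USAGE SKETCH (added, not called by the app)      #
#==================================================================#
def solver_demo():
    '''
    Illustrative only: fit the compartment weights to simulated gamma-distributed generation
    intervals with R_0 = 2.5. Note that n must exceed the truncated gamma shape
    (mean**2 / variance) of the data, otherwise the weight seeding in solver() falls outside
    the first n compartments.
    '''
    gi_gamma, _, _ = create_gi(pop_mean=5.0, sd=1.9, m=1000)
    w_val, lamb, b_val = solver(gi_gamma, R_0=2.5, n=10, dist_type="gamma")
    return w_val, lamb, b_val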
#==================================================================#
# OBJECTIVE FUNCTION #
#==================================================================#
def solutions(gi_data,min_n, max_n, R_0, dist_type):
weight = []
lambda_n = []
beta = []
obj = []
for n_val in list(range(min_n, max_n+1)):
if n_val == min_n:
str_p = "Solving: "+ str(dist_type)+ " with R_0="+str(R_0)+" for n in "+ str(min_n)+",...,"+str(max_n)
print(str_p)
w, l, b = solver(gi_data, R_0, n_val, dist_type)
o = objective(list(w)+[l],gi_data)
if n_val == int((max_n+1-min_n)/4+min_n):
print("n=",str(n_val)," is done. 25% Done!")
if n_val == int((max_n+1-min_n)/2+min_n):
print("n=",str(n_val)," is done. Half way there!")
if n_val == int(3*(max_n+1-min_n)/4 + min_n):
print("n=",str(n_val)," is done. 75% Done!")
if n_val == max_n:
print("Done!")
weight.append(w)
lambda_n.append(l)
beta.append(b)
obj.append(o)
return weight, lambda_n, beta, obj
#==================================================================#
# EXPECTED INFECTIOUS CURVE #
#==================================================================#
def beta_u(u_i,beta_vals, lambda_n):
'''
Beta(u): Find transmission rate for every time in u_i
Inputs:
        u_i: list of times since infection (in days)
beta_vals: list of betas (based on minimization)
lambda_n: rate that infected move to the next compartment
Outputs:
        y: transmission rate beta(u) evaluated at each time in u_i
'''
n = len(beta_vals)
y = []
for u in u_i:
transmission=0
for j in range(n):
transmission = transmission + beta_vals[j]*((np.exp(-lambda_n*u)*(lambda_n*u)**j)/math.factorial(j))
y.append(transmission)
return(y)
#==================================================================#
# VISUALIZE EXPECTED INFECTIOUS CURVE #
#==================================================================#
def beta_u_plot(lambda_n, beta_vals):
#create x axis
x = np.linspace(0, 15, 100)
#create df of x, y data
beta_df = pd.DataFrame(x, columns=["x"])
beta_df[str(len(beta_vals))] = [float(b) for b in beta_u(x,beta_vals,lambda_n)]
fig = go.Figure()
fig.add_trace(go.Scatter(x=beta_df.x, y=beta_df.iloc[:, 1]))
#format graph
fig.update_layout(legend_title_text='Compartment')
fig.update_xaxes(title_text='Days', nticks=20)
fig.update_yaxes(title_text='Transmission Rate')
return(fig)
#==================================================================#
# VISUALIZE EXPECTED INFECTIOUS CURVE #
#==================================================================#
def plot_beta_dist(betas, lambdas):
color=["skyblue","darkorange","green"]
dist = ["Gamma", "Lognormal","Weibull"]
count = 0
fig = make_subplots(rows=1, cols=1)
for beta,lamb in zip(betas,lambdas):
data = beta_u_plot(lamb, beta)
fig.add_trace(
go.Scatter(x=data['data'][0]['x'], y=data['data'][0]['y'],
name=dist[count], line_color=color[count],
line=dict(width=3)),
row=1, col=1
)
count+=1
#-----STYLING--------
fig.update_layout(
title="Estimated Infectious Curve of each Distribution", #β(𝜏)
xaxis_title="Duration of Infection (days)",
yaxis_title="Transmission Rate", legend_title="Legend", font=dict(size=14),
legend=dict(yanchor="top", y=0.99, xanchor="right", x=0.99))
return(fig)
#==================================================================#
# SInR(S) MODEL #
#==================================================================#
def SInR(y, t, N, c, h, beta, lambda_n):
'''
Inputs:
y: initial condition vector (S, I, R)
t: time points (in days)
N: total population
c: contact rate
h: waning immunity rate
beta: transmission rate
lambda_n: optimized holding period
Outputs:
pop_status: returns a list of how many people are in each compartment
'''
S = y[0]
I = y[1:-1]
R = y[-1]
npI = np.array(I)
npBeta = np.array(beta)
#Calculate dSdt
dSdt = -S/N * np.sum(npI * npBeta)*c+ h*R
#Calculate dI1dt
dI1dt = S/N* np.sum(npI * npBeta)*c - lambda_n* I[0]
#Calculate dI_dt values from n=2,..,n
dIndt = []
for index in range(len(I)-1):
dIndt.append(lambda_n* I[index] - lambda_n* I[index+1])
#Calculate dRdt
dRdt = lambda_n * I[len(I)-1] - h*R
#Create list of results for S through R
pop_status = [dSdt, dI1dt]
pop_status.extend(dIndt)
pop_status.append(dRdt)
return pop_status
#==================================================================#
# VISUALIZE SInR(S) MODEL #
#==================================================================#
def SInR_plot(y_t0, t, N, c, h, beta, lambda_n):
# Integrate the SIR equations over the time grid, t.
ret = odeint(SInR, y_t0, t, args=(N, c, h, beta, lambda_n))
S = ret.T[0]
I = sum(ret.T[1:-1])
R = ret.T[-1]
fig = go.Figure()
fig.add_trace(go.Scatter(x=t, y=S/N,name="Susceptible"))
fig.add_trace(go.Scatter(x=t, y=I/N,name="Sum of Infected Compartments"))
fig.add_trace(go.Scatter(x=t, y=R/N,name="Recovered"))#
fig.update_layout(legend_title_text='Compartment')
fig.update_xaxes(title_text='Time (Days)')#,nticks=20)
fig.update_yaxes(title_text='Percentage of the Population')
return(fig)
# In[3]:
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])
##########################################################################
# HELPER FUNCTIONS #
##########################################################################
def create_dropdown_options(series):
options = [{'label': i, 'value': i} for i in series.sort_values().unique()]
return options
def create_dropdown_value(series):
value = series.sort_values().unique().tolist()
return value
def create_slider_marks(values): #NEED
marks = {i: {'label': str(i)} for i in values}
return marks
##########################################################################
# ADD ONS TO APP (images, files,etc) #
##########################################################################
#pull Gamma Data
gi_df = pd.read_excel ('GI_Values.xlsx')[["Source", "Mean","SD", "Dist_Type"]]
gi_df.sort_values(by=["Dist_Type",'Mean'], inplace=True)
gi_df.reset_index(drop=True, inplace=True)
colors = {'background': '#111111','text': 'black'}
subheader_size = 20
##########################################################################
# DASHBOARD APP #
##########################################################################
app.layout = html.Div(children=[
dcc.Location(id="url",refresh=False),
html.Div(id="output-div")
])
##########################################################################
# HOME PAGE #
##########################################################################
home_layout = html.Div(
#==========Create "Header" Data=================
children=[
html.Div(
[
html.Img(src=app.get_asset_url("edinburgh.png"), height=50),
html.Div([html.H4(children=('SI',html.Sub("n"),'R Covid-19 Modeling'), style={'textAlign': 'center', "font-weight": "bold"}),]),
], style={'display': 'flex', 'align-items': 'center', 'justify-content': 'center'}
),
#---Page Links---
html.Div([dbc.Row(
[
dbc.Col(html.Div(dcc.Link('Home',href="/")), style={'textAlign': 'center'}),
dbc.Col(html.Div(dcc.Link('Simulate Data',href="/simulate")), style={'textAlign': 'center'}),
dbc.Col(html.A("Github Code", href='https://github.com/annette-bell/SInR-Covid-Dissertation',
target="_blank"), style={'textAlign': 'center'}),
],
className="g-0",
)]),
#---Line Break---
html.Div([html.Hr(style={'borderWidth': "0.3vh", "color": "#FEC700"}),]),
#===============Home Page Information==========
html.Div(
[
html.H6("About this app", style={"margin-top": "0","font-weight": "bold","text-align": "left"}),
#html.Hr(),
html.P("The Susceptible-Infected-Recovered (SIR) compartmental model is used in epidemiology to identify\
and categorize members of a population based on their status with regards to a disease. Less\
studied variations of this problem are the SInR and SInRS models. These models, which have applications\
in latent infection and varying transmission rates, will be used on three different generation\
interval—the time between primary exposure and secondary infection—distributions: gamma, lognormal,\
and Weibull. The distributions are ultimately tested against one another to see not only \
which provides most realistic data, but how these data-sets interact.\
This app is meant to help people understand dynamics of COVID-19 modeling through a simply dashboard application.\
To see a more in-depth explanation, please see the Github repository which includes my dissertation.",
className="control_label",style={"text-align": "justify"}),
],
className="pretty_container almost columns",
),
#============AUTHOR=============
html.Div(
[
html.H6("Authors", style={"margin-top": "0","font-weight": "bold","text-align": "center"}),
html.P("Annette Bell ([email protected])", style={"text-align": "center", "font-size":"10pt"}),
],
className="pretty_container almost columns",
),
#============ACKNOWLEDGEMENTS=============
html.Div(
[
html.H6("Acknowledgements", style={"margin-top": "0","font-weight": "bold","text-align": "center"}),
html.P("John Dagpunar: Dr. Dagpunar was my thesis advisor and extremely helpful throughout the project.)", style={"text-align": "left", "font-size":"10pt"}),
],
className="pretty_container almost columns",
),
#============SOURCES=============
html.Div(
[
html.H6("Sources", style={"margin-top": "0","font-weight": "bold","text-align": "center"}),
dcc.Markdown(
"""\
- Code Layout was used from Plotly public dash application: https://dash.gallery/dash-food-consumption/
- I examined another dash application to better understand how to use it. In addition to dash application resources, I analyzed the source code to clarify how to implement dash: https://github.com/FranzMichaelFrank/health_eu
- Edinburgh PNG: https://uploads-ssl.webflow.com/5eb13d58c8c08b73773d6b1c/600ea3810bde89c60e317be7_uni-of-edinburgh.png
"""
,style={"font-size":"10pt"}),
],
className="pretty_container almost columns",
),
])
##########################################################################
# SIMULATION PAGE #
##########################################################################
sim_layout = html.Div(
#==========Create "Header" Data=================
children=[
#---Title---
html.Div(
[
html.Img(src=app.get_asset_url("edinburgh.png"), height=50),
html.Div([html.H4(children=('SI',html.Sub("n"),'R Covid-19 Modeling'), style={'textAlign': 'center', "font-weight": "bold"}),]),
], style={'display': 'flex', 'align-items': 'center', 'justify-content': 'center'}
),
#---Page Links---
html.Div([dbc.Row(
[
dbc.Col(html.Div(dcc.Link('Home',href="/")), style={'textAlign': 'center'}),
dbc.Col(html.Div(dcc.Link('Simulate Data',href="/simulate")), style={'textAlign': 'center'}),
dbc.Col(html.A("Github Code", href='https://github.com/annette-bell/SInR-Covid-Dissertation',
target="_blank"), style={'textAlign': 'center'}),
], className="g-0",
)]),
#---Line Break---
html.Div([html.Hr(style={'borderWidth': "0.3vh", "color": "#FEC700"}),]),
#============OVERVIEW OF THE SIMULATION DATA=================
html.Div(
[
html.H6(["Overview of Distributions", html.Br()], style={"margin-top": "0","font-weight": "bold","text-align": "left"}),
html.Div(
[
dbc.Row(
[#---Table of Previous Analysis---:
dbc.Col(dash_table.DataTable(gi_df.to_dict('records'), [{"name": i, "id": i} for i in gi_df.columns],
style_header={'text-align': 'center', 'fontWeight': 'bold',},
style_table={'height': '200px', 'overflowY': 'auto'},
style_cell={'textAlign': 'left'},), width=3),
#---Commentary on the table
dbc.Col(html.Div([html.P("There are three main commonly used distributions to model the generation interval-\
the time from primary exposure to secondary infectiousness. These distributions include gamma, weibull, and log-normal.\
To the left, you can see a table of means and standard deviations from others previous work.",className="control_label",style={"text-align": "justify"}),]))]),]),
],
className="pretty_container",
),
#================= GENERATION INTERVAL SIMULATION =====================
html.Div(
[
html.Div(
#----------------INPUTS-----------
[
html.H6("Generation Interval Distribution:", style={"margin-top": "0","font-weight": "bold","text-align": "center"}),
html.P("Please select the distribution you wish to base your simulated generation interval data off of. Note: Seed=1234.", className="control_label",style={"text-align": "justify"}),
html.Br(),
#Shows the inputs for the specified distribution
html.Div(
[
dbc.Row([
dbc.Col(html.Div([
#---Mean---
html.P("Input the population mean:", className="control_label", style={"font-weight": "bold", "text-align": "left"}),
dcc.Input(id='pop_mean',placeholder='', type='number', value= 4.9),
])),
dbc.Col(html.Div([
#---SD---
html.P("Input the standard deviation:", className="control_label", style={"font-weight": "bold", "text-align": "left"}),
dcc.Input(id='stan_dev',placeholder='', type='number', value= 2),
])),
],),
#---Data Set Size---
html.P("Select size of data:", className="control_label", style={"font-weight": "bold", "text-align": "center"}),
dcc.Slider(id='gi_size', min=1000, max=10000, step=500, value=5000,
marks=create_slider_marks(list(range(1000,10001))[::1000])),
html.Br(),
#---Update Button---
html.Button(id='update_button', children="Simulate", n_clicks=0,style=dict(width='220px')),
],),
],
className="pretty_container four columns",
),
#----------------GI Plot-----------
html.Div(
[
html.H6("Generation Interval Simulations", style={"margin-top": "0","font-weight": "bold","text-align": "center"}),
#---Information Regarding Shape and Scale---
html.P(id='shape_scale', style={"text-align": "justify"}),
#---GI Histogram---
html.Div(id='gammaplot_container',children=[
dcc.Graph(id="gi_plots", style={'height': '80vh'}),
]),
],
className="pretty_container seven columns",
),
],
className="row flex-display",
),
#===============Transmission Rate==========
html.Br(),
html.Div([dbc.Row(
[
dbc.Col(html.Div(html.Hr(style={'borderWidth': "0.3vh", "color": "#FEC700", 'align':'right'},)), width=5),
dbc.Col(html.Div(html.H6("Transmission Rate", style={"text-align": "center", "font-weight": "bold",
"margin-top": "14px", "color": "#384142"})), width=1.5),
dbc.Col(html.Div(html.Hr(style={'borderWidth': "0.3vh", "color": "#FEC700", 'align':'left'},)), width=5),
],
)]),
html.Div(
[
html.Div(
[#----------------Parameters of Beta(u)-----------
html.H6("Parameters of \u03b2(u):", style={"margin-top": "0","font-weight": "bold","text-align": "center"}),
html.P("As the transmission rate is not constant, we create a function that simulates transmission a non constant transmission rate.", className="control_label",style={"text-align": "justify"}),
#---R_0---
html.P("Input the basic reproduction number:", className="control_label", style={"font-weight": "bold", "text-align": "center"}),
dcc.Input(id='R0', placeholder='', type='number', value= 2.3),
html.Br(),
html.Br(),
#---Update Button---
html.Button(id='beta_update_button', children="Calculate B(u)", n_clicks=0, style=dict(width='220px')),
], className="pretty_container four columns",
),
html.Div(
[#----------------Beta(u) Plot-----------
html.H6("Expected Infectious Curve", style={"margin-top": "0","font-weight": "bold","text-align": "center"}),
#html.P("Visualize the transmission rates. Note: the following results are based on the parameters and the GI Data simulated above.", className="control_label",style={"text-align": "justify"}),
html.Br(),
#---return weights and betas---
#html.P(id='weights_beta_info', style={"text-align": "justify"}),
#html.P(id='lambdas', style={"text-align": "justify"}),
#html.P(id='weights', style={"text-align": "justify"}),
#html.P(id='betas', style={"text-align": "justify"}),
dcc.Graph(id="beta_plot", style={'height': '60vh'}),
],
className="pretty_container seven columns",
),
],
className="row flex-display",
),
#=====================SI(n)R Model=====================
html.Br(),
html.Div([dbc.Row(
[
dbc.Col(html.Div(html.Hr(style={'borderWidth': "0.3vh", "color": "#FEC700", 'align':'right'},)), width=5),
dbc.Col(html.Div(html.H6("Modeling COVID-19", style={"text-align": "center", "font-weight": "bold",
"margin-top": "14px", "color": "#384142"})), width=1.5),
dbc.Col(html.Div(html.Hr(style={'borderWidth': "0.3vh", "color": "#FEC700", 'align':'left'},)), width=5),
],
)]),
html.Div(
[
html.Div(
[#----------------Parameters of SInR Model-----------
html.H6("Parameters of the Model:", style={"margin-top": "0","font-weight": "bold","text-align": "center"}),
#html.P("Beta(u) was last calculated using the following:", className="control_label",style={"text-align": "justify"}),
html.Div([
dcc.RadioItems(
id='distribution-dropdown',
options=[
{'label': 'gamma', 'value': 'gamma'},
{'label': 'weibull', 'value': 'weibull'},
{'label': 'log-normal','value': 'lognorm'},
{'label': 'all','value': 'all'}],
value='all',
labelStyle={"display": "inline-block"},
style={"font-weight": "bold", "text-align": "center"},
),],),
dbc.Row(
[
dbc.Col(html.Div([
#---Total Population---
html.P("Total Population (N):", className="control_label", style={"font-weight": "bold", "text-align": "left"}),
dcc.Input(id='N_size', placeholder='', type='number', value = 67886011),
])),
dbc.Col(html.Div([
#---simulated days---
html.P("Total Days to Simulate Over:", className="control_label", style={"font-weight": "bold", "text-align": "left"}),
dcc.Input(id='t_days', placeholder='', type='number', value = 180),
])),
], className="g-0",
),
dbc.Row(
[
dbc.Col(html.Div([
#---Recovered---
html.P("Initial Recovered (R):", className="control_label", style={"font-weight": "bold", "text-align": "left"}),
dcc.Input(id='R_size', placeholder='', type='number', value = 0),
])),
dbc.Col(html.Div([
#---simulated days---
html.P("Contact Rate:", className="control_label", style={"font-weight": "bold", "text-align": "left"}),
dcc.Input(id='c', placeholder='', type='number', value = 1),
])),
], className="g-0",
),
dbc.Row(
[
dbc.Col(html.Div([
#---Infected---
html.P("Initail Infected (I):", className="control_label", style={"font-weight": "bold", "text-align": "left"}),
dcc.Input(id='I_size', placeholder='', type='number', value = 1),
])),
dbc.Col(html.Div([
#---simulated days---
html.P("Waning Immunity Rate:", className="control_label", style={"font-weight": "bold", "text-align": "left"}),
dcc.Input(id='h', placeholder='', type='number', value = 0),
])),
], className="g-0",
),
#---n_slider---
html.P("Select the compartment size: ", className="control_label", style={"font-weight": "bold", "text-align": "center"}),
dcc.Slider(1, 20, step=1, value=10, id='n_val'),
#---SInR Button---
html.Br(),
html.Br(),
html.Button(id='model_button', children="Model", n_clicks=0, style=dict(width='220px')),
], className="pretty_container four columns",
),
html.Div(
[#----------------SInR Plot-----------
html.H6(('SI',html.Sub("n"),'R Covid-19 Modeling'), style={"margin-top": "0","font-weight": "bold","text-align": "center"}),
html.P(id='model_parameters', style={"text-align": "justify"}),
#html.P("Visualize the how th population shifts.", className="control_label",style={"text-align": "justify"}),
dcc.Graph(id="SInR_plot", style={'height': '60vh'}),
html.Div([ ], id='plot1'),
],
className="pretty_container seven columns",
),
],
className="row flex-display",
),
],
id="mainContainer",
style={"display": "flex", "flex-direction": "column"},
)
##########################################################################
# LINK TO EACH PAGE #
##########################################################################
@app.callback(
Output(component_id="output-div",component_property="children"),
Input(component_id="url",component_property="pathname"))
def update_page_layout(pathname):
if pathname == "/simulate":
return sim_layout
else:
return home_layout
##########################################################################
# SIMULATE AND VISUALIZE DISTRIBUTION DATA SETS #
##########################################################################
@app.callback(
[Output('gi_plots', 'figure'),
Output('shape_scale', 'children')],
[Input(component_id='update_button', component_property='n_clicks')],
[State('stan_dev', 'value'),
State('pop_mean', 'value'),
State('gi_size', 'value')]
)
def update_sim_gi(n_clicks, sd, mean, size):
'''
This callback and function combination simulates
a desired distribution (either gamma, weibull, or log-normal)
given the information.
'''
#----------CREATE DISTRIBUTIONS---------
gamma_data, lognorm_data, weibull_data = create_gi(mean, sd, size)
mean_vals = [np.mean(gamma_data), np.mean(lognorm_data), np.mean(weibull_data)]
std_vals = [np.std(gamma_data), np.std(lognorm_data), np.std(weibull_data)]
#--------------VISUALIZE----------------
gi_fig = gi_visualize(gamma_data, lognorm_data, weibull_data)
return gi_fig, f'Given the input mean and standard deviation of {mean} and {sd} respectively, the distributions are as follows: Gamma (x\u0305={round(mean_vals[0],3)}, s={round(std_vals[0],3)}). Lognormal(x\u0305 ={round(mean_vals[1],3)}, s={round(std_vals[1],3)}). Weibull(x\u0305={round(mean_vals[2],3)}, s={round(std_vals[2],3)}).'
##########################################################################
# CREATE AND PLOT BETA(u) #
##########################################################################
@app.callback(
[Output('beta_plot', 'figure')], #Output('weights_beta_info', 'children'), Output('lambdas', 'children'), Output('weights', 'children'), Output('betas', 'children')],
[Input(component_id='beta_update_button', component_property='n_clicks')],
[State('R0', 'value'), #DISTRIBUTION STATES
State('stan_dev', 'value'),
State('pop_mean', 'value'),
State('gi_size', 'value'),
]
)
def update_beta_u_plot(n_click, R0, sd, mean, size):
'''
Function will run beta_u function once "Calculate Beta(u)" button is clicked
'''
#----------CREATE DISTRIBUTIONS---------
gamma_data, lognorm_data, weibull_data = create_gi(mean, sd, size)
#----determine minimal acceptable size -----
g_min_comp = math.ceil((np.mean(gamma_data)**2)/(np.var(gamma_data)))
l_min_comp = math.ceil((np.mean(lognorm_data)**2)/(np.var(lognorm_data)))
w_min_comp = math.ceil((np.mean(weibull_data)**2)/(np.var(weibull_data)))
min_acceptable = max(g_min_comp, l_min_comp, w_min_comp)
#----------------CALC VALS------------------
w_gamma, l_gamma, b_gamma = solver(gamma_data, R0, min_acceptable, "gamma")
w_lognorm, l_lognorm, b_lognorm = solver(lognorm_data, R0, min_acceptable, "lognorm")
w_weibull, l_weibull, b_weibull = solver(weibull_data, R0, min_acceptable, "weibull")
#----------------PLOT Beta(u)------------------
b_n = [b_gamma, b_lognorm, b_weibull]
l_n = [l_gamma, l_lognorm, l_weibull]
w_n = [w_gamma, w_lognorm, w_weibull]
beta_plot = plot_beta_dist(b_n, l_n)
return [go.Figure(data=beta_plot)]
##########################################################################
# UPDATE SInR MODEL #
##########################################################################
@app.callback(
[Output('SInR_plot', 'figure'),],#Output(component_id="plot1", component_property="children"),],
#Output(component_id='model_parameters', component_property='children')],
[Input(component_id='model_button', component_property='n_clicks')],
[State('stan_dev', 'value'),
State('pop_mean', 'value'),
State('gi_size', 'value'),
State('distribution-dropdown', 'value'),
State('N_size', 'value'),
State('I_size', 'value'),
State('R_size', 'value'),
State('t_days', 'value'),
State('n_val', 'value'),
State('R0', 'value'),
State('c', 'value'),
State('h', 'value')]
)
def update_SInR_plot(n_click, sd, mean, size, show, N, I1_t0, R_t0, days, n, R0, c_val, h_val):
'''
Visualize the SInR(S) plot
'''
gamma_data, lognorm_data, weibull_data = create_gi(mean, sd, size)
#----determine minimal acceptable size -----
g_min_comp = math.ceil((np.mean(gamma_data)**2)/(np.var(gamma_data)))
l_min_comp = math.ceil((np.mean(lognorm_data)**2)/(np.var(lognorm_data)))
w_min_comp = math.ceil((np.mean(weibull_data)**2)/(np.var(weibull_data)))
#----------------CALC VALS------------------
w_gamma, l_gamma, b_gamma = solver(gamma_data, R0, n, "gamma")
w_lognorm, l_lognorm, b_lognorm = solver(lognorm_data, R0, n, "lognorm")
w_weibull, l_weibull, b_weibull = solver(weibull_data, R0, n, "weibull")
#-------Create lists of data-----
b_n = [b_gamma, b_lognorm, b_weibull]
l_n = [l_gamma, l_lognorm, l_weibull]
w_n = [w_gamma, w_lognorm, w_weibull]
print(b_n)
color=["skyblue","darkorange","green"]
dist = ["Gamma", "Lognormal","Weibull"]
count=0
#----specify compartments-------
I_t0 = [I1_t0]+(n-1)*[0]
S_t0 = N - sum(I_t0) - R_t0
y_t0 = [S_t0]+ I_t0 +[R_t0]
t = np.array(list(range(0,days+1)))
#----specify model type-----
if h_val== 0:
model_type = "SI\u2099R Model of "
else:
model_type = "SI\u2099RS Model of "
if show == "all":
SInR_compare = go.Figure()
dash = ["dot","dash"]
for b,l in zip(b_n, l_n):
#SIR MODEL
if count == 0:
fig = SInR_plot(y_t0, t, N, c_val, h_val, b, l)
s_data = list(fig['data'][0]['y'])
i_data = list(fig['data'][1]['y'])
r_data = list(fig['data'][2]['y'])
SInR_compare.update_layout(title=model_type+" for all Generation Intervals",
legend=dict(yanchor="top", y=-0.2, xanchor="left",x=0.02, orientation="h"),
font_size=14)
SInR_compare.add_trace(go.Scatter(x=t, y= s_data, name= "Susceptible: " +dist[count]+" GI",
line_color="blue",))
SInR_compare.add_trace(go.Scatter(x=t, y= i_data, name= "Infected: " +dist[count]+" GI",
line_color="red",))
SInR_compare.add_trace(go.Scatter(x=t, y= r_data, name= "Recovered: " +dist[count]+" GI",
line_color="green"))
count+=1
else:
fig2 = SInR_plot(y_t0, t, N, c_val, h_val, b, l)
s_data = list(fig2['data'][0]['y'])
i_data = list(fig2['data'][1]['y'])
r_data = list(fig2['data'][2]['y'])
SInR_compare.add_trace(go.Scatter(x=t, y= s_data, name= "Susceptible: " +dist[count]+" GI",
line_color="blue", line_dash=dash[count-1]))
SInR_compare.add_trace(go.Scatter(x=t, y= i_data, name= "Infected: " +dist[count]+" GI",
line_color="red", line_dash=dash[count-1]))
SInR_compare.add_trace(go.Scatter(x=t, y= r_data, name= "Recovered: " +dist[count]+" GI",
line_color="green", line_dash=dash[count-1]))
count+=1
SInR_compare.update_xaxes(title_text='Time (Days)')#,nticks=20)
SInR_compare.update_yaxes(title_text='Percentage of the Population')
return [go.Figure(data=SInR_compare)]
else:
if show == "gamma":
index = 0
if show == "lognorm":
index = 1
else:
index=2
I_t0 = [I1_t0]+(n-1)*[0]
S_t0 = N - sum(I_t0) - R_t0
y_t0 = [S_t0]+ I_t0 +[R_t0]
t = np.array(list(range(0,days+1)))
#SIR MODEL
fig = SInR_plot(y_t0, t, N, c_val, h_val, b_n[index], l_n[index])
fig.update_layout(title=model_type+" of "+dist[index]+" Generation Interval",
legend=dict(yanchor="top", y=0.35, xanchor="left", x=0.01),font_size=14)
return [go.Figure(data=fig)]
##########################################################################
# RUN THE APP #
##########################################################################
if __name__ == "__main__":
app.run_server(debug=False)
#TABLE STYLING:
#https://dash.plotly.com/datatable/style
# <br><br><br><br>
| annette-bell/SInR-Covid-Dissertation | dash_lambda.py | dash_lambda.py | py | 44,277 | python | en | code | 1 | github-code | 6 |
| 33851174074 |
'''
boss class
'''
import pygame
class Boss(pygame.sprite.Sprite):
def __init__(self,laser):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.image.load("images/Boss.gif").convert()
self.rect = self.image.get_rect()
self.rect.x = 500
self.rect.y = 0
self.health = 200
self.laser = laser
self.lasertimer = 0
self.left = False
self.right = True
#update the boss
def update(self):
self.movement()
self.attack()
#get the health
def getHealth(self):
return self.health
#reduce the health by 10
def setHealth(self):
self.health = self.health - 10
#laser attack of the boss
def attack(self):
self.lasertimer += 1
if self.lasertimer == 20:
self.laser.rect.x = self.rect.x + 50
self.laser.rect.y = self.rect.y
if self.lasertimer > 20:
self.laser.rect.y += 15
if self.laser.rect.y > 600:
self.lasertimer = 0
self.laser.rect.x = -500
self.laser.rect.y = -500
#set up movement for boss
def movement(self):
if self.rect.x > 900:
self.right = False
self.left = True
if self.rect.x < 50:
self.left = False
self.right = True
if self.left:
self.rect.x -= 10
if self.right:
self.rect.x += 10
| Inviernos/Alien-Lord | boss.py | boss.py | py | 1,580 | python | en | code | 0 | github-code | 6 |
| 43968820856 |
#!/usr/bin/env python
from Bio import SeqIO
import argparse
import json
import os
from CPT_GFFParser import gffParse, gffWrite
def parse_xmfa(xmfa):
"""Simple XMFA parser until https://github.com/biopython/biopython/pull/544
"""
current_lcb = []
current_seq = {}
for line in xmfa.readlines():
if line.startswith("#"):
continue
if line.strip() == "=":
if "id" in current_seq:
current_lcb.append(current_seq)
current_seq = {}
yield current_lcb
current_lcb = []
else:
line = line.strip()
if line.startswith(">"):
if "id" in current_seq:
current_lcb.append(current_seq)
current_seq = {}
data = line.strip().split()
# 0 1 2 3 4 5
# > 1:5986-6406 + CbK.fa # CbK_gp011
id, loc = data[1].split(":")
start, end = loc.split("-")
current_seq = {
"rid": "_".join(data[1:]),
"id": id,
"start": int(start),
"end": int(end),
"strand": 1 if data[2] == "+" else -1,
"seq": "",
"comment": "",
}
if len(data) > 5:
current_seq["comment"] = " ".join(data[5:])
# else:
# current_seq['seq'] += line.strip()
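# Usage sketch (hypothetical file name, not part of the original script):
#   with open("alignment.xmfa") as xmfa:
#       for lcb in parse_xmfa(xmfa):
#           print(len(lcb), [seq["rid"] for seq in lcb])   # each LCB is a list of sequence dicts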
def percent_identity(a, b):
"""Calculate % identity, ignoring gaps in the host sequence
"""
match = 0
mismatch = 0
for char_a, char_b in zip(list(a), list(b)):
if char_a == "-":
continue
if char_a == char_b:
match += 1
else:
mismatch += 1
if match + mismatch == 0:
return 0.0
return 100 * float(match) / (match + mismatch)
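# For example, percent_identity("A-CT", "AGCT") == 100.0 because the gap in the
# host sequence is skipped, while percent_identity("ACGT", "ACGA") == 75.0.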
def get_fasta_ids(sequences):
"""Returns a list of fasta records in the order they appear
"""
ids = []
for seq in SeqIO.parse(sequences, "fasta"):
ids.append(seq.id)
return ids
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="parse xmfa file")
parser.add_argument("gff3", type=argparse.FileType("r"), help="Multi-GFF3 File")
parser.add_argument("fasta", type=argparse.FileType("r"), help="Multi-FA file")
parser.add_argument("xmfa", type=argparse.FileType("r"), help="XMFA File")
parser.add_argument("output_dir", type=str, help="output directory")
args = parser.parse_args()
fasta_list = get_fasta_ids(args.fasta)
lcbs = parse_xmfa(args.xmfa)
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
output = {"fasta": [], "gff3": [], "xmfa": None}
processed_xmfa = os.path.join(args.output_dir, "regions.json")
with open(processed_xmfa, "w") as handle:
json.dump([lcb for lcb in lcbs if len(lcb) > 1], handle, sort_keys=True)
output["xmfa"] = processed_xmfa
# Have to seek because we already access args.fasta once in id_tn_dict
args.fasta.seek(0)
# Load up sequence(s) for GFF3 data
seq_dict = SeqIO.to_dict(SeqIO.parse(args.fasta, "fasta"))
# Parse GFF3 records
gffs = gffParse(args.gff3, base_dict=seq_dict)
for record in sorted(gffs, key=lambda rec: fasta_list.index(rec.id)):
gff_output = os.path.join(args.output_dir, record.id + ".gff")
with open(gff_output, "w") as handle:
gffWrite([record], handle)
output["gff3"].append(gff_output)
fa_output = os.path.join(args.output_dir, record.id + ".txt")
with open(fa_output, "w") as handle:
handle.write(str(record.seq))
output["fasta"].append(
{"path": fa_output, "length": len(record.seq), "name": record.id}
)
print(json.dumps(output, sort_keys=True))
| TAMU-CPT/galaxy-tools | tools/comparative/xmfa_process.py | xmfa_process.py | py | 3,928 | python | en | code | 5 | github-code | 6 |
| 43953915570 |
from flask import Flask, request, abort
from linebot import (
LineBotApi, WebhookHandler
)
from linebot.exceptions import (
InvalidSignatureError
)
from linebot.models import *
# My Code
from util import *
app = Flask(__name__)
# Channel Access Token
line_bot_api = LineBotApi('++7wQ1tXdLomUPrrUbvcKEE12HAh+eeIh1s46ynQESIAH2zkobGXkk19oxFSHS/5fgOju9fHnX3wu02ALT70wQSYcrFuE5ZoKd5vYwkr+VRIdTiMfFSVFerWzr5j1Syf5YlS5NGCFoXbPBiF730F3AdB04t89/1O/w1cDnyilFU=')
# Channel Secret
handler = WebhookHandler('095348740b93fb668776aa36c9571a44')
# Listen for all POST requests coming to /callback
@app.route("/callback", methods=['POST'])
def callback():
# get X-Line-Signature header value
signature = request.headers['X-Line-Signature']
# get request body as text
body = request.get_data(as_text=True)
app.logger.info("Request body: " + body)
# handle webhook body
try:
handler.handle(body, signature)
except InvalidSignatureError:
abort(400)
return 'OK'
# Handle incoming messages
@handler.add(MessageEvent, message=TextMessage)
def handle_message(event):
msg = event.message.text
if '聯絡方式' in msg:
message = imagemap_message()
line_bot_api.reply_message(event.reply_token, message)
elif '你是誰' in msg:
message = TextSendMessage(text= "嗨我是吳岳,很高興認識你!")
line_bot_api.reply_message(event.reply_token, message)
elif '你會什麼' in msg:
message = Carousel_Template()
line_bot_api.reply_message(event.reply_token, message)
elif '你喜歡什麼' in msg:
message = image_gallery()
line_bot_api.reply_message(event.reply_token, message)
elif "你想去哪裡工作" in msg:
line_bot_api.reply_message(event.reply_token,LocationSendMessage(title='LINE Taiwan', address='No. 333號, Ruiguang Rd, Neihu District, Taipei City, 114', latitude=25.07726625171245, longitude=121.57513202616131))
else:
message = TextSendMessage(text='echo: ' + msg)
line_bot_api.reply_message(event.reply_token, message)
import os
if __name__ == "__main__":
port = int(os.environ.get('PORT', 5000))
app.run(host='0.0.0.0', port=port)
| asianpwnage422/myLineBot | line-bot-kevin/app.py | app.py | py | 2,208 | python | en | code | 0 | github-code | 6 |
| 27318807553 |
# @PascalPuchtler
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import json
import time
from sys import platform
import serial
class MoveControllerCommunication:
def __init__(self,carModel, com = None, baudrate= 9600, changeMoveCallback = None):
self.radiusBig = 0.55
self.radiusSmall = 0.365
self.error = False
self.carModel = carModel
self.changeMoveCallback = changeMoveCallback
if com is None:
if platform == "win32":
com = 'COM7'
else:
com ='/dev/ttyACM0'
try:
self.communication = serial.Serial(com, baudrate=baudrate,
timeout=2.5,
parity=serial.PARITY_NONE,
bytesize=serial.EIGHTBITS,
stopbits=serial.STOPBITS_ONE)
time.sleep(1)
self.communication.reset_input_buffer()
except:
print('Error: No Move controller available over port ' + com)
self.error = True
def turnLeft(self):
self.driveCircle(self.radiusSmall,True, True)
def turnRight(self):
self.driveCircle(self.radiusSmall,True, False)
def drive(self):
self.driveCircle(float('inf'),True, False)
def driveLeft(self):
self.driveCircle(self.radiusBig,True, True)
def driveRight(self):
self.driveCircle(self.radiusBig,True, False)
def backwardLeft(self):
self.driveCircle(self.radiusBig,False, True)
def backwardRight(self):
self.driveCircle(self.radiusBig,False, False)
def backward(self):
self.driveCircle(float('inf'), False, False)
def stop(self):
if self.changeMoveCallback is not None:
self.changeMoveCallback(0, 0)
self.move([0,0])
def fullLeft(self):
self.move([-100,100])
time.sleep(0.2)
self.stop()
def driveCircle(self, radius, forward, left):
motor, gear, speed = self.carModel.getMotorSpeedFromRadius(radius, forward, left)
print('l:', round(motor[0]), 'r:', round(motor[1]), 'g:', round(gear,4), 's:', round(speed,4))
if self.changeMoveCallback is not None:
self.changeMoveCallback(gear, speed)
self.move(motor)
def move(self, motor):
if not self.error:
command = {}
command["command"] = "move"
command["left"] = int(motor[0])
command["right"] = int(motor[1])
self.communication.write(json.dumps(command).encode('ascii'))
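        # e.g. self.move([50, -50]) writes the JSON command
        # {"command": "move", "left": 50, "right": -50} to the serial port.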
def getSonic(self):
if not self.error:
inputBuffer = self.communication.readline()
command = {}
command["command"] = "sonic"
self.communication.write(json.dumps(command).encode('ascii'))
try:
sonic = json.loads(inputBuffer)
return sonic
except:
print("exception in get Sonic")
return {"left": 0, "right": 0, "middle":0}
| iisys-hof/autonomous-driving | car-controller/src/mainController/Controller/MoveController/MoveControllerCommunication.py | MoveControllerCommunication.py | py | 3,571 | python | en | code | 0 | github-code | 6 |
| 27451303499 |
#!/usr/bin/env python3
import sys
if(len(sys.argv) < 3):
sys.exit("Usage: makeMetadata.py ebook_title path_to_write_metadata")
output = ''
output += '---\n'
output += 'title: ' + sys.argv[1].split('/')[-1].title() + '\n'
output += 'author: ' + 'Kyle Simpson' + '\n'
output += 'rights: ' + 'Creative Commons Attribution-NonCommercial-NoDerivs 3.0 Unported License' + '\n'
output += 'language: ' + 'en-US' + '\n'
output += '...'
try:
if(sys.argv[3] == '--debug' or sys.argv[3] == '-d'):
print(output)
except Exception as e:
pass
with open(sys.argv[2] + '/title.txt', 'w') as metadata:
try:
metadata.write(output)
except Exception as e:
print(str(e))
| aidanharris/You-Dont-Know-JS | makeMetadata.py | makeMetadata.py | py | 686 | python | en | code | 1 | github-code | 6 |
| 73816651386 |
import unittest
from unittest import mock
from pydis_core.site_api import ResponseCodeError
from bot.exts.backend.sync._syncers import Syncer
from tests import helpers
class TestSyncer(Syncer):
"""Syncer subclass with mocks for abstract methods for testing purposes."""
name = "test"
_get_diff = mock.AsyncMock()
_sync = mock.AsyncMock()
class SyncerSyncTests(unittest.IsolatedAsyncioTestCase):
"""Tests for main function orchestrating the sync."""
def setUp(self):
patcher = mock.patch("bot.instance", new=helpers.MockBot(user=helpers.MockMember(bot=True)))
self.bot = patcher.start()
self.addCleanup(patcher.stop)
self.guild = helpers.MockGuild()
TestSyncer._get_diff.reset_mock(return_value=True, side_effect=True)
TestSyncer._sync.reset_mock(return_value=True, side_effect=True)
# Make sure `_get_diff` returns a MagicMock, not an AsyncMock
TestSyncer._get_diff.return_value = mock.MagicMock()
async def test_sync_message_edited(self):
"""The message should be edited if one was sent, even if the sync has an API error."""
subtests = (
(None, None, False),
(helpers.MockMessage(), None, True),
(helpers.MockMessage(), ResponseCodeError(mock.MagicMock()), True),
)
for message, side_effect, should_edit in subtests:
with self.subTest(message=message, side_effect=side_effect, should_edit=should_edit):
TestSyncer._sync.side_effect = side_effect
ctx = helpers.MockContext()
ctx.send.return_value = message
await TestSyncer.sync(self.guild, ctx)
if should_edit:
message.edit.assert_called_once()
self.assertIn("content", message.edit.call_args[1])
async def test_sync_message_sent(self):
"""If ctx is given, a new message should be sent."""
subtests = (
(None, None),
(helpers.MockContext(), helpers.MockMessage()),
)
for ctx, message in subtests:
with self.subTest(ctx=ctx, message=message):
await TestSyncer.sync(self.guild, ctx)
if ctx is not None:
ctx.send.assert_called_once()
| python-discord/bot | tests/bot/exts/backend/sync/test_base.py | test_base.py | py | 2,317 | python | en | code | 1,206 | github-code | 6 |
| 30419861071 |
"""
An AI agent that will explore its environment and perform certain tasks (mining, smelting, forging, and buying/selling items)
"""
import sys
from time import sleep
import traceback
import cv2
import pyautogui
from game_map import GameMap
import utilities as utils
from user_interface import UserInterface
from player import Player
# Set defaults
task = Player.TASKS.MINE
if len(sys.argv) > 1:
task = Player.TASKS[sys.argv[1].upper()]
# Initialize classes
game_map = GameMap()
player = Player(game_map, task)
user_interface = UserInterface()
utils.log("INIT", "====================================================")
utils.log("INIT", "Initializing...")
utils.log("INIT", F"Default task set to {task}")
# Find blocking window in screenshot
screenshot = utils.take_screenshot(False)
result = cv2.matchTemplate(screenshot, user_interface.templates['sponsored'], cv2.TM_CCORR_NORMED)
_, max_val, _, max_loc = cv2.minMaxLoc(result)
# Found the blocking window with high confidence
if max_val > 0.9:
click_at = (max_loc[0] + 428, max_loc[1] + 144)
utils.log("INIT", "Closed blocking window")
pyautogui.moveTo(click_at[0], click_at[1], 0.15)
pyautogui.click()
sleep(5)
# Bring game to foreground
utils.bring_game_to_foreground()
# Detect environment
screenshot = utils.take_screenshot()
game_map.update_player_position(screenshot)
utils.log("INIT", F"Player location initialized")
game_map.update_map()
utils.log("INIT", "Field of view mapped")
utils.log("INIT", "Initialization complete")
utils.log("INIT", "====================================================")
try:
while utils.bring_game_to_foreground():
player.perform_task()
except Exception as exception:
utils.log("SEVERE", exception)
utils.log("SEVERE", traceback.format_exc())
utils.quit_game()
| jeffaustin32/game_ai | main.py | main.py | py | 1,821 | python | en | code | 0 | github-code | 6 |
| 2226786256 |
import pandas as pd
import subprocess
import os
df = pd.read_csv(snakemake.input.predictions, sep="\t")
cells_unselected = df.loc[df["prediction"] == 0, "cell"].tolist()
# ADDING NEW COLUMN TO CONFIG FILE
df_config = pd.read_csv("{data_location}/config/config_df_ashleys.tsv".format(data_location=snakemake.config["data_location"]), sep='\t')
df_config["Selected"] = True
df_config.loc[df_config["Cell"].isin([e.split('.')[0] for e in cells_unselected]), "Selected"] = False
df_config.to_csv("{data_location}/config/config_df_ashleys.tsv".format(data_location=snakemake.config["data_location"]), sep='\t', index=False)
with open(snakemake.output[0], 'w') as out:
out.write("data_location processed: {data_location}\n".format(data_location=snakemake.params.path))
out.write("Removed following cells:\n")
for cell in cells_unselected:
# print("rm {path}/{sample}/selected/{cell}".format(path=snakemake.params.path, sample=snakemake.wildcards.sample, cell=cell))
subprocess.call("rm {path}/{sample}/selected/{cell}".format(path=snakemake.params.path, sample=snakemake.wildcards.sample, cell=cell), shell=True)
subprocess.call("rm {path}/{sample}/selected/{cell}.bai".format(path=snakemake.params.path, sample=snakemake.wildcards.sample, cell=cell), shell=True)
out.write("- {cell}\n".format(cell=cell))
| friendsofstrandseq/ashleys-qc-pipeline | workflow/scripts/utils/rm_unselected_cells.py | rm_unselected_cells.py | py | 1,345 | python | en | code | 3 | github-code | 6 |
| 26471438721 |
from math import floor, gcd, isqrt, log2, sqrt
def gauss_sum(n: int) -> int:
""" Calculates the sum of the first n natural numbers, based on the formula:
{n}Sigma{k=1} k = n * (n + 1) / 2
Conversion of very large floats to integers in this formula can lead to large
rounding losses, so division by 2 & int cast is replaced with a single bitwise
right shift, as n >> 1 = n / 2^1.
"""
return n * (n + 1) >> 1
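# e.g. gauss_sum(10) == 55, the same as sum(range(1, 11)) but in constant time.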
def is_coprime(x: int, y: int) -> bool:
"""
Two integers are co-prime (relatively/mutually prime) if the only positive
integer that is a divisor of both of them is 1.
"""
return gcd(x, y) == 1
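# e.g. is_coprime(8, 9) is True (gcd 1), while is_coprime(6, 9) is False (gcd 3).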
def is_hexagonal_number(h_n: int) -> int | None:
"""
Derivation solution is based on the formula:
n(2n - 1) = h_n, in quadratic form becomes:
0 = 2n^2 - n - h_n, with a, b, c = 2, -1, -h_n
putting these values in the quadratic formula becomes:
n = (1 +/- sqrt(1 + 8h_n)) / 4
so the inverse function, positive solution becomes:
n = (1 + sqrt(1 + 8h_n)) / 4
:returns: h_n's corresponding term if hexagonal, or None.
"""
n = 0.25 * (1 + sqrt(1 + 8 * h_n))
return int(n) if n == floor(n) else None
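# e.g. 15 is the 3rd hexagonal number, so is_hexagonal_number(15) == 3;
# is_hexagonal_number(14) returns None.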
def is_pentagonal_number(p_n: int) -> int | None:
"""
Derivation solution is based on the formula:
n(3n - 1) / 2 = p_n, in quadratic form becomes:
0 = 3n^2 - n - 2p_n, with a, b, c = 3, -1, -2p_n
putting these values in the quadratic formula becomes:
n = (1 +/- sqrt(1 + 24p_n)) / 6
so the inverse function, positive solution becomes:
n = (1 + sqrt(1 + 24p_n)) / 6
:returns: p_n's corresponding term if pentagonal, or None.
"""
n = (1 + sqrt(1 + 24 * p_n)) / 6
return int(n) if n == floor(n) else None
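# e.g. 22 is the 4th pentagonal number, so is_pentagonal_number(22) == 4;
# is_pentagonal_number(23) returns None.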
def is_prime(n: int) -> bool:
""" Checks if n is prime.
This version will be used preferentially, unless the argument is expected to
frequently exceed 1e5.
SPEED (WORSE for N > 1e15)
1.86s for a 15-digit prime
SPEED (WORSE for N > 1e10)
5.01ms for a 10-digit prime
SPEED (WORSE for N > 1e5)
6.6e4ns for a 6-digit prime
SPEED (BETTER for N < 1e5)
1.7e4ns for a 5-digit prime
SPEED (BETTER for N < 1e3)
7700ns for a 3-digit prime
"""
if n < 2:
return False
elif n < 4: # 2 and 3 are primes
return True
elif not n % 2: # 2 is the only even prime
return False
elif n < 9: # 4, 6, and 8 already excluded
return True
elif not n % 3:
# primes > 3 are of the form 6k(+/-1)
# i.e. they are never multiples of 3
return False
else:
# n can only have 1 prime factor > sqrt(n): n itself!
max_p = isqrt(n)
step = 5 # as multiples of prime 5 not yet assessed
# 11, 13, 17, 19, and 23 will all bypass this loop
while step <= max_p:
if not n % step or not n % (step + 2):
return False
step += 6
return True
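# e.g. is_prime(97) is True, while is_prime(91) is False since 91 == 7 * 13.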
def is_prime_mr(num: int, k_rounds: list[int] | None = None) -> bool:
""" Miller-Rabin probabilistic algorithm determines if a large number is
likely to be prime.
This version will only be used if the argument is expected to frequently
exceed 1e5.
- The number received, once determined to be odd, is expressed as
n = (2^r)s + 1, with s being odd.
- A random integer, a, is chosen k times (higher k means higher accuracy),
with 0 < a < num.
- Calculate a^s % n. If this equals 1 or this plus 1 equals n while s has
the powers of 2 previously factored out returned, then n passes as a
strong probable prime.
- n should pass for all generated a.
The algorithm's complexity is O(k*log^3*n). This algorithm uses a list of
the first 5 primes instead of randomly generated a, as this has been proven
valid for numbers up to 2.1e12. Providing a list of the first 7 primes gives
test validity for numbers up to 3.4e14.
SPEED (BETTER for N > 1e15)
1.5e5ns for a 15-digit prime
SPEED (BETTER for N > 1e10)
7.6e4ns for a 10-digit prime
SPEED (BETTER for N > 1e5)
5.6e4ns for a 6-digit prime
SPEED (WORSE for N < 1e5)
4.2e4ns for a 5-digit prime
SPEED (WORSE for N < 1e3)
3.9e4ns for a 3-digit prime
"""
if k_rounds is None:
k_rounds = [2, 3, 5, 7, 11]
if 2 <= num <= 3:
return True
if num < 2 or num % 2 == 0:
return False
def miller_rabin(a: int, s: int, r: int, n: int) -> bool:
# calculate a^s % n
x = pow(a, s, n)
if x == 1 or x == n - 1:
return True
for _ in range(r):
x = pow(x, 2, n)
if x == 1:
return False
if x == n - 1:
return True
return False
# write num as 2^r * s + 1 by first getting r, the largest power of 2
# that divides (num - 1), by getting the index of the right-most one bit
n_r = int(log2((num - 1) & -(num - 1)))
# x * 2^y == x << y
n_s = (num - 1) // (2 << (n_r - 1))
for k in k_rounds:
if k > num - 2:
break
if not miller_rabin(k, n_s, n_r, num):
return False
return True
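# e.g. is_prime_mr(2147483647) is True (2^31 - 1 is a Mersenne prime), while
# is_prime_mr(2147483649) is False since it is divisible by 3.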
def is_triangular_number(t_n: int) -> int | None:
"""
Derivation solution is based on the formula:
n(n + 1) / 2 = t_n, in quadratic form becomes:
0 = n^2 + n - 2t_n, with a, b, c = 1, 1, -2t_n
putting these values in the quadratic formula becomes:
n = (-1 +/- sqrt(1 + 8t_n)) / 2
so the inverse function, positive solution becomes:
n = (sqrt(1 + 8t_n) - 1) / 2
:returns: t_n's corresponding term if triangular, or None.
"""
n = 0.5 * (sqrt(1 + 8 * t_n) - 1)
return int(n) if n == floor(n) else None
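# e.g. 10 is the 4th triangular number, so is_triangular_number(10) == 4;
# is_triangular_number(11) returns None.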
def power_digit_sum(base: int, exponent: int) -> int:
""" Calculates the sum of the digits of the number, base^exponent. """
return sum(map(int, str(pow(base, exponent))))
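# e.g. power_digit_sum(2, 15) == 26, since 2^15 == 32768 and 3+2+7+6+8 == 26.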
def prime_factors_og(n: int) -> dict[int, int]:
""" Prime decomposition repeatedly divides out all prime factors using an
optimised Direct Search Factorisation algorithm.
Every prime number after 2 will be odd and there can be at most 1 prime factor
greater than sqrt(n), which would be n itself if n is a prime. This is based
on all cofactors having been already tested following the formula:
n / floor(sqrt(n) + 1) < sqrt(n)
e.g. N = 12 returns {2=2, 3=1} -> 2^2 * 3^1 = 12
SPEED (WORSE for N with large factors)
55.88s for N = 600_851_475_143
SPEED (WORSE for N with small factors)
74.70ms for N = 1e12
:returns: Dict of prime factors (keys) and their exponents (values).
:raises ValueError: If argument is not greater than 1.
"""
if n <= 1:
raise ValueError("Must provide a natural number greater than 1")
primes = dict()
factors = [2]
factors.extend(range(3, isqrt(n) + 1, 2))
for factor in factors:
while n % factor == 0:
if factor in primes:
primes[factor] += 1
else:
primes[factor] = 1
n //= factor
if n > 2:
primes[n] = primes[n] + 1 if n in primes else 1
return primes
def prime_factors(n: int) -> dict[int, int]:
""" Prime decomposition repeatedly divides out all prime factors using a
Direct Search Factorisation algorithm without any optimisation.
e.g. N = 12 returns {2=2, 3=1} -> 2^2 * 3^1 = 12
This version will be used in future solutions.
SPEED (BETTER for N with large factors)
2.9e+05ns for N = 600_851_475_143
SPEED (BETTER for N with small factors)
8590ns for N = 1e12
:returns: Dict of prime factors (keys) and their exponents (values).
:raises ValueError: If argument is not greater than 1.
"""
if n <= 1:
raise ValueError("Must provide a natural number greater than 1")
primes = dict()
factor = 2
while factor * factor <= n:
while n % factor == 0 and n != factor:
if factor in primes:
primes[factor] += 1
else:
primes[factor] = 1
n //= factor
factor += 1
if n > 1:
primes[n] = primes[n] + 1 if n in primes else 1
return primes
def prime_numbers_og(n: int) -> list[int]:
"""
Sieve of Eratosthenes algorithm outputs all prime numbers less than or equal
to the upper bound provided.
SPEED (WORSE)
23.04ms for N = 1e5
"""
# create mask representing [2, max], with all even numbers except 2 (index 0)
# marked false
boolean_mask = [not (i != 0 and i % 2 == 0) for i in range(n - 1)]
for p in range(3, isqrt(n) + 1, 2):
if boolean_mask[p - 2]:
if p * p > n:
break
# mark all multiples (composites of the divisors) that are >= p squared
# as false
for m in range(p * p, n + 1, 2 * p):
boolean_mask[m - 2] = False
primes = []
for i, isPrime in enumerate(boolean_mask):
if isPrime:
primes.append(i + 2)
return primes
def prime_numbers(n: int) -> list[int]:
"""
Still uses Sieve of Eratosthenes method to output all prime numbers less than
or equal to the upper bound provided, but cuts processing time in half by only
allocating mask memory to odd numbers and by only looping through multiples of
odd numbers.
This version will be used in future solutions.
SPEED (BETTER)
14.99ms for N = 1e5
"""
if n < 2:
return []
odd_sieve = (n - 1) // 2
upper_limit = isqrt(n) // 2
# create mask representing [2, 3..n step 2]
boolean_mask = [True] * (odd_sieve + 1)
# boolean_mask[0] corresponds to prime 2 & is skipped
for i in range(1, upper_limit + 1):
if boolean_mask[i]:
# j = next index at which multiple of odd prime exists
j = i * 2 * (i + 1)
while j <= odd_sieve:
boolean_mask[j] = False
j += 2 * i + 1
primes = []
for i, isPrime in enumerate(boolean_mask):
if i == 0:
primes.append(2)
continue
if isPrime:
primes.append(2 * i + 1)
return primes
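# e.g. prime_numbers(10) == [2, 3, 5, 7].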
def pythagorean_triplet(m: int, n: int, d: int) -> tuple[int, int, int]:
"""
Euclid's formula generates all Pythagorean triplets from 2 numbers, m and n.
All triplets originate from a primitive one by multiplying them by
d = gcd(a, b, c).
:raises ValueError: If arguments do not follow m > n > 0, or if not exactly
one is even, or if they are not co-prime, i.e. gcd(m, n) != 1.
"""
if n < 1 or m < n:
raise ValueError("Positive integers assumed to be m > n > 0")
if not ((m % 2 == 0) ^ (n % 2 == 0)):
raise ValueError("Integers must be opposite parity")
if not is_coprime(m, n):
raise ValueError("Positive integers must be co-prime")
a = (m * m - n * n) * d
b = 2 * m * n * d
c = (m * m + n * n) * d
return min(a, b), max(a, b), c
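# e.g. pythagorean_triplet(2, 1, 1) == (3, 4, 5), and scaling the primitive triplet
# with d=2 gives pythagorean_triplet(2, 1, 2) == (6, 8, 10).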
def sum_proper_divisors_og(n: int) -> int:
""" Calculates the sum of all divisors of n, not inclusive of n.
Solution optimised based on the following:
- N == 1 has no proper divisor but 1 is a proper divisor of all other naturals.
- A perfect square would duplicate divisors if included in the loop range.
- Loop range differs for odd numbers as they cannot have even divisors.
SPEED (WORSE)
8.8e4ns for N = 1e6 - 1
"""
if n < 2:
return 0
total = 1
max_divisor = isqrt(n)
if max_divisor * max_divisor == n:
total += max_divisor
max_divisor -= 1
divisor_range = range(3, max_divisor + 1, 2) if n % 2 != 0 \
else range(2, max_divisor + 1)
for d in divisor_range:
if n % d == 0:
total += d + n // d
return total
def sum_proper_divisors(num: int) -> int:
""" Calculates the sum of all divisors of num, not inclusive of num.
Solution above is further optimised by using prime factorisation to
out-perform the original method.
This version will be used in future solutions.
SPEED (BETTER)
1.5e4ns for N = 1e6 - 1
"""
if num < 2:
return 0
n = num
total = 1
p = 2
while p * p <= num and n > 1:
if n % p == 0:
j = p * p
n //= p
while n % p == 0:
j *= p
n //= p
total *= (j - 1)
total //= (p - 1)
if p == 2:
p += 1
else:
p += 2
if n > 1:
total *= (n + 1)
return total - num
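# e.g. sum_proper_divisors(28) == 28 (1 + 2 + 4 + 7 + 14), confirming 28 is a perfect number.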
| bog-walk/project-euler-python | util/maths/reusable.py | reusable.py | py | 12,786 | python | en | code | 0 | github-code | 6 |
| 32506954132 |
import csv
import numpy as np
import os
import pydicom
from segmentation_models.backbones import get_preprocessing
import tensorflow as tf
from pneumothorax_segmentation.constants import image_size, folder_path
from pneumothorax_segmentation.data_augment import apply_random_data_augment
from pneumothorax_segmentation.params import tf_image_size
# Documentation for reading dicom files at https://pydicom.github.io/pydicom/stable/viewing_images.html#using-pydicom-with-matplotlib
preprocess_input = get_preprocessing("resnet34")
def get_all_images_list(folder):
"Load all images filenames in folder. Returns a list of (filepath, filename)"
all_images_in_folder = []
for dirName, _, fileList in os.walk(folder_path + "/data/dicom-images-%s" % folder):
for filename in fileList:
if ".dcm" in filename.lower():
all_images_in_folder.append((os.path.join(dirName,filename), filename.replace(".dcm", "")))
return all_images_in_folder
def get_dicom_data(file_path):
"Return the dicom raw data of a given file"
return pydicom.dcmread(file_path)
cached_csv = []
def get_raw_masks(name):
"""
Returns a list of the masks as they appear in train-rle.csv. Masks '-1' are filtered out\n
Note side-effect: loads the csv on the first run and caches it
"""
global cached_csv
# The csv data is stored in a cache. This way, the csv is read only once
if (len(cached_csv) == 0):
with open(folder_path + '/data/train-rle.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
for row in csv_reader:
cached_csv.append(row)
# Retrieve masks as they are in the csv
raw_masks = []
for row in cached_csv:
if row[0] == name:
raw_masks.append(row[1])
# Remove the -1 from images with no mask
if (raw_masks[0] == " -1"):
raw_masks = []
return raw_masks
def get_image_label(name):
"Returns 1 if there is a pneumothorax, 0 otherwise. Based on data in train-rle.csv"
raw_masks = get_raw_masks(name)
if len(raw_masks) == 0:
return 0
return 1
def get_true_mask(name):
"Takes the name of the image as input and returns the mask mapping as a numpy matrix of shape (image_size, image_size) and values 0-1"
raw_masks = get_raw_masks(name)
# Format the masks to an exploitable format
masks = []
for raw_mask in raw_masks:
mask = raw_mask.split(" ")
mask = mask[1:] # raw_mask starts with a space
mask = [int(m) for m in mask]
masks.append(mask)
# Use the masks to create the actual mapping of image_size * image_size
mask_mapping = np.zeros(image_size ** 2, dtype=np.int)
for mask in masks:
is_it_a_mask = False
current_pixel = 0
for pixel_long_movement in mask:
if is_it_a_mask:
for i in range(pixel_long_movement):
mask_mapping[current_pixel + i] = 1
current_pixel += pixel_long_movement
is_it_a_mask = not is_it_a_mask
mask_mapping = np.reshape(mask_mapping, (image_size, image_size))
mask_mapping = np.transpose(mask_mapping, (1, 0))
return mask_mapping
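# Note (added for clarity): as implemented above, the RLE values alternate background and mask
# run lengths, e.g. a mask list [5, 3, 10, 2] leaves pixels 0-4 at 0, sets pixels 5-7 to 1,
# skips 10 more pixels, then sets pixels 18-19 to 1 (before the final transpose).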
def format_pixel_array_for_tf(pixel_array, apply_data_augment_technique=None):
"""
Inputs pixel_array as it is stored in the dicom file. Outputs a tensor ready to go through the models\n
apply_data_augment_technique can be used to apply data augmentation. See apply_random_data_augment for values
"""
image = tf.convert_to_tensor(pixel_array, dtype=tf.float32)
image = tf.reshape(image, (1, image_size, image_size, 1))
if (apply_data_augment_technique != None):
image = apply_random_data_augment(image, apply_data_augment_technique)
# tf.image.resize behaves weirdly with the default method when reducing size. AREA method makes more sense in our case, though the default bilinear method makes more sense when making an image bigger
image = tf.image.resize(image, (tf_image_size, tf_image_size), align_corners=True, method=tf.image.ResizeMethod.AREA)
image = tf.image.grayscale_to_rgb(image)
image = preprocess_input(image)
return image
| benoitkoenig/pneumothorax-segmentation | preprocess.py | preprocess.py | py | 4,252 | python | en | code | 0 | github-code | 6 |
| 24430998404 |
from sys import stdin
def minimum_swaps(arr,n):
grafo = {}
solucion = [i+1 for i in range(n)]
ans = 0
i = 0
while solucion != arr:
#print(solucion,arr)
# If the element is not already in its correct position, swap it into place
if arr[i] != solucion[i]:
aux = arr[i] #4
arr[i] = arr[aux-1]
arr[aux-1]=aux
ans += 1
else:
i+=1
return ans
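# e.g. minimum_swaps([4, 3, 1, 2], 4) returns 3: swap 4 and 2, then 2 and 3, then 3 and 1.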
def main():
n = int(stdin.readline().strip())
array = [int(_) for _ in stdin.readline().strip().split()]
print(minimum_swaps(array,n))
main()
| Sim0no/Arenas | arrays/minimum_swaps_2.py | minimum_swaps_2.py | py | 666 | python | en | code | 0 | github-code | 6 |
| 24270752412 |
import numpy as np
import matplotlib.pyplot as plt
import os
import torch
import torchvision
from torchvision import transforms
from sklearn.metrics import precision_recall_curve, average_precision_score, auc, roc_auc_score, roc_curve
from config import *
import random
maha_dists = np.load('maha_dists.npy',allow_pickle=True)
input_data = np.load('/network/tmp1/bhattdha/detectron2_kitti/embeddings_storage/final_data.npy', allow_pickle=True)[()]
input_data_OOD = np.load('/network/tmp1/bhattdha/detectron2_kitti/embeddings_storage/final_data_OOD.npy', allow_pickle=True)[()]
## the dataset
X_org = input_data['features']
y_org = input_data['labels']
# X_ood = input_data_OOD['features']
# y_ood = input_data_OOD['labels']
# y_ood[y_ood == 6] = 5
# y_ood[y_ood == 7] = 5
# ood_class = [5, 6, 7]
X = X_org
y = y_org
## total reprodicibility
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
X_ood = input_data_OOD['features']
y_ood = input_data_OOD['labels']
val_data_all_classes = {}
means = {}
covars = {}
# train_fraction = 0.7
num_classes = max(y)+1
class_labels = np.arange(num_classes)
# class_labels = np.random.permutation(num_classes)
# class_labels = np.random.permutation(class_labels)
for class_label in class_labels:
print('Training Class# {}'.format(class_label))
indices = np.where(y==class_label)[0]
# indices = np.random.permutation(indices)
data = X[indices,:]
# class_label_fake = (class_label + 5)%len(class_labels)
# indices_fake = np.where(y==class_label_fake)[0]
# val_other_class_data = X[indices_fake,:]
# data = np.random.permutation(data)
train_data_samples = int(len(data)*train_fraction)
val_data_samples = int(len(data) - train_data_samples)
train_data = 1e2*data[:train_data_samples,:]
val_data = 1e2*data[train_data_samples:, :]
# data = {'train_data': train_data, 'val_data': val_data}
val_data_all_classes[str(class_label)] = val_data
mean = np.mean(train_data, axis = 0)
cov = np.cov(train_data.T)
means[str(class_label)] = mean
## may be wrong!
covars[str(class_label)] = np.linalg.inv(cov + 1e-10*np.identity(1024))
maha_class = maha_dists[:,5].astype(int)
maha_true_dist = []
maha_false_dist = []
# for ind, m in enumerate(maha_dists):
# maha_true_dist.append(m[maha_class[ind]])
# m[maha_class[ind]] = np.inf
# m[5] = np.inf
# maha_false_dist.append(m.min())
## loading the results
# maha_true_dist = np.array(maha_true_dist)
# maha_false_dist = np.array(maha_false_dist)
# input_data_OOD = np.load('/network/tmp1/bhattdha/detectron2_kitti/embeddings_storage/final_data_OOD.npy', allow_pickle=True)[()]
# X_ood = input_data_OOD['features']
# y_ood = input_data_OOD['labels']
acc_threshs = [60.0, 70.0, 80.0, 85.0, 90.0, 95.0]
ood_stats = {}
for acc_thresh in acc_threshs:
print("For accuracy: ", acc_thresh)
mahathresh = {}
class_dist = {}
for i in range(num_classes):
class_dist[i] = maha_dists[maha_dists[:,5]==i][:,i]
class_dist[i].sort()
class_dist[i] = class_dist[i][::-1]
index = int(len(class_dist[i]) - len(class_dist[i])*acc_thresh/100.0)
mahathresh[i] = class_dist[i][index]
# mahathresh = {0: 3093.944707607109, 1: 5710.413855647991, 2: 28235.425795092746, 3: 79163.39452332728, 4: 2313.9860080440644}
tp = 0
fp = 0
for x in X_ood:
data_point = 1e2*x
flag = True
mds = [] ## has mahalanobis distances
for mean_label in means.keys():
diff = (data_point - means[mean_label]).reshape(len(data_point), 1)
mahalanobis_distance = np.dot(diff.T, np.dot(covars[mean_label], diff))[0][0]
# maha_all.append(mahalanobis_distance)
mds.append(mahalanobis_distance)
for i in mahathresh.keys():
if mahathresh[i] > mds[i]:
fp += 1
flag = False
break
else:
continue
if flag:
tp += 1
ood_stats[acc_thresh] = {'tp':tp, 'fp':fp, 'accuracy': tp/(tp+fp)}
import ipdb; ipdb.set_trace()
colors = ['C'+str(i+1) for i in range(5)]
for i in range(4):
plt.plot(class_dist[i], '-o', alpha=0.7, color=colors[i], label="class maha dists"+str(i).zfill(5))
# plt.plot(maha_false_dist, '-o', alpha=0.7, color=colors[1], label="maha_false_dist")
# [1e3, 1e4, 1e3]
plt.legend()
plt.legend(loc='upper right')
plt.xlabel('datapoint ->')
plt.ylabel('mahalanobis distance -> ')
plt.title('Mahalanobis distance plot')
plt.savefig('maha_dists.png')
import ipdb; ipdb.set_trace()
|
dhaivat1729/detectron2_CL
|
generative_classifier/maha_dist_analysis.py
|
maha_dist_analysis.py
|
py
| 4,431 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8022762471
|
#!/usr/bin/env python
# encoding: utf-8
# @Time : 2019-07-31 10:24
__author__ = 'Ted'
from PIL import Image, ImageFont, ImageDraw
content={
"back_img":"pre/paper.jpg",
"001":{
"ad":'老板,买10盒月饼呗',
"head":'001.jpg'
},
"002": {
"ad": '老板,买20盒月饼呗',
"head": '002.jpg'
},
"003": {
"ad": '老板,生活不易,买50盒月饼呗',
"head": '003.jpg'
},
"004": {
"ad": '老板,买个80盒月饼,不多',
"head": '004.jpg'
},
"005": {
"ad": '老板,看面相,你应该买100盒月饼',
"head": '005.jpg'
},
"006": {
"ad": '老板,恭喜你中奖了,奖品是150盒月饼',
"head": '006.jpg'
},
"007": {
"ad": '老板,你的员工让我告诉你,他们想吃月饼了',
"head": '007.jpg'
},
"008": {
"ad": '老板,我卖月饼,买200盒呗',
"head": '008.jpg'
},
"009": {
"ad": '老板,不整500盒月饼送礼啊',
"head": '009.jpg'
}
}
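# Each entry of content pairs a head image with a (Chinese) ad line, e.g. "001"
# asks the boss to buy 10 boxes of mooncakes and "009" asks for 500 boxes;
# get_pic() below pastes the head image, wraps the ad text onto the card, stamps
# the number in a large font, and appends the footer image plus the QR code.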
def get_pic(background,head,adcontent,mark,pic_name):
im = Image.open(background)
head_img = Image.open(f"head/{head}").resize((150,150),Image.ANTIALIAS)
im.paste(head_img,(75,20))
draw = ImageDraw.Draw(im)
fnt = ImageFont.truetype("pre/SimSun.ttf",20)
ad_parts = adcontent.split(",")
y_pos = 180
for ad_part in ad_parts:
if ad_part!=ad_parts[-1]:
ad_w,ad_h = draw.textsize(ad_part+",", font=fnt)
draw.text(((300-ad_w)/2,y_pos),ad_part+",",font=fnt,fill=(0,0,0))
y_pos+=ad_h+10
else:
ad_w, ad_h = draw.textsize(ad_part, font=fnt)
draw.text(((300 - ad_w) / 2, y_pos), ad_part, font=fnt, fill=(0, 0, 0))
y_pos += ad_h + 10
mark_font = ImageFont.truetype("pre/arial.ttf",100)
draw.text((125,400),mark,font=mark_font,fill=(0,0,0))
haha = Image.open("pre/haha.jpg")
im.paste(haha,(0,650))
qrcode = Image.open("pre/tedxpy.jpg").resize((80,80),Image.ANTIALIAS)
im.paste(qrcode,(180,810))
sign_font = ImageFont.truetype("pre/SimSun.ttf",10)
draw.text((60,875),"自定义制作图片,请扫码",font=sign_font,fill=(0,0,0))
im.save(pic_name)
if __name__== "__main__":
for i in range(1,10):
background = "pre/paper.jpg"
head = content[f'00{i}']['head']
adcontent = content[f'00{i}']['ad']
get_pic(background,head,adcontent,f"{i}",f"{i}.jpg")
print("九宫格图片生成完毕!")
|
pengfexue2/friends_ad
|
create_pics.py
|
create_pics.py
|
py
| 2,590 |
python
|
en
|
code
| 3 |
github-code
|
6
|
194935106
|
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework import serializers
from rest_framework import generics
from rest_framework import viewsets
from rest_framework.decorators import detail_route, list_route
from rest_framework.views import APIView
from core.models import *
from django.forms import widgets
from django.conf.urls import patterns, url
from services.cord.models import VOLTTenant, VBNGTenant, CordSubscriberRoot
from core.xoslib.objects.cordsubscriber import CordSubscriber
from plus import PlusSerializerMixin, XOSViewSet
from django.shortcuts import get_object_or_404
from xos.apibase import XOSListCreateAPIView, XOSRetrieveUpdateDestroyAPIView, XOSPermissionDenied
from xos.exceptions import *
import json
import subprocess
if hasattr(serializers, "ReadOnlyField"):
# rest_framework 3.x
ReadOnlyField = serializers.ReadOnlyField
else:
# rest_framework 2.x
ReadOnlyField = serializers.Field
class CordSubscriberIdSerializer(serializers.ModelSerializer, PlusSerializerMixin):
id = ReadOnlyField()
service_specific_id = ReadOnlyField()
vlan_id = ReadOnlyField() # XXX remove this
c_tag = ReadOnlyField()
s_tag = ReadOnlyField()
vcpe_id = ReadOnlyField()
instance = ReadOnlyField()
image = ReadOnlyField()
vbng_id = ReadOnlyField()
firewall_enable = serializers.BooleanField()
firewall_rules = serializers.CharField()
url_filter_enable = serializers.BooleanField()
url_filter_rules = serializers.CharField()
url_filter_level = serializers.CharField(required=False)
cdn_enable = serializers.BooleanField()
instance_name = ReadOnlyField()
image_name = ReadOnlyField()
routeable_subnet = serializers.CharField(required=False)
ssh_command = ReadOnlyField()
bbs_account = ReadOnlyField()
wan_container_ip = ReadOnlyField()
uplink_speed = serializers.CharField(required=False)
downlink_speed = serializers.CharField(required=False)
status = serializers.CharField()
enable_uverse = serializers.BooleanField()
lan_ip = ReadOnlyField()
wan_ip = ReadOnlyField()
nat_ip = ReadOnlyField()
private_ip = ReadOnlyField()
wan_mac = ReadOnlyField()
vcpe_synced = serializers.BooleanField()
humanReadableName = serializers.SerializerMethodField("getHumanReadableName")
class Meta:
model = CordSubscriber
fields = ('humanReadableName', 'id',
'service_specific_id', 'vlan_id', 's_tag', 'c_tag',
'vcpe_id', 'instance', 'instance_name', 'image', 'image_name',
'firewall_enable', 'firewall_rules',
'url_filter_enable', 'url_filter_rules', 'url_filter_level',
'bbs_account',
'ssh_command',
'vcpe_synced',
'cdn_enable', 'vbng_id', 'routeable_subnet', 'nat_ip', 'lan_ip', 'wan_ip', 'private_ip', 'wan_mac',
'wan_container_ip',
'uplink_speed', 'downlink_speed', 'status', 'enable_uverse')
def getHumanReadableName(self, obj):
return obj.__unicode__()
#------------------------------------------------------------------------------
# The "old" API
# This is used by the xoslib-based GUI
#------------------------------------------------------------------------------
class CordSubscriberList(XOSListCreateAPIView):
queryset = CordSubscriber.get_tenant_objects().select_related().all()
serializer_class = CordSubscriberIdSerializer
method_kind = "list"
method_name = "cordsubscriber"
class CordSubscriberDetail(XOSRetrieveUpdateDestroyAPIView):
queryset = CordSubscriber.get_tenant_objects().select_related().all()
serializer_class = CordSubscriberIdSerializer
method_kind = "detail"
method_name = "cordsubscriber"
# We fake a user object by pulling the user data struct out of the
# subscriber object...
def serialize_user(subscriber, user):
return {"id": "%d-%d" % (subscriber.id, user["id"]),
"name": user["name"],
"level": user.get("level",""),
"mac": user.get("mac", ""),
"subscriber": subscriber.id }
class CordUserList(APIView):
method_kind = "list"
method_name = "corduser"
def get(self, request, format=None):
instances=[]
for subscriber in CordSubscriber.get_tenant_objects().all():
for user in subscriber.users:
instances.append( serialize_user(subscriber, user) )
return Response(instances)
def post(self, request, format=None):
data = request.DATA
subscriber = CordSubscriber.get_tenant_objects().get(id=int(data["subscriber"]))
user = subscriber.create_user(name=data["name"],
level=data["level"],
mac=data["mac"])
subscriber.save()
return Response(serialize_user(subscriber,user))
class CordUserDetail(APIView):
method_kind = "detail"
method_name = "corduser"
def get(self, request, format=None, pk=0):
parts = pk.split("-")
subscriber = CordSubscriber.get_tenant_objects().filter(id=parts[0])
for user in subscriber.users:
return Response( [ serialize_user(subscriber, user) ] )
raise XOSNotFound("Failed to find user %s" % pk)
def delete(self, request, pk):
parts = pk.split("-")
subscriber = CordSubscriber.get_tenant_objects().get(id=int(parts[0]))
subscriber.delete_user(parts[1])
subscriber.save()
return Response("okay")
def put(self, request, pk):
kwargs={}
if "name" in request.DATA:
kwargs["name"] = request.DATA["name"]
if "level" in request.DATA:
kwargs["level"] = request.DATA["level"]
if "mac" in request.DATA:
kwargs["mac"] = request.DATA["mac"]
parts = pk.split("-")
subscriber = CordSubscriber.get_tenant_objects().get(id=int(parts[0]))
user = subscriber.update_user(parts[1], **kwargs)
subscriber.save()
return Response(serialize_user(subscriber,user))
#------------------------------------------------------------------------------
# The "new" API with many more REST endpoints.
# This is for integration with with the subscriber GUI
#------------------------------------------------------------------------------
class CordSubscriberViewSet(XOSViewSet):
base_name = "subscriber"
method_name = "rs/subscriber"
method_kind = "viewset"
queryset = CordSubscriber.get_tenant_objects().select_related().all()
serializer_class = CordSubscriberIdSerializer
def get_vcpe(self):
subscriber = self.get_object()
if not subscriber.vcpe:
raise XOSMissingField("vCPE object is not present for subscriber")
return subscriber.vcpe
@classmethod
def get_urlpatterns(self):
patterns = super(CordSubscriberViewSet, self).get_urlpatterns()
patterns.append( self.detail_url("vcpe_synced/$", {"get": "get_vcpe_synced"}, "vcpe_synced") )
patterns.append( self.detail_url("url_filter/$", {"get": "get_url_filter"}, "url_filter") )
patterns.append( self.detail_url("url_filter/(?P<level>[a-zA-Z0-9\-_]+)/$", {"put": "set_url_filter"}, "url_filter") )
patterns.append( self.detail_url("services/$", {"get": "get_services"}, "services") )
patterns.append( self.detail_url("services/(?P<service>[a-zA-Z0-9\-_]+)/$", {"get": "get_service"}, "get_service") )
patterns.append( self.detail_url("services/(?P<service>[a-zA-Z0-9\-_]+)/true/$", {"put": "enable_service"}, "enable_service") )
patterns.append( self.detail_url("services/(?P<service>[a-zA-Z0-9\-_]+)/false/$", {"put": "disable_service"}, "disable_service") )
patterns.append( self.detail_url("users/$", {"get": "get_users", "post": "create_user"}, "users") )
patterns.append( self.detail_url("users/clearusers/$", {"get": "clear_users", "put": "clear_users", "post": "clear_users"}, "clearusers") )
patterns.append( self.detail_url("users/newuser/$", {"put": "create_user", "post": "create_user"}, "newuser") )
patterns.append( self.detail_url("users/(?P<uid>[0-9\-]+)/$", {"delete": "delete_user"}, "user") )
patterns.append( self.detail_url("users/(?P<uid>[0-9\-]+)/url_filter/$", {"get": "get_user_level"}, "user_level") )
patterns.append( self.detail_url("users/(?P<uid>[0-9\-]+)/url_filter/(?P<level>[a-zA-Z0-9\-_]+)/$", {"put": "set_user_level"}, "set_user_level") )
patterns.append( self.detail_url("bbsdump/$", {"get": "get_bbsdump"}, "bbsdump") )
patterns.append( url("^rs/initdemo/$", self.as_view({"put": "initdemo", "get": "initdemo"}), name="initdemo") )
patterns.append( url("^rs/subidlookup/(?P<ssid>[0-9\-]+)/$", self.as_view({"get": "ssiddetail"}), name="ssiddetail") )
patterns.append( url("^rs/subidlookup/$", self.as_view({"get": "ssidlist"}), name="ssidlist") )
patterns.append( url("^rs/vbng_mapping/$", self.as_view({"get": "get_vbng_mapping"}), name="vbng_mapping") )
return patterns
def list(self, request):
object_list = self.filter_queryset(self.get_queryset())
serializer = self.get_serializer(object_list, many=True)
return Response({"subscribers": serializer.data})
def get_vcpe_synced(self, request, pk=None):
subscriber = self.get_object()
return Response({"vcpe_synced": subscriber.vcpe_synced})
def get_url_filter(self, request, pk=None):
subscriber = self.get_object()
return Response({"level": subscriber.url_filter_level})
def set_url_filter(self, request, pk=None, level=None):
subscriber = self.get_object()
subscriber.url_filter_level = level
subscriber.save()
return Response({"level": subscriber.url_filter_level})
def get_users(self, request, pk=None):
subscriber = self.get_object()
return Response(subscriber.users)
def get_user_level(self, request, pk=None, uid=None):
subscriber = self.get_object()
user = subscriber.find_user(uid)
if user and user.get("level", None):
level = user["level"]
else:
level = self.get_object().url_filter_level
return Response( {"id": uid, "level": level} )
def set_user_level(self, request, pk=None, uid=None, level=None):
subscriber = self.get_object()
subscriber.update_user(uid, level=level)
subscriber.save()
return self.get_user_level(request, pk, uid)
def create_user(self, request, pk=None):
data = request.DATA
name = data.get("name",None)
mac = data.get("mac",None)
if (not name):
raise XOSMissingField("name must be specified when creating user")
if (not mac):
raise XOSMissingField("mac must be specified when creating user")
subscriber = self.get_object()
newuser = subscriber.create_user(name=name, mac=mac)
subscriber.save()
return Response(newuser)
def delete_user(self, request, pk=None, uid=None):
subscriber = self.get_object()
subscriber.delete_user(uid)
subscriber.save()
return Response( {"id": uid, "deleted": True} )
def clear_users(self, request, pk=None):
subscriber = self.get_object()
subscriber.users = []
subscriber.save()
return Response( "Okay" )
def get_services(self, request, pk=None):
subscriber = self.get_object()
return Response(subscriber.services)
def get_service(self, request, pk=None, service=None):
service_attr = service+"_enable"
subscriber = self.get_object()
return Response({service: getattr(subscriber, service_attr)})
def enable_service(self, request, pk=None, service=None):
service_attr = service+"_enable"
subscriber = self.get_object()
setattr(subscriber, service_attr, True)
subscriber.save()
return Response({service: getattr(subscriber, service_attr)})
def disable_service(self, request, pk=None, service=None):
service_attr = service+"_enable"
subscriber = self.get_object()
setattr(subscriber, service_attr, False)
subscriber.save()
return Response({service: getattr(subscriber, service_attr)})
def get_bbsdump(self, request, pk=None):
subscriber = self.get_object()
        if not subscriber.volt or not subscriber.volt.vcpe:
raise XOSMissingField("subscriber has no vCPE")
if not subscriber.volt.vcpe.bbs_account:
raise XOSMissingField("subscriber has no bbs_account")
result=subprocess.check_output(["python", "/opt/xos/observers/vcpe/broadbandshield.py", "dump", subscriber.volt.vcpe.bbs_account, "123"])
if request.GET.get("theformat",None)=="text":
from django.http import HttpResponse
return HttpResponse(result, content_type="text/plain")
else:
return Response( {"bbs_dump": result } )
def setup_demo_subscriber(self, subscriber):
# nuke the users and start over
subscriber.users = []
subscriber.create_user(name="Mom's PC", mac="010203040506", level="PG_13")
subscriber.create_user(name="Dad's PC", mac="90E2Ba82F975", level="PG_13")
subscriber.create_user(name="Jack's Laptop", mac="685B359D91D5", level="PG_13")
subscriber.create_user(name="Jill's Laptop", mac="34363BC9B6A6", level="PG_13")
subscriber.save()
def initdemo(self, request):
object_list = CordSubscriber.get_tenant_objects().all()
# reset the parental controls in any existing demo vCPEs
for o in object_list:
if str(o.service_specific_id) in ["0", "1"]:
self.setup_demo_subscriber(o)
demo_subscribers = [o for o in object_list if o.is_demo_user]
if demo_subscribers:
return Response({"id": demo_subscribers[0].id})
subscriber = CordSubscriberRoot(service_specific_id=1234,
name="demo-subscriber",)
subscriber.is_demo_user = True
subscriber.save()
self.setup_demo_subscriber(subscriber)
return Response({"id": subscriber.id})
def ssidlist(self, request):
object_list = CordSubscriber.get_tenant_objects().all()
ssidmap = [ {"service_specific_id": x.service_specific_id, "subscriber_id": x.id} for x in object_list ]
return Response({"ssidmap": ssidmap})
def ssiddetail(self, pk=None, ssid=None):
object_list = CordSubscriber.get_tenant_objects().all()
ssidmap = [ {"service_specific_id": x.service_specific_id, "subscriber_id": x.id} for x in object_list if str(x.service_specific_id)==str(ssid) ]
if len(ssidmap)==0:
raise XOSNotFound("didn't find ssid %s" % str(ssid))
return Response( ssidmap[0] )
def get_vbng_mapping(self, request):
object_list = VBNGTenant.get_tenant_objects().all()
mappings = []
for vbng in object_list:
if vbng.mapped_ip and vbng.routeable_subnet:
mappings.append( {"private_ip": vbng.mapped_ip, "routeable_subnet": vbng.routeable_subnet, "mac": vbng.mapped_mac, "hostname": vbng.mapped_hostname} )
return Response( {"vbng_mapping": mappings} )
class CordDebugIdSerializer(serializers.ModelSerializer, PlusSerializerMixin):
# Swagger is failing because CordDebugViewSet has neither a model nor
# a serializer_class. Stuck this in here as a placeholder for now.
id = ReadOnlyField()
class Meta:
model = CordSubscriber
class CordDebugViewSet(XOSViewSet):
base_name = "cord_debug"
method_name = "rs/cord_debug"
method_kind = "viewset"
serializer_class = CordDebugIdSerializer
@classmethod
def get_urlpatterns(self):
patterns = []
patterns.append( url("^rs/cord_debug/vbng_dump/$", self.as_view({"get": "get_vbng_dump"}), name="vbng_dump"))
return patterns
# contact vBNG service and dump current list of mappings
def get_vbng_dump(self, request, pk=None):
result=subprocess.check_output(["curl", "http://10.0.3.136:8181/onos/virtualbng/privateip/map"])
if request.GET.get("theformat",None)=="text":
from django.http import HttpResponse
result = json.loads(result)["map"]
lines = []
for row in result:
for k in row.keys():
lines.append( "%s %s" % (k, row[k]) )
return HttpResponse("\n".join(lines), content_type="text/plain")
else:
return Response( {"vbng_dump": json.loads(result)["map"] } )
|
xmaruto/mcord
|
xos/core/xoslib/methods/cordsubscriber.py
|
cordsubscriber.py
|
py
| 17,144 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12009423058
|
from __future__ import annotations
from dataclasses import fields
from typing import Tuple
import numpy as np
from napari.layers import Image
def update_layer_contrast_limits(
layer: Image,
contrast_limits_quantiles: Tuple[float, float] = (0.01, 0.98),
contrast_limits_range_quantiles: Tuple[float, float] = (0.0, 1.0),
) -> None:
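    """Set the layer's contrast limits from quantiles of its nonzero data; the
    small 1e-8 offsets keep the upper bounds strictly above the lower ones when
    the requested quantiles coincide (e.g. near-constant data)."""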
nonzero_mask = layer.data > 0
if (~nonzero_mask).all():
return
limit_0, limit_1, limit_range_0, limit_range_1 = np.quantile(
layer.data[nonzero_mask],
(*contrast_limits_quantiles, *contrast_limits_range_quantiles),
)
layer.contrast_limits = (limit_0, limit_1 + 1e-8)
layer.contrast_limits_range = (limit_range_0, limit_range_1 + 1e-8)
def array_safe_eq(a, b) -> bool:
"""Check if a and b are equal, even if they are numpy arrays"""
if a is b:
return True
if isinstance(a, np.ndarray) and isinstance(b, np.ndarray):
return a.shape == b.shape and (a == b).all()
try:
return a == b
except TypeError:
return NotImplemented
def dataclass_eq(dc1, dc2) -> bool:
"""checks if two dataclasses which hold numpy arrays are equal"""
if dc1 is dc2:
return True
if dc1.__class__ is not dc2.__class__:
return NotImplemented
fields_names = [f.name for f in fields(dc1)]
return all(
array_safe_eq(getattr(dc1, field_name), getattr(dc2, field_name))
for field_name in fields_names
)
|
bkntr/napari-brainways
|
src/napari_brainways/utils.py
|
utils.py
|
py
| 1,473 |
python
|
en
|
code
| 6 |
github-code
|
6
|
29943421716
|
import argparse
import yaml
from pyspark.sql.functions import udf, when, year
class CreateSqlInput:
def __init__(self):
self.name = 'CalculateStats'
@staticmethod
@udf
def extract_production(dict_string):
try:
production_array = yaml.load(dict_string, Loader=yaml.FullLoader)
parsed_production = []
for production in production_array:
parsed_production.append(production['name'])
        except (ValueError, yaml.YAMLError):
parsed_production = []
return parsed_production
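    # Note: udf() defaults to StringType when no returnType is given, so the parsed
    # list of company names is stored as a string column; pass
    # ArrayType(StringType()) to udf if a real array column is needed.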
@staticmethod
def main(spark, config):
joined_parquet_path = config.get('PATHS', 'joined_parquet_path')
sql_input_path = config.get('PATHS', 'sql_input_path')
joined_df = spark.read.parquet(joined_parquet_path)
joined_df = joined_df.withColumn('production_companies',
CreateSqlInput.extract_production('production_companies'))
joined_df = joined_df.withColumn('ratio',
when(joined_df['revenue'] != 0, joined_df['budget']/joined_df['revenue'])
.otherwise(0.0))
joined_df = joined_df.withColumn('year', year('release_date'))
joined_df = joined_df.orderBy('ratio', ascending=False)
joined_df.select('title', 'production_companies', 'budget', 'revenue', 'ratio', 'year').show(5, False)
joined_df.select(['title',
'budget',
'year',
'revenue',
'vote_average',
'ratio',
'production_companies',
'url',
'abstract']).write.mode('overwrite').parquet(sql_input_path)
if __name__ == '__main__':
import sys
from os import path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from tfi_etl.sparkscript import SparkScriptRunner
parser = argparse.ArgumentParser()
parser.add_argument('-config')
args = parser.parse_args()
config_path = str(args.config)
calculate_stats = CreateSqlInput()
script_runner = SparkScriptRunner(config_path, calculate_stats)
script_runner.run()
|
richierichard99/TrueFilmIngest
|
tfi_etl/CreateSqlInput.py
|
CreateSqlInput.py
|
py
| 2,302 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12643025727
|
from playsound import playsound
import os
import pandas as pd
path = "audio/" # path to the dataset
files = os.listdir(path)
df = pd.DataFrame([], columns = ["file_name", "label"])
for i, file in enumerate(files):
print("Currently playing " + file)
playsound(path + file)
label = input("Please, provide the label(n for noisy and c for clean audio files): ")
while(label != "c" and label != "n"):
label = input("Provided label is neither n nor c. Try again... ")
df.loc[i] = [file, label]
df.to_json("data.json", orient = "records")
|
Barsegh-A/audio-labelling
|
script.py
|
script.py
|
py
| 556 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74957082106
|
# Sine wave plot tool
import numpy as np
import matplotlib.pyplot as plt
f =0.5 #frequency of sine wave
# f =2
A =5# maximum amplitude of sine wave
# A = 1
x = np.arange(-6.28, 6.28, 0.01)  # array from -2*pi to +2*pi with a small increment of 0.01
# x = np.arange(-3.14, 3.14, 0.01)
#y = A*np.sin(f*x)
y = A*np.tan(f*x)  # note: this currently plots a tangent curve; use the commented sine line above for a sine wave
plt.plot(x,y)
plt.xlabel('angle')
plt.ylabel('amplitude')
plt.show()
|
dilshad-geol/IRIS-2022-Seismology-Skill-Building-Workshop
|
00_UNIX_DataFiles/python/numpy/sine.py
|
sine.py
|
py
| 397 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34405330102
|
import matplotlib.pyplot as plt
import numpy as np
import os, tkinter, tkinter.filedialog, tkinter.messagebox
# show the file selection filedialog
root = tkinter.Tk()
root.withdraw()
fTyp = [('','*')]
iDir = os.path.abspath(os.path.dirname(__file__))
# tkinter.messagebox.showinfo('簡易プロットプログラムです','どのフォルダのcsvでグラフを作る?')
# output the processing file name
file = tkinter.filedialog.askopenfilename(filetypes = fTyp,initialdir = iDir)
# tkinter.messagebox.showinfo('oxプログラム',file)
df = np.loadtxt(file, skiprows=5, delimiter=',', encoding='utf-8')
rows = len(df[:,0])
x = np.arange(rows)
# Use the line below if you want the x-axis in seconds (column 0 of the csv).
# plt.plot(df[:,0], df[:,1])
# Use the line below if you want the x-axis to run from 1 to the number of rows in the csv.
plt.plot(x, df[:,1])
# plt.vlines(np.arange(24800,26400,200), -0.05, 0.05, color='k', linestyle=':', lw=0.5)
# plt.fill_between([24800,26400], -0.05, 0.05, color='skyblue')
plt.show()
|
kobashin/GHz-ultrasonic
|
easy_plot.py
|
easy_plot.py
|
py
| 1,071 |
python
|
ja
|
code
| 1 |
github-code
|
6
|
38292113901
|
import cv2
from cv2 import dnn_superres
# Create an SR object
sr = dnn_superres.DnnSuperResImpl_create()
# Read image
image = cv2.imread('2.jpg')
# ##########Read the desired model
#path = "./models/EDSR_x3.pb"
path = "./models/LapSRN_x2.pb"
sr.readModel(path)
# Set the desired model and scale to get correct pre- and post-processing
sr.setModel("edsr", 3)
# Upscale the image
result = sr.upsample(image)
cv2.imshow("Original Image", image)
cv2.imshow("Super Resolution by bicubic", cv2.resize(image,None, fx=3, fy=3, interpolation=cv2.INTER_CUBIC))
cv2.imshow("Super Resolution by DL", result)
key = cv2.waitKey(20000)
cv2.destroyAllWindows()
# Save the image
cv2.imwrite("./upscaled.png", result)
#OK
############################################### if you want to use GPU
# Read the desired model
"""
path = "EDSR_x3.pb"
sr.readModel(path)
# Set CUDA backend and target to enable GPU inference
sr.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
sr.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)
"""
|
Hsoleimanii/SuperResolution
|
super.py
|
super.py
|
py
| 1,013 |
python
|
en
|
code
| 1 |
github-code
|
6
|
42411313589
|
# -*- coding: utf-8 -*-
#
# File: BPDOrganizacion.py
#
# Copyright (c) 2011 by Conselleria de Infraestructuras y Transporte de la
# Generalidad Valenciana
#
# GNU General Public License (GPL)
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
#
__author__ = """Conselleria de Infraestructuras y Transporte de la Generalidad Valenciana
<[email protected]>, Model Driven Development sl <[email protected]>,
Antonio Carrasco Valero <[email protected]>"""
__docformat__ = 'plaintext'
from AccessControl import ClassSecurityInfo
from Products.Archetypes.atapi import *
from Products.gvSIGbpd.BPDUnidadOrganizacional import BPDUnidadOrganizacional
from Products.gvSIGbpd.config import *
# additional imports from tagged value 'import'
from Acquisition import aq_inner, aq_parent
##code-section module-header #fill in your manual code here
##/code-section module-header
schema = Schema((
BooleanField(
name='permiteLeer',
widget=BooleanField._properties['widget'](
label="Permite ver Organizacion",
label2="Allow to see Organization",
description="Si Verdadero, entonces los usuarios pueden ver la Organizacion. Puede ser Falso durante procesos de importacion largos, o por indicacion del administrador.",
description2="If True, then the users may see the Organization. It may be False during long import processes or by manager request.",
label_msgid='gvSIGbpd_BPDOrganizacion_attr_permiteLeer_label',
description_msgid='gvSIGbpd_BPDOrganizacion_attr_permiteLeer_help',
i18n_domain='gvSIGbpd',
),
description="Si Verdadero, entonces los usuarios pueden ver la Organizacion. Puede ser Falso durante procesos de importacion largos, o por indicacion del administrador.",
duplicates="0",
label2="Allow to see Organization",
ea_localid="344",
derived="0",
collection="false",
styleex="volatile=0;",
description2="If True, then the users may see the Organization. It may be False during long import processes or by manager request.",
ea_guid="{4AB95A00-7009-4f7d-8E86-E26666E1849C}",
read_only="True",
default="True",
label="Permite ver Organizacion",
containment="Not Specified",
position="0",
owner_class_name="BPDOrganizacion",
exclude_from_exportconfig="True",
exclude_from_copyconfig="True"
),
BooleanField(
name='permiteModificar',
widget=BooleanField._properties['widget'](
label="Permite modificar Organizacion",
label2="Allow to change Organization",
description="Si Verdadero, entonces los usuarios pueden cambiar la Organizacion. Puede ser Falso durante procesos de importacion largos, o por indicacion del administrador.",
description2="If True, then the users may change the Organization. It may be False during long import processes or by manager request.",
label_msgid='gvSIGbpd_BPDOrganizacion_attr_permiteModificar_label',
description_msgid='gvSIGbpd_BPDOrganizacion_attr_permiteModificar_help',
i18n_domain='gvSIGbpd',
),
description="Si Verdadero, entonces los usuarios pueden cambiar la Organizacion. Puede ser Falso durante procesos de importacion largos, o por indicacion del administrador.",
duplicates="0",
label2="Allow to change Organization",
ea_localid="345",
derived="0",
collection="false",
styleex="volatile=0;",
description2="If True, then the users may change the Organization. It may be False during long import processes or by manager request.",
ea_guid="{159316D0-39EF-4c8d-8CF7-FF2DBFE4C49D}",
read_only="True",
default="True",
label="Permite modificar Organizacion",
containment="Not Specified",
position="2",
owner_class_name="BPDOrganizacion",
exclude_from_exportconfig="True",
exclude_from_copyconfig="True"
),
ComputedField(
name='coleccionesPoliticasDeNegocio',
widget=ComputedWidget(
label="Politicas de Negocio (colecciones)",
label2="Business Policies (collections)",
description="Colecciones de Politicas de Negocio que gobiernan la Organizacion y sus Procesos de Negocio, y constituyen la base de las Reglas de Negocio.",
description2="Collections of Business Policies governing the Organisation and its Business Processes, and constitute the basis for the Business Rules.",
label_msgid='gvSIGbpd_BPDOrganizacion_contents_coleccionesPoliticasDeNegocio_label',
description_msgid='gvSIGbpd_BPDOrganizacion_contents_coleccionesPoliticasDeNegocio_help',
i18n_domain='gvSIGbpd',
),
contains_collections=True,
label2='Business Policies (collections)',
label='Politicas de Negocio (colecciones)',
represents_aggregation=True,
description2='Collections of Business Policies governing the Organisation and its Business Processes, and constitute the basis for the Business Rules.',
multiValued=1,
owner_class_name="BPDOrganizacion",
expression="context.objectValues(['BPDColeccionPoliticasDeNegocio'])",
computed_types=['BPDColeccionPoliticasDeNegocio'],
non_framework_elements=False,
description='Colecciones de Politicas de Negocio que gobiernan la Organizacion y sus Procesos de Negocio, y constituyen la base de las Reglas de Negocio.'
),
ComputedField(
name='coleccionesReglasDeNegocio',
widget=ComputedWidget(
label="Reglas de Negocio (colecciones)",
label2="Business Rules (collections)",
description="Colecciones de Reglas deNegocio que se derivan de las politicas de Negocio, y dirigen los Procesos de Negocio de la Organizacion.",
description2="Collections of Business Rules derived from Business Policies, and driving the Business Process in the Organisation.",
label_msgid='gvSIGbpd_BPDOrganizacion_contents_coleccionesReglasDeNegocio_label',
description_msgid='gvSIGbpd_BPDOrganizacion_contents_coleccionesReglasDeNegocio_help',
i18n_domain='gvSIGbpd',
),
contains_collections=True,
label2='Business Rules (collections)',
label='Reglas de Negocio (colecciones)',
represents_aggregation=True,
description2='Collections of Business Rules derived from Business Policies, and driving the Business Process in the Organisation.',
multiValued=1,
owner_class_name="BPDOrganizacion",
expression="context.objectValues(['BPDColeccionReglasDeNegocio'])",
computed_types=['BPDColeccionReglasDeNegocio'],
non_framework_elements=False,
description='Colecciones de Reglas deNegocio que se derivan de las politicas de Negocio, y dirigen los Procesos de Negocio de la Organizacion.'
),
ComputedField(
name='coleccionesProcesosDeNegocio',
widget=ComputedWidget(
label="Procesos de Negocio (colecciones)",
label2="Business Processes (collections)",
description="Colecciones de Procesos de Negocio realizando cursos de accion con los que la Organizacion persigue propositos especificos.",
description2="Collections of Business Processes realising courses of action through which the Organisation pursues specific goals.",
label_msgid='gvSIGbpd_BPDOrganizacion_contents_coleccionesProcesosDeNegocio_label',
description_msgid='gvSIGbpd_BPDOrganizacion_contents_coleccionesProcesosDeNegocio_help',
i18n_domain='gvSIGbpd',
),
contains_collections=True,
label2='Business Processes (collections)',
label='Procesos de Negocio (colecciones)',
represents_aggregation=True,
description2='Collections of Business Processes realising courses of action through which the Organisation pursues specific goals.',
multiValued=1,
owner_class_name="BPDOrganizacion",
expression="context.objectValues(['BPDColeccionProcesosDeNegocio'])",
computed_types=['BPDColeccionProcesosDeNegocio'],
non_framework_elements=False,
description='Colecciones de Procesos de Negocio realizando cursos de accion con los que la Organizacion persigue propositos especificos.'
),
ComputedField(
name='coleccionesArtefactos',
widget=ComputedWidget(
label="Artefactos (colecciones)",
label2="Artefacts (collections)",
description="Colecciones de Artefactos que se producen, consumen, consultan, editan, y en general son el objeto del esfuerzo de la Organizacion.",
description2="Collections of Artefacts produced, consumed, consulted, edited, or otherwise object of the Organisation effort.",
label_msgid='gvSIGbpd_BPDOrganizacion_contents_coleccionesArtefactos_label',
description_msgid='gvSIGbpd_BPDOrganizacion_contents_coleccionesArtefactos_help',
i18n_domain='gvSIGbpd',
),
contains_collections=True,
label2='Artefacts (collections)',
label='Artefactos (colecciones)',
represents_aggregation=True,
description2='Collections of Artefacts produced, consumed, consulted, edited, or otherwise object of the Organisation effort.',
multiValued=1,
owner_class_name="BPDOrganizacion",
expression="context.objectValues(['BPDColeccionArtefactos'])",
computed_types=['BPDColeccionArtefactos'],
non_framework_elements=False,
description='Colecciones de Artefactos que se producen, consumen, consultan, editan, y en general son el objeto del esfuerzo de la Organizacion.'
),
ComputedField(
name='coleccionesHerramientas',
widget=ComputedWidget(
label="Herramientas (colecciones)",
label2="Tools (collections)",
description="Colecciones de Herramientas que la Organizacion aplica para manejar ciertos Artefactos y asistir en la ejecucion de Pasos de Procesos de Negocio.",
description2="Collections of Tools applied in the Organisation to handle certain Artefacts, and assist in the execution of Business Process Steps.",
label_msgid='gvSIGbpd_BPDOrganizacion_contents_coleccionesHerramientas_label',
description_msgid='gvSIGbpd_BPDOrganizacion_contents_coleccionesHerramientas_help',
i18n_domain='gvSIGbpd',
),
contains_collections=True,
label2='Tools (collections)',
label='Herramientas (colecciones)',
represents_aggregation=True,
description2='Collections of Tools applied in the Organisation to handle certain Artefacts, and assist in the execution of Business Process Steps.',
multiValued=1,
owner_class_name="BPDOrganizacion",
expression="context.objectValues(['BPDColeccionHerramientas'])",
computed_types=['BPDColeccionHerramientas'],
non_framework_elements=False,
description='Colecciones de Herramientas que la Organizacion aplica para manejar ciertos Artefactos y asistir en la ejecucion de Pasos de Procesos de Negocio.'
),
),
)
##code-section after-local-schema #fill in your manual code here
##/code-section after-local-schema
BPDOrganizacion_schema = OrderedBaseFolderSchema.copy() + \
getattr(BPDUnidadOrganizacional, 'schema', Schema(())).copy() + \
schema.copy()
##code-section after-schema #fill in your manual code here
##/code-section after-schema
class BPDOrganizacion(OrderedBaseFolder, BPDUnidadOrganizacional):
"""
"""
security = ClassSecurityInfo()
__implements__ = (getattr(OrderedBaseFolder,'__implements__',()),) + (getattr(BPDUnidadOrganizacional,'__implements__',()),)
# This name appears in the 'add' box
archetype_name = 'Organizacion'
meta_type = 'BPDOrganizacion'
portal_type = 'BPDOrganizacion'
# Change Audit fields
creation_date_field = 'fechaCreacion'
creation_user_field = 'usuarioCreador'
modification_date_field = 'fechaModificacion'
modification_user_field = 'usuarioModificador'
deletion_date_field = 'fechaEliminacion'
deletion_user_field = 'usuarioEliminador'
is_inactive_field = 'estaInactivo'
change_counter_field = 'contadorCambios'
sources_counters_field = 'contadoresDeFuentes'
change_log_field = 'registroDeCambios'
# Versioning and Translation fields
inter_version_field = 'uidInterVersionesInterno'
version_field = 'versionInterna'
version_storage_field = 'versionInternaAlmacenada'
version_comment_field = 'comentarioVersionInterna'
version_comment_storage_field = 'comentarioVersionInternaAlmacenada'
inter_translation_field = 'uidInterTraduccionesInterno'
language_field = 'codigoIdiomaInterno'
fields_pending_translation_field = 'camposPendientesTraduccionInterna'
fields_pending_revision_field = 'camposPendientesRevisionInterna'
allowed_content_types = ['BPDColeccionPoliticasDeNegocio', 'BPDColeccionHerramientas', 'BPDColeccionProcesosDeNegocio', 'BPDColeccionReglasDeNegocio', 'BPDColeccionArtefactos'] + list(getattr(BPDUnidadOrganizacional, 'allowed_content_types', []))
filter_content_types = 1
global_allow = 1
content_icon = 'bpdorganizacion.gif'
immediate_view = 'Textual'
default_view = 'Textual'
suppl_views = ('Textual', 'Tabular', )
typeDescription = "Raiz de contenidos para definicion y publicacion de procedimientos de gestion."
typeDescMsgId = 'gvSIGbpd_BPDOrganizacion_help'
archetype_name2 = 'Organisation'
typeDescription2 = '''Root of all definition and publicacion content.'''
archetype_name_msgid = 'gvSIGbpd_BPDOrganizacion_label'
factory_methods = None
factory_enablers = None
propagate_delete_impact_to = None
actions = (
{'action': "string:$object_url/base_edit",
'category': "object",
'id': 'edit',
'name': 'Edit',
'permissions': ("ModifyPortalContent",),
'condition': """python:('portal_factory' in object.getPhysicalPath())"""
},
{'action': "string:$object_url/Editar",
'category': "object",
'id': 'editar',
'name': 'Edit',
'permissions': ("ModifyPortalContent",),
'condition': """python:( not ('portal_factory' in object.getPhysicalPath())) and object.fAllowWrite()"""
},
{'action': "string:${object_url}/MDDNewVersion",
'category': "object_buttons",
'id': 'mddnewversion',
'name': 'New Version',
'permissions': ("Modify portal content",),
'condition': """python:object.fAllowVersion() and object.getEsRaiz()"""
},
{'action': "string:${object_url}/MDDNewTranslation",
'category': "object_buttons",
'id': 'mddnewtranslation',
'name': 'New Translation',
'permissions': ("Modify portal content",),
'condition': """python:0 and object.fAllowTranslation() and object.getEsRaiz()"""
},
{'action': "string:$object_url/content_status_history",
'category': "object",
'id': 'content_status_history',
'name': 'State',
'permissions': ("View",),
'condition': """python:0"""
},
{'action': "string:${object_url}/MDDInspectClipboard",
'category': "object_buttons",
'id': 'inspectclipboard',
'name': 'Clipboard',
'permissions': ("View",),
'condition': """python:object.fAllowRead()"""
},
{'action': "string:${object_url}/MDDOrdenar",
'category': "object_buttons",
'id': 'reorder',
'name': 'Reorder',
'permissions': ("Modify portal content",),
'condition': """python:object.fAllowWrite()"""
},
{'action': "string:${object_url}/MDDExport",
'category': "object_buttons",
'id': 'mddexport',
'name': 'Export',
'permissions': ("View",),
'condition': """python:object.fAllowExport()"""
},
{'action': "string:${object_url}/MDDImport",
'category': "object_buttons",
'id': 'mddimport',
'name': 'Import',
'permissions': ("Modify portal content",),
'condition': """python:object.fAllowImport()"""
},
{'action': "string:${object_url}/sharing",
'category': "object",
'id': 'local_roles',
'name': 'Sharing',
'permissions': ("Manage properties",),
'condition': """python:1"""
},
{'action': "string:${object_url}/",
'category': "object",
'id': 'view',
'name': 'View',
'permissions': ("View",),
'condition': """python:1"""
},
{'action': "string:${object_url}/MDDChanges",
'category': "object_buttons",
'id': 'mddchanges',
'name': 'Changes',
'permissions': ("View",),
'condition': """python:1"""
},
{'action': "string:${object_url}/MDDVersions",
'category': "object_buttons",
'id': 'mddversions',
'name': 'Versions',
'permissions': ("View",),
'condition': """python:1"""
},
{'action': "string:${object_url}/MDDCacheStatus/",
'category': "object_buttons",
'id': 'mddcachestatus',
'name': 'Cache',
'permissions': ("View",),
'condition': """python:1"""
},
{'action': "string:${object_url}/TextualRest",
'category': "object_buttons",
'id': 'textual_rest',
'name': 'TextualRest',
'permissions': ("View",),
'condition': """python:1"""
},
)
_at_rename_after_creation = True
schema = BPDOrganizacion_schema
##code-section class-header #fill in your manual code here
##/code-section class-header
# Methods
security.declarePublic('manage_afterAdd')
def manage_afterAdd(self,item,container):
"""
"""
return self.pHandle_manage_afterAdd( item, container)
security.declarePublic('manage_pasteObjects')
def manage_pasteObjects(self,cb_copy_data=None,REQUEST=None):
"""
"""
return self.pHandle_manage_pasteObjects( cb_copy_data, REQUEST)
security.declarePublic('cb_isMoveable')
def cb_isMoveable(self):
"""
"""
return self._at_rename_after_creation or ('portal_factory' in self.getPhysicalPath()) or (( not self.getEsRaiz()) and self.fAllowWrite())
security.declarePublic('fAllowRead')
def fAllowRead(self):
"""
"""
return self.getPermiteLeer() and ( self.getEsRaiz() or self.getRaiz().fAllowRead())
security.declarePublic('fAllowWrite')
def fAllowWrite(self):
"""
"""
return self.fAllowRead() and self.getPermiteModificar() and ( self.getEsRaiz() or self.getRaiz().fAllowWrite())
security.declarePublic('moveObjectsByDelta')
def moveObjectsByDelta(self,ids,delta,subset_ids=None):
"""
"""
return self.pHandle_moveObjectsByDelta( ids, delta, subset_ids=subset_ids)
registerType(BPDOrganizacion, PROJECTNAME)
# end of class BPDOrganizacion
##code-section module-footer #fill in your manual code here
##/code-section module-footer
|
carrascoMDD/gvSIG-bpd
|
gvSIGbpd/BPDOrganizacion.py
|
BPDOrganizacion.py
|
py
| 20,894 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38840281994
|
from django.shortcuts import render, get_object_or_404, redirect, HttpResponse
from django.contrib.auth.decorators import login_required
from account.models import User
from product.models import Product
from .models import Message, MessageRoom
@login_required
def check_message_room(request, product_id):
product = get_object_or_404(Product, id=product_id)
receiver = product.creator
sender = request.user
    # get_or_create collapses the filter/exists/create/get sequence into one lookup
    message_room, _ = MessageRoom.objects.get_or_create(
        product=product, sender=sender, defaults={'receiver': receiver}
    )
# if request.method == 'POST':
# content = request.POST.get('content')
# sender = request.user
# message = Message.objects.create(
# sender=sender,
# receiver=receiver,
# product=product,
# content=content
# )
# Perform any additional actions, notifications, or redirects
return redirect('messaging:message-room',id=message_room.id)
@login_required
def message_room(request,id):
message_room = get_object_or_404(MessageRoom, id=id)
if request.user == message_room.sender or request.user == message_room.receiver:
return render(request, 'messaging/messageroom.html', {'message_room':message_room})
else:
return HttpResponse('Unauthorized access. Sorry')
@login_required
def view_messages(request):
# messages = MessageRoom.objects.filter(receiver=user).order_by('-id') | MessageRoom.objects.filter(sender=user).order_by('-id')
messages = MessageRoom.with_messages(request.user)
return render(request, 'messaging/view_messages.html', {'messages': messages})
@login_required
def send_messages(request):
if request.method == "POST":
message = request.POST.get('message')
room_id = request.POST.get("roomid")
room = MessageRoom.objects.get(id=room_id)
Message.objects.create(room=room,content=message, sender=request.user)
return redirect('messaging:message-room',id=room.id)
return HttpResponse('Something went wrong.')
|
bishwopr/creative-poultry
|
messaging/views.py
|
views.py
|
py
| 2,261 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9642919839
|
from tkinter import *
from tkinter import messagebox as mbox
import socket
win=Tk()
win.title(' CLIENT ')
win.configure(bg='#BC8F8F')
win.geometry('320x500')
typemsg=Listbox(win,height=25,width=45)
typemsg.place(x=10,y=15)
udpsocket = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
udpsocket.sendto(str.encode("Client is connected!"), ("localhost", 5555))
mbox.showinfo('info',"Client Connected")
req = udpsocket.recvfrom(1024)
typemsg.insert(0,"Server : "+req[0].decode())
def sent():
message = matter_name.get()
typemsg.insert(END,"Client : "+message)
udpsocket.sendto(str.encode(message),("localhost",5555))
req = udpsocket.recvfrom(1024)
typemsg.insert(END,"Server : "+req[0].decode())
matter_entrybox.delete(0,END)
matter_name=StringVar()
matter_entrybox=Entry(win,width=35,textvariable=matter_name,border=4,font=('arial','10'))
matter_entrybox.place(x=10,y=440)
send_button=Button(win,text='Send',command=sent,borderwidth=0,bg="#20B2AA",fg="gold" ,font=("times new roman",13) )
send_button.place(x=275,y=440)
win.mainloop()
|
vaibhav477/TCP_chat_app
|
Socket_Programming_GUI/client.py
|
client.py
|
py
| 1,112 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4822221945
|
# python 3.6.6
# Bit j of the counter (counting from the right / least-significant bit) selects items[j]:
# 0 0 0 ------> no elements selected (0)
# 0 0 1 ------> only "a" has been selected (1)
# 0 1 0 ------> only "b" has been selected (2)
# 0 1 1 ------> "a" and "b" have been selected (3)
# 1 0 0 ------> only "c" has been selected (4)
# 1 0 1 ------> "a" and "c" (5)
# 1 1 0 ------> "b" and "c" (6)
# 1 1 1 ------> "a", "b" and "c" (7)
def power_set(items):
N = len(items) # 3 items
combo = []
for i in range(2**N): # create all combinations count => 8
temp_combo = []
# 2 1 0 - index j
# 0 0 0 - binary representation
for j in range(N):
# iterate by index over binary number, from right-> left. ex: 001 - take 1, then 0, then 0.
# to understand if it's 1 or 0 used modulo operator %.
# 0 % 2 = 0
# 1 % 2 = 1
if(i >> j) % 2 == 1:
temp_combo.append(items[j])
if temp_combo:
combo.append(temp_combo)
return combo
# Output:
# 0:['a']
# 1:['b']
# 2:['a', 'b']
# 3:['c']
# 4:['a', 'c']
# 5:['b', 'c']
# 6:['a', 'b', 'c']
if __name__ == '__main__':
items = ['a','b','c']
power_set = power_set(items)
|
p039/python
|
6.00x/optimization-01-knapsack-problem/power_set.py
|
power_set.py
|
py
| 1,147 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10694085688
|
def pomekons_battle(bonus: int, player1_pokemons_attack: list[int], player2_pokemons_attack: list[int]) -> str:
[player1_ai, player1_di, player1_li] = player1_pokemons_attack
[player2_ai, player2_di, player2_li] = player2_pokemons_attack
player1_attack: float = (player1_ai + player1_di) / 2.0
player2_attack: float = (player2_ai + player2_di) / 2.0
if player1_li % 2 == 0:
player1_attack += bonus
if player2_li % 2 == 0:
player2_attack += bonus
if player1_attack > player2_attack:
return 'Dabriel'
if player2_attack > player1_attack:
return 'Guarte'
return 'Empate'
def main() -> None:
game: int = int(input())
while game > 0:
bonus: int = int(input())
player1_pokemons_attack: list[int] = list(map(int, input().split()))
player2_pokemons_attack: list[int] = list(map(int, input().split()))
print(pomekons_battle(bonus, player1_pokemons_attack, player2_pokemons_attack))
game -= 1
if __name__ == '__main__':
main()
|
pdaambrosio/python_uri
|
Unknow/uri2221.py
|
uri2221.py
|
py
| 1,049 |
python
|
en
|
code
| 0 |
github-code
|
6
|
45356075426
|
import numpy as np
from PyQt5.QtCore import QSize
from PyQt5.QtGui import QIcon, QColor
from PyQt5.QtWidgets import QListWidgetItem, QPushButton
from LGEprocess.flags_LGE import *
from skimage import exposure, img_as_float
import torch.utils.data as Datas
from LGEprocess import Network as Network
import nibabel as nib
import os
import torch
def Seg(img):
print(img.shape)
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
device = torch.device("cuda:0")
print(torch.__version__)
data=img
dataloder = Datas.DataLoader(dataset=data, batch_size=1, shuffle=False)
Segnet = Network.DenseBiasNet(n_channels=1, n_classes=4).to(device)
pretrained_dict = torch.load('./model/net_epoch_source-Seg-Network.pkl', map_location='cpu')
model_dict = Segnet.state_dict()
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
model_dict.update(pretrained_dict)
Segnet.load_state_dict(model_dict)
with torch.no_grad():
for epoch in range(1):
for step, (img) in enumerate(dataloder):
print(img.shape)
img=img.to(device).float()
print(img.shape)
img=Segnet(img)
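                    # combine the soft class channels into a single map weighted by
                    # class index (approximately a label map when the output is near one-hot)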
img= img[0, 1, :, :, :] * 1 + img[0, 2, :, :, :] * 2 + img[0, 3, :, :, :] * 3
img = img.data.cpu().numpy()
print(img.shape)
return img
def Reg(mov,fix):
print(mov.shape)
print(fix.shape)
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
device = torch.device("cuda:0")
print(torch.__version__)
data = mov,fix
dataloder = Datas.DataLoader(dataset=data, batch_size=2, shuffle=False)
Flownet = Network.VXm(2).to(device)
##
pretrained_dict = torch.load('./model/net_epoch_source-Flow-Network.pkl')
model_dict = Flownet.state_dict()
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
model_dict.update(pretrained_dict)
Flownet.load_state_dict(model_dict)
with torch.no_grad():
for epoch in range(1):
for step, (mov,fix) in enumerate(dataloder):
print(mov.shape)
print(fix.shape)
mov = mov.to(device).float()
fix = fix.to(device).float()
print(mov.shape)
print(fix.shape)
flow_field_x1, mov_fix, flow_field_x2, es_source = Flownet(fix, mov, fix)
mov_fix = mov_fix[0, 0, :, :, :].data.cpu().numpy()
print(mov_fix.shape)
return mov_fix
def load_nii(path):
image = nib.load(path)
affine = image.affine
image = np.asarray(image.dataobj)
return image, affine
def normor(image):
image -=image.mean()
image /=image.std()
return image
def crop_img(label_es, img, box_height=128, box_width=128):
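    # Crop a box_height x box_width window centred on the nonzero (labelled) region
    # of label_es, clamped to the image bounds; asserts that the labelled extent
    # actually fits inside the requested box.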
a = label_es.nonzero()
a_x = a[0]
a_x_middle = np.median(a[0])
a_height = max((a_x)) - min((a_x)) + 1
    assert a_height < box_height, 'box_height is too small'
a_x_start = max(0,int(a_x_middle - box_height / 2))
if(int(a_x_middle - box_height / 2)>=0):
a_x_end = int(a_x_middle + box_height / 2)
else:
a_x_end=box_height
print('axs',a_x_start)
print('axe',a_x_end)
print('x:',a_x_end-a_x_start)
a_y = a[1]
a_y_middle = np.median(a_y)
a_width = max(a_y) - min(a_y) + 1
# print(a_width,a_height)
    assert a_width < box_width, 'box_width is too small'
a_y_start = max(0,int(a_y_middle - box_width / 2))
if(int(a_y_middle - box_width / 2)>=0):
a_y_end = int(a_y_middle + box_width / 2)
else:
a_y_end=box_width
img_1 = img[a_x_start:a_x_end, a_y_start:a_y_end, :]
print('img1',img_1.shape)
#plt.imshow(img_1[:,:,5], cmap='gray')
return img_1
class MyItem_LGE(QListWidgetItem):
def __init__(self, name=None, parent=None):
super(MyItem_LGE, self).__init__(name, parent=parent)
self.setIcon(QIcon('icons/color.png'))
self.setSizeHint(QSize(60, 60)) # size
print('MyItem_LGE')
def get_params(self):
protected = [v for v in dir(self) if v.startswith('_') and not v.startswith('__')]
param = {}
for v in protected:
param[v.replace('_', '', 1)] = self.__getattribute__(v)
return param
def update_params(self, param):
for k, v in param.items():
if '_' + k in dir(self):
self.__setattr__('_' + k, v)
class LabelItem(MyItem_LGE):
def __init__(self, parent=None):
        super(LabelItem, self).__init__('Add GT', parent=parent)
def __call__(self, label):
# blank = np.zeros(img.shape, img.dtype)
# img = cv2.addWeighted(img, self._alpha, blank, 1 - self._alpha, self._beta)
return label
class NormItem(MyItem_LGE):
def __init__(self, parent=None):
        super(NormItem, self).__init__('Normalize', parent=parent)
def __call__(self, img):
max = img.max()
min = img.min()
img = (img - min) / (max - min)
return img
class LightItem(MyItem_LGE):
def __init__(self, parent=None):
        super(LightItem, self).__init__('Brightness', parent=parent)
self.alpha = 1
def __call__(self, img):
img = img_as_float(img)
        if 0 < self.alpha <= 1:
            img = exposure.adjust_gamma(img, self.alpha)  # gamma <= 1 brightens (or keeps) the image
        elif self.alpha > 1:
            img = exposure.adjust_gamma(img, 0.5)  # fixed gamma 0.5 brightens the image
        else:
            print('Please enter a number greater than 0!')
return img
class ROIItem(MyItem_LGE):
def __init__(self, parent=None):
        super(ROIItem, self).__init__('Extract ROI', parent=parent)
def __call__(self, img):
print(img.shape)
label_path='./image/patient081_frame01_gt.nii.gz'
label=nib.load(label_path).get_data()
print(label.shape)
img=crop_img(label_es=label,img=img,box_height=128,box_width=128)
print(img.shape)
return img
class RegItem(MyItem_LGE):
def __init__(self, parent=None):
        super(RegItem, self).__init__('Register', parent=parent)
def __call__(self, img):
path='./image/_es.nii.gz'
fix=nib.load(path).get_data()
img = np.transpose(img, (2, 1, 0)) # xyz-zyx
img = normor(img)
img = img[np.newaxis, np.newaxis, :, :, :]
fix = np.transpose(fix, (2, 1, 0)) # xyz-zyx
fix = normor(fix)
fix = fix[np.newaxis, np.newaxis, :, :, :]
mov=img
img=Reg(mov,fix)
img = np.transpose(img, (2, 1, 0)) # zyx-xyz
return img
class SegItem(MyItem_LGE):
def __init__(self, parent=None):
        super(SegItem, self).__init__('Segment', parent=parent)
def __call__(self, img):
img = np.transpose(img, (2, 1, 0)) # xyz-zyx
img=normor(img)
img = img[np.newaxis,np.newaxis, :, :, :]
# print(img.shape)
img=Seg(img)
img=np.transpose(img,(2,1,0))#zyx-xyz
print(img.shape)
return img
|
JefferyCYH/pyqt_medical
|
LGEprocess/listWidgetItems_LGE.py
|
listWidgetItems_LGE.py
|
py
| 6,988 |
python
|
en
|
code
| 0 |
github-code
|
6
|
44248042853
|
import cv2
import numpy as np
import glob
import uuid
import caffe
import skimage.io
from util import histogram_equalization
from scipy.ndimage import zoom
from skimage.transform import resize
import random
from matplotlib import pyplot as plt
import dlib
from project_face import frontalizer
IMAGE_WIDTH = 32
IMAGE_HEIGHT = 32
class mouth_detector():
def __init__(self):
self.PATH_face_model = '../lib/shape_predictor_68_face_landmarks.dat'
self.face_cascade = cv2.CascadeClassifier('../lib/haarcascade/haarcascade_frontalface_default.xml')
self.eye_cascade = cv2.CascadeClassifier('../lib/haarcascade/haarcascade_eye.xml')
self.mouth_cascade = cv2.CascadeClassifier('../lib/haarcascade/mouth.xml')
self.md_face = dlib.shape_predictor(self.PATH_face_model)
self.fronter = frontalizer('../lib/ref3d.pkl')
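    # Pipeline used by mouth_detect_single below: Haar face detection -> require at
    # least one detected eye -> 68-point dlib landmarks -> frontalize the face ->
    # crop a fixed mouth window (rows 165:220, cols 130:190) -> grayscale, resize to 32x32.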
def mouth_detect_single(self,image,isPath):
if isPath == True:
img = cv2.imread(image, cv2.IMREAD_UNCHANGED)
else:
img = image
img = histogram_equalization(img)
gray_img1 = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = self.face_cascade.detectMultiScale(gray_img1, 1.3, 5)
for (x,y,w,h) in faces:
roi_gray = gray_img1[y:y+h, x:x+w]
eyes = self.eye_cascade.detectMultiScale(roi_gray)
if(len(eyes)>0):
p = x
q = y
r = w
s = h
face_region = gray_img1[q:q+s, p:p+r]
                face_region_rect = dlib.rectangle(int(q), int(p), int(q+s), int(p+r))  # int() instead of Python-2-only long()
                rectan = dlib.rectangle(int(x), int(y), int(x+w), int(y+h))
shape = self.md_face(img,rectan)
p2d = np.asarray([(shape.part(n).x, shape.part(n).y,) for n in range(shape.num_parts)], np.float32)
rawfront, symfront = self.fronter.frontalization(img,face_region_rect,p2d)
face_hog_mouth = symfront[165:220, 130:190]
gray_img = cv2.cvtColor(face_hog_mouth, cv2.COLOR_BGR2GRAY)
crop_img_resized = cv2.resize(gray_img, (IMAGE_WIDTH, IMAGE_HEIGHT), interpolation = cv2.INTER_CUBIC)
#cv2.imwrite("../img/output_test_img/mouthdetectsingle_crop_rezized.jpg",gray_img)
return crop_img_resized,rectan.left(),rectan.top(),rectan.right(),rectan.bottom()
else:
return None,-1,-1,-1,-1
def mouth_detect_bulk(self,input_folder,output_folder):
transformed_data_set = [img for img in glob.glob(input_folder+"/*jpg")]
for in_idx, img_path in enumerate(transformed_data_set):
mouth,x,y,w,h = self.mouth_detect_single(img_path,True)
if 'showingteeth' in img_path:
guid = uuid.uuid4()
uid_str = guid.urn
str_guid = uid_str[9:]
path = output_folder+"/"+str_guid+"_showingteeth.jpg"
cv2.imwrite(path,mouth)
else:
guid = uuid.uuid4()
uid_str = guid.urn
str_guid = uid_str[9:]
path = output_folder+"/"+str_guid+".jpg"
cv2.imwrite(path,mouth)
def negative_image(self,imagem):
imagem = (255-imagem)
return imagem
def adaptative_threashold(self,input_img_path):
img = cv2.imread(input_img_path,0)
img = cv2.medianBlur(img,3)
ret,th1 = cv2.threshold(img,127,255,cv2.THRESH_BINARY)
th2 = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_MEAN_C,\
cv2.THRESH_BINARY,11,2)
th3 = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
cv2.THRESH_BINARY,11,2)
#cv2.imwrite("../img/output_test_img/hmouthdetectsingle_adaptative.jpg",th3)
return th3
|
juanzdev/TeethClassifierCNN
|
src/mouth_detector_opencv.py
|
mouth_detector_opencv.py
|
py
| 3,870 |
python
|
en
|
code
| 3 |
github-code
|
6
|
27082622973
|
from fastapi import APIRouter, Depends, HTTPException
from ...celery.tasks import ExcelParser
from ..crud.dishes_crud import DishesCrud
from ..crud.menu_crud import MenuCrud
from ..crud.submenu_crud import SubmenuCrud
parser_router = APIRouter(prefix='/parser', tags=['Parser'])
@parser_router.post('/parse-excel')
async def parse_excel(
menu_service: MenuCrud = Depends(),
submenu_service: SubmenuCrud = Depends(),
dish_service: DishesCrud = Depends()
):
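    """Trigger the Excel import: the CRUD services injected above are handed to
    ExcelParser, whose parser() coroutine loads the menu/submenu/dish data; any
    exception is surfaced as an HTTP 500 response."""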
try:
excel_parser = ExcelParser(menu_service, submenu_service, dish_service)
await excel_parser.parser()
return {'message': 'Excel data parsed and loaded successfully'}
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
|
puplishe/testproject
|
fastapi1/api/routes/excel_router.py
|
excel_router.py
|
py
| 756 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2075941699
|
import pandas as pd
from datetime import datetime
import os
def get_csv(source):
try:
df = pd.read_csv('data/' + source + '.csv')
except (OSError, IOError) as e:
df = pd.DataFrame()
print(e)
    return df
def get_status(source_name):
    return ''
def set_status(source_name, status):
    return
def get_data(source_name, meta_filter):
df = get_csv(source_name)
df = df[df['meta'].str.contains(meta_filter)]
return df
def put_all_data(source_name, descr, df):
local = get_csv(source_name)
result = pd.concat([local, df]).drop_duplicates(['ref', 'date'])
result = result.sort_values(by=['ref', 'date'])
if not os.path.exists('data'):
os.makedirs('data')
result.to_csv('data/'+source_name+'.csv', columns=['ref', 'date', 'meta', 'value', 'file_date'], quoting=1, index=False)
df['file_date'] = pd.to_datetime(df['file_date'])
date = df['file_date'].max()
date = date.today().replace(microsecond=0)
lu = pd.DataFrame(data=[[source_name, date, 'None']], columns=['Name', 'Date', 'Status'])
try:
lu_new = pd.read_csv('data/last-update.csv')
except (OSError, IOError) as e:
lu_new = lu
result = pd.concat([lu, lu_new]).drop_duplicates(['Name'])
result.to_csv('data/last-update.csv', quoting=1, index=False)
print(result)
def get_last_update(source_name, alternative_date):
try:
df = pd.read_csv('data/last-update.csv', index_col='Name')
except (OSError, IOError) as e:
return None
if df.empty or source_name not in df.index:
        return alternative_date
    date = df.at[source_name, "Date"]  # DataFrame.get_value() was removed from pandas; .at is the replacement
    return datetime.strptime(date, '%Y-%m-%d %H:%M:%S')
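# Illustrative usage sketch (not in the original file); the source name and sample rows are
# made up, and the calls write into the same data/ folder the module already uses.
if __name__ == "__main__":
    sample = pd.DataFrame({
        'ref': ['A', 'B'],
        'date': ['2020-01-01', '2020-01-01'],
        'meta': ['demo', 'demo'],
        'value': [1.0, 2.0],
        'file_date': ['2020-01-02', '2020-01-02'],
    })
    put_all_data('demo_source', 'demo description', sample)
    print(get_data('demo_source', 'demo'))
    print(get_last_update('demo_source', None))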
repo_name: shodnebo/datafetch | sub_path: csv_helper.py | file_name: csv_helper.py | file_ext: py | file_size_in_byte: 1,729 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6

seq_id: 72607237949

'''
Created on 22 Jul 2018
@author: Paulo
'''
from random import sample
import pprint
class MinesweeperLogic(object):
"""classdocs"""
def __init__(self, rowSize, columnSize, numberMines):
'''
Constructor
'''
self.NewGame(rowSize, columnSize, numberMines)
def GenerateMines(self):
mineCoordinates=[]
        mines = sample(range(self.columnSize * self.rowSize), self.numberMines)  # every cell index is eligible (the old -1 excluded the last cell)
#print (mines)
for mine in mines:
mineCoordinates.append(self.IntToCoordinates(mine))
#print(mineCoordinates)
return ((mines, mineCoordinates))
def GenerateGameMatrix(self, mines):
matrix = [[Cell() for _ in range(self.columnSize)] for _ in range(self.rowSize)]
for mine in mines:
mineRow, mineColumn = (mine)
matrix[mineRow][mineColumn].value = -1
rowRange = range (mineRow-1, mineRow + 2)
columnRange = range (mineColumn -1, mineColumn + 2)
for i in rowRange:
for j in columnRange:
if ( 0 <= i < self.rowSize and 0 <= j < self.columnSize and matrix[i][j].value!= -1):
matrix[i][j].value+=1
#self.PrintGameMatrix(matrix)
return matrix
def NewGame(self, rowSize, columnSize, numberMines):
self.rowSize = rowSize
self.columnSize = columnSize
self.numberMines = numberMines
self.numberMoves = self.rowSize * self.columnSize
self.minesInt, self.minesLocations = self.GenerateMines()
self.gameMatrix = self.GenerateGameMatrix(self.minesLocations)
def ClickMove(self, buttonNumber):
result={
"finish":False,
"mine":False,
"tile_info" : []
}
#Translates the int to Coordinates
row , column = self.IntToCoordinates(buttonNumber)
#Sets the specific cell as clicked
self.gameMatrix[row][column].SetClicked()
#Decreases the number of plays (used to know if game is won)
self.numberMoves -= 1
result['tile_info'].append((self.CoordinatesToInt(row, column) ,self.gameMatrix[row][column].value))
if self.gameMatrix[row][column].value == -1:
result['finish'] = result['mine']= True
if self.gameMatrix[row][column].value == 0:
#Runs the propagation calculation
propagateList = self.PropagateZeros(row, column)
#Updates the number of moves
self.numberMoves -= len(propagateList)
for cell in propagateList:
row, column = cell
self.gameMatrix[row][column].SetClicked()
result['tile_info'].append((self.CoordinatesToInt(row, column) ,self.gameMatrix[row][column].value))
if self.numberMoves <= self.numberMines:
result['finish'] = True
return result
def FlagMove(self, buttonNumber):
#Translates the int to Coordinates
row , column = self.IntToCoordinates(buttonNumber)
self.gameMatrix[row][column].ToggleFlag()
def IntToCoordinates(self, i):
if i < 0 :
raise ValueError
row = int(i / self.columnSize)
column = i % self.columnSize
return (row, column)
def CoordinatesToInt(self, row, column):
return column + row * self.columnSize
def PropagateZeros(self, row, column):
propagateList=[]
def FloodFill(row, column):
rowRange = range (row-1, row + 2)
columnRange = range (column -1, column + 2)
for i in rowRange:
for j in columnRange:
#Inside row boundaries and Column boundaries and not flagged cell and not the initial cell (row column)
if ( 0 <= i < self.rowSize and 0 <= j < self.columnSize and self.gameMatrix[i][j].flag == False and self.gameMatrix[i][j].clicked == False and not (i==row and j==column)):
if (i,j) in propagateList:
continue
else:
propagateList.append((i,j))
if (self.gameMatrix[i][j].value == 0):
FloodFill(i, j)
FloodFill(row, column)
return propagateList
def ShowMines(self):
return self.minesInt
def PrintGameMatrix(self, matrix):
aux_matrix = [[matrix[p][o].value for o in range(self.columnSize)] for p in range(self.rowSize)]
pprint.pprint(aux_matrix, indent=4, width=200)
return aux_matrix
class Cell():
def __init__(self):
self.flag = False
self.clicked = False
#-1 means mine
self.value = 0
    def ToggleFlag(self):
        self.flag = not self.flag
def SetClicked(self):
self.clicked=True
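# Illustrative usage sketch (not in the original file); board size, mine count and the clicked
# cell index are arbitrary.
if __name__ == "__main__":
    game = MinesweeperLogic(9, 9, 10)
    game.FlagMove(80)                     # toggle a flag on the bottom-right cell
    move = game.ClickMove(0)              # click the top-left cell (index 0)
    print("hit mine:", move['mine'], "- revealed tiles:", len(move['tile_info']))
    game.PrintGameMatrix(game.gameMatrix)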
repo_name: fonsecapaulo/wxpython_minesweeper | sub_path: minesweeper/minesweeper_logic.py | file_name: minesweeper_logic.py | file_ext: py | file_size_in_byte: 5,407 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6

seq_id: 70488599227

# 5. 2520 is the smallest number that can be divided by each of the numbers from 1 to 10 without remainder.
# What is the smallest number that is evenly divisible by all of the numbers from 1 to 20?
from Python_introduction.HWSem2.AddTask4 import get_primes # import of the method from another task solved
def min_number(n):
"""
:param n: max_number
:return: the min_number for which: min_number % i = 0, i from 1 to max_number
"""
primes = get_primes(int(n ** 0.5) + 1)
def get_factors(number):
"""
:param number: number to be factorized
:return: factorization dictionary
"""
factor_dict = {1: 1} # factor dictionary initialization
for prime in primes:
if prime * prime > number: # get prime factors before sqrt(number)
break
while number != 1 and number % prime == 0:
number /= prime
if prime in factor_dict:
factor_dict[prime] += 1 # increases a power (value) of this prime factor
else:
factor_dict[prime] = 1 # adds a new prime factor to the dict
if number == 1: # if a number is equal to one -> factorization is over
break
if number != 1: # the last factor that is bigger than sqrt(number) is the prime factor (like in 2*3*17: 17 is the one)
factor_dict[number] = 1
return factor_dict
res_factor_dict = {1: 1}
product = 1
for i in range(2, n + 1): # for all elements from 2 to n we build a unique factors dictionary
curr_f_dict = get_factors(i)
for item in curr_f_dict:
if item in res_factor_dict: # if the factor is already in the res_factor_dict
res_factor_dict[item] = max(res_factor_dict[item], curr_f_dict[item]) # if the power is larger -> extends the factors dictionary
else:
res_factor_dict[item] = curr_f_dict[item] # if the factors' dictionary does not contain the current factor -> adds it with its power
for i in res_factor_dict: # here we're building the product
product *= int(i ** res_factor_dict[i])
print(res_factor_dict)
# print(res_factor_dict) # dictionary checking
return product
print(min_number(10))
print(min_number(20))
print(min_number(100))
print(min_number(100000)) # optimization checking
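# Cross-check added for illustration (not part of the original solution): the answer is the
# least common multiple of 1..n, which can also be computed iteratively with math.gcd.
# lcm_up_to is a helper introduced here only for this check.
from math import gcd


def lcm_up_to(n):
    """Iterative LCM of 1..n, used only to validate min_number."""
    result = 1
    for i in range(2, n + 1):
        result = result * i // gcd(result, i)
    return result


assert min_number(10) == lcm_up_to(10) == 2520
assert min_number(20) == lcm_up_to(20) == 232792560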
repo_name: LocusLontrime/Python | sub_path: Python_introduction/HWSem2/AddTask5.py | file_name: AddTask5.py | file_ext: py | file_size_in_byte: 2,480 | program_lang: python | lang: en | doc_type: code | stars: 1 | dataset: github-code | pt: 6

seq_id: 7590990457

from typing import List


class Solution:
def moveZeroes(self, nums: List[int]) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
if len(nums) in [0, 1]:
return nums
nonzeroCount = 0
for element in nums:
if element != 0:
nonzeroCount += 1
insertPtr = 0
currPtr = 0
while nonzeroCount > 0 and currPtr < len(nums):
print("currPtr {} , insertPtr {}, nonzeroCount {}".format(currPtr, insertPtr, nonzeroCount))
if currPtr != insertPtr and nums[currPtr] != 0:
nums[insertPtr] = nums[currPtr]
nums[currPtr] = 0
insertPtr += 1
nonzeroCount -= 1
print("Index {}, NewVal {}".format(insertPtr, nums[insertPtr]))
if currPtr == insertPtr and nums[currPtr] != 0:
insertPtr += 1
currPtr += 1
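# Illustrative check (not part of the LeetCode submission); the input is the example from the
# problem statement.
if __name__ == "__main__":
    nums = [0, 1, 0, 3, 12]
    Solution().moveZeroes(nums)
    assert nums == [1, 3, 12, 0, 0]
    print(nums)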
repo_name: kashyapchaganti/Leetcode-Solutions | sub_path: 0283-move-zeroes/0283-move-zeroes.py | file_name: 0283-move-zeroes.py | file_ext: py | file_size_in_byte: 976 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6

seq_id: 8310320668

from stevedore import extension
class Extensions:
"""Lazy singleton container for stevedore extensions.
Loads each namespace when requested for the first time.
"""
_managers = {}
def __init__(self):
raise NotImplementedError()
@classmethod
def get(cls, namespace, name):
manager = cls._managers.get(namespace)
if manager is None:
manager = cls._load_namespace(namespace)
return manager[name].plugin
@classmethod
def _load_namespace(cls, namespace):
manager = extension.ExtensionManager(namespace)
cls._managers[namespace] = manager
return manager
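# Illustrative usage sketch (not in the original module); the namespace and plugin name below
# are hypothetical entry points, shown only to make the lazy, cached lookup concrete.
# plugin_cls = Extensions.get('share.transformers', 'my_transformer')
# instance = plugin_cls()
# # A second Extensions.get() on the same namespace reuses the cached ExtensionManager.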
repo_name: dwtcourses/SHARE | sub_path: share/util/extensions.py | file_name: extensions.py | file_ext: py | file_size_in_byte: 658 | program_lang: python | lang: en | doc_type: code | stars: null | dataset: github-code | pt: 6

seq_id: 72784213307

# https://www.codewars.com/kata/58ad388555bf4c80e800001e
def cut_the_ropes(arr):
res = [len(arr)]
for i in arr:
m = min(arr)
arr = [x - m for x in arr if x > m]
rem = len(arr) - arr.count(0)
if rem == 0:
return res
res.append(rem)
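# Worked example added for illustration (the kata's own sample): starting from [3, 3, 2, 9, 7],
# each cut leaves 5, then 4, then 2, then 1 rope longer than zero.
if __name__ == "__main__":
    assert cut_the_ropes([3, 3, 2, 9, 7]) == [5, 4, 2, 1]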
repo_name: blzzua/codewars | sub_path: 6-kyu/simple_fun_160_cut_the_ropes.py | file_name: simple_fun_160_cut_the_ropes.py | file_ext: py | file_size_in_byte: 292 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6

seq_id: 21725716579

#program to verify mobile number using regex
import re
# \w matches a word character: [a-zA-Z0-9_]
# \W matches a non-word character: [^a-zA-Z0-9_]
phn = "412-555a-1212"
if re.search(r"\d{3}-\d{3}-\d{4}", phn):
print("It is a phone number")
else:
print("Invalid phone number")
repo_name: ItsSamarth/ds-python | sub_path: regexToVerifyMobile.py | file_name: regexToVerifyMobile.py | file_ext: py | file_size_in_byte: 229 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6

seq_id: 40650003765

import re
import requests
from selenium import webdriver
from xvfbwrapper import Xvfb
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from selenium.common import exceptions
class YTGrabber:
    '''
    Accepts a YouTube page URL (a channel's playlists page, a single playlist, or a videos page)
    and returns all the video items found on that page.
    '''
driver = None
vdisplay = None
def _check_valid_url(self, url):
if type(url) is int:
            raise TypeError("URL must be a string, not an int!")
self.url = url.strip()
if re.match(r"https://www\.youtube\.com/(playlist\?list=|channel/)[\w]+(/playlists|/videos)", self.url):
return True
if re.match(r"https://www\.youtube\.com/(playlist\?list=|channel/)[\w]+", self.url):
return True
raise ValueError("URL is not correct!")
def _get_page(self, url):
self._check_valid_url(url)
resp = requests.get(self.url)
if resp.text.find("404 Not Found") >= 0:
raise ValueError("'{}' , страница не найдена либо не существует".format(self.url))
if resp.text.find("Произошла ошибка! - YouTube") >= 0:
raise ValueError("'{}' , Произошла ошибка! - YouTube".format(self.url))
self.driver.get(self.url)
return True
def get_content(self, url):
self._get_page(url)
preload = True
        html = WebDriverWait(self.driver, 3).until(EC.presence_of_element_located((By.TAG_NAME, "html")), "Page content not found or empty!")
while preload:
html.send_keys(Keys.END)
try:
WebDriverWait(self.driver, 3).until(EC.presence_of_element_located((By.CSS_SELECTOR, "#contents #contents #continuations #spinner")))
except:
preload = False
items = self.driver.find_elements(By.CSS_SELECTOR , "#contents #contents #items > *")
if not items:
items = self.driver.find_elements(By.CSS_SELECTOR , "#contents #contents #contents > *")
if not items:
raise ValueError("Содержимое не найден или его нет !")
videos = []
for item in items:
videos.append({
"title": item.find_element_by_id("video-title").get_attribute("title"),
"href": item.find_element_by_id("video-title").get_attribute("href") or item.find_element_by_class_name("ytd-thumbnail").get_attribute("href"),
"thumbnail": item.find_element_by_id("img").get_attribute("src"),
})
return videos
def __enter__(self):
self.vdisplay = Xvfb()
self.vdisplay.start()
options = webdriver.ChromeOptions()
        options.headless = False  # typo fixed: "handless" silently set an unused attribute
options.add_argument("--no-sandbox")
options.add_argument("--disable-setuid-sandbox")
self.driver = webdriver.Chrome(options=options, executable_path="driver/chromedriver")
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self.driver:
self.driver.close()
if self.vdisplay:
self.vdisplay.stop()
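# Illustrative usage sketch (not in the original file). It assumes the same prerequisites as
# the class itself (Xvfb installed, chromedriver at driver/chromedriver); the channel URL is
# only an example.
if __name__ == "__main__":
    with YTGrabber() as grabber:
        for video in grabber.get_content("https://www.youtube.com/channel/UC_x5XG1OV2P6uZZ5FSM9Ttw/videos"):
            print(video["title"], video["href"])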
repo_name: Foxonn/ytgrabber | sub_path: ytgrabber.py | file_name: ytgrabber.py | file_ext: py | file_size_in_byte: 3,726 | program_lang: python | lang: ru | doc_type: code | stars: 0 | dataset: github-code | pt: 6

seq_id: 7129738963

#!/usr/bin/env python
import pandas as pd
from collections import defaultdict
import argparse
def bowtie2bed(fn, fo):
"""
From a bowtie output (tsv, NOT sam) file, return a BED file.
:param fn: string
name of bowtie default output tsv file
:param fo: string
name of bedfile output to write
:return:
"""
bowtie_headers = [
"read_name", "strand", "chrom", "start", "seq", "ascii_score", "alt_align", "mismatches"
]
df = pd.read_csv(fn, names=bowtie_headers, sep="\t")
df['len'] = df['seq'].apply(lambda x: len(x))
df['read_name_fixed'] = df['read_name'].apply(lambda x: x.split("_")[0].split('#')[:-1])
df['end'] = df['start'] + df['len']
df = df[['chrom','start','end','read_name_fixed','alt_align','strand']]
df.to_csv(fo, sep="\t", header=False, index=False)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--in_file",
required=True,
)
parser.add_argument(
"--out_file",
required=True,
)
# Process arguments
args = parser.parse_args()
out_file = args.out_file
in_file = args.in_file
# main func
bowtie2bed(
fn=in_file,
fo=out_file
)
if __name__ == "__main__":
main()
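# Illustrative command line (not in the original file), based on the argparse flags above;
# the file names are placeholders:
#
#   python bowtie2bed.py --in_file reads.bowtie.tsv --out_file reads.bed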
repo_name: YeoLab/chim-eCLIP | sub_path: bin/bowtie2bed.py | file_name: bowtie2bed.py | file_ext: py | file_size_in_byte: 1,281 | program_lang: python | lang: en | doc_type: code | stars: 1 | dataset: github-code | pt: 6

seq_id: 12919232280

import datetime
categories = ['INACTIVE', 'WEB', 'AUDIO', 'VIDEO', 'GAMING']
inp = raw_input("Clear? Y/N\n")
if inp in ["y", "Y"]:
with open('log.txt', 'w') as f:
f.write("")
while True:
for i, c in enumerate(categories):
print("{}: {}".format(i, c))
cat = raw_input()
print("\n")
time = datetime.datetime.now()
with open('log.txt', 'a') as f:
f.write(str(time) + '\n' + str(cat) + '\n')
repo_name: noise-lab/ml-networking | sub_path: activities/lib/interative_log.py | file_name: interative_log.py | file_ext: py | file_size_in_byte: 420 | program_lang: python | lang: en | doc_type: code | stars: 8 | dataset: github-code | pt: 6

seq_id: 21204997139

# -*- coding: utf-8 -*-
"""
This is a prototype script.
"""
import numpy as np
from PIL import Image
from PIL import ImageEnhance
from scipy.ndimage import gaussian_filter
import cv2
from skimage import io as ip
frame_rate = 24 #output frame rate
vidcap = cv2.VideoCapture('video9.mov')
success,image = vidcap.read()
count = 1
print('Demuxing video')
while success:
cv2.imwrite("frame%d.png" % count, image) # save frame as JPEG file
success,image = vidcap.read()
count += 1
def initial_processing(iminit, low_val, max_val):
img = Image.open(iminit)
converter = ImageEnhance.Contrast(img)
print(low_val)
print(max_val)
cont = (1/(max_val/low_val))*2.0
img = converter.enhance(cont)
array = np.array(img)
ip.imsave('temp1.png', array)
def calc_val(im1):
img = Image.open(im1)
array = np.array(img)
low_val = np.mean(array)
max_val = np.amax(array)
return low_val, max_val
def imadd(I, K):
import numbers
if isinstance(K, numbers.Number):
J = I.astype('int32')
J += K
elif isinstance(K, np.ndarray):
assert K.shape == I.shape, f'Cannot add images with sizes {I.shape} and {K.shape}.'
J = I.astype('int32') + K.astype('int32')
else:
raise TypeError('K must be a number or an array.')
np.clip(J, 0, 255, out=J)
J = J.astype('uint8')
return J
def gaussian_filt(I, sigma, pad=0):
import numbers
assert isinstance(pad, numbers.Number) or pad in ['reflect', 'nearest', 'wrap'], \
'Choose a correct value for pad: a number (0-255), ''reflect'', ''nearest'', or ''wrap''.'
if isinstance(pad, numbers.Number):
md = 'constant'
c = pad
else:
md = pad
c = 0
return gaussian_filter(I, sigma, mode=md, cval=c)
def final_processing(finalim, k):
I = ip.imread(finalim)
R = np.logical_and(I[:, :, 0] > 254, I[:, :, 1] < 255)
new_R = gaussian_filt(255 * R, 5)
J = I.copy()
J[:, :, 0] = imadd(new_R, J[:, :, 0])
ip.imsave('temp.png', J)
img2 = Image.open('temp.png')
converter = ImageEnhance.Color(img2)
img2 = converter.enhance(1.4)
im = np.array(img2)
ip.imsave('final{}.png'.format(k), im)
def process_loop():
for i in range(count):
low_val, max_val=calc_val('frame{}.png'.format(i+1))
print('Processing image {}'.format(i+1))
initial_processing('frame{}.png'.format(i+1), low_val, max_val)
final_processing('temp1.png', i+1)
def video_mux():
print("Remuxing Files")
pathOut = 'video_out.mp4'
fps = frame_rate
frame_array = []
files = ['final{}.png'.format(i+1) for i in range(count)]
for i in range(len(files)):
#filename=pathIn + files[i]
filename=files[i]
#reading each files
img = cv2.imread(filename)
height, width, layers = img.shape
size = (width,height)
#inserting the frames into an image array
frame_array.append(img)
out = cv2.VideoWriter(pathOut,cv2.VideoWriter_fourcc(*'H264'), fps, size)
for i in range(len(frame_array)):
# writing to a image array
out.write(frame_array[i])
out.release()
count = count-1
process_loop()
video_mux()
repo_name: PindareTech/video-modding-script | sub_path: editing_script.py | file_name: editing_script.py | file_ext: py | file_size_in_byte: 3,414 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6

seq_id: 19579927717

from pymongo import MongoClient
from flask import Flask, jsonify
from flask_cors import CORS
app = Flask(__name__)
CORS(app)
@app.route("/")
def hello():
new_list = []
client = MongoClient()
db = client.variables
variables = db.variables
cursor = variables.find({})
# print(variables)
for doc in cursor:
message = ''
symbol = doc['symbol']
fiveMinSuccess = doc['values']["5MIN"]['success']
fiveMinBlackX = doc['values']["5MIN"]['black_x']
fiveMinPrice = doc['values']["5MIN"]['price']
fiveMinMa = doc['values']["5MIN"]['ma']
fifteenMinSuccess = doc['values']["15MIN"]['success']
fifteenMinBlackX = doc['values']["15MIN"]['black_x']
fifteenMinPrice = doc['values']["15MIN"]['price']
fifteenMinMa = doc['values']["15MIN"]['ma']
oneHourSuccess = doc['values']["1HRS"]['success']
oneHourBlackX = doc['values']["1HRS"]['black_x']
oneHourPrice = doc['values']["1HRS"]['price']
oneHourMa = doc['values']["1HRS"]['ma']
fourHourSuccess = doc['values']["4HRS"]['success']
fourHourBlackX = doc['values']["4HRS"]['black_x']
fourHourPrice = doc['values']["4HRS"]['price']
fourHourMa = doc['values']["4HRS"]['ma']
oneDaySuccess = doc['values']["1DAY"]['success']
oneDayBlackX = doc['values']["1DAY"]['black_x']
oneDayPrice = doc['values']["1DAY"]['price']
oneDayMa = doc['values']["1DAY"]['ma']
        new_dict = {
            "symbol": symbol,
            "fiveMin": f"{fiveMinSuccess}/{fiveMinBlackX} {calculate_difference(fiveMinPrice, fiveMinMa)}",
            "fifteenMin": f"{fifteenMinSuccess}/{fifteenMinBlackX} {calculate_difference(fifteenMinPrice, fifteenMinMa)}",
            "oneHour": f"{oneHourSuccess}/{oneHourBlackX} {calculate_difference(oneHourPrice, oneHourMa)}",
            "fourHour": f"{fourHourSuccess}/{fourHourBlackX} {calculate_difference(fourHourPrice, fourHourMa)}",
            "oneDay": f"{oneDaySuccess}/{oneDayBlackX} {calculate_difference(oneDayPrice, oneDayMa)}",
        }
new_list.append(new_dict)
print(new_list)
return jsonify(new_list)
def calculate_difference(price, ma) -> str:
up = '↗'
down = '↘'
if price > ma:
return up
return down
app.run(debug=True)
repo_name: OlzyInnovation/DaveBot_Forex | sub_path: server.py | file_name: server.py | file_ext: py | file_size_in_byte: 2,308 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6

seq_id: 23184643387

#!/usr/bin/env python3
#encoding: UTF-8
# To change this license header, choose License Headers in Project Properties.
# To change this template file, choose Tools | Templates
# and open the template in the editor.
import numpy as np
import matplotlib.pyplot as plt
import math
import TrashCan.Mathieson as mat
import C.PyCWrapper as PCWrap
import Util.plot as uPlt
import Util.dataTools as tUtil
def vErf(x):
y = np.zeros( x.shape )
for i in range( x.shape[0]):
y[i] = math.erf( x[i] )
return y
def computeGaussian1D( x, mu=0.0, var=1.0):
# print ( "???", __name__, x[-1], x.shape )
# print "x", x
TwoPi = 2 * np.pi
SqrtTwoPi = np.sqrt( TwoPi )
sig = np.sqrt(var)
u = (x - mu) / sig
# print "u", u
u = - 0.5 * u * u
cst = 1.0 / ( sig * SqrtTwoPi)
y = cst * np.exp( u )
return y
def gaussianIntegral(x):
mu0 = 0.0
var0 = 1.0
sig0 = np.sqrt( var0 )
cstx = 1.0 / ( np.sqrt(2.0)*sig0 )
integral = vErf( (x - mu0) * cstx )
return integral
class TabulatedChargeIntegration:
# Spline implementation of the book "Numerical Analysis" - 9th edition
# Richard L Burden, J Douglas Faires
# Section 3.5, p. 146
    # Restriction: designed for regular sampling (dx = const)
    # spline(x) : [-inf, +inf] -> [-1/2, +1/2]
    # Error < 7.0e-11 for 1001 samples on [0, 3.0]
def __init__(self, x, f, dx, lDerivate, rDerivate ):
self.nTabulations = x.size
N = x.size
self.a = np.copy( f )
self.b = np.zeros(N)
self.c = np.zeros(N)
self.d = np.zeros(N)
self.dx = dx
# Step 1
# for (i = 0; i < n - 1; ++i) h[i] = x[i + 1] - x[i];
#for i in range(0, N-1):
# self.h[i] = x[i+1] - x[i];
# h = x[0:N-1] = x[1:N] - x[0:N-1]
h = self.dx
# Step 2
# for (i = 1; i < n-1; ++i)
# A[i] = 3 * (a[i + 1] - a[i]) / h[i] - 3 * (a[i] - a[i - 1]) / h[i - 1];
# A[1:N-1] = 3 * (a[2:N] - a[1:N-1]) / h[1:N-1] - 3 * (a[1:N-1] - a[0:N-2]) / h[0:N-2]];
# Step 2 & 3
alpha = np.zeros(N)
# alpha[0] = 3.0 / self.h[0] * (f[1] - f[0]) - 3*lDerivate
alpha[0] = 3.0 / h * (f[1] - f[0]) - 3*lDerivate
# alpha[N-1] = 3*rDerivate - 3.0 / self.h[N-2] * (f[N-1] - f[N-2])
alpha[N-1] = 3*rDerivate - 3.0 / h * (f[N-1] - f[N-2])
# for (i = 1; i < n-1; ++i)
for i in range(1, N-1):
# alpha[i] = 3.0/self.h[i] * (f[i+1] - f[i]) - 3.0/self.h[i-1] * (f[i] - f[i-1]);
alpha[i] = 3.0/h * (f[i+1] - f[i]) - 3.0/h * (f[i] - f[i-1]);
# Step 4 to 6 solve a tridiagonal linear system
# Step 4
l = np.zeros(N)
mu = np.zeros(N)
z = np.zeros(N)
# l[0] = 2 * self.h[0]
l[0] = 2 * h
mu[0] = 0.5
z[0] = alpha[0] / l[0]
# Step 5
# for (i = 1; i < n - 1; ++i) {
for i in range(1, N-1):
# l[i] = 2 * (x[i+1] - x[i-1]) - self.h[i-1] * mu[i - 1];
# mu[i] = self.h[i] / l[i];
# z[i] = (alpha[i] - self.h[i-1]*z[i-1]) / l[i];
l[i] = 2 * (x[i+1] - x[i-1]) - h * mu[i-1];
mu[i] = h / l[i];
z[i] = (alpha[i] - h*z[i-1]) / l[i];
# Step 6 & 7
# l[N-1] = self.h[N-2]*(2.0-mu[N-2])
# z[N-1] = (alpha[N-1] - self.h[N-2]*z[N-2]) / l[N-1]
l[N-1] = h*(2.0-mu[N-2])
z[N-1] = (alpha[N-1] - h*z[N-2]) / l[N-1]
self.c[N-1] = z[N-1]
# for (j = n - 2; j >= 0; --j) {
for j in range(N-2, -1, -1):
self.c[j] = z[j] - mu[j] * self.c[j+1]
# self.b[j] = (f[j+1]-f[j]) / self.h[j] - self.h[j]/3.0 * (self.c[j+1] + 2*self.c[j])
# self.d[j] = (self.c[j+1]-self.c[j]) / (3 * self.h[j])
self.b[j] = (f[j+1]-f[j]) / h - h/3.0 * (self.c[j+1] + 2*self.c[j])
self.d[j] = (self.c[j+1]-self.c[j]) / (3 * h)
def splineAtanTanh( self, x ):
a = self.a
b = self.b
c = self.c
d = self.d
N = self.nTabulations
signX = np.where( x >= 0, 1.0, -1.0 )
# unsigned x
uX = x * signX
# 0.49999999724624
# 0.499999996965014 point precedent f0(2OO-1)
# 0.49999999724624 f0(200)
# 0.499999997245073 f(200-1)
# 0.499999997232819 y[200-1]
# 0.49999999748923 y[200]
# 0. 890
# 0.49999999724624 f0(200)
np.set_printoptions(precision=15)
# print("??? x / self.dx", x / self.dx)
cst = 1.0 / self.dx
u = np.trunc( uX * cst + self.dx*0.1)
# idx = u.astype(np.int)
idx = np.int32(u)
# print("??? idx ", idx)
idx = np.where( idx >= N, N-1, idx)
h = np.where( idx < N-1, uX - idx * self.dx, 0)
# h = x - idx * self.dx
# print("??? idx filter large indexes", idx)
print ("uX ", uX)
print ("h ", h)
print ("f(x0) ", a[idx])
print ("df|dx0", h*( b[idx] + h*( c[idx] + h *(d[idx]))))
print ("f, ", a[idx] + h*( b[idx] + h*( c[idx] + h *(d[idx]))))
f = signX * (a[idx] + h*( b[idx] + h*( c[idx] + h *(d[idx]))))
return f
if __name__ == "__main__":
#pcWrap = PCWrap.setupPyCWrapper()
#pcWrap.initMathieson()
xPrecision = 1.0e-3
xLimit = 3.0
N = int(xLimit / xPrecision) + 1
x = np.linspace(0.0, xLimit, N)
dxVar = x[1:] - x[0:-1]
print("Verify sampling N, xPrecision, dxMin, dxMax", N, xPrecision, np.min(dxVar), np.max(dxVar))
dx = xPrecision
mat0 = mat.Mathieson( 0, 1.0 )
leftDerivate = 2.0 * mat0.curK4x * mat0.curSqrtK3x * mat0.curK2x * mat0.curInvPitch
print("leftDerivate", leftDerivate)
# leftDerivate = 2.77
y = mat0.computeAtanTanh( x)
tf = TabulatedChargeIntegration(x, y, dx, leftDerivate, 0.0)
"""
m = int( N/2 )
print("N", N, x.size )
print("x ", x[0], x[1], x[2], '...', x[m-1], x[m], x[m+1], "...", x[-3], x[-2], x[-1] )
print("\n")
print("maxErr", np.max(np.abs(f-y)) )
"""
fig, ax = plt.subplots(nrows=2, ncols=2, figsize=(10, 7))
# Spline at sampling points
f = tf.splineAtanTanh(x)
ax[0,0].plot( x, y)
ax[0,0].scatter( x, f, marker='x', color="red")
# , markersize=4)
ax[0,0].set_ylabel( "atan(tanh(x0)) and spline(x0) [in red]")
#
ax[0,1].scatter( x, f-y, marker='x')
ax[0,1].set_ylabel( "atan(tanh(x0)) - spline(x0)")
# Far away points
print("--------------------")
x1 = x + 0.0095
y1 = mat0.computeAtanTanh( x1)
f1 = tf.splineAtanTanh(x1)
print("y1", y1)
ax[1,0].scatter( x1, f1-y1, marker='x')
ax[1,0].set_ylabel( "atan(tanh(x1)) - spline(x1)")
print("--------------------")
# RND
xrnd = (np.random.ranf(20*N) * 2 - 1.0) * (xLimit + 1.0)
frnd = tf.splineAtanTanh(xrnd)
yrnd = mat0.computeAtanTanh(xrnd)
#
ax[1,1].scatter( xrnd, frnd-yrnd, marker='x')
ax[1,1].set_ylabel( "atan(tanh(rnd)) - spline(rnd)")
# relative error
# ax[1,1].scatter( x1[1:], (f1[1:]-y1[1:]) / y1[1:] )
#
print("maxErr f1", np.max(np.abs(f1-y1)) )
print( "convergence last point y, dy ", y1[-1], np.max(np.abs(f1[-1]-y1[-1])))
np.set_printoptions(precision=15)
print( "f(x) x=[0, ..,9]", mat0.computeAtanTanh( np.arange(10.0)) - 0.5 )
print("FIRST POINT")
tf.splineAtanTanh( np.array([0.0]) )
print("Function", mat0.computeAtanTanh( np.array([0.0])) )
print("Last POINT")
tf.splineAtanTanh( np.array([2.0]) )
print("Function", mat0.computeAtanTanh( np.array([2.0])) )
print("Outer POINT")
tf.splineAtanTanh( np.array([15.0]) )
print("Function", mat0.computeAtanTanh( np.array([15.0])) )
xx = np.arange(6.0)
print("xx", xx )
print("f(xx) - 0.5", mat0.computeAtanTanh( xx ) - 0.5)
plt.show()
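    # Cross-check added for illustration (not in the original script): the same clamped cubic
    # spline idea on a simple analytic function, using scipy (assumed available) instead of the
    # project's Mathieson tabulation.
    from scipy.interpolate import CubicSpline
    xs = np.linspace(0.0, 3.0, 1001)
    ys = np.tanh(xs)
    # clamp the first derivative at both ends: d/dx tanh(x) = 1 / cosh(x)^2
    cs = CubicSpline(xs, ys, bc_type=((1, 1.0), (1, 1.0 / np.cosh(3.0) ** 2)))
    xt = np.linspace(0.0, 3.0, 5000)
    print("clamped-spline max error on tanh:", np.max(np.abs(cs(xt) - np.tanh(xt))))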
repo_name: grasseau/MCHClustering | sub_path: src/PyTests/spline_t.py | file_name: spline_t.py | file_ext: py | file_size_in_byte: 7,607 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6

seq_id: 3660394854

import sqlite3
conn=sqlite3.connect("Stationary_inventt.db")
c = conn.cursor()
print(" database successful")
#using drop table to avoid duplicate copy
c.execute("DROP TABLE IF EXISTS Stationery_stock")
c.execute("""
CREATE TABLE Stationery_stock(
ITEM_ID INTEGER,
ITEMS TEXT,
COST_PRICE INTEGER,
QUANTITY_IN_STOCK INTEGER
)
""")
print("table created successfully")
Avail_items = [
(1,"2b 60 leaves bks",600,10),
(2,"Staple pin",800,5),
(3,"Gum",1000,15),
(4,"Pencils",500,30),
(5,"A4 paper",5000,7),
(6,"Flexible Ruler",1500,22),
(7,"set square",4000,5),
(8,"Math set",2500,3),
(9,"Eraser",750,8),
(10,"Calculator",3000,10)
]
c.executemany("INSERT INTO Stationery_stock VALUES(?,?,?,?)",Avail_items)
#amount the business owner invested in the procurement of the items.
c.execute("SELECT SUM(COST_PRICE) FROM Stationery_stock" )
print(c.fetchall())
# average quantity of items in stock.
c.execute("SELECT AVG(QUANTITY_IN_STOCK) FROM Stationery_stock")
print(c.fetchall())
#item with the least quantity in stock
c.execute("SELECT ITEMS, MIN(QUANTITY_IN_STOCK) FROM Stationery_stock")
print(c.fetchall())
# item with the most quantity in stock
c.execute("SELECT ITEMS,MAX(QUANTITY_IN_STOCK) FROM Stationery_stock")
print(c.fetchall())
conn.commit()
conn.close()
repo_name: debbytech22/module-5-solutions | sub_path: Lesson_3_solution.py | file_name: Lesson_3_solution.py | file_ext: py | file_size_in_byte: 1,271 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6