Dataset schema (one row per source file):

| column | dtype | range / values | nullable |
|---|---|---|---|
| hexsha | string | length 40 | no |
| size | int64 | 6 to 782k | no |
| ext | string | 7 classes | no |
| lang | string | 1 class | no |
| max_stars_repo_path | string | length 4 to 237 | no |
| max_stars_repo_name | string | length 6 to 72 | no |
| max_stars_repo_head_hexsha | string | length 40 | no |
| max_stars_repo_licenses | list | | no |
| max_stars_count | int64 | 1 to 53k | yes |
| max_stars_repo_stars_event_min_datetime | string | length 24 | yes |
| max_stars_repo_stars_event_max_datetime | string | length 24 | yes |
| max_issues_repo_path | string | length 4 to 184 | no |
| max_issues_repo_name | string | length 6 to 72 | no |
| max_issues_repo_head_hexsha | string | length 40 | no |
| max_issues_repo_licenses | list | | no |
| max_issues_count | int64 | 1 to 27.1k | yes |
| max_issues_repo_issues_event_min_datetime | string | length 24 | yes |
| max_issues_repo_issues_event_max_datetime | string | length 24 | yes |
| max_forks_repo_path | string | length 4 to 184 | no |
| max_forks_repo_name | string | length 6 to 72 | no |
| max_forks_repo_head_hexsha | string | length 40 | no |
| max_forks_repo_licenses | list | | no |
| max_forks_count | int64 | 1 to 12.2k | yes |
| max_forks_repo_forks_event_min_datetime | string | length 24 | yes |
| max_forks_repo_forks_event_max_datetime | string | length 24 | yes |
| content | string | length 6 to 782k | no |
| avg_line_length | float64 | 2.75 to 664k | no |
| max_line_length | int64 | 5 to 782k | no |
| alphanum_fraction | float64 | 0 to 1 | no |
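The rows below follow this schema. As a minimal, hypothetical sketch (the dataset name and split are assumptions, not confirmed by this dump; only the column names come from the table above), a row with these fields could be inspected like so:

# Hypothetical sketch: stream one row of a dataset that exposes the schema above.
# "bigcode/the-stack" is an illustrative dataset name, not confirmed by this dump.
from datasets import load_dataset

ds = load_dataset("bigcode/the-stack", split="train", streaming=True)
row = next(iter(ds))
print(row["max_stars_repo_name"], row["size"], row["alphanum_fraction"])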
| field | value |
|---|---|
| hexsha | 6059c932e152e3b9d227f8831d4c5f1446130057 |
| size | 7,924 |
| ext | py |
| lang | Python |
| repo_path (same for stars/issues/forks) | research/cv/AVA_cifar/src/knn_eval.py |
| repo_name | leelige/mindspore |
| repo_head_hexsha | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc |
| licenses | ["Apache-2.0"] |
| max_stars_count | 77 (2021-10-15T08:32:37.000Z to 2022-03-30T13:09:11.000Z) |
| max_issues_count | 3 (2021-10-30T14:44:57.000Z to 2022-02-14T06:57:57.000Z) |
| max_forks_count | 24 (2021-10-15T08:32:45.000Z to 2022-03-24T18:45:20.000Z) |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""knn evaluation"""
import math
import numpy as np
import mindspore.nn as nn
from mindspore.ops import operations as P
class KnnEval(nn.Metric):
"""
collect features for eval
"""
def __init__(self, batch_size, device_num, K=200, sigma=0.1, C=10, feature_dim=128, train_data_size=50000,
test_data_size=10000):
super(KnnEval, self).__init__()
self.sum = P.ReduceSum()
self.batch_size = batch_size
self.device_num = device_num
self.feature_dim = feature_dim
self.train_data_size = train_data_size
self.test_data_size = test_data_size
self.K = K
self.C = C
self.sigma = sigma
self.clear()
def clear(self):
"""clear parameters"""
self.train_features = np.zeros(
shape=(self.train_data_size, self.feature_dim), dtype=np.float32)
self.test_features = np.zeros(
shape=(self.test_data_size, self.feature_dim), dtype=np.float32)
self.train_labels = np.zeros(
shape=(self.train_data_size,), dtype=np.int32)
self.test_labels = np.zeros(
shape=(self.test_data_size,), dtype=np.int32)
self._total_num = 0
self._total_num_train = 0
self._total_num_test = 0
def update(self, *inputs):
"""update"""
feature = inputs[0].asnumpy()
label = inputs[1].asnumpy()
training = inputs[2].asnumpy()
batch_size = label.shape[0]
if training.sum() == batch_size:
self.train_features[self._total_num_train:self._total_num_train +
batch_size * self.device_num] = feature
self.train_labels[self._total_num_train:self._total_num_train +
batch_size * self.device_num] = label
self._total_num_train += batch_size * self.device_num
elif training.sum() == 0:
self.test_features[self._total_num_test:self._total_num_test +
batch_size * self.device_num] = feature
self.test_labels[self._total_num_test:self._total_num_test +
batch_size * self.device_num] = label
self._total_num_test += batch_size * self.device_num
else:
for i, flag in enumerate(training):
if flag == 1:
self.train_features[self._total_num_train] = feature[i]
self.train_labels[self._total_num_train] = label[i]
self._total_num_train += 1
elif flag == 0:
self.test_features[self._total_num_test] = feature[i]
self.test_labels[self._total_num_test] = label[i]
self._total_num_test += 1
self._total_num = self._total_num_train + self._total_num_test
def topk(self, matrix, K, axis=1):
"""
numpy version of torch.topk
"""
if axis == 0:
row_index = np.arange(matrix.shape[1 - axis], dtype=np.int32)
topk_index = np.argpartition(-matrix, K, axis=axis)[0:K, :]
topk_data = matrix[topk_index, row_index]
topk_index_sort = np.argsort(-topk_data, axis=axis)
topk_data_sort = topk_data[topk_index_sort, row_index]
topk_index_sort = topk_index[0:K, :][topk_index_sort, row_index]
else:
column_index = np.arange(
matrix.shape[1 - axis], dtype=np.int32)[:, None]
topk_index = np.argpartition(-matrix, K, axis=axis)[:, 0:K]
topk_data = matrix[column_index, topk_index]
topk_index_sort = np.argsort(-topk_data, axis=axis)
topk_data_sort = topk_data[column_index, topk_index_sort]
topk_index_sort = topk_index[:, 0:K][column_index, topk_index_sort]
return topk_data_sort, topk_index_sort
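    # Illustrative example (added; not from the original file): for
    # matrix = np.array([[1., 3., 2.]]) and K=2, topk(matrix, K=2, axis=1)
    # returns (array([[3., 2.]]), array([[1, 2]])), matching the values and
    # indices of torch.topk on the same input.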
def gather(self, a, dim, index):
expanded_index = [
index if dim == i else np.arange(a.shape[i]).reshape([-1 if i == j else 1 for j in range(a.ndim)]) for i in
range(a.ndim)]
        return a[tuple(expanded_index)]  # tuple indexing; a plain list is rejected by modern NumPy
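    # Illustrative example (added): gather picks elements along `dim` by index,
    # like torch.gather. For a = np.array([[10, 20], [30, 40]]) and
    # index = np.array([[1], [0]]), gather(a, dim=1, index=index) yields
    # array([[20], [30]]).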
def scatter(self, a, dim, index, b):
expanded_index = [
index if dim == i else np.arange(a.shape[i]).reshape([-1 if i == j else 1 for j in range(a.ndim)]) for i in
range(a.ndim)]
        a[tuple(expanded_index)] = b  # tuple indexing; a plain list is rejected by modern NumPy
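    # Illustrative example (added): scatter is the inverse write, like
    # torch.scatter_. eval() uses it to one-hot encode neighbour labels:
    # scatter(np.zeros((2, 3)), 1, np.array([[2], [0]]), 1) sets a[0, 2] = 1
    # and a[1, 0] = 1.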
def eval(self):
"""compute acc"""
top1 = 0
top5 = 0
for batch_idx in range(math.ceil(len(self.test_labels) / self.batch_size)):
if batch_idx * self.batch_size > len(self.test_labels):
test_features = self.test_features[batch_idx *
self.batch_size:]
test_labels = self.test_labels[batch_idx * self.batch_size:]
else:
test_features = self.test_features[batch_idx *
self.batch_size:batch_idx * self.batch_size + self.batch_size]
test_labels = self.test_labels[batch_idx *
self.batch_size:batch_idx * self.batch_size + self.batch_size]
dist = np.dot(test_features, self.train_features.T)
yd, yi = self.topk(dist, K=self.K, axis=1)
candidates = self.train_labels.reshape(
1, -1).repeat(len(test_labels), 0) # correct
retrieval = self.gather(candidates, dim=1, index=yi)
retrieval = retrieval.astype(np.int32)
retrieval_one_hot = np.zeros([len(test_labels) * self.K, self.C])
self.scatter(retrieval_one_hot, 1, retrieval.reshape(-1, 1), 1)
yd_transform = np.exp(yd / self.sigma)
retrieval_one_hot = retrieval_one_hot.reshape(
[len(test_labels), -1, self.C])
yd_transform = yd_transform.reshape(len(test_labels), -1, 1)
probs = np.sum(retrieval_one_hot * yd_transform, 1)
predictions = np.argsort(-probs, 1)
correct = predictions == test_labels.reshape(-1, 1)
top1 += np.sum(correct[:, 0:1])
top5 += np.sum(correct[:, 0:5])
top1 = top1 / len(self.test_labels)
top5 = top5 / len(self.test_labels)
print("top1 acc:{}, top5 acc:{}".format(top1, top5))
return top1
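    # Added note: eval() implements sigma-weighted kNN voting. For each test
    # sample, the score of class c is the sum over its K nearest train samples
    # of 1[label_k == c] * exp(sim_k / sigma), where sim_k is the dot-product
    # similarity from np.dot above; top-1/top-5 accuracy is then read off the
    # scores sorted in descending order.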
class FeatureCollectCell(nn.Cell):
"""
get features from net
"""
def __init__(self, network):
super(FeatureCollectCell, self).__init__(auto_prefix=False)
self._network = network
self.shape = P.Shape()
self.sum = P.ReduceSum()
def construct(self, data, label, training):
output = self._network(data, data, data) # redundant input
feature = output[0]
return feature, label, training
class FeatureCollectCell310(nn.Cell):
"""
get features from net
"""
def __init__(self, network):
super(FeatureCollectCell310, self).__init__(auto_prefix=False)
self._network = network
self.shape = P.Shape()
self.sum = P.ReduceSum()
def construct(self, data):
output = self._network(data, data, data) # redundant input
feature = output[0]
return feature
Row stats for knn_eval.py: avg_line_length 40.635897 | max_line_length 119 | alphanum_fraction 0.590989

| field | value |
|---|---|
| hexsha | e81a5c5949538fb31d79439b6f55a45002a91c7b |
| size | 888 |
| ext | py |
| lang | Python |
| repo_path (same for stars/issues/forks) | Interview Preparation Kits/Interview Preparation Kit/Arrays/Array Manipulation/array_manipulation.py |
| repo_name | xuedong/hacker-rank |
| repo_head_hexsha | ce8a60f80c2c6935b427f9409d7e826ee0d26a89 |
| licenses | ["MIT"] |
| max_stars_count | 1 (2021-02-22T17:37:45.000Z to 2021-02-22T17:37:45.000Z) |
| max_issues_count | null |
| max_forks_count | null |
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the arrayManipulation function below.
def arrayManipulation(n, queries):
arr = [0] * (n+1)
for i in range(len(queries)):
a = queries[i][0]
b = queries[i][1]
k = queries[i][2]
arr[a] += k
if b+1 <= n:
arr[b+1] -= k
current, max_value = 0, 0
for i in range(n+1):
current += arr[i]
max_value = max(current, max_value)
return max_value
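# Illustrative walk-through (added): for n = 5 and
# queries = [[1, 2, 100], [2, 5, 100], [3, 4, 100]], the difference array keeps
# +k at index a and -k at index b+1, so the prefix-sum pass produces running
# sums 100, 200, 200, 200, 100 and the function returns 200.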
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
nm = input().split()
n = int(nm[0])
m = int(nm[1])
queries = []
for _ in range(m):
queries.append(list(map(int, input().rstrip().split())))
result = arrayManipulation(n, queries)
fptr.write(str(result) + '\n')
fptr.close()
Row stats for array_manipulation.py: avg_line_length 18.893617 | max_line_length 64 | alphanum_fraction 0.533784

| field | value |
|---|---|
| hexsha | 98f16bbdbe92ef3693ecbcfb405e8cc2d856e3f4 |
| size | 896 |
| ext | py |
| lang | Python |
| repo_path (same for stars/issues/forks) | elements/python/12/1/soln.py |
| repo_name | mmcloughlin/problems |
| repo_head_hexsha | 6095842ffe007a12ec8c2093850515aa4e046616 |
| licenses | ["MIT"] |
| max_stars_count | 11 (2019-02-08T06:54:34.000Z to 2021-08-07T18:57:39.000Z) |
| max_issues_count | 1 (2019-05-21T08:14:10.000Z to 2019-05-21T08:14:10.000Z) |
| max_forks_count | null |
def bsearch(A, pred):
"""
Assume the boolean function pred returns all False and then all True for
values in A. Return the index of the first True, or len(A) if that does
not exist.
"""
# invariant: last False lies in [l, r) and A[l] is False
if pred(A[0]):
return 0
l = 0
r = len(A)
while r-l > 1:
m = l + (r-l)//2
result = pred(A[m])
if result:
r = m
else:
l = m
return l+1
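# Illustrative example (added): with A = [1, 3, 5, 7] and pred = lambda a: a >= 5,
# bsearch returns 2, the index where pred first flips to True; with
# pred = lambda a: a > 7 it returns len(A) == 4.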
def first_occurrence(A, k):
i = bsearch(A, lambda a: a >= k)
if i >= len(A) or A[i] != k:
return None
return i
def test():
A = [-14, -10, 2, 108, 108, 243, 285, 285, 285, 401]
assert first_occurrence(A, 108) == 3
assert first_occurrence(A, 285) == 6
assert first_occurrence(A, 150) is None
    print('pass')
def main():
test()
if __name__ == '__main__':
main()
Row stats for soln.py: avg_line_length 20.837209 | max_line_length 76 | alphanum_fraction 0.530134

| field | value |
|---|---|
| hexsha | 98fa25302361a60278a0e6a24c143acc35cf1124 |
| size | 119,945 |
| ext | py |
| lang | Python |
| repo_path (same for stars/issues/forks) | System_monitoringu/skrypt_sterujacy.py |
| repo_name | Beanarny/Praca_inz |
| repo_head_hexsha | 38f843af8deeb1f1be6c77b553cfdcc4ad2a7c00 |
| licenses | ["MIT"] |
| max_stars_count | null |
| max_issues_count | null |
| max_forks_count | null |
import sys
from PyQt5 import QtWidgets
from PyQt5.uic import loadUi
from time import sleep
import csv
import mysql.connector
import serial
import datetime
import matplotlib.pyplot as plt
import hashlib
import numpy as np
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from pyfirmata import Arduino
result = None ## PASSWORD CHANGED, NEW DATABASE !!! user / userpass
while result is None: # runs forever if the connection keeps failing; needed for login, but an infinite loop
    try:
        # auth = input("Enter the database password:\n") # move this to "main", to run before the program starts
cnx = mysql.connector.connect(user = 'user', password = 'userpass', host = 'localhost', database = 'main_db')
result = cnx
# print("...Connection established...")
except:
# print("Connection failed")
pass
cursor = cnx.cursor(buffered=True)
########################################################################
def encrypt_string(hash_string):
sha_signature = \
hashlib.sha256(hash_string.encode()).hexdigest()
return sha_signature
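# Illustrative example (added): encrypt_string("admin") returns the SHA-256 hex
# digest "8c6976e5b5410415bde908bd4dee15dfb167a9c873fc4bb8a81f6f2ab448a918".
# Note this is hashing, not encryption, despite the function name.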
########################################################################
####################### create an admin/admin employee for logging in, if it does not exist yet, so that a first login is possible
####################### afterwards the admin password should be changed via Edit employee data -> change password
cursor.execute("SELECT login FROM personel WHERE login LIKE \'admin\'")
myresult = cursor.fetchall()
try:
x = myresult[0]
# print("admin istnieje")
print("Logowanie...")
except:
try:
print("Pierwsze logowanie...")
haslo_admina = encrypt_string("admin")
cursor.execute("INSERT INTO personel (imie, nazwisko, plec, data_urodzenia, PESEL, data_zatrudnienia, login, zaszyfrowane_haslo, telefon, email, kod_pocztowy, miejscowosc, ulica)\
VALUES (\'admin\',\'admin\',\'Mezczyzna\',\'2020-08-08\',\'55667712345\',\'2020-08-08\',\'admin\',\'{encr_admin_pass}\',\'a\',\'a\',\'a',\'a\',\'a\')".format(encr_admin_pass=haslo_admina))
cnx.commit()
# print("Pomyslnie dodano testowego admina")
except Exception as e: print (e)
########################################################################
port = "COM3"
ser = serial.Serial(port, 9600) # open serial port that Arduino is using
ser.timeout=0.1
class Worker(QRunnable):
def __init__(self, *args, **kwargs):
super(Worker, self).__init__()
# Store constructor arguments (re-used for processing)
self.args = args
self.kwargs = kwargs
@pyqtSlot()
def run(self):
'''
Initialise the runner function with passed args, kwargs.
'''
#############################################################
class main_window(QMainWindow): # MAIN WINDOW
def __init__(self, *args, **kwargs):
super(main_window, self).__init__(*args, **kwargs)
loadUi('gui_v4.ui', self)
self.setWindowTitle("System monitorowania ruchu pacjentow")
        self.pushButtonObserve.clicked.connect(self.pushButtonObserveClicked) # change 'hello' to something else
self.pushButtonBegin.clicked.connect(self.pushButtonBeginClicked)
self.newPatientButton.clicked.connect(self.newPatientButtonClicked)
self.newUserButton.clicked.connect(self.newUserButtonClicked)
        self.rangeSlider.setMinimum(10) # 10 seconds
        self.rangeSlider.setMaximum(180) # 3 * 60 seconds
        self.rangeSlider.setValue(60) # fixed initial value
        self.rangeSlider.setTickInterval(18) # the number of "ticks" does not quite seem to be used
        self.rangeSlider.setTickPosition(QSlider.TicksBelow) # place the ticks (dashes) below the slider
        self.rangeSlider.valueChanged.connect(self.v_change) # action taken when the slider moves; in this program it updates the corresponding LineEdit
        self.sliderValueLineEdit.setText("60") # this value should match rangeSlider.setValue(_)
self.showHistoryButton.clicked.connect(self.showHistoryButtonClicked)
self.showEventsButton.clicked.connect(self.showEventsButtonClicked)
self.pushButtonFilterHistoryPatient.clicked.connect(self.pushButtonFilterHistoryPatientClicked)
self.pushButtonFilterLivePatient.clicked.connect(self.pushButtonFilterLivePatientClicked)
self.editPatientButton.clicked.connect(self.editPatientButtonClicked)
self.editUserButton.clicked.connect(self.editUserButtonClicked)
self.newSensorButton.clicked.connect(self.newSensorButtonClicked)
self.editSensorButton.clicked.connect(self.editSensorButtonClicked)
self.assignSensorPushButton.clicked.connect(self.assignSensorPushButtonClicked)
self.sendMsgPushButton.clicked.connect(self.sendMsgPushButtonClicked)
self.pushButtonCleanEvents.clicked.connect(self.pushButtonCleanEventsClicked)
self.eventLineEdit.setPlaceholderText("np. upadek")
self.filterLiveLineEdit.setPlaceholderText("imię, nazwisko lub ID")
self.filterHistoryLineEdit.setPlaceholderText("imię, nazwisko lub ID")
self.threadpool = QThreadPool()
self.current_user = None
def pushButtonCleanEventsClicked(self):
worker = Worker()
self.threadpool.start(worker)
qm = QMessageBox
ret = qm.question(self,'', "Czy na pewno chcesz wyczyscić listę zdarzeń?\n\n*zdarzenia można później wczytać z bazy danych", qm.Yes | qm.No)
if ret == qm.Yes:
self.eventList.clear()
def sendMsgPushButtonClicked(self):
python_to_arduino_msg_win.show()
worker = Worker()
self.threadpool.start(worker)
def pushButtonBeginClicked(self):
# print("Rozpoczęto wczytywanie danych z monitora szeregowego...")
notification_win.label.setText("\nRozpoczęto monitoring.\n")
notification_win.show()
self.counter = 0
        # -------------------------- concerns detection of patient --> FALLS <-- ---------------------------------------------
self.dict_id_to_alarmvalue = {}
###################### #log #rejestr #zdarzenie ########################################################################################
# # print("login: ",self.current_user)
cursor.execute("SELECT ID_pracownika FROM personel WHERE login LIKE \"{jaki_login}\"".format(jaki_login=self.current_user))
ID_pracownika = cursor.fetchall()[0][0]
# # print("Wyswietlanie ID pracownika na podstawie loginu...")
# # print(ID_pracownika)
query = ("INSERT INTO rejestr_zdarzen (ID_pracownika,rodzaj_zdarzenia,opis_zdarzenia) VALUES (%s, %s, %s)")
taxi = (ID_pracownika, "rozpoczecie pomiaru", "")
cursor.execute(query, taxi)
cnx.commit()
self.eventList.insertItem(0, "rozpoczecie pomiaru, "+str(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
########################################################################################################################################
        # build a dictionary mapping the ID_czujnika (sensor ID) assigned to each patient to that patient's alarm value
cursor.execute("SELECT prz.ID_czujnika, pac.wartosc_alarmowa\
FROM przydzial_czujnikow prz\
JOIN pacjenci pac\
ON prz.ID_pacjenta = pac.ID_pacjenta;")
# print("...SELECT query succeeded...")
myresult = cursor.fetchall()
for x in myresult:
# print(x[0],x[1])
self.dict_id_to_alarmvalue[str(x[0])] = str(x[1])
        # usage: dict_id_to_alarmvalue[ID_czujnika] returns the alarm value
        #---------------------------------------------------------------------------------------------------------------------
        # ########################## concerns detection of patient --> APNEA <-- ###########################################
        arr_5s = np.linspace(100.01,101.50,150) # vector of 150 values seeding each sensor's window of recent measurements
        self.df_sekw_bezdechu = 100*[arr_5s] # list in which each row index is a sensor ID and the row holds that sensor's consecutive measurements
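        # Added note: each sensor's row acts as a ring buffer; np.roll shifts it
        # one slot and index 0 receives the newest x-axis sample, so apnea is
        # flagged when max - min < 0.03 over the last 150 samples (roughly 5 s
        # at the ~33 samples/s implied by the 33*{sekundy} arithmetic used in
        # the live-plot query further below).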
######################################################################################################################
def execute_single_import():
try:
temp = ser.readline().decode('utf-8')
temp=str(temp)
temp = temp.split()
# # print(temp)
query = ("INSERT INTO pomiary (ID_czujnika, modul, x_axis, y_axis, z_axis) VALUES (%s, %s, %s, %s, %s)")
taxi = (temp[0], temp[1], temp[2], temp[3], temp[4])
cursor.execute(query, taxi)
id_czujnika = temp[0]
mod = temp[1]
x_value = temp[2]
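                # Added note: each serial line is assumed to be of the form
                # "<sensor_id> <module> <x> <y> <z>" (e.g. "3 1.05 0.98 0.01 0.12",
                # values purely illustrative), matching the split() indexing above.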
                ###################################### append the measurement to the window and check for apnea, i.e. whether max-min < 0.03 for 5 s
                self.df_sekw_bezdechu[int(id_czujnika)] = np.roll(self.df_sekw_bezdechu[int(id_czujnika)],1) # shift the measurement window one step to the right
                self.df_sekw_bezdechu[int(id_czujnika)][0] = float(x_value)
                np.set_printoptions(precision=2)
                np.set_printoptions(suppress=True)
# print(self.df_sekw_bezdechu[int(id_czujnika)])
max_value = np.max(self.df_sekw_bezdechu[int(id_czujnika)])
min_value = np.min(self.df_sekw_bezdechu[int(id_czujnika)])
if (max_value-min_value)<0.03:
cursor.execute("SELECT pac.imie, pac.nazwisko\
FROM pacjenci pac\
JOIN przydzial_czujnikow prz ON pac.ID_pacjenta=prz.ID_pacjenta\
JOIN czujniki czu ON prz.ID_czujnika=czu.ID_czujnika\
WHERE czu.ID_czujnika={jakie_id};".format(jakie_id=temp[0]))
myresult = cursor.fetchall()
imie = myresult[0][0]
nazwisko = myresult[0][1]
notification_win.label.setText("\nPacjent {jakie_imie} {jakie_nazwisko} nie wykazuje aktywnosci. Podejrzenie bezdechu.\n".format(jakie_imie=imie,jakie_nazwisko=nazwisko))
notification_win.show()
                    self.df_sekw_bezdechu[int(id_czujnika)] = arr_5s # refill the window with non-apnea values to prevent a flood of apnea notifications
###################### #log #rejestr #zdarzenie ########################################################################################
cursor.execute("SELECT ID_pracownika FROM personel WHERE login LIKE \"{jaki_login}\"".format(jaki_login=window.current_user))
ID_pracownika = cursor.fetchall()[0][0]
# # print("Wyswietlanie ID pracownika na podstawie loginu...")
# # print(ID_pracownika)
query = ("INSERT INTO rejestr_zdarzen (ID_pracownika,rodzaj_zdarzenia,opis_zdarzenia) VALUES (%s, %s, %s)")
taxi = (ID_pracownika, "Bezdech - {jakie_imie} {jakie_nazwisko}".format(jakie_imie=imie,jakie_nazwisko=nazwisko), "")
cursor.execute(query, taxi)
cnx.commit()
window.eventList.insertItem(0, "Bezdech - {jakie_imie} {jakie_nazwisko}, ".format(jakie_imie=imie,jakie_nazwisko=nazwisko)+str(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
########################################################################################################################################
                ###################################### check whether the patient has fallen
                # i.e. whether the module exceeds the alarm value obtained by passing ID_czujnika to the dictionary holding the alarm values
                try:
                    # if the measured module value, temp[1], is greater than the alarm value for this ID_czujnika, i.e. dictionary(temp[0])
if (float(mod)>float(self.dict_id_to_alarmvalue[str(id_czujnika)])):
cursor.execute("SELECT pac.imie, pac.nazwisko\
FROM pacjenci pac\
JOIN przydzial_czujnikow prz ON pac.ID_pacjenta=prz.ID_pacjenta\
JOIN czujniki czu ON prz.ID_czujnika=czu.ID_czujnika\
WHERE czu.ID_czujnika={jakie_id};".format(jakie_id=temp[0]))
myresult = cursor.fetchall()
imie = myresult[0][0]
nazwisko = myresult[0][1]
# print("Pacjent X Y upadl.")
notification_win.label.setText("\nPacjent {jakie_imie} {jakie_nazwisko} upadl.\n".format(jakie_imie=imie,jakie_nazwisko=nazwisko))
notification_win.show()
# print("mod = "+str(float(temp[1]))+", dict_id_to_alarmvalue value = "+self.dict_id_to_alarmvalue [str(x[0])])
# print("taxi: ",taxi)
###################### #log #rejestr #zdarzenie ########################################################################################
cursor.execute("SELECT ID_pracownika FROM personel WHERE login LIKE \"{jaki_login}\"".format(jaki_login=window.current_user))
ID_pracownika = cursor.fetchall()[0][0]
# # print("Wyswietlanie ID pracownika na podstawie loginu...")
# # print(ID_pracownika)
query = ("INSERT INTO rejestr_zdarzen (ID_pracownika,rodzaj_zdarzenia,opis_zdarzenia) VALUES (%s, %s, %s)")
taxi = (ID_pracownika, "Upadek - {jakie_imie} {jakie_nazwisko}".format(jakie_imie=imie,jakie_nazwisko=nazwisko), "")
cursor.execute(query, taxi)
cnx.commit()
window.eventList.insertItem(0, "Upadek - {jakie_imie} {jakie_nazwisko}, ".format(jakie_imie=imie,jakie_nazwisko=nazwisko)+str(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
########################################################################################################################################
except:
pass
######################################
# if temp[1]>wartosc_graniczna_dla_danego_pacjenta
# # print("INSERT wykonany poprawnie")
self.counter = self.counter + 1
# # print("counter zwiekszony, counter = ", self.counter)
if ((self.counter%100)==0):
cnx.commit()
self.counter=0
# print("Zaimportowano 100 rekordow. Wykonano commit w bazie danych.")
except:
pass
self.timer = QTimer()
self.timer.setInterval(10)
self.timer.timeout.connect(lambda: execute_single_import())
self.timer.start()
    ###################################################### Loading patients from the database into the History ComboBox
def assignSensorPushButtonClicked(self):
assign_sensor_window.show()
worker = Worker()
self.threadpool.start(worker)
def editSensorButtonClicked(self):
edit_sensor_window.show()
worker = Worker()
self.threadpool.start(worker)
def newSensorButtonClicked(self):
new_sensor_window.show()
worker = Worker()
self.threadpool.start(worker)
def editPatientButtonClicked(self):
edit_patient_window.show()
worker = Worker()
self.threadpool.start(worker)
def editUserButtonClicked(self):
edit_user_window.show()
worker = Worker()
self.threadpool.start(worker)
def pushButtonFilterHistoryPatientClicked(self):
self.patientHistoryComboBox.clear()
worker = Worker()
self.threadpool.start(worker)
# print("Wybor pacjentow... ")
seekHist = self.filterHistoryLineEdit.text()
# print(seekHist)
try:
cursor.execute("SELECT imie, nazwisko FROM pacjenci WHERE imie LIKE BINARY \'%{seek}%\' OR nazwisko LIKE BINARY \'%{seek}%\' OR ID_pacjenta LIKE BINARY \'%{seek}%\'".format(seek=seekHist))
            # drop the BINARY prefix for case-insensitive matching
            # cursor.execute("SELECT imie, nazwisko FROM pacjenci")
            # print("...SELECT query succeeded...")
            # OK... but now how to put the DATE on the X axis in matplotlib, with x_axis on the Y axis (x_axis holds the values, the vertical axis)
myresult = cursor.fetchall()
# # print("The length of \'myresult\' is: ", len(myresult)) # pokazuje ile rekordow ma zostac wykorzystanych na wykresie
pacjenci = []
for x in myresult:
pacjenci.append(str(x[0])+" "+str(x[1]))
self.patientHistoryComboBox.addItems(pacjenci)
###################################################################
except:
pass
# print("SELECT query failed")
def pushButtonFilterLivePatientClicked(self):
self.patientLiveComboBox.clear()
worker = Worker()
self.threadpool.start(worker)
# print("Wybor pacjentow... ")
#Connect with database
seekLive = self.filterLiveLineEdit.text()
# print(seekLive)
try:
cursor.execute("SELECT imie, nazwisko FROM pacjenci WHERE imie LIKE BINARY \'%{seek}%\' OR nazwisko LIKE BINARY \'%{seek}%\' OR ID_pacjenta LIKE BINARY \'%{seek}%\'".format(seek=seekLive))
            # drop the BINARY prefix for case-insensitive matching
            # print("...SELECT query succeeded...")
            # OK... but now how to put the DATE on the X axis in matplotlib, with x_axis on the Y axis (x_axis holds the values, the vertical axis)
myresult = cursor.fetchall()
# # print("The length of \'myresult\' is: ", len(myresult)) # pokazuje ile rekordow ma zostac wykorzystanych na wykresie
pacjenci = []
for x in myresult:
pacjenci.append(str(x[0])+" "+str(x[1]))
self.patientLiveComboBox.addItems(pacjenci)
###################################################################
except:
pass
# print("SELECT query failed")
def showEventsButtonClicked(self):
# print("Filter events button clicked...")
worker = Worker()
self.threadpool.start(worker)
self.eventList.clear()
timeFrom = QTime()
timeTo = QTime()
timeFrom = self.eventTimeFrom.time()
timeTo = self.eventTimeTo.time()
        timeFromStr = timeFrom.toString() # time read out in HH:MM:SS format
        timeToStr = timeTo.toString() # time read out in HH:MM:SS format
dateFrom = QDate()
dateTo = QDate()
dateFrom = self.eventDateFrom.date()
dateTo = self.eventDateTo.date()
        dateFromStr = dateFrom.toString("yyyy-MM-dd") # date read out in YYYY-MM-DD format
        dateToStr = dateTo.toString("yyyy-MM-dd") # date read out in YYYY-MM-DD format
        dateTimeFrom = dateFromStr + " " + timeFromStr
        dateTimeTo = dateToStr + " " + timeToStr
        # dateTimeFrom and dateTimeTo are used in the history SELECT to bound the time range
        # once a range is chosen, the X axis shows seconds, because with dates on the X axis many measurements fell within a single second, dots stacked vertically, and the plot was completely unreadable.
        ###################################################### Reading the time from the widgets ^^^^^^^^^^^^^^^
# print("Filtrowanie zdarzen...")
# print("dateTimeFrom = "+str(dateTimeFrom))
# print("dateTimeTo = "+str(dateTimeTo))
        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! IMPORTANT
        # now read the first and last name (AND MAYBE MORE?...), JOIN ID_czujnika on that basis, and use the sensor ID in the history WHERE clause, and likewise for LIVE
seekEvent = self.eventLineEdit.text()
# # print("SELECT ID_pomiaru, x_axis FROM pomiary WHERE data_i_czas_pomiaru BETWEEN \'{data_i_czas_od}\' AND \'{data_i_czas_do}\' AND WHERE ID_czujnika==1 SELECT ID_czujnika FROM przydzial_czujnikow WHERE ID_czujnika".format(data_i_czas_od=dateTimeFrom,data_i_czas_do=dateTimeTo))
try:
cursor.execute("SELECT per.imie, rej.rodzaj_zdarzenia, rej.data_i_czas_zdarzenia\
FROM rejestr_zdarzen rej\
JOIN personel per\
ON rej.ID_pracownika=per.ID_pracownika\
WHERE rej.data_i_czas_zdarzenia BETWEEN \"{data_i_czas_od}\" AND \"{data_i_czas_do}\"\
AND rej.rodzaj_zdarzenia LIKE \'%{jakie_zdarzenia}%\'".format(data_i_czas_od=dateTimeFrom,data_i_czas_do=dateTimeTo,jakie_zdarzenia=seekEvent))
# print("...SELECT query succeeded...")
            # OK... but now how to put the DATE on the X axis in matplotlib, with x_axis on the Y axis (x_axis holds the values, the vertical axis)
myresult = cursor.fetchall()
# # print("The length of \'myresult\' is: ", len(myresult)) # pokazuje ile rekordow ma zostac wykorzystanych na wykresie
for x in myresult:
# # print(x[5].strftime('%Y-%m-%d %H:%M:%S'))
window.eventList.insertItem(0, str(x[0])+", "+str(x[1])+", "+str(x[2].strftime('%Y-%m-%d %H:%M:%S')))
notification_win.label.setText("Zakonczono importowanie zdarzeń.")
notification_win.show()
except:
pass
# print(e)
# print("SELECT query failed")
notification_win.label.setText("Niepowodzenie dodania zdarzen.")
notification_win.show()
def showHistoryButtonClicked(self):
# print("showHistoryButtonClicked")
worker = Worker()
self.threadpool.start(worker)
timeFrom = QTime()
timeTo = QTime()
timeFrom = self.historyFromTimeEdit.time()
timeTo = self.historyToTimeEdit.time()
        timeFromStr = timeFrom.toString() # time read out in HH:MM:SS format
        timeToStr = timeTo.toString() # time read out in HH:MM:SS format
dateFrom = QDate()
dateTo = QDate()
dateFrom = self.historyFromDateEdit.date()
dateTo = self.historyToDateEdit.date()
        dateFromStr = dateFrom.toString("yyyy-MM-dd") # date read out in YYYY-MM-DD format
        dateToStr = dateTo.toString("yyyy-MM-dd") # date read out in YYYY-MM-DD format
        dateTimeFrom = dateFromStr + " " + timeFromStr
        dateTimeTo = dateToStr + " " + timeToStr
        # dateTimeFrom and dateTimeTo are used in the history SELECT to bound the time range
        # once a range is chosen, the X axis shows seconds, because with dates on the X axis many measurements fell within a single second, dots stacked vertically, and the plot was completely unreadable.
        ###################################################### Reading the time from the widgets ^^^^^^^^^^^^^^^
# print("Drukowanie wykresu HISTORII wybranego pacjenta...")
# print("dateTimeFrom = "+str(dateTimeFrom))
# print("dateTimeTo = "+str(dateTimeTo))
        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! IMPORTANT
        # now read the first and last name (AND MAYBE MORE?...), JOIN ID_czujnika on that basis, and use the sensor ID in the history WHERE clause, and likewise for LIVE
wybrany_pacjent = self.patientHistoryComboBox.currentText()
try:
wybrany_pacjent = wybrany_pacjent.split()
wybrane_imie = wybrany_pacjent[0]
wybrane_nazwisko = wybrany_pacjent[1]
except:
pass
# # print("SELECT ID_pomiaru, x_axis FROM pomiary WHERE data_i_czas_pomiaru BETWEEN \'{data_i_czas_od}\' AND \'{data_i_czas_do}\' AND WHERE ID_czujnika==1 SELECT ID_czujnika FROM przydzial_czujnikow WHERE ID_czujnika".format(data_i_czas_od=dateTimeFrom,data_i_czas_do=dateTimeTo))
try:
cursor.execute("SELECT ID_pomiaru, x_axis\
FROM pomiary pom\
INNER JOIN czujniki cz\
ON pom.ID_czujnika=cz.ID_czujnika\
INNER JOIN przydzial_czujnikow prz\
ON prz.ID_czujnika=cz.ID_czujnika\
INNER JOIN pacjenci pac\
ON prz.ID_pacjenta=pac.ID_pacjenta\
WHERE pac.imie LIKE \'{imie}\' AND pac.nazwisko LIKE \'{nazwisko}\'\
AND data_i_czas_pomiaru BETWEEN \"{data_i_czas_od}\" AND \"{data_i_czas_do}\"".format(imie=wybrane_imie,nazwisko=wybrane_nazwisko,data_i_czas_od=dateTimeFrom,data_i_czas_do=dateTimeTo))
# print("...SELECT query succeeded...")
            # OK... but now how to put the DATE on the X axis in matplotlib, with x_axis on the Y axis (x_axis holds the values, the vertical axis)
myresult = cursor.fetchall()
# # print("The length of \'myresult\' is: ", len(myresult)) # pokazuje ile rekordow ma zostac wykorzystanych na wykresie
array_x = []
array_y = []
for x in myresult:
array_x.append(float(x[0]))
array_y.append(float(x[1]))
###################################################################
fig = plt.figure(figsize=(18, 16), dpi= 80, facecolor='w', edgecolor='k')
ax = plt.subplot(111)
            # GIVE AN APPROPRIATE LABEL DEPENDING ON THE PATIENT'S NAME # TODO #
line1, = ax.plot(np.arange(0,len(array_x)*0.03,0.03), array_y, label='Historia ruchu')
# Shrink current axis's height by 10% on the bottom
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height * 0.1,
box.width, box.height * 0.9])
# Put a legend below current axis
ax.legend(loc='upper right', bbox_to_anchor=(0.4, 1.0),
ncol=3, fancybox=True, shadow=True)
plt.xlabel("czas [s]")
plt.ylabel("amplituda [g]")
# plt.title("Wykres oddechu pięciu badanych osób")
plt.grid()
# ax.yaxis.set_ticks(np.arange(-0.1,0.5,0.05))
plt.show()
except:
# print("SELECT query failed")
notification_win.label.setText("Niepowodzenie wyswietlania wykresu. Nie wybrano pacjenta lub nie udało się połączyć z bazą danych. \n\nUpewnij się, czy kliknięto przycisk Filtruj.")
notification_win.show()
    ############################################################## Drawing the plot from history ^^^^^^^^^^^^^^ @ UP
def v_change(self):
value = str(self.rangeSlider.value())
self.sliderValueLineEdit.setText(value)
worker = Worker()
self.threadpool.start(worker)
    def pushButtonObserveClicked(self): # test function; remove or replace with another one
# print("Drukowanie wykresu...")
worker = Worker()
self.threadpool.start(worker)
        # additionally, make the displayed records depend on the selected patient, name, sensor MAC, patient ID or whatever # TODO #
        # use the slider to set the range --> substitute a scaled value instead of 10000
if int(self.sliderValueLineEdit.text()) < 10 or int(self.sliderValueLineEdit.text()) > 3000:
self.sliderValueLineEdit.setText("60")
jaki_zakres = self.sliderValueLineEdit.text()
# print("Podany zakres czasu powinien zawierac sie w zakresie od 10 do 3000 sekund.")
wybrany_pacjent = self.patientLiveComboBox.currentText()
try:
wybrany_pacjent = wybrany_pacjent.split()
wybrane_imie = wybrany_pacjent[0]
wybrane_nazwisko = wybrany_pacjent[1]
except:
pass
try:
cursor.execute("SELECT ID_pomiaru,x_axis \
FROM pomiary pom \
INNER JOIN czujniki cz \
ON pom.ID_czujnika=cz.ID_czujnika \
INNER JOIN przydzial_czujnikow prz \
ON prz.ID_czujnika=cz.ID_czujnika \
INNER JOIN pacjenci pac \
ON prz.ID_pacjenta=pac.ID_pacjenta \
WHERE pac.imie LIKE \'{imie}\' \
AND pac.nazwisko LIKE \'{nazwisko}\' \
AND ID_pomiaru > ((SELECT MAX(ID_pomiaru) FROM pomiary)-(33*{sekundy}));".format(imie=wybrane_imie,nazwisko=wybrane_nazwisko,sekundy=jaki_zakres))
# print("...SELECT query succeeded...")
myresult = cursor.fetchall()
# # print("The length of \'myresult\' is: ", len(myresult)) # pokazuje ile rekordow ma zostac wykorzystanych na wykresie
array_x = []
array_y = []
for x in myresult:
array_x.append(float(x[0]))
array_y.append(float(x[1]))
###################################################################
fig = plt.figure(figsize=(18, 16), dpi= 80, facecolor='w', edgecolor='k')
ax = plt.subplot(111)
            # GIVE AN APPROPRIATE LABEL DEPENDING ON THE PATIENT'S NAME # TODO #
line1, = ax.plot(np.arange(0,len(array_x)*0.03,0.03), array_y, label='{imie_i_nazwisko}'.format(imie_i_nazwisko=wybrane_imie+" "+wybrane_nazwisko))
# Shrink current axis's height by 10% on the bottom
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height * 0.1,
box.width, box.height * 0.9])
# Put a legend below current axis
ax.legend(loc='upper right', bbox_to_anchor=(0.4, 1.0),
ncol=3, fancybox=True, shadow=True)
plt.xlabel("czas [s]")
plt.ylabel("amplituda [g]")
# plt.title("Wykres oddechu pięciu badanych osób")
plt.grid()
# ax.yaxis.set_ticks(np.arange(-0.1,0.5,0.05))
plt.show()
self.currentPersonLabel.setText(self.patientLiveComboBox.currentText())
except:
# print("SELECT query failed")
self.currentPersonLabel.setText("---")
notification_win.label.setText("Niepowodzenie wyswietlania wykresu. Nie wybrano pacjenta lub nie udało się połączyć z bazą danych. \n\nUpewnij się, czy kliknięto przycisk Filtruj.")
notification_win.show()
#######################################################################################################################
######################################################################################## functions that open new windows when a button in the main GUI is clicked
def newPatientButtonClicked(self):
# print("Adding new patient...")
new_patient_window.show()
worker = Worker()
self.threadpool.start(worker)
def newUserButtonClicked(self):
# print("Adding new user...")
new_user_window.show()
worker = Worker()
self.threadpool.start(worker)
class new_patient(QMainWindow): #
def __init__(self):
QMainWindow.__init__(self)
loadUi('add_patient_gui.ui', self)
self.setWindowTitle("Dodawanie nowego pacjenta")
self.pushButtonAdd.clicked.connect(self.pushButtonAddClicked)
self.pushButtonAbort.clicked.connect(self.pushButtonAbortClicked)
self.birthDateLineEdit.setPlaceholderText("RRRR-MM-DD")
self.emailLineEdit.setPlaceholderText("[email protected]")
self.sexComboBox.addItem("Mężczyzna")
self.sexComboBox.addItem("Kobieta")
self.threadpool = QThreadPool()
def pushButtonAbortClicked(self):
new_patient_window.hide()
worker = Worker()
self.threadpool.start(worker)
def pushButtonAddClicked(self):
imie = self.nameLineEdit.text()
nazwisko = self.surnameLineEdit.text()
# plec = self.sexLineEdit.text()
plec = self.sexComboBox.currentText()
data_urodzenia = self.birthDateLineEdit.text()
PESEL = self.peselLineEdit.text()
telefon = self.phoneLineEdit.text()
email = self.emailLineEdit.text()
kod_pocztowy = self.cityCodeLineEdit.text()
miejscowosc = self.cityLineEdit.text()
ulica = self.streetLineEdit.text()
wartosc_alarmowa = self.alarmValueLineEdit.text()
worker = Worker()
self.threadpool.start(worker)
#Writing Query to insert data
query = ("INSERT INTO pacjenci (imie, nazwisko, plec, data_urodzenia, PESEL, telefon, email, kod_pocztowy, miejscowosc, ulica, wartosc_alarmowa) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)")
        taxi = (imie, nazwisko, plec, data_urodzenia, PESEL ,telefon, email, kod_pocztowy, miejscowosc, ulica, wartosc_alarmowa) # instead of a literal, a variable read from an EditText field (one would have to be added) where the user types the sensor number off the top of their head OR its ID could be used
try:
cursor.execute(query, taxi) #Execute the Query
cnx.commit()
# print("Dodano nowego pacjenta.")
            # Clear the entered text
self.nameLineEdit.setText("")
self.surnameLineEdit.setText("")
            # self.sexLineEdit.setText("") # change to a combobox? no, the sex combobox does NOT need clearing !!!
self.sexComboBox.clear()
self.sexComboBox.addItem("Mężczyzna")
self.sexComboBox.addItem("Kobieta")
self.birthDateLineEdit.setText("")
self.peselLineEdit.setText("")
self.phoneLineEdit.setText("")
self.emailLineEdit.setText("")
self.cityCodeLineEdit.setText("")
self.cityLineEdit.setText("")
self.streetLineEdit.setText("")
self.alarmValueLineEdit.setText("")
notification_win.label.setText("Dodano nowego pacjenta.")
notification_win.show()
            # TODO # record this action in the event log
###################### #log #rejestr #zdarzenie ########################################################################################
cursor.execute("SELECT ID_pracownika FROM personel WHERE login LIKE \"{jaki_login}\"".format(jaki_login=window.current_user))
ID_pracownika = cursor.fetchall()[0][0]
# # print("Wyswietlanie ID pracownika na podstawie loginu...")
# # print(ID_pracownika)
query = ("INSERT INTO rejestr_zdarzen (ID_pracownika,rodzaj_zdarzenia,opis_zdarzenia) VALUES (%s, %s, %s)")
taxi = (ID_pracownika, "Dodanie pacjenta {jakie_imie} {jakie_nazwisko}".format(jakie_imie=imie,jakie_nazwisko=nazwisko), "")
cursor.execute(query, taxi)
cnx.commit()
window.eventList.insertItem(0, "Dodanie pacjenta {jakie_imie} {jakie_nazwisko}, ".format(jakie_imie=imie,jakie_nazwisko=nazwisko)+str(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
########################################################################################################################################
new_patient_window.hide()
except:
notification_win.label.setText("Niepoprawne dane. Zwróć uwagę, czy data urodzenia oraz email mają poprawny format.")
notification_win.show()
cnx.rollback()
class edit_patient(QMainWindow): #
def __init__(self):
QMainWindow.__init__(self)
loadUi('edit_patient_gui.ui', self)
self.setWindowTitle("Edycja danych pacjenta")
self.pushButtonSaveChanges.clicked.connect(self.pushButtonSaveChangesClicked)
self.pushButtonAbort.clicked.connect(self.pushButtonAbortClicked)
self.birthDateLineEdit.setPlaceholderText("RRRR-MM-DD")
self.emailLineEdit.setPlaceholderText("[email protected]")
self.pushButtonFilterEditPatient.clicked.connect(self.pushButtonFilterEditPatientClicked)
self.pushButtonLoadToEditPatient.clicked.connect(self.pushButtonLoadToEditPatientClicked)
self.pushButtonDeletePatient.clicked.connect(self.pushButtonDeletePatientClicked)
self.threadpool = QThreadPool()
def pushButtonFilterEditPatientClicked(self):
        # Filtering patients
self.patientToEditComboBox.clear()
worker = Worker()
self.threadpool.start(worker)
# print("Wybor pacjentow... ")
seekToEdit = self.filterToEditLineEdit.text()
# print(seekToEdit)
try:
cursor.execute("SELECT imie, nazwisko FROM pacjenci WHERE imie LIKE BINARY \'%{seek}%\' OR nazwisko LIKE BINARY \'%{seek}%\' OR ID_pacjenta LIKE BINARY \'%{seek}%\'".format(seek=seekToEdit))
            # drop the BINARY prefix for case-insensitive matching
            # cursor.execute("SELECT imie, nazwisko FROM pacjenci")
            # print("...SELECT query succeeded...")
            # OK... but now how to put the DATE on the X axis in matplotlib, with x_axis on the Y axis (x_axis holds the values, the vertical axis)
myresult = cursor.fetchall()
# # print("The length of \'myresult\' is: ", len(myresult)) # pokazuje ile rekordow ma zostac wykorzystanych na wykresie
pacjenci = []
for x in myresult:
pacjenci.append(str(x[0])+" "+str(x[1]))
self.patientToEditComboBox.addItems(pacjenci)
###################################################################
except:
pass
# print("SELECT query failed")
def pushButtonLoadToEditPatientClicked(self):
# print("Ladowanie danych pacjenta... ")
worker = Worker()
self.threadpool.start(worker)
# seekHist = self.filterToEditLineEdit.text()
# # print(seekHist)
wybrany_pacjent = self.patientToEditComboBox.currentText()
try:
wybrany_pacjent = wybrany_pacjent.split()
wybrane_imie = wybrany_pacjent[0]
wybrane_nazwisko = wybrany_pacjent[1]
except:
pass
try:
cursor.execute("SELECT imie, nazwisko, plec, data_urodzenia, PESEL, telefon, email, kod_pocztowy, miejscowosc, ulica, wartosc_alarmowa FROM pacjenci WHERE imie LIKE \'%{imie}%\' AND nazwisko LIKE \'%{nazwisko}%\'".format(imie=wybrane_imie, nazwisko=wybrane_nazwisko))
            # drop the BINARY prefix for case-insensitive matching
            # cursor.execute("SELECT imie, nazwisko FROM pacjenci")
            # print("...SELECT query succeeded...")
            # OK... but now how to put the DATE on the X axis in matplotlib, with x_axis on the Y axis (x_axis holds the values, the vertical axis)
myresult = cursor.fetchall()
# # print("The length of \'myresult\' is: ", len(myresult)) # pokazuje ile rekordow ma zostac wykorzystanych na wykresie
# pacjenci = []
for x in myresult:
# pacjenci.append(str(x[0])+" "+str(x[1]))
self.nameLineEdit.setText(str(x[0]))
self.surnameLineEdit.setText(str(x[1]))
# self.sexLineEdit.setText(str(x[2]))
self.sexComboBox.clear()
self.sexComboBox.addItem(str(x[2]))
if self.sexComboBox.currentText()[0]=="M":
self.sexComboBox.addItem("Kobieta")
else:
self.sexComboBox.addItem("Mezczyzna")
self.birthDateLineEdit.setText(str(x[3]))
self.peselLineEdit.setText(str(x[4]))
self.phoneLineEdit.setText(str(x[5]))
self.emailLineEdit.setText(str(x[6]))
self.cityCodeLineEdit.setText(str(x[7]))
self.cityLineEdit.setText(str(x[8]))
self.streetLineEdit.setText(str(x[9]))
self.alarmValueLineEdit.setText(str(x[10]))
###################################################################
except:
pass
# print("SELECT query failed")
def pushButtonAbortClicked(self):
edit_patient_window.hide()
worker = Worker()
self.threadpool.start(worker)
def pushButtonSaveChangesClicked(self):
noweImie = self.nameLineEdit.text()
noweNazwisko = self.surnameLineEdit.text()
# nowaPlec = self.sexLineEdit.text()
nowaPlec = self.sexComboBox.currentText()
nowaData_urodzenia = self.birthDateLineEdit.text()
nowyPESEL = self.peselLineEdit.text()
nowyTelefon = self.phoneLineEdit.text()
nowyEmail = self.emailLineEdit.text()
nowyKod_pocztowy = self.cityCodeLineEdit.text()
nowaMiejscowosc = self.cityLineEdit.text()
nowaUlica = self.streetLineEdit.text()
nowaWartoscAlarmowa = self.alarmValueLineEdit.text()
worker = Worker()
self.threadpool.start(worker)
#Writing Query to insert data
        # Pass along which person is to be edited to the confirming button that performs the UPDATE
        # Read this data from the current ComboBox
wybrany_pacjent = self.patientToEditComboBox.currentText()
try:
wybrany_pacjent = wybrany_pacjent.split()
wybrane_imie = wybrany_pacjent[0]
wybrane_nazwisko = wybrany_pacjent[1]
except:
pass
query = ("UPDATE pacjenci SET imie=\'{imie2}\', nazwisko=\'{nazwisko2}\', plec=\'{plec2}\', data_urodzenia=\'{data_urodzenia2}\', PESEL=\'{PESEL2}\',\
telefon=\'{telefon2}\', email=\'{email2}\', kod_pocztowy=\'{kod_pocztowy2}\', miejscowosc=\'{miejscowosc2}\', ulica=\'{ulica2}\', wartosc_alarmowa=\'{wartosc_alarmowa2}\' WHERE imie LIKE\
\'{jakie_imie}\' AND nazwisko LIKE '\{jakie_nazwisko}\'".format(imie2=noweImie,nazwisko2=noweNazwisko,plec2=nowaPlec,\
data_urodzenia2=nowaData_urodzenia,PESEL2=nowyPESEL,telefon2=nowyTelefon,email2=nowyEmail,kod_pocztowy2=nowyKod_pocztowy,\
miejscowosc2=nowaMiejscowosc,ulica2=nowaUlica,wartosc_alarmowa2=nowaWartoscAlarmowa,jakie_imie=wybrane_imie,jakie_nazwisko=wybrane_nazwisko))
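        # Added, hedged sketch: composing SQL with str.format is injection-prone;
        # a parameterized equivalent would pass the values separately, e.g.:
        #   query = ("UPDATE pacjenci SET imie=%s, nazwisko=%s WHERE "
        #            "imie LIKE %s AND nazwisko LIKE %s")
        #   cursor.execute(query, (noweImie, noweNazwisko, wybrane_imie, wybrane_nazwisko))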
# taxi = (imie, nazwisko, plec, data_urodzenia, PESEL ,telefon, email, kod_pocztowy, miejscowosc, ulica) # zamiast jedynki mozna wrzucic zmienna pobraną z pola EditText (trzeba takie dodać) gdzie uzytkownik wpisze numer czujnika z palca LUB jego ID
try:
cursor.execute(query) #Execute the Query
cnx.commit()
# print("Zmieniono dane pacjenta.")
            # Clear the entered text
self.nameLineEdit.setText("")
self.surnameLineEdit.setText("")
# self.sexLineEdit.setText("")
self.sexComboBox.clear()
self.birthDateLineEdit.setText("")
self.peselLineEdit.setText("")
self.phoneLineEdit.setText("")
self.emailLineEdit.setText("")
self.cityCodeLineEdit.setText("")
self.cityLineEdit.setText("")
self.streetLineEdit.setText("")
self.alarmValueLineEdit.setText("")
notification_win.label.setText("Zmieniono dane pacjenta.")
notification_win.show()
            # TODO # record this action in the event log
###################### #log #rejestr #zdarzenie ########################################################################################
cursor.execute("SELECT ID_pracownika FROM personel WHERE login LIKE \"{jaki_login}\"".format(jaki_login=window.current_user))
ID_pracownika = cursor.fetchall()[0][0]
# # print("Wyswietlanie ID pracownika na podstawie loginu...")
# # print(ID_pracownika)
query = ("INSERT INTO rejestr_zdarzen (ID_pracownika,rodzaj_zdarzenia,opis_zdarzenia) VALUES (%s, %s, %s)")
taxi = (ID_pracownika, "Zmiana danych pacjenta {jakie_imie} {jakie_nazwisko}".format(jakie_imie=wybrane_imie,jakie_nazwisko=wybrane_nazwisko), "")
cursor.execute(query, taxi)
cnx.commit()
window.eventList.insertItem(0, "Zmiana danych pacjenta {jakie_imie} {jakie_nazwisko}, ".format(jakie_imie=wybrane_imie,jakie_nazwisko=wybrane_nazwisko)+str(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
########################################################################################################################################
except:
notification_win.label.setText("Niepoprawne dane. Zwróć uwagę, czy data urodzenia oraz email mają poprawny format.")
notification_win.show()
cnx.rollback()
def pushButtonDeletePatientClicked(self):
worker = Worker()
self.threadpool.start(worker)
#Writing Query to insert data
        # Pass along which person is to be edited to the confirming button that performs the UPDATE
        # Read this data from the current ComboBox
        ### Waiting for confirmation ...
        # TODO ask for confirmation before deleting the patient
        # the idea is that after confirming the deletion in delete_confirm_window, that window closes and the operation below, i.e. deleting the patient, continues
confirmed = 1
# delete_confirm_window.show()
qm = QMessageBox
ret = qm.question(self,'', "Czy na pewno chcesz usunąć tego pacjenta?", qm.Yes | qm.No)
if ret == qm.Yes:
try:
wybrany_pacjent = self.patientToEditComboBox.currentText()
wybrany_pacjent = wybrany_pacjent.split()
wybrane_imie = wybrany_pacjent[0]
wybrane_nazwisko = wybrany_pacjent[1]
query = ("DELETE FROM pacjenci WHERE imie LIKE \'{jakie_imie}\' AND nazwisko LIKE '\{jakie_nazwisko}\'".\
format(jakie_imie=wybrane_imie,jakie_nazwisko=wybrane_nazwisko))
# taxi = (imie, nazwisko, plec, data_urodzenia, PESEL ,telefon, email, kod_pocztowy, miejscowosc, ulica) # zamiast jedynki mozna wrzucic zmienna pobraną z pola EditText (trzeba takie dodać) gdzie uzytkownik wpisze numer czujnika z palca LUB jego ID
cursor.execute(query) #Execute the Query
cnx.commit()
# print("Usunieto pacjenta {jakie_imie} {jakie_nazwisko}.".format(jakie_imie=wybrane_imie,jakie_nazwisko=wybrane_nazwisko))
                # Clear the entered text
self.nameLineEdit.setText("")
self.surnameLineEdit.setText("")
# self.sexLineEdit.setText("")
self.sexComboBox.clear()
self.birthDateLineEdit.setText("")
self.peselLineEdit.setText("")
self.phoneLineEdit.setText("")
self.emailLineEdit.setText("")
self.cityCodeLineEdit.setText("")
self.cityLineEdit.setText("")
self.streetLineEdit.setText("")
self.alarmValueLineEdit.setText("")
###################### #log #rejestr #zdarzenie ########################################################################################
cursor.execute("SELECT ID_pracownika FROM personel WHERE login LIKE \"{jaki_login}\"".format(jaki_login=window.current_user))
ID_pracownika = cursor.fetchall()[0][0]
# # print("Wyswietlanie ID pracownika na podstawie loginu...")
# # print(ID_pracownika)
query = ("INSERT INTO rejestr_zdarzen (ID_pracownika,rodzaj_zdarzenia,opis_zdarzenia) VALUES (%s, %s, %s)")
taxi = (ID_pracownika, "Usuniecie pacjenta {jakie_imie} {jakie_nazwisko}".format(jakie_imie=wybrane_imie,jakie_nazwisko=wybrane_nazwisko), "")
cursor.execute(query, taxi)
cnx.commit()
window.eventList.insertItem(0, "Usuniecie pacjenta {jakie_imie} {jakie_nazwisko}, ".format(jakie_imie=wybrane_imie,jakie_nazwisko=wybrane_nazwisko)+str(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
########################################################################################################################################
notification_win.label.setText("Usunieto pacjenta {jakie_imie} {jakie_nazwisko}.".format(jakie_imie=wybrane_imie,jakie_nazwisko=wybrane_nazwisko))
notification_win.show()
                # TODO # record this action in the event log
except:
notification_win.label.setText("Wystapil problem podczas usuwania pacjenta. Sprawdz czy pacjent zostal wybrany.")
notification_win.show()
cnx.rollback()
pass
class new_sensor(QMainWindow): #
def __init__(self):
QMainWindow.__init__(self)
loadUi('add_sensor_gui.ui', self)
self.setWindowTitle("Dodawanie nowego czujnika")
self.pushButtonAdd.clicked.connect(self.pushButtonAddClicked)
self.pushButtonAddDefaultID.clicked.connect(self.pushButtonAddDefaultIDClicked)
self.pushButtonAbort.clicked.connect(self.pushButtonAbortClicked)
self.macLineEdit.setPlaceholderText("AABBCCDDEEFF")
self.threadpool = QThreadPool()
def pushButtonAbortClicked(self):
new_patient_window.hide()
worker = Worker()
self.threadpool.start(worker)
def pushButtonAddDefaultIDClicked(self):
worker = Worker()
self.threadpool.start(worker)
mac_address = self.macLineEdit.text()
#Writing Query to insert data
query = ("INSERT INTO czujniki (MAC_czujnika) VALUES (\'{jaki_mac}\')".format(jaki_mac=mac_address))
# taxi = (mac_address) # zamiast jedynki mozna wrzucic zmienna pobraną z pola EditText (trzeba takie dodać) gdzie uzytkownik wpisze numer czujnika z palca LUB jego ID
try:
cursor.execute(query) #Execute the Query
cnx.commit()
# print("Dodano nowy czujnik.")
            # Clear the entered text
self.macLineEdit.setText("")
self.sensorIDLineEdit.setText("")
notification_win.label.setText("Dodano nowy czujnik.")
notification_win.show()
            # TODO # record this action in the event log
###################### #log #rejestr #zdarzenie ########################################################################################
cursor.execute("SELECT ID_pracownika FROM personel WHERE login LIKE \"{jaki_login}\"".format(jaki_login=window.current_user))
ID_pracownika = cursor.fetchall()[0][0]
# # print("Wyswietlanie ID pracownika na podstawie loginu...")
# # print(ID_pracownika)
query = ("INSERT INTO rejestr_zdarzen (ID_pracownika,rodzaj_zdarzenia,opis_zdarzenia) VALUES (%s, %s, %s)")
taxi = (ID_pracownika, "Dodano czujnik, MAC: {jaki_mac}".format(jaki_mac=mac_address), "")
cursor.execute(query, taxi)
cnx.commit()
window.eventList.insertItem(0, "Dodano czujnik, MAC: {jaki_mac}, ".format(jaki_mac=mac_address)+str(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
########################################################################################################################################
new_sensor_window.hide()
except:
notification_win.label.setText("Niepoprawne dane. Zwróć uwagę, czy data urodzenia oraz email mają poprawny format.")
notification_win.show()
            # TODO change the message; ideally enforce characters 0-9, A-F
cnx.rollback()
def pushButtonAddClicked(self):
worker = Worker()
self.threadpool.start(worker)
mac_address = self.macLineEdit.text()
sensor_id = self.sensorIDLineEdit.text()
#Writing Query to insert data
query = ("INSERT INTO czujniki (ID_czujnika, MAC_czujnika) VALUES (\'{jakie_id}\', \'{jaki_mac}\')".format(jakie_id = sensor_id,jaki_mac=mac_address))
# taxi = (mac_address) # zamiast jedynki mozna wrzucic zmienna pobraną z pola EditText (trzeba takie dodać) gdzie uzytkownik wpisze numer czujnika z palca LUB jego ID
try:
cursor.execute(query) #Execute the Query
cnx.commit()
# print("Dodano nowy czujnik.")
            # Clear the entered text
self.macLineEdit.setText("")
self.sensorIDLineEdit.setText("")
notification_win.label.setText("Dodano nowy czujnik.")
notification_win.show()
            # TODO # record this action in the event log
###################### #log #rejestr #zdarzenie ########################################################################################
cursor.execute("SELECT ID_pracownika FROM personel WHERE login LIKE \"{jaki_login}\"".format(jaki_login=window.current_user))
ID_pracownika = cursor.fetchall()[0][0]
# # print("Wyswietlanie ID pracownika na podstawie loginu...")
# # print(ID_pracownika)
query = ("INSERT INTO rejestr_zdarzen (ID_pracownika,rodzaj_zdarzenia,opis_zdarzenia) VALUES (%s, %s, %s)")
taxi = (ID_pracownika, "Dodano czujnik, MAC: {jaki_mac}".format(jaki_mac=mac_address), "")
cursor.execute(query, taxi)
cnx.commit()
window.eventList.insertItem(0, "Dodano czujnik, MAC: {jaki_mac}, ".format(jaki_mac=mac_address)+str(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
########################################################################################################################################
new_sensor_window.hide()
except:
notification_win.label.setText("Nie udało się dodać czujnika.\nPodane ID czujnika może już istnieć w bazie danych.")
notification_win.show()
            # TODO change the message; ideally enforce characters 0-9, A-F
cnx.rollback()
class edit_sensor(QMainWindow): #
def __init__(self):
QMainWindow.__init__(self)
loadUi('edit_sensor_gui.ui', self)
self.setWindowTitle("Edytowanie danych czujnika")
self.pushButtonSaveChanges.clicked.connect(self.pushButtonSaveChangesClicked)
self.pushButtonAbort.clicked.connect(self.pushButtonAbortClicked)
self.macLineEdit.setPlaceholderText("AABBCCDDEEFF")
self.pushButtonFilter.clicked.connect(self.pushButtonFilterClicked)
self.pushButtonLoad.clicked.connect(self.pushButtonLoadClicked)
self.pushButtonDelete.clicked.connect(self.pushButtonDeleteClicked)
sizePolicy = QSizePolicy(QSizePolicy.Minimum, QSizePolicy.Minimum)
self.pushButtonDelete.setSizePolicy(sizePolicy)
self.threadpool = QThreadPool()
self.previous_mac = "Default mac string"
def pushButtonFilterClicked(self):
worker = Worker()
self.threadpool.start(worker)
        # Filter the sensor list
self.chooseToEditComboBox.clear()
# print("Wybor czujnika... ")
seekToEdit = self.filterToEditLineEdit.text()
# print(seekToEdit)
try:
cursor.execute("SELECT cz.ID_czujnika, cz.MAC_czujnika, IFNULL(pac.imie,'-'), IFNULL(pac.nazwisko,'-')\
FROM czujniki cz\
LEFT JOIN przydzial_czujnikow prz\
ON cz.ID_czujnika=prz.ID_czujnika\
LEFT JOIN pacjenci pac\
ON prz.ID_pacjenta=pac.ID_pacjenta\
WHERE cz.ID_czujnika LIKE \'%{seek}%\' OR cz.MAC_czujnika LIKE \'%{seek}%\'\
OR pac.imie LIKE \'%{seek}%\' OR pac.nazwisko LIKE \'%{seek}%\'".format(seek=seekToEdit))
            # LEFT JOIN also lists sensors that are not assigned to any patient
            # showing the first/last name next to the ID and MAC tells the user which patient a sensor belongs to, or whether it is "free"
            # add a BINARY prefix to the LIKE clauses if case-sensitive matching is desired
myresult = cursor.fetchall()
# # print("The length of \'myresult\' is: ", len(myresult)) # pokazuje ile rekordow ma zostac wykorzystanych na wykresie
czujniki = []
for x in myresult:
czujniki.append(str(x[0])+" "+str(x[1])+" "+str(x[2])+" "+str(x[3]))
self.chooseToEditComboBox.addItems(czujniki)
except:
pass
# print("SELECT query failed")
def pushButtonLoadClicked(self):
worker = Worker()
self.threadpool.start(worker)
# print("Ladowanie danych czujnika... ")
# seekHist = self.filterToEditLineEdit.text()
# # print(seekHist)
wybrany_czujnik = self.chooseToEditComboBox.currentText()
try:
wybrany_czujnik = wybrany_czujnik.split()
wybrane_id = wybrany_czujnik[0]
except:
pass
try:
cursor.execute("SELECT ID_czujnika, MAC_czujnika FROM czujniki WHERE ID_czujnika={jakie_id}".format(jakie_id=wybrane_id))
myresult = cursor.fetchall()
# # print("The length of \'myresult\' is: ", len(myresult)) # pokazuje ile rekordow ma zostac wykorzystanych na wykresie
# pacjenci = []
for x in myresult:
# pacjenci.append(str(x[0])+" "+str(x[1]))
self.idLineEdit.setText(str(x[0]))
self.macLineEdit.setText(str(x[1]))
###################################################################
except:
pass
# print("SELECT query failed")
self.previous_mac = self.macLineEdit.text()
def pushButtonAbortClicked(self):
        edit_sensor_window.hide()
worker = Worker()
self.threadpool.start(worker)
def pushButtonSaveChangesClicked(self):
worker = Worker()
self.threadpool.start(worker)
noweID = self.idLineEdit.text()
nowyMAC = self.macLineEdit.text()
        # determine which sensor is being edited by reading the current ComboBox selection
wybrany_czujnik = self.chooseToEditComboBox.currentText()
try:
wybrany_czujnik = wybrany_czujnik.split()
wybrane_id = wybrany_czujnik[0]
except:
pass
query = ("UPDATE czujniki SET ID_czujnika={ID_czujnika2}, MAC_czujnika=\'{MAC_czujnika2}\' WHERE ID_czujnika={jakie_id}"\
.format(ID_czujnika2=noweID,MAC_czujnika2=nowyMAC,jakie_id=wybrane_id))
try:
cursor.execute(query) #Execute the Query
cnx.commit()
# print("Zmieniono dane czujnika.")
# Czyszczenie wprowadzonego tekstu
self.idLineEdit.setText("")
self.macLineEdit.setText("")
self.pushButtonFilterClicked()
notification_win.label.setText("Zmieniono dane czujnika.")
notification_win.show()
# TODO # zarejestrowac ta akcje w logach zdarzen
###################### #log #rejestr #zdarzenie ########################################################################################
cursor.execute("SELECT ID_pracownika FROM personel WHERE login LIKE \"{jaki_login}\"".format(jaki_login=window.current_user))
ID_pracownika = cursor.fetchall()[0][0]
# # print("Wyswietlanie ID pracownika na podstawie loginu...")
# # print(ID_pracownika)
query = ("INSERT INTO rejestr_zdarzen (ID_pracownika,rodzaj_zdarzenia,opis_zdarzenia) VALUES (%s, %s, %s)")
taxi = (ID_pracownika, "Zmiana MAC czujnika z {stary_mac} na {jaki_mac}".format(stary_mac=self.previous_mac, jaki_mac=nowyMAC), "")
cursor.execute(query, taxi)
cnx.commit()
window.eventList.insertItem(0, "Zmiana MAC czujnika z {stary_mac} na {jaki_mac}, ".format(stary_mac=self.previous_mac, jaki_mac=nowyMAC)+str(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
########################################################################################################################################
except:
notification_win.label.setText("Niepoprawne dane. Zwróć uwagę, czy data urodzenia oraz email mają poprawny format.")
notification_win.show()
cnx.rollback()
def pushButtonDeleteClicked(self):
worker = Worker()
self.threadpool.start(worker)
        # deletion requires confirmation; the old delete_confirm_window flow was replaced by the QMessageBox below
qm = QMessageBox
ret = qm.question(self,'', "Czy na pewno chcesz usunąć ten czujnik?", qm.Yes | qm.No)
if ret == qm.Yes:
try:
wybrany_czujnik = self.chooseToEditComboBox.currentText()
wybrany_czujnik = wybrany_czujnik.split()
wybrane_id = wybrany_czujnik[0]
mac_usuwanego_czujnika = wybrany_czujnik[1]
query = ("DELETE FROM czujniki WHERE ID_czujnika={jakie_id}".\
format(jakie_id=int(wybrane_id)))
cursor.execute(query) #Execute the Query
cnx.commit()
# print("Usunieto czujnik z bazy, MAC: {jaki_mac}.".format(jaki_mac=mac_usuwanego_czujnika))
# Czyszczenie wprowadzonego tekstu
self.idLineEdit.setText("")
self.macLineEdit.setText("")
self.pushButtonFilterClicked()
notification_win.label.setText("Usunieto czujnik z bazy danych. MAC: {jaki_mac}.".format(jaki_mac=mac_usuwanego_czujnika))
notification_win.show()
# TODO # zarejestrowac ta akcje w logach zdarzen
###################### #log #rejestr #zdarzenie ########################################################################################
cursor.execute("SELECT ID_pracownika FROM personel WHERE login LIKE \"{jaki_login}\"".format(jaki_login=window.current_user))
ID_pracownika = cursor.fetchall()[0][0]
# # print("Wyswietlanie ID pracownika na podstawie loginu...")
# # print(ID_pracownika)
query = ("INSERT INTO rejestr_zdarzen (ID_pracownika,rodzaj_zdarzenia,opis_zdarzenia) VALUES (%s, %s, %s)")
taxi = (ID_pracownika, "Usunieto czujnik, MAC: {jaki_mac}".format(jaki_mac=mac_usuwanego_czujnika), "")
cursor.execute(query, taxi)
cnx.commit()
window.eventList.insertItem(0, "Usunieto czujnik, MAC: {jaki_mac}, ".format(jaki_mac=mac_usuwanego_czujnika)+str(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
########################################################################################################################################
except:
notification_win.label.setText("Wystapil problem podczas usuwania czujnika. Sprawdz czy pacjent zostal wybrany.")
notification_win.show()
cnx.rollback()
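# A hardening sketch (assumption, not in the original): the dialogs above splice
# user input into SQL with str.format, which is open to SQL injection.
# mysql.connector supports parameterized queries (already used in this file for
# the event log); the sensor UPDATE could follow the same pattern:
#
#     query = "UPDATE czujniki SET ID_czujnika=%s, MAC_czujnika=%s WHERE ID_czujnika=%s"
#     cursor.execute(query, (noweID, nowyMAC, wybrane_id))
#     cnx.commit()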
class assign_sensor(QMainWindow): #
def __init__(self):
QMainWindow.__init__(self)
loadUi('assign_sensor_gui.ui', self)
self.setWindowTitle("Zmiana przypisania czujnikow")
self.pushButtonAbort.clicked.connect(self.pushButtonAbortClicked)
self.pushButtonFilter.clicked.connect(self.pushButtonFilterClicked)
self.pushButtonAssign.clicked.connect(self.pushButtonAssignClicked)
self.pushButtonFilterEditPatient.clicked.connect(self.pushButtonFilterEditPatientClicked)
self.pushButtonDelete.clicked.connect(self.pushButtonDeleteClicked)
#-------------------------------------------------------------------------- nie dokonczone skalowanie okna
sizePolicy = QSizePolicy(QSizePolicy.Minimum, QSizePolicy.Minimum)
self.pushButtonDelete.setSizePolicy(sizePolicy)
self.threadpool = QThreadPool()
self.MAC_assigned = "Replace with ID..."
self.assigned_to_name = "Replace with name"
self.assigned_to_surname = "Replace with surname"
def pushButtonFilterClicked(self):
        # Filter the sensor list
self.chooseToEditComboBox.clear()
# print("Wybor czujnika... ")
worker = Worker()
self.threadpool.start(worker)
seekToEdit = self.filterToEditLineEdit.text()
# print(seekToEdit)
try:
cursor.execute("SELECT cz.ID_czujnika, cz.MAC_czujnika, IFNULL(pac.imie,'-'), IFNULL(pac.nazwisko,'-')\
FROM czujniki cz\
LEFT JOIN przydzial_czujnikow prz\
ON cz.ID_czujnika=prz.ID_czujnika\
LEFT JOIN pacjenci pac\
ON prz.ID_pacjenta=pac.ID_pacjenta\
WHERE cz.ID_czujnika LIKE BINARY \'%{seek}%\' OR cz.MAC_czujnika LIKE BINARY \'%{seek}%\'\
OR pac.imie LIKE BINARY \'%{seek}%\' OR pac.nazwisko LIKE BINARY \'%{seek}%\'".format(seek=seekToEdit))
            # LEFT JOIN also lists sensors that are not assigned to any patient
            # showing the first/last name next to the ID and MAC tells the user which patient a sensor belongs to, or whether it is "free"
            # remove the BINARY prefix from the LIKE clauses for case-insensitive matching
myresult = cursor.fetchall()
# # print("The length of \'myresult\' is: ", len(myresult)) # pokazuje ile rekordow ma zostac wykorzystanych na wykresie
czujniki = []
for x in myresult:
czujniki.append(str(x[0])+" "+str(x[1])+" "+str(x[2])+" "+str(x[3]))
self.chooseToEditComboBox.addItems(czujniki)
except:
pass
# print("SELECT query failed")
def pushButtonFilterEditPatientClicked(self):
        # Filter the patient list
self.patientToEditComboBox.clear()
worker = Worker()
self.threadpool.start(worker)
# print("Wybor pacjentow... ")
seekToEdit = self.filterPatientLineEdit.text()
# print(seekToEdit)
try:
cursor.execute("SELECT ID_pacjenta, imie, nazwisko FROM pacjenci WHERE imie LIKE BINARY \'%{seek}%\' OR nazwisko LIKE BINARY \'%{seek}%\' OR ID_pacjenta LIKE BINARY \'%{seek}%\'".format(seek=seekToEdit))
            # remove the BINARY prefix from the LIKE clauses for case-insensitive matching
myresult = cursor.fetchall()
# # print("The length of \'myresult\' is: ", len(myresult)) # pokazuje ile rekordow ma zostac wykorzystanych na wykresie
pacjenci = []
for x in myresult:
pacjenci.append(str(x[0])+" "+str(x[1])+" "+str(x[2]))
self.patientToEditComboBox.addItems(pacjenci)
###################################################################
except:
pass
def pushButtonAbortClicked(self):
assign_sensor_window.hide()
worker = Worker()
self.threadpool.start(worker)
def pushButtonAssignClicked(self):
worker = Worker()
self.threadpool.start(worker)
        # read the selected sensor and patient from the current ComboBoxes
wybrany_czujnik = self.chooseToEditComboBox.currentText()
wybrany_pacjent = self.patientToEditComboBox.currentText()
        # remember the current ComboBox indexes so the selection can be restored after the refresh
id_of_assigned = self.chooseToEditComboBox.currentIndex()
id_of_chosen_patient = self.patientToEditComboBox.currentIndex()
try:
wybrany_czujnik = wybrany_czujnik.split()
wybrane_id = wybrany_czujnik[0]
MAC_assigned = wybrany_czujnik[1]
wybrany_pacjent = wybrany_pacjent.split()
wybrane_id_pacjenta = wybrany_pacjent[0]
wybrane_imie = wybrany_pacjent[1]
wybrane_nazwisko = wybrany_pacjent[2]
# print("Udalo sie odczytac dane z ComboBoxow")
except:
pass
query = ("INSERT INTO przydzial_czujnikow (ID_pacjenta,ID_czujnika,status) VALUES ({ID_pacjenta_2},{ID_czujnika_2},'default')"\
.format(ID_pacjenta_2=wybrane_id_pacjenta,ID_czujnika_2=wybrane_id))
# print("query: "+query)
try:
cursor.execute(query) #Execute the Query
cnx.commit()
# print("Dodano nowe przypisanie.")
# Czyszczenie wprowadzonego tekstu
self.filterToEditLineEdit.setText("")
self.filterPatientLineEdit.setText("")
self.pushButtonFilterClicked()
self.chooseToEditComboBox.setCurrentIndex(id_of_assigned)
self.pushButtonFilterEditPatientClicked()
self.patientToEditComboBox.setCurrentIndex(id_of_chosen_patient)
notification_win.label.setText("Dodano nowe przypisanie.")
notification_win.show()
# TODO # zarejestrowac ta akcje w logach zdarzen
###################### #log #rejestr #zdarzenie ########################################################################################
cursor.execute("SELECT ID_pracownika FROM personel WHERE login LIKE \"{jaki_login}\"".format(jaki_login=window.current_user))
ID_pracownika = cursor.fetchall()[0][0]
# # print("Wyswietlanie ID pracownika na podstawie loginu...")
# # print(ID_pracownika)
query = ("INSERT INTO rejestr_zdarzen (ID_pracownika,rodzaj_zdarzenia,opis_zdarzenia) VALUES (%s, %s, %s)")
taxi = (ID_pracownika, "Przypisano czujnik, MAC: {jaki_mac} pacjentowi {jakie_imie} {jakie_nazwisko}, ".format(jaki_mac=MAC_assigned,jakie_imie=wybrane_imie,jakie_nazwisko=wybrane_nazwisko), "")
cursor.execute(query, taxi)
cnx.commit()
window.eventList.insertItem(0, "Przypisano czujnik, MAC: {jaki_mac} pacjentowi {jakie_imie} {jakie_nazwisko}, ".format(jaki_mac=MAC_assigned,jakie_imie=wybrane_imie,jakie_nazwisko=wybrane_nazwisko)+str(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
########################################################################################################################################
except:
notification_win.label.setText("Nie udalo się dodać przypisania.\nWybrany czujnik może już być przypisany do innego pacjenta.\n\nUsuń przypisanie i spróbuj ponownie.")
notification_win.show()
cnx.rollback()
pass
def pushButtonDeleteClicked(self):
worker = Worker()
self.threadpool.start(worker)
        # deletion requires confirmation; the old delete_confirm_window flow was replaced by the QMessageBox below
qm = QMessageBox
ret = qm.question(self,'', "Czy na pewno chcesz usunąć to przypisanie?", qm.Yes | qm.No)
if ret == qm.Yes:
try:
wybrany_czujnik = self.chooseToEditComboBox.currentText()
wybrany_czujnik = wybrany_czujnik.split()
wybrane_id = wybrany_czujnik[0]
id_of_deleted = self.chooseToEditComboBox.currentIndex()
id_of_chosen_patient = self.patientToEditComboBox.currentIndex()
query = ("DELETE FROM przydzial_czujnikow WHERE ID_czujnika={jakie_id}".\
format(jakie_id=int(wybrane_id)))
cursor.execute(query) #Execute the Query
cnx.commit()
# print("Usunieto czujnik z bazy.")
######################################### potrzebne do rejestru zdarzen
wybrany_czujnik = self.chooseToEditComboBox.currentText()
wybrany_pacjent = self.patientToEditComboBox.currentText()
try:
wybrany_czujnik = wybrany_czujnik.split()
wybrane_id = wybrany_czujnik[0]
MAC_assigned = wybrany_czujnik[1]
wybrany_pacjent = wybrany_pacjent.split()
wybrane_id_pacjenta = wybrany_pacjent[0]
wybrane_imie = wybrany_pacjent[1]
wybrane_nazwisko = wybrany_pacjent[2]
except:
pass
#########################################
# Czyszczenie wprowadzonego tekstu
self.filterToEditLineEdit.setText("")
self.filterPatientLineEdit.setText("")
self.pushButtonFilterClicked()
self.chooseToEditComboBox.setCurrentIndex(id_of_deleted)
self.pushButtonFilterEditPatientClicked()
self.patientToEditComboBox.setCurrentIndex(id_of_chosen_patient)
notification_win.label.setText("Usunieto przypisanie z bazy.")
notification_win.show()
# TODO # zarejestrowac ta akcje w logach zdarzen
###################### #log #rejestr #zdarzenie ########################################################################################
cursor.execute("SELECT ID_pracownika FROM personel WHERE login LIKE \"{jaki_login}\"".format(jaki_login=window.current_user))
ID_pracownika = cursor.fetchall()[0][0]
# # print("Wyswietlanie ID pracownika na podstawie loginu...")
# # print(ID_pracownika)
query = ("INSERT INTO rejestr_zdarzen (ID_pracownika,rodzaj_zdarzenia,opis_zdarzenia) VALUES (%s, %s, %s)")
taxi = (ID_pracownika, "Usunieto przypisanie czujnika, MAC: {jaki_mac} , pacjent: {jakie_imie} {jakie_nazwisko}, ".format(jaki_mac=MAC_assigned,jakie_imie=wybrane_imie,jakie_nazwisko=wybrane_nazwisko), "")
cursor.execute(query, taxi)
cnx.commit()
window.eventList.insertItem(0, "Usunieto przypisanie czujnika, MAC: {jaki_mac} , pacjent: {jakie_imie} {jakie_nazwisko}, ".format(jaki_mac=MAC_assigned,jakie_imie=wybrane_imie,jakie_nazwisko=wybrane_nazwisko)+str(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
########################################################################################################################################
except:
notification_win.label.setText("Wystapil problem podczas usuwania przypisania. Sprawdz czy pacjent zostal wybrany.")
notification_win.show()
cnx.rollback()
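# A deduplication sketch (assumption, not in the original): every dialog repeats
# the same "#log #rejestr #zdarzenie" block (look up ID_pracownika, INSERT into
# rejestr_zdarzen, prepend to the GUI event list). A hypothetical shared helper,
# relying on the module-level cursor/cnx/window created in __main__:
import datetime

def log_event(opis):
    """Record an event for the logged-in user and show it in the main window."""
    cursor.execute("SELECT ID_pracownika FROM personel WHERE login LIKE %s",
                   (window.current_user,))
    id_pracownika = cursor.fetchall()[0][0]
    cursor.execute("INSERT INTO rejestr_zdarzen (ID_pracownika, rodzaj_zdarzenia, opis_zdarzenia) VALUES (%s, %s, %s)",
                   (id_pracownika, opis, ""))
    cnx.commit()
    window.eventList.insertItem(0, opis + ", " + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))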
class new_user(QMainWindow): #
def __init__(self):
QMainWindow.__init__(self)
loadUi('add_user_gui.ui', self)
self.setWindowTitle("Dodawanie nowego pracownika")
self.pushButtonAdd.clicked.connect(self.pushButtonAddClicked)
self.pushButtonAbort.clicked.connect(self.pushButtonAbortClicked)
self.birthDateLineEdit.setPlaceholderText("RRRR-MM-DD")
self.hireDateLineEdit.setPlaceholderText("RRRR-MM-DD")
self.emailLineEdit.setPlaceholderText("[email protected]")
self.passwordLineEdit.setEchoMode(QtWidgets.QLineEdit.Password)
self.sexComboBox.addItem("Mężczyzna")
self.sexComboBox.addItem("Kobieta")
# TODO # wymagac od loginu minimum 5 znakow, od hasla optymalnie 8+ znakow i A-Z, a-z, 0-9
self.threadpool = QThreadPool()
def pushButtonAbortClicked(self):
        new_user_window.hide()
worker = Worker()
self.threadpool.start(worker)
def pushButtonAddClicked(self):
imie = self.nameLineEdit.text()
nazwisko = self.surnameLineEdit.text()
# plec = self.sexLineEdit.text()
plec = self.sexComboBox.currentText()
data_urodzenia = self.birthDateLineEdit.text()
PESEL = self.peselLineEdit.text()
data_zatrudnienia = self.hireDateLineEdit.text()
login = self.loginLineEdit.text()
zaszyfrowane_haslo = encrypt_string(self.passwordLineEdit.text()) # zamiana hasla jawnego na hash
# print(zaszyfrowane_haslo) # TODO # mozna skasowac, wyswietlenie kontrolne
        telefon = self.phoneLineEdit.text()
email = self.emailLineEdit.text()
kod_pocztowy = self.cityCodeLineEdit.text()
miejscowosc = self.cityLineEdit.text()
ulica = self.streetLineEdit.text()
worker = Worker()
self.threadpool.start(worker)
#Writing Query to insert data
query = ("INSERT INTO personel (imie, nazwisko, plec, data_urodzenia, PESEL, data_zatrudnienia, login, zaszyfrowane_haslo, telefon, email, kod_pocztowy, miejscowosc, ulica) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)")
taxi = (imie, nazwisko, plec, data_urodzenia, PESEL , data_zatrudnienia, login, zaszyfrowane_haslo, telefon, email, kod_pocztowy, miejscowosc, ulica) # zamiast jedynki mozna wrzucic zmienna pobraną z pola EditText (trzeba takie dodać) gdzie uzytkownik wpisze numer czujnika z palca LUB jego ID
try:
cursor.execute(query, taxi) #Execute the Query
cnx.commit()
# print("Dodano nowego pracownika.")
self.nameLineEdit.setText("")
self.surnameLineEdit.setText("")
# self.sexLineEdit.setText("")
self.sexComboBox.clear()
self.sexComboBox.addItem("Mężczyzna")
self.sexComboBox.addItem("Kobieta")
self.birthDateLineEdit.setText("")
self.peselLineEdit.setText("")
self.hireDateLineEdit.setText("")
self.loginLineEdit.setText("")
self.passwordLineEdit.setText("")
self.phoneLineEdit.setText("")
self.emailLineEdit.setText("")
self.cityCodeLineEdit.setText("")
self.cityLineEdit.setText("")
self.streetLineEdit.setText("")
notification_win.label.setText("Dodano nowego pracownika.")
notification_win.show()
# TODO # zarejestrowac ta akcje w logach zdarzen
###################### #log #rejestr #zdarzenie ########################################################################################
cursor.execute("SELECT ID_pracownika FROM personel WHERE login LIKE \"{jaki_login}\"".format(jaki_login=window.current_user))
ID_pracownika = cursor.fetchall()[0][0]
# # print("Wyswietlanie ID pracownika na podstawie loginu...")
# # print(ID_pracownika)
query = ("INSERT INTO rejestr_zdarzen (ID_pracownika,rodzaj_zdarzenia,opis_zdarzenia) VALUES (%s, %s, %s)")
taxi = (ID_pracownika, "Dodano pracownika {jakie_imie} {jakie_nazwisko}, ".format(jakie_imie=imie,jakie_nazwisko=nazwisko), "")
cursor.execute(query, taxi)
cnx.commit()
window.eventList.insertItem(0, "Dodano pracownika {jakie_imie} {jakie_nazwisko}, ".format(jakie_imie=imie,jakie_nazwisko=nazwisko)+str(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
########################################################################################################################################
new_user_window.hide()
except:
# print("Niepoprawne dane. Zwróć uwagę, czy data urodzenia oraz email mają poprawny format.")
cnx.rollback()
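# A policy sketch (assumption, not in the original): the TODO in new_user.__init__
# asks for a minimum login length and a stronger password (8+ characters with
# A-Z, a-z, 0-9). A hypothetical check that pushButtonAddClicked could run
# before hashing the password:
def credentials_ok(login, password):
    """Return True if login/password satisfy the policy described in the TODO."""
    if len(login) < 5 or len(password) < 8:
        return False
    return (any(c.isupper() for c in password)
            and any(c.islower() for c in password)
            and any(c.isdigit() for c in password))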
class edit_user(QMainWindow): #
def __init__(self):
QMainWindow.__init__(self)
loadUi('edit_user_gui.ui', self)
self.setWindowTitle("Edycja danych pracownika")
self.pushButtonSaveChanges.clicked.connect(self.pushButtonSaveChangesClicked)
self.pushButtonAbort.clicked.connect(self.pushButtonAbortClicked)
self.birthDateLineEdit.setPlaceholderText("RRRR-MM-DD")
self.hireDateLineEdit.setPlaceholderText("RRRR-MM-DD")
self.emailLineEdit.setPlaceholderText("[email protected]")
self.pushButtonFilterEditUser.clicked.connect(self.pushButtonFilterEditUserClicked)
self.pushButtonLoadToEditUser.clicked.connect(self.pushButtonLoadToEditUserClicked)
self.pushButtonDeleteUser.clicked.connect(self.pushButtonDeleteUserClicked)
self.pushButtonChangePass.clicked.connect(self.pushButtonChangePassClicked)
self.oldPassLineEdit.setEchoMode(QtWidgets.QLineEdit.Password)
self.newPassLineEdit.setEchoMode(QtWidgets.QLineEdit.Password)
self.newPassRepeatLineEdit.setEchoMode(QtWidgets.QLineEdit.Password)
self.sexComboBox.addItem("Mężczyzna")
self.sexComboBox.addItem("Kobieta")
self.threadpool = QThreadPool()
def pushButtonChangePassClicked(self):
worker = Worker()
self.threadpool.start(worker)
oldPass = encrypt_string(self.oldPassLineEdit.text())
newPass = encrypt_string(self.newPassLineEdit.text())
newPassRepeat = encrypt_string(self.newPassRepeatLineEdit.text())
login = self.loginLineEdit.text()
cursor.execute("SELECT zaszyfrowane_haslo FROM personel WHERE login LIKE \"{jaki_login}\"".format(jaki_login=login)) #Execute the Query
myresult = cursor.fetchall() # przeczytany hasz wlasciwego hasla # zakomentowac oba wiersze
myresult = myresult[0][0]
# # print(myresult) # kontrolnie, pokazanie HASZU hasla z bazy
# # print(oldPass)
if myresult==oldPass:
if newPass==newPassRepeat:
try:
cursor.execute("UPDATE personel SET zaszyfrowane_haslo = \'{new_password}\' WHERE login LIKE \"{jaki_login}\"".format(new_password=newPass,jaki_login=login))
self.loginLineEdit.setText("")
self.oldPassLineEdit.setText("")
self.newPassLineEdit.setText("")
self.newPassRepeatLineEdit.setText("")
notification_win.label.setText("Haslo zostalo zmienione.")
notification_win.show()
# TODO # zarejestrowac ta akcje w logach zdarzen
###################### #log #rejestr #zdarzenie ########################################################################################
cursor.execute("SELECT ID_pracownika FROM personel WHERE login LIKE \"{jaki_login}\"".format(jaki_login=window.current_user)) # informacja o tym, na czyim koncie dokonano zmian
ID_pracownika = cursor.fetchall()[0][0]
cursor.execute("SELECT imie, nazwisko FROM personel WHERE login LIKE \"{jaki_login}\"".format(jaki_login=login)) # imie i nazwisko osoby, ktorej haslo zostalo zmienione
imie_i_nazwisko = cursor.fetchall()
wybrane_imie = imie_i_nazwisko[0][0]
wybrane_nazwisko = imie_i_nazwisko[0][1]
# # print("Wyswietlanie ID pracownika na podstawie loginu...")
# # print(ID_pracownika)
query = ("INSERT INTO rejestr_zdarzen (ID_pracownika,rodzaj_zdarzenia,opis_zdarzenia) VALUES (%s, %s, %s)")
taxi = (ID_pracownika, "Zmieniono haslo pracownika {jakie_imie} {jakie_nazwisko}".format(jakie_imie=wybrane_imie,jakie_nazwisko=wybrane_nazwisko), "")
cursor.execute(query, taxi)
cnx.commit()
window.eventList.insertItem(0, "Zmieniono haslo pracownika {jakie_imie} {jakie_nazwisko}, ".format(jakie_imie=wybrane_imie,jakie_nazwisko=wybrane_nazwisko)+str(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
########################################################################################################################################
except:
notification_win.label.setText("Nie udalo sie zmienic hasla.")
notification_win.show()
window.eventList.insertItem(0, "Blad podczas proby zmiany hasla, "+str(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
else:
notification_win.label.setText("Nieudana proba zmiany hasla.")
notification_win.show()
window.eventList.insertItem(0, "Nieudana proba zmiany hasla, "+str(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
def pushButtonFilterEditUserClicked(self):
        # Filter the employee list
self.userToEditComboBox.clear()
worker = Worker()
self.threadpool.start(worker)
# print("Wybor pracownikow... ")
seekToEdit = self.filterToEditLineEdit.text()
# print(seekToEdit)
try:
cursor.execute("SELECT imie, nazwisko FROM personel WHERE imie LIKE BINARY \'%{seek}%\' OR nazwisko LIKE BINARY \'%{seek}%\' OR ID_pracownika LIKE BINARY \'%{seek}%\'".format(seek=seekToEdit))
            # remove the BINARY prefix from the LIKE clauses for case-insensitive matching
myresult = cursor.fetchall()
# # print("The length of \'myresult\' is: ", len(myresult)) # pokazuje ile rekordow ma zostac wykorzystanych na wykresie
pracownicy = []
for x in myresult:
pracownicy.append(str(x[0])+" "+str(x[1]))
self.userToEditComboBox.addItems(pracownicy)
###################################################################
except:
pass
def pushButtonLoadToEditUserClicked(self):
worker = Worker()
self.threadpool.start(worker)
# print("Ladowanie danych pracownika... ")
# seekHist = self.filterToEditLineEdit.text()
# # print(seekHist)
wybrany_pracownik = self.userToEditComboBox.currentText()
try:
wybrany_pracownik = wybrany_pracownik.split()
wybrane_imie = wybrany_pracownik[0]
wybrane_nazwisko = wybrany_pracownik[1]
except:
pass
try:
cursor.execute("SELECT imie, nazwisko, plec, data_urodzenia, PESEL, data_zatrudnienia, telefon, email, kod_pocztowy, miejscowosc, ulica FROM personel WHERE imie LIKE \'%{imie}%\' AND nazwisko LIKE \'%{nazwisko}%\'".format(imie=wybrane_imie, nazwisko=wybrane_nazwisko))
myresult = cursor.fetchall()
# # print("The length of \'myresult\' is: ", len(myresult)) # pokazuje ile rekordow ma zostac wykorzystanych na wykresie
# pacjenci = []
for x in myresult:
# pacjenci.append(str(x[0])+" "+str(x[1]))
self.nameLineEdit.setText(str(x[0]))
self.surnameLineEdit.setText(str(x[1]))
# self.sexLineEdit.setText(str(x[2]))
self.sexComboBox.clear()
self.sexComboBox.addItem(str(x[2]))
if self.sexComboBox.currentText()[0]=="M":
self.sexComboBox.addItem("Kobieta")
else:
self.sexComboBox.addItem("Mezczyzna")
self.birthDateLineEdit.setText(str(x[3]))
self.peselLineEdit.setText(str(x[4]))
self.hireDateLineEdit.setText(str(x[5]))
self.phoneLineEdit.setText(str(x[6]))
self.emailLineEdit.setText(str(x[7]))
self.cityCodeLineEdit.setText(str(x[8]))
self.cityLineEdit.setText(str(x[9]))
self.streetLineEdit.setText(str(x[10]))
###################################################################
except:
pass
def pushButtonAbortClicked(self):
        edit_user_window.hide()
worker = Worker()
self.threadpool.start(worker)
def pushButtonSaveChangesClicked(self):
noweImie = self.nameLineEdit.text()
noweNazwisko = self.surnameLineEdit.text()
nowaPlec = self.sexComboBox.currentText()
nowaData_urodzenia = self.birthDateLineEdit.text()
nowyPESEL = self.peselLineEdit.text()
nowaData_zatrudnienia = self.hireDateLineEdit.text()
nowyTelefon = self.phoneLineEdit.text()
nowyEmail = self.emailLineEdit.text()
nowyKod_pocztowy = self.cityCodeLineEdit.text()
nowaMiejscowosc = self.cityLineEdit.text()
nowaUlica = self.streetLineEdit.text()
worker = Worker()
self.threadpool.start(worker)
        # determine which employee is being edited by reading the current ComboBox selection
wybrany_pacjent = self.userToEditComboBox.currentText()
try:
wybrany_pacjent = wybrany_pacjent.split()
wybrane_imie = wybrany_pacjent[0]
wybrane_nazwisko = wybrany_pacjent[1]
except:
pass
query = ("UPDATE personel SET imie=\'{imie2}\', nazwisko=\'{nazwisko2}\', plec=\'{plec2}\', data_urodzenia=\'{data_urodzenia2}\', PESEL=\'{PESEL2}\',\
data_zatrudnienia=\'{data_zatrudnienia2}\', telefon=\'{telefon2}\', email=\'{email2}\', kod_pocztowy=\'{kod_pocztowy2}\', miejscowosc=\'{miejscowosc2}\', ulica=\'{ulica2}\' WHERE imie LIKE\
        \'{jakie_imie}\' AND nazwisko LIKE \'{jakie_nazwisko}\'".format(imie2=noweImie,nazwisko2=noweNazwisko,plec2=nowaPlec,\
data_urodzenia2=nowaData_urodzenia,PESEL2=nowyPESEL,data_zatrudnienia2=nowaData_zatrudnienia,telefon2=nowyTelefon,email2=nowyEmail,kod_pocztowy2=nowyKod_pocztowy,\
miejscowosc2=nowaMiejscowosc,ulica2=nowaUlica,jakie_imie=wybrane_imie,jakie_nazwisko=wybrane_nazwisko))
try:
cursor.execute(query) #Execute the Query
cnx.commit()
# print("Zmieniono dane pracownika.")
# Czyszczenie wprowadzonego tekstu
self.nameLineEdit.setText("")
self.surnameLineEdit.setText("")
# self.sexLineEdit.setText("")
self.sexComboBox.clear()
self.sexComboBox.addItem("Mężczyzna")
self.sexComboBox.addItem("Kobieta")
self.birthDateLineEdit.setText("")
self.peselLineEdit.setText("")
self.hireDateLineEdit.setText("")
self.phoneLineEdit.setText("")
self.emailLineEdit.setText("")
self.cityCodeLineEdit.setText("")
self.cityLineEdit.setText("")
self.streetLineEdit.setText("")
notification_win.label.setText("Zmieniono dane pracownika.")
notification_win.show()
# TODO # zarejestrowac ta akcje w logach zdarzen
###################### #log #rejestr #zdarzenie ########################################################################################
cursor.execute("SELECT ID_pracownika FROM personel WHERE login LIKE \"{jaki_login}\"".format(jaki_login=window.current_user))
ID_pracownika = cursor.fetchall()[0][0]
# # print("Wyswietlanie ID pracownika na podstawie loginu...")
# # print(ID_pracownika)
query = ("INSERT INTO rejestr_zdarzen (ID_pracownika,rodzaj_zdarzenia,opis_zdarzenia) VALUES (%s, %s, %s)")
taxi = (ID_pracownika, "Zmieniono dane pracownika {jakie_imie} {jakie_nazwisko}".format(jakie_imie=wybrane_imie,jakie_nazwisko=wybrane_nazwisko), "")
cursor.execute(query, taxi)
cnx.commit()
window.eventList.insertItem(0, "Zmieniono dane pracownika {jakie_imie} {jakie_nazwisko}, ".format(jakie_imie=wybrane_imie,jakie_nazwisko=wybrane_nazwisko)+str(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
########################################################################################################################################
except:
notification_win.label.setText("Niepoprawne dane. Zwróć uwagę, czy data urodzenia, data zatrudnienia oraz email mają poprawny format.")
notification_win.show()
cnx.rollback()
def pushButtonDeleteUserClicked(self):
worker = Worker()
self.threadpool.start(worker)
result = None
while result is None:
try:
# auth = input("Podaj haslo do bazy:\n")
cnx = mysql.connector.connect(user = 'user', password = 'userpass',
host = 'localhost',
database = 'main_db')
result = cnx
except:
pass
cursor = cnx.cursor(buffered=True)
        # deletion requires confirmation; the old delete_confirm_window flow was replaced by the QMessageBox below
# print("Polaczono z baza danych...")
qm = QMessageBox
ret = qm.question(self,'', "Czy na pewno chcesz usunąć tego pracownika?", qm.Yes | qm.No)
if ret == qm.Yes:
try:
wybrany_pracownik = self.userToEditComboBox.currentText()
wybrany_pracownik = wybrany_pracownik.split()
wybrane_imie = wybrany_pracownik[0]
wybrane_nazwisko = wybrany_pracownik[1]
query = ("DELETE FROM personel WHERE imie LIKE \'{jakie_imie}\' AND nazwisko LIKE '\{jakie_nazwisko}\'".\
format(jakie_imie=wybrane_imie,jakie_nazwisko=wybrane_nazwisko))
cursor.execute(query) #Execute the Query
cnx.commit()
# print("Usunieto pracownika.")
# Czyszczenie wprowadzonego tekstu
self.nameLineEdit.setText("")
self.surnameLineEdit.setText("")
# self.sexLineEdit.setText("")
self.sexComboBox.clear()
self.sexComboBox.addItem("Mężczyzna")
self.sexComboBox.addItem("Kobieta")
self.birthDateLineEdit.setText("")
self.peselLineEdit.setText("")
self.hireDateLineEdit.setText("")
self.phoneLineEdit.setText("")
self.emailLineEdit.setText("")
self.cityCodeLineEdit.setText("")
self.cityLineEdit.setText("")
self.streetLineEdit.setText("")
notification_win.label.setText("Usunieto pracownika {jakie_imie} {jakie_nazwisko}, ".format(jakie_imie=wybrane_imie,jakie_nazwisko=wybrane_nazwisko))
notification_win.show()
# TODO # zarejestrowac ta akcje w logach zdarzen
###################### #log #rejestr #zdarzenie ########################################################################################
cursor.execute("SELECT ID_pracownika FROM personel WHERE login LIKE \"{jaki_login}\"".format(jaki_login=window.current_user))
ID_pracownika = cursor.fetchall()[0][0]
# # print("Wyswietlanie ID pracownika na podstawie loginu...")
# # print(ID_pracownika)
query = ("INSERT INTO rejestr_zdarzen (ID_pracownika,rodzaj_zdarzenia,opis_zdarzenia) VALUES (%s, %s, %s)")
taxi = (ID_pracownika, "Usunieto pracownika {jakie_imie} {jakie_nazwisko}".format(jakie_imie=wybrane_imie,jakie_nazwisko=wybrane_nazwisko), "")
cursor.execute(query, taxi)
cnx.commit()
window.eventList.insertItem(0, "Usunieto pracownika {jakie_imie} {jakie_nazwisko}, ".format(jakie_imie=wybrane_imie,jakie_nazwisko=wybrane_nazwisko)+str(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
########################################################################################################################################
except:
notification_win.label.setText("Wystapil problem podczas usuwania pracownika. Sprawdz czy pracownik zostal wybrany.")
notification_win.show()
cnx.rollback()
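# A robustness sketch (assumption, not in the original): pushButtonDeleteUserClicked
# and loginButtonClicked retry the database connection in unbounded loops, which
# the comments themselves flag as a problem. A bounded helper reusing the
# credentials hard-coded elsewhere in this file would avoid freezing the GUI:
import mysql.connector

def connect_db(retries=3):
    """Try to connect to main_db a few times; return the connection or None."""
    for _ in range(retries):
        try:
            return mysql.connector.connect(user='user', password='userpass',
                                           host='localhost', database='main_db')
        except mysql.connector.Error:
            continue
    return None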
class auth(QMainWindow): # OKNO LOGOWANIA DO APLIKACJI ###### PO POMYSLNEJ AUTORYZACJI POKAZUJE SIE GLOWNE OKNO PROGRAMU
def __init__(self):
QMainWindow.__init__(self)
loadUi('auth_gui.ui', self)
self.setWindowTitle("Logowanie do systemu monitoringu")
self.loginButton.clicked.connect(self.loginButtonClicked)
self.abortButton.clicked.connect(self.abortButtonClicked)
self.passwordLineEdit.setEchoMode(QtWidgets.QLineEdit.Password)
        ################################################################## FOR TESTING ##### delete these lines later # TODO
# self.loginLineEdit.setText("admin")
# self.passwordLineEdit.setText("admin")
self.threadpool = QThreadPool()
def loginButtonClicked(self):
worker = Worker()
self.threadpool.start(worker)
login = self.loginLineEdit.text()
password = self.passwordLineEdit.text()
        ################################################# APPLICATION LOGIN - credentials checked against the personel table
        result0 = None
        while result0 is None:  # keep retrying until a database connection is established
            try:
                # auth = input("Podaj haslo do bazy:\n")  # could be moved to "main" and run before the program starts
                cnx = mysql.connector.connect(user = 'user', password = 'userpass', host = 'localhost', database = 'main_db')
                result0 = cnx
                cursor = cnx.cursor()
                # print("...Connection established...")
            except:
                notification_win.label.setText("Blad polaczenia. Sprawdz czy serwer bazy danych jest uruchomiony.")
                notification_win.show()
                return  # abort this login attempt instead of spinning in an endless retry loop
try:
cursor.execute("SELECT zaszyfrowane_haslo FROM personel WHERE login LIKE \"{jaki_login}\"".format(jaki_login=login)) #Execute the Query
myresult = cursor.fetchall() # przeczytany hasz wlasciwego hasla # zakomentowac oba wiersze
myresult = myresult[0][0]
# # print(myresult) # kontrolnie, pokazanie HASZU hasla z bazy
# # print(encrypt_string(password))
if myresult==encrypt_string(password):
window.show()
auth_win.hide()
# print("Logowanie pomyslne.")
window.current_user = login
###################### #log #rejestr #zdarzenie ########################################################################################
cursor.execute("SELECT ID_pracownika FROM personel WHERE login LIKE \"{jaki_login}\"".format(jaki_login=window.current_user))
ID_pracownika = cursor.fetchall()[0][0]
# # print("Wyswietlanie ID pracownika na podstawie loginu...")
# # print(ID_pracownika)
query = ("INSERT INTO rejestr_zdarzen (ID_pracownika,rodzaj_zdarzenia,opis_zdarzenia) VALUES (%s, %s, %s)")
taxi = (ID_pracownika, "pomyslne logowanie", "")
cursor.execute(query, taxi)
cnx.commit()
window.eventList.insertItem(0, "pomyslne logowanie, "+str(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
########################################################################################################################################
except:
# print("Login attempt failed.")
notification_win.label.setText("Niepoprawny login lub hasło.")
notification_win.show()
###################### #log #rejestr #zdarzenie ########################################################################################
query = ("INSERT INTO rejestr_zdarzen (rodzaj_zdarzenia,opis_zdarzenia) VALUES (%s, %s)")
taxi = ("nieudana proba logowania", "")
cursor.execute(query, taxi)
cnx.commit()
window.eventList.insertItem(0, "nieudana proba logowania, "+str(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
########################################################################################################################################
def abortButtonClicked(self):
worker = Worker()
self.threadpool.start(worker)
auth_win.close()
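# A security sketch (assumption, not in the original): comparing password hashes
# with == leaks timing information; hmac.compare_digest performs a constant-time
# comparison. loginButtonClicked could use it in place of the equality check:
#
#     import hmac
#     if hmac.compare_digest(myresult, encrypt_string(password)):
#         ...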
class notification(QMainWindow): #
def __init__(self):
QMainWindow.__init__(self)
loadUi('notification.ui', self)
self.setWindowTitle("Informacja")
self.pushButtonOK.clicked.connect(self.pushButtonOKClicked)
self.threadpool = QThreadPool()
def pushButtonOKClicked(self):
worker = Worker()
self.threadpool.start(worker)
self.hide()
class python_to_arduino_msg(QMainWindow): #
def __init__(self):
QMainWindow.__init__(self)
loadUi('komunikat_zwrotny.ui', self)
self.setWindowTitle("Informacja zwrotna do układu pomiarowego")
self.pushButtonFilterEditPatient.clicked.connect(self.pushButtonFilterEditPatientClicked)
self.pushButtonSend.clicked.connect(self.pushButtonSendClicked)
self.msgComboBox.clear()
self.msgComboBox.addItem("1 - Zmień tryb transmisji na ciągły")
self.msgComboBox.addItem("2 - Zmień tryb transmisji na zdarzeniowy")
self.msgComboBox.addItem("3 - Przerwij wysyłanie pomiarów")
self.msgComboBox.addItem("4 - Wznów wysyłanie pomiarów")
self.wybrane_id_czujnika_pacjenta = 0
self.threadpool = QThreadPool()
def pushButtonFilterEditPatientClicked(self):
        # Filter the patient list
self.patientToEditComboBox.clear()
worker = Worker()
self.threadpool.start(worker)
# print("Wybor pacjentow... ")
seekToEdit = self.filterToEditLineEdit.text()
# print(seekToEdit)
try:
cursor.execute("SELECT pac.imie, pac.nazwisko, prz.ID_czujnika FROM pacjenci pac JOIN przydzial_czujnikow prz ON pac.ID_pacjenta=prz.ID_pacjenta WHERE pac.imie LIKE BINARY \'%{seek}%\' OR pac.nazwisko LIKE BINARY \'%{seek}%\' OR pac.ID_pacjenta LIKE BINARY \'%{seek}%\'".format(seek=seekToEdit))
            # remove the BINARY prefix from the LIKE clauses for case-insensitive matching
myresult = cursor.fetchall()
# # print("The length of \'myresult\' is: ", len(myresult)) # pokazuje ile rekordow ma zostac wykorzystanych na wykresie
pacjenci = []
for x in myresult:
pacjenci.append(str(x[0])+" "+str(x[1])+" czujnik: "+str(x[2]))
self.patientToEditComboBox.addItems(pacjenci)
###################################################################
except:
pass
def pushButtonSendClicked(self):
# print("Wysylanie wiadomosci... ")
worker = Worker()
self.threadpool.start(worker)
# seekHist = self.filterToEditLineEdit.text()
# # print(seekHist)
wybrany_komunikat = self.msgComboBox.currentText()
pelny_komunikat = wybrany_komunikat
try:
wybrany_komunikat = wybrany_komunikat.split()
            wybrane_id_komunikatu = int(wybrany_komunikat[0])
except:
pass
# print("Wybrane ID komunikatu: "+wybrane_id_komunikatu)
ser.close()
board = Arduino(port)
        ####################################### encode the message type on the Arduino output pins
        if wybrane_id_komunikatu==1:
            board.digital[6].write(1) # least significant of the 4 bits allocated to encode the message
        elif wybrane_id_komunikatu==2:
            board.digital[5].write(1)
        elif wybrane_id_komunikatu==3:
            board.digital[6].write(1)
            board.digital[5].write(1)
        elif wybrane_id_komunikatu==4:
            board.digital[4].write(1)
        ######################################## encode the sensor ID
wybrany_pacjent = self.patientToEditComboBox.currentText()
try:
wybrany_pacjent = wybrany_pacjent.split()
###
jaki_pacjent = wybrany_pacjent[0:2]
notification_win.label.setText("Wysłano komunikat nr "+str(pelny_komunikat)+"\ndo czujnika należącego do pacjenta "+(jaki_pacjent[0])+" "+(jaki_pacjent[1]))
notification_win.show()
###################### #log #rejestr #zdarzenie ########################################################################################
cursor.execute("SELECT ID_pracownika FROM personel WHERE login LIKE \"{jaki_login}\"".format(jaki_login=window.current_user))
ID_pracownika = cursor.fetchall()[0][0]
# # print("Wyswietlanie ID pracownika na podstawie loginu...")
# # print(ID_pracownika)
query = ("INSERT INTO rejestr_zdarzen (ID_pracownika,rodzaj_zdarzenia,opis_zdarzenia) VALUES (%s, %s, %s)")
taxi = (ID_pracownika, "Wysłano komunikat nr "+str(pelny_komunikat)+" do czujnika należącego do pacjenta "+(jaki_pacjent[0])+" "+(jaki_pacjent[1]), "")
cursor.execute(query, taxi)
cnx.commit()
window.eventList.insertItem(0, "Wysłano komunikat nr "+str(pelny_komunikat)+" do czujnika należącego do pacjenta "+(jaki_pacjent[0])+" "+(jaki_pacjent[1])+", "+str(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
########################################################################################################################################
###
            self.wybrane_id_czujnika_pacjenta = int(wybrany_pacjent[3])
except:
pass
        # sensor IDs 1-10 are encoded in binary on pins 11 (LSB), 10, 9 and 8 (MSB)
        for bit, pin in enumerate((11, 10, 9, 8)):
            if self.wybrane_id_czujnika_pacjenta & (1 << bit):
                board.digital[pin].write(1)
board.exit()
ser.open()
if __name__ == '__main__':
app=QApplication(sys.argv)
app.setStyle('Breeze')
window = main_window()
    # window.show()  # hidden on purpose: the main window is shown only after a successful login; uncomment to bypass the password
new_patient_window = new_patient() # stworzenie okna dodawania nowego pacjenta
edit_patient_window = edit_patient()
new_user_window = new_user()
edit_user_window = edit_user()
new_sensor_window = new_sensor()
edit_sensor_window = edit_sensor()
    # delete_confirm_window = delete_patient_confirm()  # unused; replaced (temporarily or permanently) by QMessageBox
auth_win = auth()
auth_win.show()
notification_win = notification()
assign_sensor_window = assign_sensor()
python_to_arduino_msg_win = python_to_arduino_msg()
# new_user_window = new_user()
sys.exit(app.exec_())
| 52.839207 | 308 | 0.556605 |
c749ac16a8cd30198397fdec33f935761ede12d9
| 492 |
py
|
Python
|
examples/tensorflow-yolo/yolo_skil_web_cam.py
|
pcrete/skil-python
|
672a1aa9e8af020c960ab9ee280cbb6b194afc3f
|
[
"Apache-2.0"
] | 23 |
2018-09-19T13:34:27.000Z
|
2022-02-14T09:49:35.000Z
|
examples/tensorflow-yolo/yolo_skil_web_cam.py
|
pcrete/skil-python
|
672a1aa9e8af020c960ab9ee280cbb6b194afc3f
|
[
"Apache-2.0"
] | 33 |
2018-10-18T07:58:05.000Z
|
2019-05-16T08:24:12.000Z
|
examples/tensorflow-yolo/yolo_skil_web_cam.py
|
pcrete/skil-python
|
672a1aa9e8af020c960ab9ee280cbb6b194afc3f
|
[
"Apache-2.0"
] | 11 |
2018-10-21T18:58:57.000Z
|
2022-02-14T09:49:36.000Z
|
import skil
import cv2
skil_server = skil.Skil()
model = skil.Model('yolo_v2.pb', name='yolo-tf', model_id='yolo-3493723')
deployment = skil.Deployment(skil_server, 'yolo')
service = model.deploy(deployment, input_names=[
'input'], output_names=['output'], scale=2)
cap = cv2.VideoCapture(0)
while True:
_, image = cap.read()
detection = service.detect_objects(image)
image = skil.utils.yolo.annotate_image(image, detection)
    cv2.imshow('yolo', image)
    # waitKey lets HighGUI refresh the window; press 'q' to quit
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
| 30.75 | 73 | 0.684959 |
409fbe183ea9ee7dc90f361e5e3982985cbfc0be
| 785 |
py
|
Python
|
app/main.py
|
openbikebox/websocket-client
|
50b61a70ffcff1acdc13ba69c017e671bd3f983f
|
[
"MIT"
] | null | null | null |
app/main.py
|
openbikebox/websocket-client
|
50b61a70ffcff1acdc13ba69c017e671bd3f983f
|
[
"MIT"
] | null | null | null |
app/main.py
|
openbikebox/websocket-client
|
50b61a70ffcff1acdc13ba69c017e671bd3f983f
|
[
"MIT"
] | null | null | null |
# encoding: utf-8
"""
openbikebox websocket-client
Copyright (c) 2021, binary butterfly GmbH
Use of this source code is governed by an MIT-style license that can be found in the LICENSE file.
"""
import asyncio
from .websocket import websocket
from .cardreader import cardreader
from .websocket_queue import websocket_queue
class Main:
def __init__(self):
asyncio.run(self.startup())
async def startup(self):
websocket_queue.startup()
websocket_task = asyncio.create_task(websocket.startup())
cardreader_task = asyncio.create_task(cardreader.startup())
        done, pending = await asyncio.wait(
            [websocket_task, cardreader_task],
            return_when=asyncio.FIRST_COMPLETED
        )
for task in pending:
task.cancel()
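# An entry-point sketch (assumption, not in the original): the module defines Main
# but never instantiates it; if this file is meant to be run directly, a typical
# guard would be:
#
#     if __name__ == '__main__':
#         Main()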
| 25.322581 | 98 | 0.690446 |
40aea581262d185a3e4389235cdc2a1715b0429e
| 342 |
py
|
Python
|
setup.py
|
raicheff/flask-sri
|
83e062b80e4a3c309906bf1dc8bdbb42e529576b
|
[
"MIT"
] | null | null | null |
setup.py
|
raicheff/flask-sri
|
83e062b80e4a3c309906bf1dc8bdbb42e529576b
|
[
"MIT"
] | null | null | null |
setup.py
|
raicheff/flask-sri
|
83e062b80e4a3c309906bf1dc8bdbb42e529576b
|
[
"MIT"
] | null | null | null |
#
# Flask-SRI
#
# Copyright (C) 2017 Boris Raicheff
# All rights reserved
#
from setuptools import setup
setup(
name='Flask-SRI',
version='0.2.0',
description='Flask-SRI',
author='Boris Raicheff',
author_email='[email protected]',
url='https://github.com/raicheff/flask-sri',
py_modules=('flask_sri',),
)
# EOF
| 14.25 | 48 | 0.646199 |
40af3fbbbd88e194fbd91699d3a5972a9cd8b7cf
| 1,617 |
py
|
Python
|
Interview Preparation Kits/Interview Preparation Kit/Search/Making Candies/making_candies.py
|
xuedong/hacker-rank
|
ce8a60f80c2c6935b427f9409d7e826ee0d26a89
|
[
"MIT"
] | 1 |
2021-02-22T17:37:45.000Z
|
2021-02-22T17:37:45.000Z
|
Interview Preparation Kits/Interview Preparation Kit/Search/Making Candies/making_candies.py
|
xuedong/hacker-rank
|
ce8a60f80c2c6935b427f9409d7e826ee0d26a89
|
[
"MIT"
] | null | null | null |
Interview Preparation Kits/Interview Preparation Kit/Search/Making Candies/making_candies.py
|
xuedong/hacker-rank
|
ce8a60f80c2c6935b427f9409d7e826ee0d26a89
|
[
"MIT"
] | null | null | null |
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the minimumPasses function below.
def minimumPasses(machines, workers, price, candies):
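    # Greedy strategy: while a new unit is unaffordable, run whole passes in one
    # step; once candies cover the price, buy units and split machines/workers as
    # evenly as possible (their product fixes the candies gained per pass), and
    # keep the best "stop investing and just produce" total in final_passes.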
current_machines = machines
current_workers = workers
current_candies = 0
current_passes = 0
final_passes = sys.maxsize
while current_candies < candies:
passes = (price - current_candies) // (current_machines * current_workers)
if passes <= 0:
increase_units = current_candies // price
current_candies %= price
total_units = current_machines + current_workers + increase_units
half_units = math.ceil(total_units/2)
if current_machines >= current_workers:
current_machines = max(half_units, current_machines)
current_workers = total_units - current_machines
else:
current_workers = max(half_units, current_workers)
current_machines = total_units - current_workers
passes = 1
current_candies += passes * current_machines * current_workers
current_passes += passes
final_passes = min(final_passes, current_passes + math.ceil((candies - current_candies) / (current_machines * current_workers)))
return min(final_passes, current_passes)
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
mwpn = input().split()
m = int(mwpn[0])
w = int(mwpn[1])
p = int(mwpn[2])
n = int(mwpn[3])
result = minimumPasses(m, w, p, n)
fptr.write(str(result) + '\n')
fptr.close()
| 29.4 | 136 | 0.644403 |
46ad0cfc32a53eab5cd77517fa87784ea15b9786
| 1,547 |
py
|
Python
|
Packs/EWS/Scripts/GetEWSFolder/GetEWSFolder.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/EWS/Scripts/GetEWSFolder/GetEWSFolder.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/EWS/Scripts/GetEWSFolder/GetEWSFolder.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
from typing import List, Dict
from CommonServerPython import *
def convert_mail_to_json(item, folder):
return {
'subject': item.get('subject', ''),
'textBody': item.get('textBody', ''),
'body': item.get('body', ''),
'folder': folder
}
def main():
folders_paths_str = demisto.args()['foldersPaths']
folders_paths_list = [folder.strip() for folder in folders_paths_str.split(',')]
path_to_mails = {folder: [] for folder in folders_paths_list} # type: Dict[str, List[str]]
for folder in folders_paths_list:
res = demisto.executeCommand('ews-get-items-from-folder', {
'folder-path': folder,
'limit': demisto.args().get('limit'),
'target-mailbox': demisto.args().get('targetMailbox'),
'is-public': 'False' if demisto.args().get('isPublic') == 'false' else 'True'
})
if is_error(res):
return_error(get_error(res))
items = res[0]['Contents']
if isinstance(items, str) and items == 'There is no output results':
mails_at_folder = []
else:
mails_at_folder = [convert_mail_to_json(i, folder) for i in items]
path_to_mails[folder] = mails_at_folder
mails_from_all_folders_list = [mail_json for folder_mails in path_to_mails.values() for mail_json in folder_mails]
return fileResult("all_mails.json", json.dumps(mails_from_all_folders_list))
if __name__ == "__builtin__" or __name__ == "builtins":
entry = main()
demisto.results(entry)
| 35.976744 | 118 | 0.634131 |
9ef5e410e2ad41ed19824f3d5a0e88f29628bf16
| 96 |
py
|
Python
|
public_identifiers/__init__.py
|
strange-dv/django-public-identifiers
|
1d3f7752b16f48d1004328b5dad7acf6e83a9703
|
[
"MIT"
] | null | null | null |
public_identifiers/__init__.py
|
strange-dv/django-public-identifiers
|
1d3f7752b16f48d1004328b5dad7acf6e83a9703
|
[
"MIT"
] | null | null | null |
public_identifiers/__init__.py
|
strange-dv/django-public-identifiers
|
1d3f7752b16f48d1004328b5dad7acf6e83a9703
|
[
"MIT"
] | null | null | null |
from .fields import ISBNField, DOIField, ISSNField
__all__ = ['ISBNField', 'DOIField', 'ISSNField']
| 19.2 | 50 | 0.78125 |
731e863e9c88a40cadbbd6e080e4a25655161d33
| 28,260 |
py
|
Python
|
src/sick_ldmrs/dataproc.py
|
achambers16/sick_ldmrs
|
a65f2239f474263a16b382bae6fc4cb5e5505ba5
|
[
"BSD-2-Clause"
] | 1 |
2017-12-29T07:58:31.000Z
|
2017-12-29T07:58:31.000Z
|
src/sick_ldmrs/dataproc.py
|
achambers16/sick_ldmrs
|
a65f2239f474263a16b382bae6fc4cb5e5505ba5
|
[
"BSD-2-Clause"
] | null | null | null |
src/sick_ldmrs/dataproc.py
|
achambers16/sick_ldmrs
|
a65f2239f474263a16b382bae6fc4cb5e5505ba5
|
[
"BSD-2-Clause"
] | null | null | null |
#* Software License Agreement (BSD License)
#*
#* Copyright (c) 2010, CSIRO Autonomous Systems Laboratory
#* All rights reserved.
#*
#* Redistribution and use in source and binary forms, with or without
#* modification, are permitted provided that the following conditions
#* are met:
#*
#* * Redistributions of source code must retain the above copyright
#* notice, this list of conditions and the following disclaimer.
#* * Redistributions in binary form must reproduce the above
#* copyright notice, this list of conditions and the following
#* disclaimer in the documentation and/or other materials provided
#* with the distribution.
#* * Neither the name of the CSIRO nor the names of its
#* contributors may be used to endorse or promote products derived
#* from this software without specific prior written permission.
#*
#* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
#* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
#* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
#* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
#* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
#* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
#* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
#* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
#* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
#* POSSIBILITY OF SUCH DAMAGE.
#***********************************************************
# Author: Fred Pauling
#$Id$
from struct import *
import numpy as np
import roslib
roslib.load_manifest('sick_ldmrs')
import rospy
from sensor_msgs.msg import *
import utils as util
from rospy.numpy_msg import numpy_msg
from params import LDMRSParams
class ProcessLDMRSData:
"""
Class for processing and packaging Sick LD-MRS Laser Scan data messages into ROS messages.
Converts raw scan data into ROS PointCloud2 and LaserScan messages and publishes them on request.
The primary method you need to call is process_msg. This method processes the scan data
and publishes LaserScan and/or PointCloud2 messages of the (transformed) data.
You will also need to call set_timestamp_delta() to set the difference between the
current time and the LD-MRS on-board time (which you can extract from the message header)
"""
header_format = '<HHHQQHhhHhhhhhhH' # format string for data header
header_struct = Struct(header_format) # precompile for speed
# setup structure for pulling out header components
header_keys = ['ScanNumber', 'ScannerStatus',
'SyncPhaseOffset', 'ScanStartTimeNTP',
'ScanEndTimeNTP', 'AngleTicksPerRot',
'StartAngle', 'EndAngle',
'NumScanPoints', 'Reserved1',
'Reserved2', 'Reserved3',
'Reserved4', 'Reserved5',
'Reserved6', 'Reserved7']
num_header_bytes = 44 # 44 bytes for the header
num_point_bytes = 10 # 10 bytes for each point
# lookup table for beam elevation trig factors
# taking centre of beam as elevation angle (actual limits are [-1.6, -0.8, 0, 0.8, 1.6])
elev_angles = np.array([-1.2, -0.4, 0.4, 1.2]) * ((2*np.pi)/360.0) # rad
v_sin_lut = np.sin(elev_angles)
v_cos_lut = np.cos(elev_angles)
def __init__(self, topics, params):
""" Constructor
@param topics: dictionary mapping topic names to publisher handles
valid topic names are {"cloud", "scan0", "scan1", "scan2", "scan3"}
@type topics: dict {topic_name:publisher_handle}
@param params: dictionary mapping parameter names to values.
Where applicable, the parameters must be in device units (e.g. ticks)
@type params: dict {ros_parameter_string:value}
"""
self.params = params
self.topics = topics
# LaserScan messages (numbered 0-3, 0 is the lowest scan plane)
# rewire for numpy serialization on publish
self.scans = [numpy_msg(LaserScan)(),
numpy_msg(LaserScan)(),
numpy_msg(LaserScan)(),
numpy_msg(LaserScan)()]
self._init_scans()
# PointCloud2 message
# rewire for numpy serialization on publish
self.point_cloud = numpy_msg(PointCloud2)()
self._init_point_cloud()
# put variables into the namespace to prevent
# attribute exceptions when the class is abused
self.header = None
self.x = None
self.y = None
self.z = None
self.echo = None
self.layer = None
self.flags = None
self.echo_w = None
self.h_angle = None
self.rads_per_tick = None
self.pc_data = None
self.last_start_time = None
self.rads_per_tick = None
self.seq_num = -1
self.time_delta = rospy.Duration()
self.n_points = 0
# timestamp smoothing
self.smoothtime = None
self.smoothtime_prev = None
self.recv_time_prev = None
self.known_delta_t = rospy.Duration(256.0/self.params['scan_frequency'])
self.time_smoothing_factor = self.params['time_smoothing_factor']
self.time_error_threshold = self.params['time_error_threshold']
self.total_err = 0.0 #integrate errors
self.header = {}.fromkeys(self.header_keys)
def process_msg(self, msg, recv_time):
""" Process an incoming data message. Convert to (and publish)
PointCloud2 and LaserScan ROS messages depending
on whether the associated topics
('cloud', 'scan0', 'scan1', 'scan2', 'scan3') resp.
are subscribed.
@param msg: the scan data message from the LD-MRS to be processed
@type msg: read-only byte buffer (e.g. a python string)
"""
# smooth the timestamp using the expected rate
self.smooth_timestamp(recv_time)
self.msg = msg
subscribers = self.num_subscribers()
# any subscribers? if not just return, don't waste cpu cycles
if any([n > 0 for n in subscribers.itervalues()]):
self.unpack_data(msg)
# is anyone subscribed to the cloud topic?
if subscribers["cloud"]:
self.make_point_cloud()
self.publish_point_cloud()
# is anyone subscribed to a scan topic?
if any(["scan" in topic and num_subs > 0 for topic, num_subs in subscribers.iteritems()]):
self.make_scans()
self.publish_scans()
return None
def num_subscribers(self):
""" Get the number of subscribers for each ROS topic defined in self.topics
"""
subscribers = {}
for topic, handle in self.topics.iteritems():
subscribers[topic] = handle.get_num_connections()
return subscribers
def compute_tick_freq(self):
""" Compute the tick frequency from the start/end angles and times.
This is typically within 1% of the nominal values.
Note: we are using the device supplied tick resolution of 1/32nd degree
@return: tickfrequency - a floating point number (ticks per second)
"""
delta_t = (self.header['ScanEndTimeNTP'] - self.header['ScanStartTimeNTP'])
delta_ticks = self.header['StartAngle'] - self.header['EndAngle']
tick_freq = (delta_ticks << 32)/ float(delta_t)
return int(tick_freq)
def set_timestamp_delta(self, time_delta):
""" Set the time delta to apply to the LD-MRS timestamp to bring
it up to current ROS time.
@param time_delta: the time delta to be applied to the timestamps from the LD-MRS
@type time_delta: a rostime.Duration object
"""
self.time_delta = time_delta
def smooth_timestamp(self, recv_time):
""" Smooth the timestamp to track the expected scan rate.
        Parameter time_smoothing_factor controls how strongly the smoothed
        time tracks the nominal scan period rather than the raw receive time.
        Small errors between rostime and smoothed time are corrected by
        applying a correction weighted by (1 - time_smoothing_factor).
        Large errors above the time_error_threshold are corrected to
        recv_time by a step adjustment.
        @param recv_time: ros timestamp when the message was received
        @type recv_time: rostime.Time object
"""
if not self.smoothtime_prev:
# initialize to recv time
self.smoothtime = recv_time
else:
self.smoothtime = self.smoothtime_prev + self.known_delta_t
err = (recv_time - self.smoothtime).to_sec()
if self.time_smoothing_factor > 0 and abs(err) < self.time_error_threshold:
correction = rospy.Duration(err * (1-self.time_smoothing_factor))
self.smoothtime += correction
else:
# error too high, or smoothing disabled - set smoothtime to last timestamp
self.smoothtime = recv_time
#print 'delta_smoothtime: %f, err: %f'%((self.smoothtime - self.smoothtime_prev).to_sec(), err)
self.smoothtime_prev = self.smoothtime
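        # Worked example: with known_delta_t = 0.08 s (12.5 Hz) and
        # time_smoothing_factor = 0.9, a message arriving 5 ms late nudges
        # smoothtime forward by 5 ms * (1 - 0.9) = 0.5 ms; an arrival later
        # than time_error_threshold snaps smoothtime straight to recv_time.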
def unpack_data(self, msg):
""" Unpack the data from an LD-MRS scan data message
into instance attributes to be picked up by make_point_cloud and/or
make_scan.
@param msg: a binary string in little-endian format encoding the
message
@type msg: read-only byte buffer (e.g. a python string)
"""
# increment ROS message header sequence number for this scan
# we are storing these internally rather than using the device supplied sequence number
# since the device seq number is only uint16 and will quickly roll over
#whereas the ROS message seq header field is uint32
self.seq_num += 1
# Parse the scan header into a dictionary
header_tuple = self.header_struct.unpack_from(msg)
for index, value in enumerate(header_tuple):
self.header[self.header_keys[index]] = value
self.rads_per_tick = (2.0 * np.pi) / self.header['AngleTicksPerRot']
self.scan_start_time = self.smoothtime #util.NTP64_to_ROStime(self.header['ScanStartTimeNTP']) + self.time_delta
# check that the input string has the correct number of bytes
self.n_points = self.header['NumScanPoints']
# Check we have enough bytes in the buffer
n_data_bytes = len(msg) - self.num_header_bytes
n_points_in_buffer = int(n_data_bytes/self.num_point_bytes)
if n_points_in_buffer != self.n_points:
            rospy.logwarn("Number of points indicated by header (%d) "
                          "does not match number of points in buffer (%d)",
                          self.n_points, n_points_in_buffer)
self.n_points = n_points_in_buffer
# tick frequency (Hz*32)
# Observed to vary by 1-2 percent from nominal frequency
self.tick_freq = self.compute_tick_freq()
        uint16le = np.dtype('<u2')  # force little-endian uint16
        if self.n_points == 0:
            # No data to unpack
            self.point_data = np.array([], dtype=uint16le)
        else:
            # View the point data section of the input buffer as uint16
            self.point_data = np.frombuffer(msg, dtype=uint16le,
                count=self.n_points * self.num_point_bytes // 2,  # //2 accounts for 16bit view
                offset=self.num_header_bytes)
# reshape array as 2D array of uint16 (n rows * num_point_bytes/2 cols)
self.point_data = np.reshape(self.point_data, [self.n_points, -1])
# now array has format:
# Name: Layer/Echo/Flags | H Angle | Rdist | Echo Width | Reserved |
# Column: 0 | 1 | 2 | 3 | 4 |
# Pull out data as 16bit fields using slices and views
self.h_angle_ticks = self.point_data[:, 1].view(np.int16)
# adjust the start time to account for the tick shift
# compute number of scanner ticks since start angle tick for each sample
self.ticknum = -(self.h_angle_ticks - self.header['StartAngle'])
# radial distance to each point (in metres)
self.r_dist = (self.point_data[:, 2]/100.0).astype(np.float32)
# echo width (in cm!)
self.echo_w = self.point_data[:, 3].copy()
# Pull out echo layer and flags fields from echo_layer_flags
# need to copy so we can view as two byte arrays
layer_echo_flags = self.point_data.view(dtype=np.uint8) # view as 2D byte array
self.layer = layer_echo_flags[:,0].copy()
self.layer &= np.array([3], dtype=np.uint8) # extract bits 0-1
self.echo = layer_echo_flags[:,0].copy()
self.echo = (self.echo & np.array([48], dtype=np.uint8)) >> 4 # extract bits 4,5 and shift
# Valid Flags bits are 0,1,3 -- want to move bit 3 to bit 2
# so we can put layer,echo and flags into one byte for point cloud later
self.flags = layer_echo_flags[:,1].copy()
self.flags |= ((self.flags & np.array([8], dtype=np.uint16)) >> 1) # copy bit 3 to bit 2
self.flags &= np.array([7], dtype=np.uint8) # mask off bit 3 keeping bits 0-2
def publish_point_cloud(self):
""" Publish the finished point cloud to the ROS network
topic is /<node_name>/cloud
"""
        handle = self.topics["cloud"]
        if self.point_cloud is not None:
            handle.publish(self.point_cloud)
        else:
            rospy.logerr("Trying to publish empty point cloud to topic %s" % handle.name)
def publish_scans(self):
""" Publish scan topics on the ROS network.
Publishes four topics (one for each scan plane of the LD-MRS):
/<node_name>/scan0 -- the lowest scan plane
/<node_name>/scan1
/<node_name>/scan2
/<node_name>/scan3 -- the highest scan plane
"""
for i, scan in enumerate(self.scans):
scan_id = "scan" + str(i)
handle = self.topics[scan_id]
if scan is not None and handle is not None:
handle.publish(scan)
else:
                rospy.logerr("Trying to publish empty scan to topic %s" % scan_id)
def make_point_cloud(self):
"""Construct a point cloud from previously unpacked data (see unpack_data())
The finished point cloud is stored as the instance variable 'point_cloud'
Note you must call unpack_data before invoking this method
"""
        if self.n_points == 0:
# No points
self.pc_data = ""
else:
# compute x,y,z coordinates in metres
h_angle_rads = self.h_angle_ticks * self.rads_per_tick
            v_sines = self.v_sin_lut[self.layer]  # lookup sines for elevation angle
v_cosines = self.v_cos_lut[self.layer] # lookup cosines for elevation angle
# x is in direction of travel
# z is up
# y is left
# note that the scan direction is clockwise from y-axis toward x-axis
# this is the reverse of the expected scan direction for this coordinate system.
# We account for this explicitly in the LaserScan messages (see _fill_laser_scans())
self.x = (v_cosines * (np.cos(h_angle_rads) * self.r_dist)).astype(np.float32)
self.y = (v_cosines * (np.sin(h_angle_rads) * self.r_dist)).astype(np.float32)
self.z = (v_sines * self.r_dist).astype(np.float32)
# store delta from start time
self.time_deltas = (self.ticknum * 1.0/self.tick_freq).astype(np.float32)
# pack layer/echo/flags bytes into a single byte field
# Bit: 0,1, | 2,3 | 4,5,6 |
# Meaning: Layer | Echo | Flags |
self.layer_echo_flags = self.layer | (self.echo << 2) | (self.flags << 4)
# concatenate the numpy arrays in the correct order for the PointCloud2 message
# need to pack as 2D array, then flatten since fields have varying byte widths
data = np.hstack((self.x.view(np.uint8).T.reshape(-1, 4),
self.y.view(np.uint8).T.reshape(-1, 4),
self.z.view(np.uint8).T.reshape(-1, 4),
self.time_deltas.view(np.uint8).T.reshape(-1, 4),
self.echo_w.view(np.uint8).T.reshape(-1, 2),
self.layer_echo_flags.reshape(-1, 1))) # already uint8
data = data.reshape(-1) # 1D view with points correctly aligned
# convert to byte string for serialization and transmission
# The serialize_numpy method in _PointCloud2.py *should* (but doesn't)
# take care of this when the message is published;
# i.e. we should be able to leave this as a numpy array here.
# This is (probably) an oversight by the developer in this instance
self.pc_data = data.tostring()
# finished computing data, now fill out the fields in the message ready for transmission
self._fill_point_cloud()
def make_scans(self):
""" Generate laser scan messages from previously unpacked data
the ROS parameter value of 'use_first_echo' in the ROS parameter
server determines if first or last echo is used
"""
        if self.n_points == 0:
            # no points this scan: use empty range arrays for all four layers
            self.scan_data = np.zeros([4, 0], dtype=np.float32)
else:
# number of points to allocate per scan message
# (depends on angular resolution and scan angle)
self.npoints_scan = abs((self.header['StartAngle'] - self.header['EndAngle'])/self.ticknum2ind)
# Now get indices of first and last echos...
# this is non-trivial since:
# 1. each point has 0-3 echoes (no guarantee about order is given)
# 2. points are interlaced by scan plane
# ... so we need to lexical sort by: echo, then tick number, then layer
# to get data in the right order.
# Next we need to compute the indices for the
# first/last echo for each sample using index differencing
# do lexical sort to preserve order of previous sorts.
# The echo sort may be redundant but SICK gives no guarantees on how
# the echoes are organized within the scan
sort_inds = np.lexsort((self.echo, self.ticknum, self.layer))
# find where h_angle_ticks changes
ind_diff = np.diff(self.h_angle_ticks[sort_inds])
breaks = ind_diff.nonzero()[0] # [0] due to singleton tuple return from nonzero()
if self.params[LDMRSParams.use_first_echo]:
# get indices of first echoes
# breaks +1 indexes the first echo of each sample
# prepend 0 since diff result has length n-1 and
# first sample is always the first echo
inds = np.hstack((np.array([0]), breaks + 1)) # sorted indices of first echoes
else:
# get indices of last echoes
# breaks indexes the last echo
# append last index since diff result has length n-1 and
# last sample is always the last echo
last_ind = np.array([self.r_dist.size-1])
inds = np.hstack((breaks, last_ind)) #sorted indices of last echoes
# back out to unsorted inds so we can filter the required data
inds = sort_inds[inds]
#select first/last echo entries (elements remain sorted by ticknum and layer)
layers = self.layer[inds]
ranges = self.r_dist[inds]
ticks = self.ticknum[inds]
# Finally we need to find the layer breaks and store each layer separately
# in a zero padded array of the correct size.
# find the indices of the layer breaks
layer_breaks = np.diff(layers).nonzero()[0]
#index of first entry for each layer
firsts = np.hstack((np.array([0]), layer_breaks + 1))
#index of last entry for each layer
lasts = np.hstack((layer_breaks, np.array([ranges.size -1])))
# !!! NOTE: When scan frequency is 50 Hz this shifts the scan clockwise by 1/16th degree
# a better solution would be to move the start angle and start time correspondingly
            tick_inds = np.round(((ticks - self.tick_ind_adjust)/self.ticknum2ind)).astype(np.int16)  # round to nearest scan index
# slot ranges into their correct location within the scan array
# and separate out each layer using the layer breaks computed earlier
self.scan_data = np.zeros((4, self.npoints_scan), dtype = np.float32)
for i in range(0, 4):
self.scan_data[i, tick_inds[firsts[i]:(lasts[i]+1)]] = ranges[firsts[i]:(lasts[i]+1)]
        # finished marshalling the range data, now it's time to fill in the details
self._fill_scans()
#--------------------------------------------------------------------------
# Private methods
def _init_point_cloud(self):
""" Set up the point cloud class as much as possible in advance
"""
pc = self.point_cloud
# set frame id (for the cloud topic this is just the frame_id_base)
pc.header.frame_id = self.params[LDMRSParams.frame_id_prefix]
# set up point fields
# there are 6 fields:
# x,y,z,timedelta,echowidth,layerechoflags
fields = [['x', PointField.FLOAT32, 0],
['y', PointField.FLOAT32, 4],
['z', PointField.FLOAT32, 8],
['timedelta', PointField.FLOAT32, 12],
['echowidth', PointField.UINT16, 16],
['layerechoflags', PointField.UINT8, 18]]
# set up PointFields that describe the layout of the data
# blob in the message
point_fields = []
for field in fields:
pf = PointField()
pf.name = field[0]
pf.datatype = field[1]
pf.offset = field[2]
pf.count = 1
point_fields.append(pf)
pc.fields = point_fields
# set up other attributes
pc.height = 1 # data is 1D
pc.is_dense = False # unordered points
pc.point_step = 19 # bytes per point
# endianness check and set
        if pack("h", 1) == "\000\001":  # pack comes from 'from struct import *'
pc.is_bigendian = True
else:
pc.is_bigendian = False
def _fill_point_cloud(self):
""" Pack the point cloud data into the PointCloud2 message """
pc = self.point_cloud
# ------------------Header----------------------------
pc.header.stamp = self.smoothtime
pc.header.seq = self.seq_num
# ----------------- Other ----------------------------
num_points = len(self.x)
pc.width = num_points
pc.row_step = len(self.pc_data) # number of bytes of data
#------------------- Data -------------------
pc.data = self.pc_data
def _init_scans(self):
""" Initial set up for the Scan messages
"""
# conversion factors from tick num to scan index used in make_scans
tick_freq = self.params[LDMRSParams.scan_frequency]
self.ticknum2ind = tick_freq/800 # 4, 8, 16 which corresponds to 1/8, 1/4, and 1/2 degree for 12.5, 25, 50 Hz
# convert ticks to indices with a coarser resolution base
# to conserve space in the scan message. Original base is 1/32 degree.
# Otherwise there would be a lot of zero padding in between points
# and thus extra b/w overhead in the message
# angle_ticks has a 2 tick offset when scan frequency is 50Hz
# need to shift ticks down by 2 so we can divide ticks to get scan indices later
if self.params['scan_frequency'] == 12800: # 50Hz
self.tick_ind_adjust = 2
time_per_tick = 1.0/(50.0 * 11520)
self.start_time_adjust = rospy.Duration(2 * time_per_tick)
else:
self.tick_ind_adjust = 0
self.start_time_adjust = rospy.Duration(0)
frame_id_prefix = self.params[LDMRSParams.frame_id_prefix]
for i, scan in enumerate(self.scans):
scan.header.frame_id = frame_id_prefix + str(i) # append the scan plane number to the frame id
scan.intensities = np.array([]) # empty (must be numpy array for serialization)
scan.range_min = 0.01 # using 1cm so that zero range points are not displayed in RVIZ
scan.range_max = 10000 # No max range specified on spec sheet
def _fill_scans(self):
"""Add scan data to the scans and set up per-message parameter values
"""
# adjust start time and start angle if using 50 Hz to compensate
# for 2 sample offset of first sample
#tick2rad = 2*np.pi/11520
#start_angle_adjust = self.tick_ind_adjust*tick2rad
#start_time_adjust = self.tick_ind_adjust/12800.0
#start_time = self.scan_start_time + rospy.Duration(start_time_adjust)
# compute the time since the last scan started
if self.smoothtime_prev is None:
time_between_scans = 0.0
else:
time_between_scans = self.smoothtime - self.smoothtime_prev - self.start_time_adjust
time_between_scans = time_between_scans.secs + time_between_scans.nsecs/1e9
# Set up angles
# NOTE!: the scanner rotates clockwise from above so
# angle_min > angle_max and we use a NEGATIVE ANGLE INCREMENT
# this works in RVIZ so we are going with it
# the problem stems from the poorly named angle_min and angle_max fields in LaserScan.
# These should probably have been named angle_start and angle_end (or similar) to prevent
# assumptions about which one is greater, and hence assumptions about
# the sign of the angle_increment.
angle_min = (self.header['StartAngle'] - self.tick_ind_adjust)*self.rads_per_tick # rad
angle_max = (self.header['EndAngle'] - self.tick_ind_adjust)*self.rads_per_tick # rad
angle_increment = -self.ticknum2ind*self.rads_per_tick # angle between samples (rad)
# Times are handled as you would expect (time increment is +ve):
# time for each point = header.stamp + time_increment*<index>
time_increment = float(self.ticknum2ind)/self.tick_freq #time between samples
# set up each scan (only the range data differs)
for i, scan in enumerate(self.scans):
scan.header.stamp = self.scan_start_time + self.start_time_adjust
scan.header.seq = self.seq_num
scan.scan_time = time_between_scans
scan.angle_min = angle_min
scan.angle_max = angle_max
scan.angle_increment = angle_increment
scan.time_increment = time_increment
scan.ranges = self.scan_data[i, :]
# store the current time to generate the next scan.scan_time
self.last_start_time = self.scan_start_time
#EOF
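# ---------------------------------------------------------------------------
# Minimal usage sketch, assuming a running ROS master. `read_scan_packet` is a
# hypothetical stand-in for the socket code that reads raw LD-MRS frames, and
# the parameter keys/values below are assumptions mirroring what the class
# reads (scan_frequency is in device units, Hz * 256, so 3200 is 12.5 Hz).
#
# rospy.init_node('ldmrs_dataproc_demo')
# topics = {'cloud': rospy.Publisher('cloud', numpy_msg(PointCloud2))}
# for i in range(4):
#     topics['scan%d' % i] = rospy.Publisher('scan%d' % i, numpy_msg(LaserScan))
# params = {'scan_frequency': 3200,
#           'time_smoothing_factor': 0.97,
#           'time_error_threshold': 1.0,
#           LDMRSParams.frame_id_prefix: 'ldmrs',   # assumed key/value
#           LDMRSParams.use_first_echo: True,       # assumed key/value
#           LDMRSParams.scan_frequency: 3200}       # assumed key/value
# proc = ProcessLDMRSData(topics, params)
# while not rospy.is_shutdown():
#     proc.process_msg(read_scan_packet(), rospy.get_rostime())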
| 46.865672 | 120 | 0.611253 |
b42a78998e0e46cd77f57f6ed86bf86773992a06
| 3,665 |
py
|
Python
|
Packs/CommonScripts/Scripts/LookupCSV/LookupCSV.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/CommonScripts/Scripts/LookupCSV/LookupCSV.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/CommonScripts/Scripts/LookupCSV/LookupCSV.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
"""
Given a CSV file in the War Room by entry ID, searches based on column and value.
If the column is not present, simply parse the CSV into a list of lists or list of dicts (if header row supplied).
"""
from CommonServerPython import *
import csv
def search_dicts(k, v, data):
"""
Search a list of dicts by key
"""
match = []
for row in data:
if k in row:
if v == row[k]:
match.append(row)
if len(match) == 1:
        # If we only get one result: return it as a single dict
return match[0]
else:
return match
def search_lists(k, v, data):
"""
Search a list of lists by index
"""
match = []
k = int(k)
for row in data:
row_values = list(row.values())
if row_values[k] == v:
match.append(row)
if len(match) == 1:
# If we only get one result: return just it.
return match[0]
else:
return match
def main():
d_args = demisto.args()
entry_id = d_args['entryID'] if 'entryID' in d_args else None
header_row = d_args['header_row'] if 'header_row' in d_args else None
search_column = d_args['column'] if 'column' in d_args else None
search_value: str = d_args['value'] if 'value' in d_args else None
add_row = d_args['add_header_row'] if 'add_header_row' in d_args else None
res = demisto.getFilePath(entry_id)
if not res:
return_error("Entry {} not found".format(entry_id))
file_path = res['path']
file_name = res['name']
if not file_name.lower().endswith('.csv'):
return_error(
'"{}" is not in csv format. Please ensure the file is in correct format and has a ".csv" extension'.format(
file_name))
csv_data: list = []
with open(file_path, mode='r') as csv_file:
if header_row:
csv_reader = csv.DictReader(csv_file)
for line in csv_reader:
csv_data.append(line)
elif add_row:
headers = add_row.split(',')
csv_reader = csv.DictReader(csv_file, fieldnames=headers)
for line in csv_reader:
csv_data.append(line)
if len(line) != len(add_row.split(",")):
return_error(
"Added row via add_header_row has invalid length.")
else:
csv_reader = csv.DictReader(csv_file, fieldnames=[])
for line in csv_reader:
line_values = list(line.values())
if line_values:
csv_data.append(line_values[0])
# If we're searching the CSV
if search_column:
if header_row:
csv_data = search_dicts(search_column, search_value, csv_data)
else:
            # Columns are presented 1-based to the user; convert to the 0-based list index
try:
search_column = int(search_column) - 1
except ValueError:
return_error(
"CSV column spec must be integer if header_row not supplied (got {})".format(search_column))
csv_data = search_lists(search_column, search_value, csv_data)
output = {
'LookupCSV': {
'FoundResult': True if csv_data and search_column else False,
'Result': csv_data if csv_data else None,
'SearchValue': '' if not search_value else search_value
}
}
demisto.results({
"Type": entryTypes["note"],
"ContentsFormat": formats["json"],
"Contents": csv_data,
"EntryContext": output
})
if __name__ in ('__builtin__', 'builtins'):
main()
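# Quick illustration of the two search helpers, independent of the War Room
# file plumbing. With a header row the CSV is parsed into dicts, so
# search_dicts matches by column name:
#
#     >>> data = [{"name": "a", "score": "1"}, {"name": "b", "score": "2"}]
#     >>> search_dicts("name", "b", data)
#     {'name': 'b', 'score': '2'}
#
# A single hit is returned as the dict itself; multiple hits come back as a
# list of dicts.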
| 30.289256 | 119 | 0.580082 |
b44ca279fa516e51328f360799be9e337a10538b
| 559 |
py
|
Python
|
tag_1/p_2_2_groesste_zahl_von_dreien.py
|
techrabbit58/uebung_informatik_vorkurs
|
e99312ae66ccccd6bfe45bfd3c3f43c01690659c
|
[
"Unlicense"
] | null | null | null |
tag_1/p_2_2_groesste_zahl_von_dreien.py
|
techrabbit58/uebung_informatik_vorkurs
|
e99312ae66ccccd6bfe45bfd3c3f43c01690659c
|
[
"Unlicense"
] | null | null | null |
tag_1/p_2_2_groesste_zahl_von_dreien.py
|
techrabbit58/uebung_informatik_vorkurs
|
e99312ae66ccccd6bfe45bfd3c3f43c01690659c
|
[
"Unlicense"
] | null | null | null |
"""
2 If statements (Day 1)
2.2 Determine the largest of three predefined numbers and print that number.
"""
from hilfprogramme import float_number_input
if __name__ == '__main__':
    print('Determining the largest of three numbers a, b and c.')
a = float_number_input('a? ')
b = float_number_input('b? ')
c = float_number_input('c? ')
print('a =', a, '| b =', b, '| c =', c)
if a >= b and a >= c:
answer = a
elif b >= a and b >= c:
answer = b
else:
answer = c
    print('The largest number is', answer)
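    # Note: the built-in max() computes the same result in one step:
    # answer = max(a, b, c)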
| 27.95 | 83 | 0.588551 |
d2f4ba9b49c28a3839f1b5fc9dc48ed29ef0dc0a
| 144 |
py
|
Python
|
pages/themes/beginners/logicalExpressionsAndConditionalStatements/labs/while_loop.py
|
ProgressBG-Python-Course/ProgressBG-Python
|
6429833696c2c50d9f902f62cc3a65ca62659c69
|
[
"MIT"
] | null | null | null |
pages/themes/beginners/logicalExpressionsAndConditionalStatements/labs/while_loop.py
|
ProgressBG-Python-Course/ProgressBG-Python
|
6429833696c2c50d9f902f62cc3a65ca62659c69
|
[
"MIT"
] | null | null | null |
pages/themes/beginners/logicalExpressionsAndConditionalStatements/labs/while_loop.py
|
ProgressBG-Python-Course/ProgressBG-Python
|
6429833696c2c50d9f902f62cc3a65ca62659c69
|
[
"MIT"
] | null | null | null |
while True:
    user_number = int(input("Enter a positive number: "))
    if user_number > 0:
        break
print("Do something with the number!")
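# A more defensive variant of the same loop (a sketch): it also survives
# non-numeric input instead of raising ValueError.
while True:
    try:
        user_number = int(input("Enter a positive number: "))
    except ValueError:
        continue  # not a number at all; ask again
    if user_number > 0:
        break
print("Do something with the number!")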
| 16 | 52 | 0.611111 |
7de89e19e2188e37b9a811f275d48bad19a0a257
| 540 |
py
|
Python
|
app/views/apis/parser.py
|
luoweis/xskAdmin
|
d92d0297102140ce5241a0c9f3a80f520bee96a8
|
[
"MIT"
] | null | null | null |
app/views/apis/parser.py
|
luoweis/xskAdmin
|
d92d0297102140ce5241a0c9f3a80f520bee96a8
|
[
"MIT"
] | null | null | null |
app/views/apis/parser.py
|
luoweis/xskAdmin
|
d92d0297102140ce5241a0c9f3a80f520bee96a8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding=utf-8 -*-
from flask_restful.reqparse import RequestParser
parser = RequestParser()
'''
The default argument type is str.
Empty values (None) are allowed by default;
with required=True, empty values are rejected.
'''
parser.add_argument(
"id",
type=int,
required=True,
    # action="append",  # allow multiple values; the result is a list
    help="The value must not be empty and only numeric data is accepted"
)
webhook_parser = RequestParser()
webhook_parser.add_argument(
"password",
type=str,
required=True,
    # action="append",  # allow multiple values; the result is a list
    help="Password must not be empty"
)
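# Minimal usage sketch, assuming flask_restful's standard Resource API;
# `ItemApi` is a hypothetical resource name, not part of this module.
from flask_restful import Resource
class ItemApi(Resource):
    def post(self):
        # parse_args() aborts the request with HTTP 400 and the help text
        # defined above when validation fails
        args = parser.parse_args()
        return {"id": args["id"]}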
| 18 | 48 | 0.624074 |
6fce8a5ff9b3666160e64aa4d93bf8b51e557f6a
| 3,634 |
py
|
Python
|
src/visitpy/visit_flow/flow/tests/test_filters_npy_ops.py
|
visit-dav/vis
|
c08bc6e538ecd7d30ddc6399ec3022b9e062127e
|
[
"BSD-3-Clause"
] | 226 |
2018-12-29T01:13:49.000Z
|
2022-03-30T19:16:31.000Z
|
src/visitpy/visit_flow/flow/tests/test_filters_npy_ops.py
|
visit-dav/vis
|
c08bc6e538ecd7d30ddc6399ec3022b9e062127e
|
[
"BSD-3-Clause"
] | 5,100 |
2019-01-14T18:19:25.000Z
|
2022-03-31T23:08:36.000Z
|
src/visitpy/visit_flow/flow/tests/test_filters_npy_ops.py
|
visit-dav/vis
|
c08bc6e538ecd7d30ddc6399ec3022b9e062127e
|
[
"BSD-3-Clause"
] | 84 |
2019-01-24T17:41:50.000Z
|
2022-03-10T10:01:46.000Z
|
# Copyright (c) Lawrence Livermore National Security, LLC and other VisIt
# Project developers. See the top-level LICENSE file for dates and other
# details. No copyright assignment is required to contribute to VisIt.
"""
file: test_filters_npy_ops.py
author: Cyrus Harrison <[email protected]>
created: 3/24/2012
description:
unittest test cases for Filters in the npy_ops module.
"""
import unittest
try:
import numpy as npy
except:
pass
from visit_flow import *
from visit_flow.filters import npy_ops
from decorators import numpy_test
# uncomment for detailed exe info
#import logging
#logging.basicConfig(level=logging.INFO)
class TestNPyOps(unittest.TestCase):
def setUp(self):
print("")
@numpy_test
def test_01_simple_workspace(self):
w = Workspace()
w.register_filters(npy_ops)
v_a = npy.array(list(range(10)),dtype=npy.double)
v_b = npy.array(list(range(10)),dtype=npy.double)
print("")
w.registry_add(":src_a",v_a)
w.registry_add(":src_b",v_b)
w.add_filter("add","f1")
w.add_filter("sub","f2")
w.add_filter("mult","f3")
w.add_filter("mult","f4")
w.add_filter("add","f5")
# f1 = src_a + src_b
w.connect(":src_a","f1:in_a")
w.connect(":src_b","f1:in_b")
# f2 = src_b - src_a
w.connect(":src_b","f2:in_a")
w.connect(":src_a","f2:in_b")
# f3 = f1^2
w.connect("f1","f3:in_a")
w.connect("f1","f3:in_b")
# f4 = f2^2
w.connect("f2","f4:in_a")
w.connect("f2","f4:in_b")
# f5 = f4 + f3
w.connect("f3","f5:in_a")
w.connect("f4","f5:in_b")
print("")
print(w.graph)
print(w.execution_plan())
act_res = w.execute()
# get output and test
test_res = npy.power((v_a + v_b),2.0)+ npy.power((v_a - v_b),2.0)
dsum = npy.sum(act_res - test_res)
print("Filter Graph Result: %s" % str(act_res))
print("Test Result: %s" % str(test_res))
print("Difference: %s" % str(dsum))
        self.assertTrue(abs(dsum) < 1e-6)
@numpy_test
def test_02_more_ops(self):
w = Workspace()
w.register_filters(npy_ops)
v_a = npy.array(list(range(10)),dtype=npy.double)
v_b = npy.array(list(range(10)),dtype=npy.double)
v_p = npy.array([2]*10,dtype=npy.double)
w.registry_add(":src_a",v_a)
w.registry_add(":src_b",v_b)
w.registry_add(":src_p",v_p)
w.add_filter("add","f1")
w.add_filter("sub","f2")
w.add_filter("pow","f3")
w.add_filter("pow","f4")
w.add_filter("add","f5")
# f1 = src_a + src_b
w.connect(":src_a","f1:in_a")
w.connect(":src_b","f1:in_b")
# f2 = src_b - src_a
w.connect(":src_b","f2:in_a")
w.connect(":src_a","f2:in_b")
# f3 = f1^2
w.connect("f1","f3:in_a")
w.connect(":src_p","f3:in_b")
# f4 = f2^2
w.connect("f2","f4:in_a")
w.connect(":src_p","f4:in_b")
# f5 = f4 + f3
w.connect("f3","f5:in_a")
w.connect("f4","f5:in_b")
print("")
act_res = w.execute()
# get output and test
test_res = npy.power((v_a + v_b),2.0)+ npy.power((v_a - v_b),2.0)
dsum = npy.sum(act_res - test_res)
print("Filter Graph Result: %s" % str(act_res))
print("Test Result: %s" % str(test_res))
print("Difference: %s" % str(dsum))
        self.assertTrue(abs(dsum) < 1e-6)
if __name__ == '__main__':
unittest.main()
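# Note: both tests exercise the algebraic identity
# (a+b)**2 + (a-b)**2 == 2*(a**2 + b**2), so test_res could equally be
# written as 2*(npy.power(v_a, 2) + npy.power(v_b, 2)).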
| 31.059829 | 73 | 0.560815 |
d2682116e9b41175911f414ea445f65505ef2001
| 163 |
py
|
Python
|
python/python_backup/Python_Progs/PYTHON_LEGACY_PROJECTS/height_round.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 16 |
2018-11-26T08:39:42.000Z
|
2019-05-08T10:09:52.000Z
|
python/python_backup/Python_Progs/PYTHON_LEGACY_PROJECTS/height_round.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 8 |
2020-05-04T06:29:26.000Z
|
2022-02-12T05:33:16.000Z
|
python/python_backup/Python_Progs/PYTHON_LEGACY_PROJECTS/height_round.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 5 |
2020-02-11T16:02:21.000Z
|
2021-02-05T07:48:30.000Z
|
print("Input your height :")
h_ft=int(input("Feet :"))
h_inch=int(input("Inches :"))
h_inch+=h_ft*12
h_cm=round(h_inch*2.54,1)
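# Worked example: 5 ft 7 in -> 5*12 + 7 = 67 in -> 67 * 2.54 = 170.2 cm (1 dp)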
print("Your height is : %d cm"%h_cm)
| 27.166667 | 36 | 0.680982 |
838a5da57026647158a07560fb026f3f34086b49
| 1,165 |
py
|
Python
|
Packs/DemistoRESTAPI/Scripts/DemistoUploadFileV2/DemistoUploadFileV2.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/DemistoRESTAPI/Scripts/DemistoUploadFileV2/DemistoUploadFileV2.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/DemistoRESTAPI/Scripts/DemistoUploadFileV2/DemistoUploadFileV2.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
from typing import Tuple
from CommonServerPython import *
def upload_file(incident_id: str, entry_id: str, body: str = ''):
return demisto.executeCommand("demisto-api-multipart",
{"uri": f'entry/upload/{incident_id}', "entryID": entry_id, "body": body})
def upload_file_command(args: dict) -> Tuple[str, list]:
incident_id = args.get('incidentID', '')
entry_id = args.get('entryID', '')
body = args.get('body', '')
response = upload_file(incident_id, entry_id, body)
if is_error(response[0]):
raise Exception("There was an issue uploading the file. Check your API key and input arguments.")
uploaded_entry_id = demisto.dt(response, 'Contents.response.entries.id')
readable = f'File uploaded successfully. Entry ID is {uploaded_entry_id}'
if body:
        readable += f'. Comment is: {body}'
return readable, response
def main():
try:
readable, response = upload_file_command(demisto.args())
return_outputs(readable, {}, response)
except Exception as err:
return_error(str(err))
if __name__ in ['__main__', 'builtin', 'builtins']:
main()
| 30.657895 | 108 | 0.656652 |
f7cd7d10fe18b1dfa6091427745884f777c5c68e
| 1,085 |
py
|
Python
|
books/PythonCleanCode/ch3_good_code/test_exceptions_1.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
books/PythonCleanCode/ch3_good_code/test_exceptions_1.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
books/PythonCleanCode/ch3_good_code/test_exceptions_1.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
"""Clean Code in Python - Chapter 3: General Traits of Good Code
"""
import unittest
from unittest.mock import Mock, patch
from exceptions_1 import DataTransport, Event
class FailsAfterNTimes:
def __init__(self, n_times: int, with_exception) -> None:
self._remaining_failures = n_times
self._exception = with_exception
def connect(self):
self._remaining_failures -= 1
if self._remaining_failures >= 0:
raise self._exception
return self
def send(self, data):
return data
@patch("time.sleep", return_value=0)
class TestTransport(unittest.TestCase):
def test_connects_after_retries(self, sleep):
data_transport = DataTransport(
FailsAfterNTimes(2, with_exception=ConnectionError)
)
data_transport.send = Mock()
data_transport.deliver_event(Event("test"))
data_transport.send.assert_called_once_with("decoded test")
assert sleep.call_count == DataTransport.retry_n_times - 1, sleep.call_count
if __name__ == "__main__":
unittest.main()
| 25.833333 | 84 | 0.690323 |
f7ceaec3f165d68502ab2e79f89237cc0e1f5fd4
| 1,654 |
py
|
Python
|
_test_.py
|
arcticle/Quark
|
17aa5b5869a9e9c7a04c1a371fef5998f33dc319
|
[
"MIT"
] | null | null | null |
_test_.py
|
arcticle/Quark
|
17aa5b5869a9e9c7a04c1a371fef5998f33dc319
|
[
"MIT"
] | null | null | null |
_test_.py
|
arcticle/Quark
|
17aa5b5869a9e9c7a04c1a371fef5998f33dc319
|
[
"MIT"
] | null | null | null |
from quark_core_api.core import QuarkApplication
from quark_core_api.context import ApplicationContext
from quark_core_api.common import ContextInitializer
import json
import os
app_dir = os.path.expanduser("~\\")
app = QuarkApplication(ApplicationContext(app_dir, ContextInitializer.application))
location = "D:\\quark"
def create_ws(name, loc):
return app.create_workspace(name, loc)
def create_xp(ws, name):
return ws.create_experiment(name)
def create_script(ws, name):
return ws.create_script(name,"{} script text goes here...".format(name))
def add_script(xp, stage, name):
xp.add_script(stage, name)
def add_param(xp, name, value):
xp.add_parameter(name, value)
def print_workspaces():
for ws in app.workspaces:
print(ws, app.workspaces[ws].name)
# create_xp(app.workspaces[20181001131931], "LGBM_CV")
# add_script(app.workspaces[20181001131931].experiments["LGBM_CV"], "prep", "preprocess")
# add_script(app.workspaces[20181001131931].experiments["LGBM_CV"], "prep", "remove_nan")
# add_script(app.workspaces[20181001131931].experiments["LGBM_CV"], "prep", "clean")
pipeline = app.workspaces[20181001131931].experiments["LGBM_CV"].pipeline
for s in pipeline.steps:
print(s.name)
for s in pipeline.stages:
print(s)
# from quark_core_api.context import ApplicationContext, ContextInitializer
# app_ctx_init = {
# "workspaces": [{"id":1, "name":"ws-1", "dir":"home"}]
# }
# def test_application_context():
# ctx = ApplicationContext(None, ContextInitializer(app_ctx_init, None))
# ctx.create_storage("app")
# print (ctx.workspaces[0])
# test_application_context()
| 23.971014 | 89 | 0.734583 |
f75199dee264bbd9684b8c372ddf97bb6e2be2ba
| 492 |
py
|
Python
|
exercises/es/solution_01_08_01.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 2,085 |
2019-04-17T13:10:40.000Z
|
2022-03-30T21:51:46.000Z
|
exercises/es/solution_01_08_01.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 79 |
2019-04-18T14:42:55.000Z
|
2022-03-07T08:15:43.000Z
|
exercises/es/solution_01_08_01.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 361 |
2019-04-17T13:34:32.000Z
|
2022-03-28T04:42:45.000Z
|
import spacy
nlp = spacy.load("es_core_news_sm")
text = (
"De acuerdo con la revista Fortune, Apple fue la empresa "
"más admirada en el mundo entre 2008 y 2012."
)
# Process the text
doc = nlp(text)
for token in doc:
    # Get the token text, the part-of-speech tag and the dependency label
token_text = token.text
token_pos = token.pos_
token_dep = token.dep_
    # This is just for formatting
print("{:<12}{:<10}{:<10}".format(token_text, token_pos, token_dep))
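    # Equivalent with an f-string:
    # print(f"{token_text:<12}{token_pos:<10}{token_dep:<10}")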
| 24.6 | 75 | 0.676829 |
f7721c10cce179af262e3ef00ac909e6a4fceff6
| 9,244 |
py
|
Python
|
src/onegov/winterthur/views/mission_report.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/winterthur/views/mission_report.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/winterthur/views/mission_report.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
from datetime import date
from onegov.core.elements import Link
from onegov.core.security import Public, Private
from onegov.form import FieldDependency, WTFormsClassBuilder, move_fields
from onegov.org.views.files import view_get_image_collection
from onegov.winterthur import WinterthurApp, _
from onegov.winterthur.collections import MissionReportCollection
from onegov.winterthur.collections import MissionReportFileCollection
from onegov.winterthur.collections import MissionReportVehicleCollection
from onegov.winterthur.forms import MissionReportForm
from onegov.winterthur.forms import MissionReportVehicleForm
from onegov.winterthur.layout import MissionReportLayout
from onegov.winterthur.models import MissionReport
from onegov.winterthur.models import MissionReportVehicle
from onegov.winterthur.models import MissionReportVehicleUse
from uuid import UUID
from wtforms.fields import BooleanField
from wtforms.fields.html5 import IntegerField
def mission_report_form(model, request):
if isinstance(model, MissionReportCollection):
report = MissionReport()
else:
report = model
form_class = report.with_content_extensions(MissionReportForm, request)
class MissionReportVehicleUseForm(form_class):
def populate_obj(self, obj):
super().populate_obj(obj)
for used in obj.used_vehicles:
request.session.delete(used)
request.session.flush()
fids = (
fid for fid in self._fields
if fid.startswith('vehicles_')
and not fid.endswith('_count')
and self.data[fid]
)
for fid in fids:
obj.used_vehicles.append(
MissionReportVehicleUse(
vehicle_id=UUID(fid.replace('vehicles_', '')),
count=self.data[f'{fid}_count']))
def process_obj(self, obj):
super().process_obj(obj)
for used in obj.used_vehicles:
field_id = f'vehicles_{used.vehicle_id.hex}'
getattr(self, field_id).data = True
getattr(self, f'{field_id}_count').data = used.count
builder = WTFormsClassBuilder(MissionReportVehicleUseForm)
builder.set_current_fieldset(_("Vehicles"))
vehicles = MissionReportVehicleCollection(request.session).query()
vehicles = {v.id: v for v in vehicles if v.access == 'public'}
# include hidden vehicles that were picked before being hidden
for used in report.used_vehicles:
if used.vehicle_id not in vehicles:
vehicles[used.vehicle_id] = used.vehicle
vehicle_field_id = None
for vehicle in sorted(vehicles.values(), key=lambda v: v.name):
field_id = f'vehicles_{vehicle.id.hex}'
vehicle_field_id = f'{field_id}_count'
builder.add_field(
field_class=BooleanField,
field_id=field_id,
label=vehicle.title,
required=False,
id=vehicle.id
)
builder.add_field(
field_class=IntegerField,
field_id=vehicle_field_id,
label=request.translate(_("Count")),
required=True,
dependency=FieldDependency(field_id, 'y'),
default=1
)
form_class = builder.form_class
if vehicle_field_id:
form_class = move_fields(
form_class, ('access', ), vehicle_field_id)
return form_class
def mission_report_vehicle_form(model, request):
if isinstance(model, MissionReportVehicleCollection):
report = MissionReportVehicle()
else:
report = model
return report.with_content_extensions(MissionReportVehicleForm, request)
@WinterthurApp.html(
model=MissionReportCollection,
permission=Public,
template='mission_reports.pt')
def view_mission_reports(self, request):
return {
'layout': MissionReportLayout(self, request),
'title': _("Mission Reports"),
'reports': self.batch,
'count': self.mission_count(),
'year': self.year,
}
@WinterthurApp.html(
model=MissionReport,
permission=Public,
template='mission_report.pt')
def view_mission(self, request):
return {
'title': self.title,
'layout': MissionReportLayout(
self, request,
Link(self.title, '#')
),
'model': self
}
@WinterthurApp.html(
model=MissionReportVehicleCollection,
permission=Private,
template='mission_report_vehicles.pt')
def view_mission_report_vehicles(self, request):
return {
'layout': MissionReportLayout(
self, request,
Link(_("Vehicles"), request.link(self))
),
'title': _("Vehicles"),
'vehicles': tuple(self.query()),
}
@WinterthurApp.html(model=MissionReportFileCollection, template='images.pt',
permission=Private)
def view_mission_report_files(self, request):
data = view_get_image_collection(self, request)
data['layout'] = MissionReportLayout(
self, request,
Link(self.report.title, request.link(self.report)),
Link(_("Images"), '#', editbar=False)
)
return data
@WinterthurApp.form(
model=MissionReportCollection,
permission=Private,
form=mission_report_form,
name='new',
template='form.pt')
def handle_new_mission_report(self, request, form):
if form.submitted(request):
mission = self.add(date=form.date, **{
k: v for k, v in form.data.items()
if k not in ('csrf_token', 'day', 'time')
and not k.startswith('vehicles_')
})
form.populate_obj(mission)
if mission.date.year != date.today().year:
            request.warning(
                _(
                    "The report's date is not in the current year, "
                    "please verify the date"
                ))
else:
request.success(_("Successfully added a mission report"))
return request.redirect(request.link(mission))
return {
'title': _("Mission Reports"),
'form': form,
'layout': MissionReportLayout(
self, request,
Link(_("New"), '#', editbar=False)
),
}
@WinterthurApp.form(
model=MissionReport,
permission=Private,
form=mission_report_form,
name='edit',
template='form.pt')
def handle_edit_mission_report(self, request, form):
if form.submitted(request):
form.populate_obj(self)
self.date = form.date
request.success(_("Your changes were saved"))
return request.redirect(request.link(self))
elif not request.POST:
form.process(obj=self)
form.date = self.date
return {
'title': _("Mission Reports"),
'form': form,
'layout': MissionReportLayout(
self, request,
Link(self.title, request.link(self)),
Link(_("Edit"), '#', editbar=False),
)
}
@WinterthurApp.view(
model=MissionReport,
permission=Private,
request_method='DELETE')
def delete_mission_report(self, request):
request.assert_valid_csrf_token()
request.session.delete(self)
request.success(_("Successfully deleted mission report"))
@WinterthurApp.form(
model=MissionReportVehicleCollection,
permission=Private,
form=mission_report_vehicle_form,
name='new',
template='form.pt')
def handle_new_vehicle(self, request, form):
if form.submitted(request):
vehicle = self.add(
**{k: v for k, v in form.data.items() if k not in (
'csrf_token', 'symbol'
)})
# required for the symbol image
form.populate_obj(vehicle)
request.success(_("Successfully added a vehicle"))
return request.redirect(
request.class_link(MissionReportVehicleCollection))
return {
'title': _("Vehicle"),
'form': form,
'layout': MissionReportLayout(
self, request,
Link(_("Vehicles"), request.link(self)),
Link(_("New"), '#', editbar=False)
),
}
@WinterthurApp.form(
model=MissionReportVehicle,
permission=Private,
form=mission_report_vehicle_form,
name='edit',
template='form.pt')
def handle_edit_vehicle(self, request, form):
if form.submitted(request):
form.populate_obj(self)
request.success(_("Your changes were saved"))
return request.redirect(
request.class_link(MissionReportVehicleCollection))
elif not request.POST:
form.process(obj=self)
return {
'title': self.title,
'form': form,
'layout': MissionReportLayout(
self, request,
Link(_("Vehicles"), request.class_link(
MissionReportVehicleCollection)),
Link(self.title, '#')
)
}
@WinterthurApp.view(
model=MissionReportVehicle,
permission=Private,
request_method='DELETE')
def delete_mission_report_vehicle(self, request):
request.assert_valid_csrf_token()
request.session.delete(self)
request.success(_("Successfully deleted vehicle"))
| 28.619195 | 76 | 0.636629 |
f79eb89a720fe844729864ef93df4ddadd5ea21d
| 3,072 |
py
|
Python
|
guidelight/app/routes.py
|
prototypefund/guidelight-website
|
6496baeb33675aadcd401acd00d6922f909484a4
|
[
"MIT"
] | null | null | null |
guidelight/app/routes.py
|
prototypefund/guidelight-website
|
6496baeb33675aadcd401acd00d6922f909484a4
|
[
"MIT"
] | null | null | null |
guidelight/app/routes.py
|
prototypefund/guidelight-website
|
6496baeb33675aadcd401acd00d6922f909484a4
|
[
"MIT"
] | null | null | null |
from flask import render_template, request, redirect, url_for, send_from_directory
from app import app, mongo
from .helper import render_form_data, get_action, get_doc
################################################################################
# PAGES
################################################################################
# HOME
@app.route("/")
@app.route("/index")
def home():
q = request.args.get("query")
if q is not None:
return redirect(url_for("search", query=q))
else:
return render_template("home.html")
# SEARCH
@app.route("/search")
def search():
q = request.args.get("query")
    # TODO: refactor mongoDB stuff!!!
projection = {"name": 1, "short": 1, "category": 1}
sort_by = [("ranking", 1), ("name", 1)]
entities = mongo.db.entities.find({}, projection).sort(sort_by)
tags = mongo.db.entities.distinct("category")
return render_template("find.html", entities=entities, query=q, tags=tags)
# ENTITY
@app.route("/entity/<short>")
def entity(short):
doc = get_doc(short)
return render_template("entity.html", entity=doc)
# FORM
@app.route("/entity/<short>/<action>/")
def form(short, action):
# print(f"LOG: {action} form requested for {short}")
doc = get_doc(short)
act = get_action(doc, action)
data = render_form_data(doc, act)
return render_template("form.html", entity=data)
################################################################################
# STATIC PAGES and BLOG (later served by NGINX)
# TODO: generate pages and put in NGINX
################################################################################
# FAVICON
@app.route("/favicon.ico")
def favicon():
return send_from_directory("static/favicon/", "favicon.png", mimetype="image/png")
# FOOTER
@app.route("/about")
def about():
return redirect(url_for("blog", topic="GuideLight"))
# FOOTER
@app.route("/api")
def api():
doc = mongo.db.website.find_one_or_404({"name": "api"}, {"_id": 0})
cnt = doc["content"]
return render_template("blog.html", content=cnt)
# FOOTER
@app.route("/privacy")
def privacy():
doc = mongo.db.website.find_one_or_404({"name": "privacy"}, {"_id": 0})
cnt = doc["content"]
return render_template("blog.html", content=cnt)
# FOOTER
@app.route("/impressum")
def impressum():
doc = mongo.db.website.find_one_or_404({"name": "impressum"}, {"_id": 0})
cnt = doc["content"]
return render_template("blog.html", content=cnt)
# BLOG
@app.route("/blog/<topic>")
def blog(topic):
doc = mongo.db.website.find_one_or_404({"name": topic}, {"_id": 0})
cnt = doc["content"]
return render_template("blog.html", content=cnt)
################################################################################
# Special Functions/Pages
################################################################################
# lists all entities from the DB
@app.route("/listall")
def listall():
    # TODO: refactor mongoDB stuff!!!
e = mongo.db.entities.distinct("name")
    return "<br>".join(e)
| 28.71028 | 86 | 0.555664 |
583611d554ef6503d6a4e438b82777d0cabd8ff1
| 698 |
py
|
Python
|
books/PythonAutomate/file_organize/selective_search.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
books/PythonAutomate/file_organize/selective_search.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
books/PythonAutomate/file_organize/selective_search.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
"""selective_search.py
선택적 복사
- 인자로 받은 확장자로 끝나는 파일을 지정한 디렉터리에 저장
"""
import os
import sys
import shutil
from pathlib import Path
def copy(format, dest_dir="new"):
cur_dir = Path.cwd()
dir_path = Path(dest_dir).absolute()
dir_path.mkdir(exist_ok=True)
for path in cur_dir.rglob(f"**/*.{format}"):
dest = dir_path / path.name
if path != dest:
print(f"copying {path.relative_to(cur_dir)} to {dest.relative_to(cur_dir)}")
shutil.copy(path, dest_dir)
def main():
if len(sys.argv) < 2:
print(f"python {__file__} [ext]")
return
copy(sys.argv[1])
if __name__ == "__main__":
main()
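# Example invocation: `python selective_search.py txt` copies every *.txt
# found under the current directory into ./new (the default dest_dir).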
| 21.151515 | 89 | 0.588825 |
544675eca71aae1e4d82edd529b01f6fe34d6a01
| 7,529 |
py
|
Python
|
quant/observers/c_usdt.py
|
doubleDragon/QuantBot
|
53a1d6c62ecece47bf777da0c0754430b706b7fd
|
[
"MIT"
] | 7 |
2017-10-22T15:00:09.000Z
|
2019-09-19T11:45:43.000Z
|
quant/observers/c_usdt.py
|
doubleDragon/QuantBot
|
53a1d6c62ecece47bf777da0c0754430b706b7fd
|
[
"MIT"
] | 1 |
2018-01-19T16:19:40.000Z
|
2018-01-19T16:19:40.000Z
|
quant/observers/c_usdt.py
|
doubleDragon/QuantBot
|
53a1d6c62ecece47bf777da0c0754430b706b7fd
|
[
"MIT"
] | 5 |
2017-12-11T15:10:29.000Z
|
2018-12-21T17:40:58.000Z
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import logging
from quant.observers.basicbot import BasicBot
"""
./venv/bin/python -m quant.cli -mBinance_ETH_USDT,Binance_BTC_USDT,Huobi_ETH_USDT,Huobi_BTC_USDT -oC_USDT -f=c_usdt -v
"""
class C_USDT(BasicBot):
def __init__(self):
super(C_USDT, self).__init__()
self.market_eth_bn = "Binance_ETH_USDT"
self.market_eth_hb = "Huobi_ETH_USDT"
self.market_btc_bn = "Binance_BTC_USDT"
self.market_btc_hb = "Huobi_BTC_USDT"
self.profit_count = 0
self.profit_total = 0
self.percent_total = 0
self.fee_hb = 0.002
self.fee_bn = 0.001
logging.info('C_USDT Setup complete')
def is_depths_available(self, depths):
if not depths:
return False
res = self.market_eth_hb in depths and self.market_eth_bn in depths
if not res:
logging.debug("eth market not exist in depths")
return False
res = self.market_btc_hb in depths and self.market_btc_bn in depths
if not res:
logging.debug("btc market not exist in depths")
return False
if not depths[self.market_eth_hb]['bids'] or not depths[self.market_eth_hb]['asks']:
logging.debug("market_eth_hb invalid")
return False
if not depths[self.market_btc_hb]['bids'] or not depths[self.market_btc_hb]['asks']:
logging.debug("market_btc_hb invalid")
return False
if not depths[self.market_eth_bn]['bids'] or not depths[self.market_eth_bn]['asks']:
logging.debug("market_eth_bn invalid")
return False
if not depths[self.market_btc_bn]['bids'] or not depths[self.market_btc_bn]['asks']:
logging.debug("market_btc_bn invalid")
return False
        hb_bid_price = depths[self.market_eth_hb]['bids'][0]['price']
        hb_ask_price = depths[self.market_eth_hb]['asks'][0]['price']
        if hb_bid_price <= 0 or hb_ask_price <= 0:
            return False
bn_bid_price = depths[self.market_eth_bn]['bids'][0]['price']
bn_ask_price = depths[self.market_eth_bn]['asks'][0]['price']
if bn_bid_price <= 0 or bn_ask_price <= 0:
return False
return True
def handle_eth(self, depths):
hb_bid_price, hb_ask_price = self.get_ticker(depths, self.market_eth_hb)
hb_bid_price = round(hb_bid_price * (1 - self.fee_hb), 2)
hb_ask_price = round(hb_ask_price * (1 + self.fee_hb), 2)
hb_bid_amount, hb_ask_amount = self.get_amount(depths, self.market_eth_hb)
bn_bid_price, bn_ask_price = self.get_ticker(depths, self.market_eth_bn)
bn_bid_price = round(bn_bid_price * (1 - self.fee_bn), 2)
bn_ask_price = round(bn_ask_price * (1 + self.fee_bn), 2)
bn_bid_amount, bn_ask_amount = self.get_amount(depths, self.market_eth_bn)
if hb_bid_price > bn_ask_price:
sell_price = hb_bid_price
sell_amount = hb_bid_amount
buy_price = bn_ask_price
buy_amount = bn_ask_amount
diff_price = sell_price - buy_price
percent = round(diff_price / buy_price * 100, 3)
diff_amount = min(sell_amount, buy_amount)
profit = round(diff_price * diff_amount, 8)
self.profit_count += 1
self.profit_total += profit
self.percent_total = self.percent_total + percent
av_percent = round(self.percent_total / self.profit_count, 3)
logging.info("huobi and binance eth_usdt profit total: %s, av percent:%s, count: %s" %
(self.profit_total, av_percent, self.profit_count))
elif hb_ask_price < bn_bid_price:
sell_price = bn_bid_price
sell_amount = bn_bid_amount
buy_price = hb_ask_price
buy_amount = hb_ask_amount
diff_price = sell_price - buy_price
percent = round(diff_price / buy_price * 100, 3)
diff_amount = min(sell_amount, buy_amount)
profit = round(diff_price * diff_amount, 8)
self.profit_count += 1
self.profit_total += profit
self.percent_total = self.percent_total + percent
av_percent = round(self.percent_total / self.profit_count, 3)
logging.info("huobi and binance eth_usdt profit total: %s, av percent:%s, count: %s" %
(self.profit_total, av_percent, self.profit_count))
else:
logging.info("huobi and binance eth_usdt no chance to profit")
def handle_btc(self, depths):
hb_bid_price, hb_ask_price = self.get_ticker(depths, self.market_btc_hb)
hb_bid_price = round(hb_bid_price * (1 - self.fee_hb), 2)
hb_ask_price = round(hb_ask_price * (1 + self.fee_hb), 2)
hb_bid_amount, hb_ask_amount = self.get_amount(depths, self.market_btc_hb)
bn_bid_price, bn_ask_price = self.get_ticker(depths, self.market_btc_bn)
bn_bid_price = round(bn_bid_price * (1 - self.fee_bn), 2)
bn_ask_price = round(bn_ask_price * (1 + self.fee_bn), 2)
bn_bid_amount, bn_ask_amount = self.get_amount(depths, self.market_btc_bn)
if hb_bid_price > bn_ask_price:
sell_price = hb_bid_price
sell_amount = hb_bid_amount
buy_price = bn_ask_price
buy_amount = bn_ask_amount
diff_price = sell_price - buy_price
percent = round(diff_price / buy_price * 100, 3)
diff_amount = min(sell_amount, buy_amount)
profit = round(diff_price * diff_amount, 8)
self.profit_count += 1
self.profit_total += profit
self.percent_total = self.percent_total + percent
av_percent = round(self.percent_total / self.profit_count, 3)
logging.info("huobi and binance btc_usdt profit total: %s, av percent:%s, count: %s" %
(self.profit_total, av_percent, self.profit_count))
elif hb_ask_price < bn_bid_price:
sell_price = bn_bid_price
sell_amount = bn_bid_amount
buy_price = hb_ask_price
buy_amount = hb_ask_amount
diff_price = sell_price - buy_price
percent = round(diff_price / buy_price * 100, 3)
diff_amount = min(sell_amount, buy_amount)
profit = round(diff_price * diff_amount, 8)
self.profit_count += 1
self.profit_total += profit
self.percent_total = self.percent_total + percent
av_percent = round(self.percent_total / self.profit_count, 3)
logging.info("huobi and binance btc_usdt profit total: %s, av percent:%s, count: %s" %
(self.profit_total, av_percent, self.profit_count))
else:
logging.info("huobi and binance btc_usdt no chance to profit")
def tick(self, depths):
if not self.is_depths_available(depths):
return
self.handle_eth(depths)
self.handle_btc(depths)
@classmethod
def get_ticker(cls, depths, market):
bid_price = depths[market]["bids"][0]['price']
ask_price = depths[market]["asks"][0]['price']
return bid_price, ask_price
@classmethod
def get_amount(cls, depths, market):
bid_amount = depths[market]["bids"][0]['amount']
ask_amount = depths[market]["asks"][0]['amount']
return bid_amount, ask_amount
| 41.827778 | 118 | 0.625847 |
b75b3ff7a53a1d5834931510bcd08e2f8a5a0ce0
| 5,999 |
py
|
Python
|
research/cv/DnCNN/train.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
research/cv/DnCNN/train.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
research/cv/DnCNN/train.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""train DnCNN"""
import os
import ast
import argparse
import mindspore.dataset as ds
from mindspore import nn
from mindspore import context, Model
from mindspore.common import set_seed
from mindspore.context import ParallelMode
from mindspore.communication.management import init, get_rank, get_group_size
from mindspore.train.callback import CheckpointConfig, ModelCheckpoint, LossMonitor, TimeMonitor
from src.model import DnCNN
from src.config import config
from src.lr_generator import step_lr
from src.data_generator import DenoisingDataset
set_seed(1)
parser = argparse.ArgumentParser(description='Mindspore DnCNN in Ascend')
parser.add_argument('--train_data', default='./data/Train400', type=str, help='the path of train dataset')
parser.add_argument('--run_modelart', default=False, type=ast.literal_eval, help='run on modelArt, default is false')
parser.add_argument('--is_distributed', default=False, type=ast.literal_eval, help='distribute training')
parser.add_argument('--device_target', type=str, default='Ascend', help='run in Ascend')
parser.add_argument('--device_id', default=0, type=int, help='device id')
# used for adapting to cloud
parser.add_argument('--data_url', default=None, help='Location of data.')
parser.add_argument('--train_url', default=None, help='Location of training outputs.')
args = parser.parse_args()
model = config.model
basic_lr = config.basic_lr
lr_gamma = config.lr_gamma
batch_size = config.batch_size
epochs = config.epoch
sigma = config.sigma
run_modelart = args.run_modelart
save_dir = os.path.join('models', model+'_' + 'sigma'+str(sigma))
if not os.path.exists(save_dir):
os.makedirs(save_dir)
if __name__ == '__main__':
if args.device_target == "Ascend":
context.set_context(mode=context.GRAPH_MODE, device_target='Ascend', save_graphs=False)
if run_modelart:
device_id = int(os.getenv('DEVICE_ID'))
device_num = int(os.getenv('RANK_SIZE'))
            local_input_url = '/cache/data' + str(device_id)
            local_output_url = '/cache/ckpt' + str(device_id)
context.set_context(device_id=device_id)
if device_num > 1:
init()
context.set_auto_parallel_context(device_num=device_num, global_rank=device_id,
parallel_mode=ParallelMode.DATA_PARALLEL,
gradients_mean=True)
args.rank = get_rank()
else:
args.rank = 0
import moxing as mox
mox.file.copy_parallel(src_url=args.data_url, dst_url=local_input_url)
args.train_data = local_input_url
save_dir = local_output_url
elif args.is_distributed:
if os.getenv('DEVICE_ID', "not_set").isdigit():
context.set_context(device_id=int(os.getenv('DEVICE_ID')))
init()
args.rank = get_rank()
args.group_size = get_group_size()
device_num = args.group_size
context.reset_auto_parallel_context()
context.set_auto_parallel_context(device_num=device_num, parallel_mode=ParallelMode.DATA_PARALLEL,
gradients_mean=True, all_reduce_fusion_config=[2, 18])
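        # all_reduce_fusion_config (assumption, from MindSpore's data-parallel
        # API) splits the gradient all-reduce into fused groups at the given
        # parameter indices, trading fewer communication launches for overlap.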
else:
        args.rank = 0  # rank id distinguishing the different training processes
args.group_size = 1
context.set_context(device_id=args.device_id)
print('======> Building model <======')
# define network
dncnn = DnCNN()
# dataset
DDataset = DenoisingDataset(args.train_data, sigma)
train_dataset = ds.GeneratorDataset(DDataset, ["noised_img", "noise"], shuffle=True)
train_dataset = train_dataset.batch(config.batch_size, drop_remainder=True)
    train_data_size = train_dataset.get_dataset_size()  # total patches // batch_size, i.e. steps per epoch
# loss
criterion = nn.MSELoss(reduction='sum')
# learning rate
lr = step_lr(basic_lr, lr_gamma, epochs*train_data_size, train_data_size)
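    # step_lr is defined in src/lr_generator.py (not shown here); it is assumed
    # to emit one lr value per step, decaying basic_lr by lr_gamma at each
    # epoch boundary, e.g. lr at epoch k ~= basic_lr * lr_gamma ** k.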
# optimizer
optimizer = nn.Adam(dncnn.trainable_params(), learning_rate=lr)
# define model
dncnn_model = Model(dncnn, loss_fn=criterion, optimizer=optimizer, amp_level="O3")
# call back
loss_cb = LossMonitor(per_print_times=train_data_size)
time_cb = TimeMonitor(data_size=train_data_size)
cb = [loss_cb, time_cb]
if config.save_checkpoint:
ckpt_config = CheckpointConfig(save_checkpoint_steps=train_data_size, keep_checkpoint_max=5)
ckpt_save_path = os.path.join(save_dir, 'ckpt' + str(args.rank) + '/')
ckpt_cb = ModelCheckpoint(prefix="dncnn", directory=ckpt_save_path, config=ckpt_config)
cb.append(ckpt_cb)
print("======> start training <======")
dncnn_model.train(epoch=epochs, train_dataset=train_dataset,
callbacks=cb, dataset_sink_mode=True)
if run_modelart:
import moxing as mox
mox.file.copy_parallel(src_url=save_dir, dst_url=args.train_url)
print("======> end training <======")
else:
raise ValueError("Unsupported device. The device should be Ascend.")
| 46.867188 | 130 | 0.668611 |
4d9e239880c22e59c110c33013d0ee5b22ee922b
| 1,134 |
py
|
Python
|
linear_data_structure/trapping_rain_water.py
|
daesookimds/Algorithm
|
76f4cbfe9000e8c1736f470138499e7c735fecaa
|
[
"MIT"
] | null | null | null |
linear_data_structure/trapping_rain_water.py
|
daesookimds/Algorithm
|
76f4cbfe9000e8c1736f470138499e7c735fecaa
|
[
"MIT"
] | null | null | null |
linear_data_structure/trapping_rain_water.py
|
daesookimds/Algorithm
|
76f4cbfe9000e8c1736f470138499e7c735fecaa
|
[
"MIT"
] | null | null | null |
from typing import List
def trap_two_pointer(height: List[int]) -> int:
if not height:
return 0
volume = 0
left, right = 0, len(height) - 1
left_max, right_max = height[left], height[right]
while left < right:
left_max, right_max = max(height[left], left_max), max(height[right], right_max)
if left_max <= right_max:
volume += left_max - height[left]
left += 1
else:
volume += right_max - height[right]
right -= 1
return volume
def trap_stack(height: List[int]) -> int:
stack = []
volume = 0
for i in range(len(height)):
while stack and height[i] > height[stack[-1]]:
top = stack.pop()
if not len(stack):
break
distance = i - stack[-1] - 1
waters = min(height[i], height[stack[-1]]) - height[top]
volume += distance * waters
stack.append(i)
return volume
def test_case():
case = [0,1,0,2,1,0,1,3,2,1,2,1]
result1 = trap_two_pointer(case)
result2 = trap_stack(case)
print(result1, result2)
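# Minimal entry point (added): the original module defines test_case() but
# never invokes it. For height = [0,1,0,2,1,0,1,3,2,1,2,1] both
# implementations return 6, the well-known answer for this example.
if __name__ == "__main__":
    test_case()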
| 21.807692 | 88 | 0.549383 |
670b63e449cbf8d057768d99f03d611ad6677032
| 37,008 |
py
|
Python
|
paddlenlp/transformers/gau_alpha/modeling.py
|
mukaiu/PaddleNLP
|
0315365dbafa6e3b1c7147121ba85e05884125a5
|
[
"Apache-2.0"
] | null | null | null |
paddlenlp/transformers/gau_alpha/modeling.py
|
mukaiu/PaddleNLP
|
0315365dbafa6e3b1c7147121ba85e05884125a5
|
[
"Apache-2.0"
] | null | null | null |
paddlenlp/transformers/gau_alpha/modeling.py
|
mukaiu/PaddleNLP
|
0315365dbafa6e3b1c7147121ba85e05884125a5
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
import numpy as np
from paddle.nn import Layer
from ..albert.modeling import ACT2FN
from .. import PretrainedModel, register_base_model
__all__ = [
"GAUAlphaModel",
"GAUAlphaForMaskedLM",
"GAUAlphaPretrainedModel",
"GAUAlphaForSequenceClassification",
"GAUAlphaForTokenClassification",
"GAUAlphaForQuestionAnswering",
"GAUAlphaForMultipleChoice",
]
INF = 1e4  # large finite offset used to mask attention logits (safer than true inf under fp16)
class Norm(Layer):
def __init__(self, epsilon=1e-12):
super().__init__()
self._epsilon = epsilon
def forward(self, x):
variance = paddle.mean(paddle.square(x), axis=-1, keepdim=True)
return x / paddle.sqrt(variance + self._epsilon)
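# Note (added): Norm is an RMSNorm without learnable scale/offset:
# norm(x) = x / sqrt(mean(x**2, axis=-1) + epsilon); the affine part is
# handled separately by ScaleOffset below.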
def attention_normalize(a, mask=None, axis=-1, method="softmax"):
if method == "softmax":
return F.softmax(a, axis=axis)
else:
        if mask is not None:
            seq_len = mask.sum(-1, keepdim=True)
        else:
            seq_len = paddle.ones_like(a) * paddle.shape(a)[-2]
        if method == "squared_relu":
            return F.relu(a)**2 / seq_len
        elif method == "softmax_plus":
            scale = paddle.log(seq_len) / np.log(512)
# mask: 1 for not padding, 0 for padding
# padding position's scale is 1
if mask is not None:
scale = scale * mask + 1 - mask
return F.softmax(a * scale, axis=axis)
return a
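# Behaviour sketch (added): with method="softmax_plus" the logits are rescaled
# by log(n)/log(512), where n is the number of un-padded key positions, so the
# scale is exactly 1 at the pretraining length n == 512, and attention is
# sharpened for longer sequences and smoothed for shorter ones.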
class ScaleOffset(Layer):
def __init__(
self,
hidden_size=768,
scale=True,
offset=True,
):
super().__init__()
self.scale = scale
self.offset = offset
if self.scale:
self.weight = self.create_parameter(
(hidden_size, ),
default_initializer=nn.initializer.Constant(1.0))
if self.offset:
self.bias = self.create_parameter((hidden_size, ), is_bias=True)
def forward(self, inputs):
if self.scale:
inputs = inputs * self.weight
if self.offset:
inputs = inputs + self.bias
return inputs
class GatedAttentionUnit(Layer):
'''
https://github.com/ZhuiyiTechnology/GAU-alpha/blob/ea15e08a85d35652775c360218090cbaed98da18/models.py#L6-L85
'''
def __init__(self,
hidden_size=768,
intermediate_size=1536,
attention_key_size=128,
activation="swish",
use_bias=False,
normalization="softmax_plus",
attention_scale=True,
attention_dropout=0.1,
max_position_embeddings=512):
super().__init__()
self.activation = ACT2FN[activation]
self.intermediate_size = intermediate_size
self.attention_key_size = attention_key_size
self.use_bias = use_bias
self.normalization = normalization
self.attention_scale = attention_scale
self.attention_dropout = attention_dropout
self.i_dense = nn.Linear(
hidden_size,
2 * intermediate_size + attention_key_size,
bias_attr=self.use_bias,
)
self.o_dense = nn.Linear(intermediate_size,
hidden_size,
bias_attr=self.use_bias)
self.q_scaleoffset = ScaleOffset(attention_key_size,
offset=self.use_bias)
self.k_scaleoffset = ScaleOffset(attention_key_size,
offset=self.use_bias)
self.rotary = RotaryPositionEmbedding(attention_key_size,
max_position_embeddings)
def forward(self, hidden_states, attention_mask=None):
x = self.i_dense(hidden_states)
u, v, qk = paddle.split(
self.activation(x),
[
self.intermediate_size, self.intermediate_size,
self.attention_key_size
],
axis=-1,
)
q, k = self.q_scaleoffset(qk), self.k_scaleoffset(qk)
# apply_rotary
q, k = self.rotary(q), self.rotary(k)
# Attention
a = paddle.matmul(q, k, transpose_y=True)
if self.attention_scale:
a = a / self.attention_key_size**0.5
if attention_mask is not None:
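            # mask == 1 keeps a logit unchanged; mask == 0 zeroes it and adds
            # -INF, so padded keys receive ~zero weight after normalization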
a = a * attention_mask + (attention_mask - 1) * INF
A = attention_normalize(a,
attention_mask,
axis=-1,
method=self.normalization)
A = F.dropout(A, p=self.attention_dropout, training=self.training)
o = self.o_dense(u * paddle.matmul(A, v))
return o
class GAULayer(Layer):
def __init__(
self,
hidden_size=768,
intermediate_size=1536,
attention_key_size=128,
activation="swish",
use_bias=False,
normalization="softmax_plus",
attention_scale=True,
attention_dropout=0.1,
hidden_dropout=0.1,
norm_eps=1e-12,
max_position_embeddings=512,
):
super().__init__()
self.gau = GatedAttentionUnit(hidden_size, intermediate_size,
attention_key_size, activation, use_bias,
normalization, attention_scale,
attention_dropout,
max_position_embeddings)
self.norm = Norm(norm_eps)
self.hidden_dropout = hidden_dropout
def forward(self, hidden_states, attention_mask=None):
gau_output = self.gau(hidden_states, attention_mask=attention_mask)
        # dropout and residual; fixed: gau() returns a single tensor, so the
        # original gau_output[0] would slice off the first batch element
        o = F.dropout(gau_output,
                      p=self.hidden_dropout,
                      training=self.training)
o = self.norm(hidden_states + o)
return o
def initializer(tensor, num_hidden_layers=12, order=2, gain=1.0):
"""
https://github.com/bojone/bert4keras/blob/5572ed481a14f5a62be7107e3846c88a5d6b617d/bert4keras/models.py#L1226-L1235
"""
shape = paddle.shape(tensor)
if shape[0] > 10000 or shape[0] < 10:
hidden_size = shape[1]
else:
hidden_size = shape[0]
gain *= num_hidden_layers**(-1. / order)
std = 1.13684723 / hidden_size**0.5 * gain
return nn.initializer.TruncatedNormal(std=std)
class RotaryPositionEmbedding(Layer):
def __init__(self, dim, max_position_embeddings=512):
super().__init__()
inv_freq = 1.0 / (10000**(
paddle.arange(0, dim, 2, dtype=paddle.get_default_dtype()) / dim))
t = paddle.arange(max_position_embeddings,
dtype=paddle.get_default_dtype())
freqs = paddle.matmul(t.unsqueeze(1), inv_freq.unsqueeze(0))
self.register_buffer("sin", freqs.sin(), persistable=False)
self.register_buffer("cos", freqs.cos(), persistable=False)
def forward(self, x, offset=0):
# x shape [batch_size, seqlen, dim]
seqlen = paddle.shape(x)[-2]
sin, cos = (
self.sin[offset:offset + seqlen, :],
self.cos[offset:offset + seqlen, :],
)
x1, x2 = x[..., 0::2], x[..., 1::2]
# [cos_nθ, -sin_nθ] [x1]
# [sin_nθ, cos_nθ] [x2]
# => [x1 * cos_nθ - x2 * sin_nθ, x1 * sin_nθ + x2 * cos_nθ]
return paddle.stack([x1 * cos - x2 * sin, x1 * sin + x2 * cos],
axis=-1).flatten(-2, -1)
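# RoPE property sketch (added): each feature pair (x1, x2) at position n is
# rotated by the 2x2 matrix [[cos nθ, -sin nθ], [sin nθ, cos nθ]], so the dot
# product <rot(q, m), rot(k, n)> depends only on the relative offset m - n --
# the relative-position behaviour the GAU attention above relies on.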
class GAUAlphaPretrainedModel(PretrainedModel):
"""
An abstract class for pretrained GAU-alpha models. It provides GAU-alpha related
`model_config_file`, `pretrained_init_configuration`, `resource_files_names`,
`pretrained_resource_files_map`, `base_model_prefix` for downloading and
loading pretrained models.
See :class:`~paddlenlp.transformers.model_utils.PretrainedModel` for more details.
"""
model_config_file = "model_config.json"
pretrained_init_configuration = {
"chinese_GAU-alpha-char_L-24_H-768": {
"vocab_size": 12000,
"hidden_size": 768,
"intermediate_size": 1536,
"num_hidden_layers": 24,
"max_position_embeddings": 512,
"type_vocab_size": 2,
"attention_key_size": 128,
"norm_eps": 1e-12,
"pad_token_id": 0,
"hidden_dropout_prob": 0.1,
"attention_probs_dropout_prob": 0.1,
"hidden_act": "swish",
"use_bias": False,
"normalization": "softmax_plus",
"attention_scale": True,
},
}
resource_files_names = {"model_state": "model_state.pdparams"}
pretrained_resource_files_map = {
"model_state": {
"chinese_GAU-alpha-char_L-24_H-768":
"https://bj.bcebos.com/paddlenlp/models/transformers/gau_alpha/chinese_GAU-alpha-char_L-24_H-768/model_state.pdparams",
}
}
base_model_prefix = "gau_alpha"
def init_weights(self, layer):
"""Initialization hook"""
if isinstance(layer, (nn.Linear, nn.Embedding)):
# In the dygraph mode, use the `set_value` to reset the parameter directly,
# and reset the `state_dict` to update parameter in static mode.
if isinstance(layer.weight, paddle.Tensor):
num_hidden_layers = self.num_hidden_layers if hasattr(
self, "num_hidden_layers"
) else self.gau_alpha.config["num_hidden_layers"]
initializer(layer.weight, num_hidden_layers, order=2, gain=1.0)
if isinstance(layer, nn.Linear):
use_bias = self.use_bias if hasattr(
self, "use_bias") else self.gau_alpha.config["use_bias"]
if layer.bias is not None and not use_bias:
layer.bias = None
@register_base_model
class GAUAlphaModel(GAUAlphaPretrainedModel):
"""
The bare GAUAlpha Model transformer outputting raw hidden-states.
This model inherits from :class:`~paddlenlp.transformers.model_utils.PretrainedModel`.
Refer to the superclass documentation for the generic methods.
This model is also a Paddle `paddle.Layer <https://www.paddlepaddle.org.cn/documentation
/docs/en/api/paddle/fluid/dygraph/layers/Layer_en.html>`__ subclass. Use it as a regular Paddle Layer
and refer to the Paddle documentation for all matter related to general usage and behavior.
Args:
        vocab_size (int):
            Vocabulary size of `inputs_ids` in `GAUAlphaModel`, which is also the vocab size of the token embedding matrix.
            Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling `GAUAlphaModel`.
        hidden_size (int, optional):
            Dimensionality of the encoder layers and the pooler layer. Defaults to `768`.
        intermediate_size (int, optional):
            Dimensionality of the feed-forward (ff) layer in the encoder. Input tensors
            to ff layers are firstly projected from `hidden_size` to `intermediate_size`,
            and then projected back to `hidden_size`. Typically `intermediate_size` is larger than `hidden_size`.
            Defaults to `1536`.
        num_hidden_layers (int, optional):
            Number of hidden layers in the gau_alpha encoder. Defaults to `24`.
max_position_embeddings (int, optional):
The maximum value of the dimensionality of position encoding, which dictates the maximum supported length of an input
sequence. Defaults to `512`.
type_vocab_size (int, optional):
The vocabulary size of `token_type_ids`.
Defaults to `2`.
attention_key_size (int, optional):
The dimensionality of the key used in the gau layer. Defaults to `128`.
norm_eps (float, optional):
The epsilon value used in the normalization layer.
Defaults to `1e-12`.
pad_token_id (int, optional):
The index of padding token in the token vocabulary.
Defaults to `0`.
hidden_dropout_prob (float, optional):
The dropout probability for all fully connected layers in the embeddings and encoder.
Defaults to `0.1`.
attention_probs_dropout_prob (float, optional):
            The dropout probability used in gau in all encoder layers to drop some attention targets.
Defaults to `0.1`.
hidden_act (str, optional):
The activation function used in gau layer. Defaults to `swish`.
use_bias (bool, optional):
Whether or not use bias.
Defaults to `False`.
normalization (str, optional):
The normalization method used in gau layer.
Defaults to `softmax_plus`.
attention_scale (bool, optional):
Whether or not to scale the attention scores.
Defaults to `True`.
"""
def __init__(
self,
vocab_size=12000,
hidden_size=768,
intermediate_size=1536,
num_hidden_layers=24,
max_position_embeddings=512,
type_vocab_size=2,
attention_key_size=128,
norm_eps=1e-12,
pad_token_id=0,
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
hidden_act="swish",
use_bias=False,
normalization="softmax_plus",
attention_scale=True,
):
super(GAUAlphaModel, self).__init__()
self.pad_token_id = pad_token_id
self.norm_eps = norm_eps
self.num_hidden_layers = num_hidden_layers
self.use_bias = use_bias
self.embeddings = GAUAlphaEmbeddings(
vocab_size,
hidden_size,
hidden_dropout_prob,
type_vocab_size,
norm_eps,
)
self.encoder = GAUAlphaEncoder(
num_hidden_layers,
hidden_size,
intermediate_size,
attention_key_size,
hidden_act,
use_bias,
normalization,
attention_scale,
attention_probs_dropout_prob,
hidden_dropout_prob,
norm_eps,
max_position_embeddings,
)
self.apply(self.init_weights)
def forward(self, input_ids=None, attention_mask=None, token_type_ids=None):
r'''
The GAUAlphaModel forward method, overrides the `__call__()` special method.
Args:
input_ids (Tensor):
Indices of input sequence tokens in the vocabulary. They are
numerical representations of tokens that build the input sequence.
Its data type should be `int64` and it has a shape of [batch_size, sequence_length].
attention_mask (Tensor, optional):
Mask used in gau to avoid performing attention on to some unwanted positions,
usually the paddings or the subsequent positions.
Its data type can be int, float and bool.
When the data type is bool, the `masked` tokens have `False` values and the others have `True` values.
When the data type is int, the `masked` tokens have `0` values and the others have `1` values.
It is a tensor with shape broadcasted to `[batch_size, sequence_length, sequence_length]`.
                Defaults to `None`, which means no positions are masked out of attention.
token_type_ids (Tensor, optional):
Segment token indices to indicate different portions of the inputs.
Selected in the range ``[0, type_vocab_size - 1]``.
If `type_vocab_size` is 2, which means the inputs have two portions.
Indices can either be 0 or 1:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
Its data type should be `int64` and it has a shape of [batch_size, sequence_length].
Defaults to `None`, which means we don't add segment embeddings.
        Returns:
            Tensor: Returns `last_hidden_state`, the sequence of hidden-states at the last layer of the model.
            Its data type should be float32 and its shape is [batch_size, sequence_length, hidden_size].
Example:
.. code-block::
import paddle
from paddlenlp.transformers import GAUAlphaModel, GAUAlphaTokenizer
tokenizer = GAUAlphaTokenizer.from_pretrained('chinese_GAU-alpha-char_L-24_H-768')
model = GAUAlphaModel.from_pretrained('chinese_GAU-alpha-char_L-24_H-768')
inputs = tokenizer("欢迎使用百度飞桨!")
inputs = {k:paddle.to_tensor([v], dtype="int64") for (k, v) in inputs.items()}
last_hidden_state = model(**inputs)
'''
if attention_mask is None:
attention_mask = input_ids != self.pad_token_id
if attention_mask.ndim == 2:
attention_mask = attention_mask.unsqueeze(1) # bs, 1, seqlen
attention_mask = attention_mask.astype(paddle.get_default_dtype())
attention_mask.stop_gradient = True
embedding_output = self.embeddings(
input_ids=input_ids,
token_type_ids=token_type_ids,
)
last_hidden_state = self.encoder(embedding_output,
attention_mask=attention_mask)
return last_hidden_state
class GAUAlphaEmbeddings(Layer):
"""
Include embeddings from word and token_type embeddings
"""
def __init__(
self,
vocab_size,
hidden_size=768,
hidden_dropout_prob=0.1,
type_vocab_size=2,
norm_eps=1e-12,
):
super(GAUAlphaEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(vocab_size, hidden_size)
self.token_type_embeddings = nn.Embedding(type_vocab_size, hidden_size)
self.norm = Norm(norm_eps)
self.dropout = nn.Dropout(hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None):
if token_type_ids is None:
token_type_ids = paddle.zeros_like(input_ids, dtype="int64")
input_embedings = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = input_embedings + token_type_embeddings
embeddings = self.norm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class GAUAlphaEncoder(Layer):
def __init__(
self,
num_hidden_layers,
hidden_size,
intermediate_size,
attention_key_size,
hidden_act,
use_bias,
normalization,
attention_scale,
attention_probs_dropout_prob,
hidden_dropout_prob,
norm_eps,
max_position_embeddings,
):
super().__init__()
self.layer = nn.LayerList([
GAULayer(
hidden_size=hidden_size,
intermediate_size=intermediate_size,
attention_key_size=attention_key_size,
activation=hidden_act,
use_bias=use_bias,
normalization=normalization,
attention_scale=attention_scale,
attention_dropout=attention_probs_dropout_prob,
hidden_dropout=hidden_dropout_prob,
norm_eps=norm_eps,
max_position_embeddings=max_position_embeddings,
) for _ in range(num_hidden_layers)
])
def forward(self, hidden_states, attention_mask=None):
for layer_module in self.layer:
hidden_states = layer_module(
hidden_states,
attention_mask,
)
return hidden_states
class GAUAlphaForQuestionAnswering(GAUAlphaPretrainedModel):
"""
GAUAlpha with a linear layer on top of the hidden-states output to compute `span_start_logits`
and `span_end_logits`, designed for question-answering tasks like SQuAD.
Args:
gau_alpha (:class:`GAUAlphaModel`):
An instance of GAUAlphaModel.
dropout (float, optional):
The dropout probability for output of GAUAlpha.
If None, use the same value as `hidden_dropout_prob` of `GAUAlphaModel`
instance `gau_alpha`. Defaults to `None`.
"""
def __init__(self, gau_alpha, dropout=None):
super(GAUAlphaForQuestionAnswering, self).__init__()
self.gau_alpha = gau_alpha
self.dropout = nn.Dropout(dropout if dropout is not None else self.
gau_alpha.config["hidden_dropout_prob"])
self.classifier = nn.Linear(self.gau_alpha.config["hidden_size"], 2)
self.apply(self.init_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None):
r"""
The GAUAlphaForQuestionAnswering forward method, overrides the __call__() special method.
Args:
input_ids (Tensor):
See :class:`GAUAlphaModel`.
token_type_ids (Tensor, optional):
See :class:`GAUAlphaModel`.
attention_mask (Tensor, optional):
See :class:`GAUAlphaModel`.
Returns:
tuple: Returns tuple (`start_logits`, `end_logits`).
With the fields:
- `start_logits` (Tensor):
A tensor of the input token classification logits, indicates the start position of the labelled span.
Its data type should be float32 and its shape is [batch_size, sequence_length].
- `end_logits` (Tensor):
A tensor of the input token classification logits, indicates the end position of the labelled span.
Its data type should be float32 and its shape is [batch_size, sequence_length].
Example:
.. code-block::
import paddle
from paddlenlp.transformers import GAUAlphaForQuestionAnswering, GAUAlphaTokenizer
tokenizer = GAUAlphaTokenizer.from_pretrained('chinese_GAU-alpha-char_L-24_H-768')
model = GAUAlphaForQuestionAnswering.from_pretrained('chinese_GAU-alpha-char_L-24_H-768')
inputs = tokenizer("欢迎使用百度飞桨!")
inputs = {k:paddle.to_tensor([v], dtype="int64") for (k, v) in inputs.items()}
outputs = model(**inputs)
start_logits = outputs[0]
end_logits = outputs[1]
"""
sequence_output = self.gau_alpha(input_ids,
token_type_ids=token_type_ids,
attention_mask=attention_mask)
logits = self.classifier(sequence_output)
start_logits, end_logits = paddle.unstack(logits, axis=-1)
return start_logits, end_logits
class GAUAlphaForSequenceClassification(GAUAlphaPretrainedModel):
"""
GAUAlpha Model with a linear layer on top of the output layer,
designed for sequence classification/regression tasks like GLUE tasks.
Args:
gau_alpha (`GAUAlphaModel`):
An instance of `paddlenlp.transformers.GAUAlphaModel`.
num_classes (int, optional):
The number of classes. Default to `2`.
dropout (float, optional):
The dropout probability for output of GAUAlpha.
If None, use the same value as `hidden_dropout_prob`
of `paddlenlp.transformers.GAUAlphaModel` instance. Defaults to `None`.
"""
def __init__(self, gau_alpha, num_classes=2, dropout=None):
super(GAUAlphaForSequenceClassification, self).__init__()
self.num_classes = num_classes
self.gau_alpha = gau_alpha
self.dropout = nn.Dropout(dropout if dropout is not None else self.
gau_alpha.config["hidden_dropout_prob"])
self.classifier = nn.Linear(self.gau_alpha.config["hidden_size"],
num_classes)
self.apply(self.init_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None):
r"""
Args:
input_ids (Tensor):
See :class:`GAUAlphaModel`.
token_type_ids (Tensor, optional):
See :class:`GAUAlphaModel`.
attention_mask (Tensor, optional):
See :class:`GAUAlphaModel`.
Returns:
Tensor: Returns tensor `logits`, a tensor of the input text classification logits.
Shape as `[batch_size, num_classes]` and dtype as float32.
Example:
.. code-block::
import paddle
from paddlenlp.transformers import GAUAlphaForSequenceClassification, GAUAlphaTokenizer
tokenizer = GAUAlphaTokenizer.from_pretrained('chinese_GAU-alpha-char_L-24_H-768')
model = GAUAlphaForSequenceClassification.from_pretrained('chinese_GAU-alpha-char_L-24_H-768')
inputs = tokenizer("欢迎使用百度飞桨!")
inputs = {k:paddle.to_tensor([v], dtype="int64") for (k, v) in inputs.items()}
logits = model(**inputs)
"""
sequence_output = self.gau_alpha(input_ids,
token_type_ids=token_type_ids,
attention_mask=attention_mask)
pooled_output = sequence_output[:, 0]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
return logits
class GAUAlphaForTokenClassification(GAUAlphaPretrainedModel):
"""
GAUAlpha Model with a linear layer on top of the hidden-states output layer,
designed for token classification tasks like NER tasks.
Args:
gau_alpha (`GAUAlphaModel`):
An instance of `paddlenlp.transformers.GAUAlphaModel`.
num_classes (int, optional):
The number of classes. Default to `2`.
dropout (float, optional):
The dropout probability for output of GAUAlpha.
If None, use the same value as `hidden_dropout_prob`
of `paddlenlp.transformers.GAUAlphaModel` instance. Defaults to `None`.
"""
def __init__(self, gau_alpha, num_classes=2, dropout=None):
super(GAUAlphaForTokenClassification, self).__init__()
self.num_classes = num_classes
self.gau_alpha = gau_alpha # allow gau_alpha to be config
self.dropout = nn.Dropout(dropout if dropout is not None else self.
gau_alpha.config["hidden_dropout_prob"])
self.classifier = nn.Linear(self.gau_alpha.config["hidden_size"],
num_classes)
self.apply(self.init_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None):
r"""
Args:
input_ids (Tensor):
See :class:`GAUAlphaModel`.
token_type_ids (Tensor, optional):
See :class:`GAUAlphaModel`.
attention_mask (Tensor, optional):
See :class:`GAUAlphaModel`.
Returns:
Tensor: Returns tensor `logits`, a tensor of the input token classification logits.
Shape as `[batch_size, sequence_length, num_classes]` and dtype as `float32`.
Example:
.. code-block::
import paddle
from paddlenlp.transformers import GAUAlphaForTokenClassification, GAUAlphaTokenizer
tokenizer = GAUAlphaTokenizer.from_pretrained('chinese_GAU-alpha-char_L-24_H-768')
model = GAUAlphaForTokenClassification.from_pretrained('chinese_GAU-alpha-char_L-24_H-768')
inputs = tokenizer("欢迎使用百度飞桨!")
inputs = {k:paddle.to_tensor([v], dtype="int64") for (k, v) in inputs.items()}
logits = model(**inputs)
"""
sequence_output = self.gau_alpha(input_ids,
token_type_ids=token_type_ids,
attention_mask=attention_mask)
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
return logits
class GAUAlphaForMultipleChoice(GAUAlphaPretrainedModel):
"""
GAUAlpha Model with a linear layer on top of the hidden-states output layer,
designed for multiple choice tasks like RocStories/SWAG tasks.
Args:
gau_alpha (:class:`GAUAlphaModel`):
An instance of GAUAlphaModel.
num_choices (int, optional):
The number of choices. Defaults to `2`.
dropout (float, optional):
The dropout probability for output of GAUAlpha.
If None, use the same value as `hidden_dropout_prob` of `GAUAlphaModel`
instance `gau_alpha`. Defaults to None.
"""
def __init__(self, gau_alpha, num_choices=2, dropout=None):
super(GAUAlphaForMultipleChoice, self).__init__()
self.num_choices = num_choices
self.gau_alpha = gau_alpha
self.dropout = nn.Dropout(dropout if dropout is not None else self.
gau_alpha.config["hidden_dropout_prob"])
self.classifier = nn.Linear(self.gau_alpha.config["hidden_size"], 1)
self.apply(self.init_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None):
r"""
The GAUAlphaForMultipleChoice forward method, overrides the __call__() special method.
Args:
input_ids (Tensor):
See :class:`GAUAlphaModel` and shape as [batch_size, num_choice, sequence_length].
token_type_ids(Tensor, optional):
See :class:`GAUAlphaModel` and shape as [batch_size, num_choice, sequence_length].
attention_mask (list, optional):
See :class:`GAUAlphaModel` and shape as [batch_size, num_choice, sequence_length].
Returns:
Tensor: Returns tensor `reshaped_logits`, a tensor of the multiple choice classification logits.
Shape as `[batch_size, num_choice]` and dtype as `float32`.
Example:
.. code-block::
import paddle
from paddlenlp.transformers import GAUAlphaForMultipleChoice, GAUAlphaTokenizer
from paddlenlp.data import Pad
tokenizer = GAUAlphaTokenizer.from_pretrained('chinese_GAU-alpha-char_L-24_H-768')
model = GAUAlphaForMultipleChoice.from_pretrained('chinese_GAU-alpha-char_L-24_H-768', num_choices=2)
data = [
{
"question": "如何打开ipad屏幕?",
"answer1": "按音量按钮。",
"answer2": "按下锁定按钮。",
"label": 1,
},
{
"question": "如何缩进一些文本?",
"answer1": "在开始写之前留一些空格。",
"answer2": "按空格键。",
"label": 0,
},
]
text = []
text_pair = []
for d in data:
text.append(d["question"])
text_pair.append(d["answer1"])
text.append(d["question"])
text_pair.append(d["answer2"])
inputs = tokenizer(text, text_pair)
input_ids = Pad(axis=0, pad_val=tokenizer.pad_token_id)(inputs["input_ids"])
token_type_ids = Pad(axis=0, pad_val=tokenizer.pad_token_type_id)(inputs["token_type_ids"])
reshaped_logits = model(
input_ids=paddle.to_tensor(input_ids, dtype="int64"),
token_type_ids=paddle.to_tensor(token_type_ids, dtype="int64"),
)
print(reshaped_logits.shape)
# [2, 2]
"""
# input_ids: [bs, num_choice, seq_l]
input_ids = input_ids.reshape(
shape=(-1, paddle.shape(input_ids)[-1]
)) # flat_input_ids: [bs*num_choice,seq_l]
if token_type_ids is not None:
token_type_ids = token_type_ids.reshape(
shape=(-1, paddle.shape(token_type_ids)[-1]))
if attention_mask is not None:
attention_mask = attention_mask.reshape(
shape=(-1, paddle.shape(attention_mask)[-1]))
sequence_output = self.gau_alpha(input_ids,
token_type_ids=token_type_ids,
attention_mask=attention_mask)
pooled_output = sequence_output[:, 0]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output) # logits: (bs*num_choice,1)
reshaped_logits = logits.reshape(
shape=(-1, self.num_choices)) # logits: (bs, num_choice)
return reshaped_logits
class GAUAlphaLMPredictionHead(Layer):
def __init__(self,
hidden_size,
vocab_size,
embedding_weights=None,
use_bias=False):
super(GAUAlphaLMPredictionHead, self).__init__()
self.use_bias = use_bias
        # fixed: this head has no `transform` sublayer, so the original
        # self.transform.weight.dtype lookup would raise AttributeError
        self.decoder_weight = (self.create_parameter(
            shape=[vocab_size, hidden_size],
            dtype=paddle.get_default_dtype())
                               if embedding_weights is None else
                               embedding_weights)
if use_bias:
self.decoder_bias = self.create_parameter(
shape=[vocab_size],
dtype=self.decoder_weight.dtype,
is_bias=True)
def forward(self, hidden_states):
hidden_states = paddle.matmul(hidden_states,
self.decoder_weight,
transpose_y=True)
if self.use_bias:
hidden_states = hidden_states + self.decoder_bias
return hidden_states
class GAUAlphaForMaskedLM(GAUAlphaPretrainedModel):
"""
GAUAlpha Model with a `masked language modeling` head on top.
Args:
gau_alpha (:class:`GAUAlphaModel`):
An instance of :class:`GAUAlphaModel`.
"""
def __init__(self, gau_alpha):
super(GAUAlphaForMaskedLM, self).__init__()
self.gau_alpha = gau_alpha
self.cls = GAUAlphaLMPredictionHead(
self.gau_alpha.config["hidden_size"],
self.gau_alpha.config["vocab_size"],
embedding_weights=self.gau_alpha.embeddings.word_embeddings.weight,
use_bias=self.gau_alpha.config["use_bias"])
self.apply(self.init_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None):
r"""
Args:
input_ids (Tensor):
See :class:`GAUAlphaModel`.
token_type_ids (Tensor, optional):
See :class:`GAUAlphaModel`.
attention_mask (Tensor, optional):
See :class:`GAUAlphaModel`.
Returns:
            Tensor: Returns tensor `prediction_scores`, the scores of masked token prediction.
            Its data type should be float32 and its shape is [batch_size, sequence_length, vocab_size].
Example:
.. code-block::
import paddle
from paddlenlp.transformers import GAUAlphaForMaskedLM, GAUAlphaTokenizer
tokenizer = GAUAlphaTokenizer.from_pretrained('chinese_GAU-alpha-char_L-24_H-768')
model = GAUAlphaForMaskedLM.from_pretrained('chinese_GAU-alpha-char_L-24_H-768')
inputs = tokenizer("欢迎使用百度飞桨!")
inputs = {k:paddle.to_tensor([v], dtype="int64") for (k, v) in inputs.items()}
logits = model(**inputs)
print(logits.shape)
# [1, 11, 12000]
"""
sequence_output = self.gau_alpha(input_ids,
token_type_ids=token_type_ids,
attention_mask=attention_mask)
prediction_scores = self.cls(sequence_output)
return prediction_scores
| 38.751832 | 131 | 0.601681 |
67d5ee64eae1907099136d93d11c7438714c2c4a
| 215 |
py
|
Python
|
__init__.py
|
wangkaibiao/SettlersFinancialData3
|
498249e14f24bfa3186f07e8f66ee624d08c6ff1
|
[
"MIT"
] | null | null | null |
__init__.py
|
wangkaibiao/SettlersFinancialData3
|
498249e14f24bfa3186f07e8f66ee624d08c6ff1
|
[
"MIT"
] | null | null | null |
__init__.py
|
wangkaibiao/SettlersFinancialData3
|
498249e14f24bfa3186f07e8f66ee624d08c6ff1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 26 16:00:46 2019 @author: sfd
"""
import sys,os
sys.path.append(os.getcwd()+"/service")
#os.popen('adb shell mkdir foldername')
| 16.538462 | 50 | 0.609302 |
221784b90744c8ad8addeb3cf5bb8982987ed7d6
| 744 |
py
|
Python
|
tarefas-poo/lista-03/navio/view/paineis/painel_mostra_navio.py
|
victoriaduarte/POO_UFSC
|
0c65b4f26383d1e3038d8469bd91fd2c0cb98c1a
|
[
"MIT"
] | null | null | null |
tarefas-poo/lista-03/navio/view/paineis/painel_mostra_navio.py
|
victoriaduarte/POO_UFSC
|
0c65b4f26383d1e3038d8469bd91fd2c0cb98c1a
|
[
"MIT"
] | null | null | null |
tarefas-poo/lista-03/navio/view/paineis/painel_mostra_navio.py
|
victoriaduarte/POO_UFSC
|
0c65b4f26383d1e3038d8469bd91fd2c0cb98c1a
|
[
"MIT"
] | null | null | null |
# --------------------------
# UFSC - CTC - INE - INE5603
# Ship exercise (Exercício do Navio)
# --------------------------
# Class that displays a ship's data on screen.
#
class PainelMostraNavio:
def mostre(self, navio):
pt = navio.peso_transportado()
pm = navio.peso_maximo()
perc_t = (pt / pm) * 100
qt = navio.qtd_containers()
capt = navio.capacidade()
perc_qtd = (qt / capt) * 100
print('- - - Mostrar Navio - - -')
print('Peso Transportado (Atual/Máximo/Percentual): {0} / {1} ({2:.2f}%)'.format(pt, pm, perc_t))
        print('Qtde Containers Transportados (Atual/Máximo/Percentual): {0} / {1} ({2:.2f}%)'.format(qt, capt, perc_qtd))
input('Digite ENTER para continuar')
| 33.818182 | 111 | 0.545699 |
1891ed554b7f58dbabc3dfd92bb83f6c6326bc6a
| 1,050 |
py
|
Python
|
making_anagrams.py
|
kautuk-desai/HackerRank
|
6c0b22800ea2e40d118d6a0a5c0ece067a0a69bf
|
[
"MIT"
] | null | null | null |
making_anagrams.py
|
kautuk-desai/HackerRank
|
6c0b22800ea2e40d118d6a0a5c0ece067a0a69bf
|
[
"MIT"
] | null | null | null |
making_anagrams.py
|
kautuk-desai/HackerRank
|
6c0b22800ea2e40d118d6a0a5c0ece067a0a69bf
|
[
"MIT"
] | null | null | null |
def main():
a = input().strip()
b = input().strip()
print(number_needed(a, b))
def number_needed(a, b):
sort_a = sorted(a)
sort_b = sorted(b)
a_len = len(sort_a)
b_len = len(sort_b)
if a_len < 1:
return b_len
elif b_len < 1:
return a_len
else:
num_needed = 0
while a_len > 0 or b_len > 0:
if a_len == 0:
num_needed += b_len
return num_needed
if b_len == 0:
num_needed += a_len
return num_needed
if sort_a[0] < sort_b[0]:
num_needed += 1
del sort_a[0]
a_len = len(sort_a)
elif sort_a[0] > sort_b[0]:
num_needed += 1
del sort_b[0]
b_len = len(sort_b)
else:
del sort_a[0]
a_len = len(sort_a)
del sort_b[0]
b_len = len(sort_b)
return num_needed
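# Worked example (added): for a = "cde" and b = "abc" the characters d, e, a
# and b have no counterpart in the other string, so number_needed returns 4 --
# the expected answer for the classic HackerRank instance of this task.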
if __name__ == '__main__':
main()
| 22.826087 | 39 | 0.444762 |
55e6df57f02b71d6bd2a8eea1d5fd82809606ea3
| 950 |
py
|
Python
|
backend/apps/iamstudent/migrations/0015_custom_migration_timestamps.py
|
match4healthcare/match4healthcare
|
acf69e3b781d715f0a947c2a9df6646e94f1ca6b
|
[
"MIT"
] | 2 |
2020-03-28T13:56:39.000Z
|
2020-03-29T10:16:12.000Z
|
backend/apps/iamstudent/migrations/0015_custom_migration_timestamps.py
|
match4healthcare/match4healthcare
|
acf69e3b781d715f0a947c2a9df6646e94f1ca6b
|
[
"MIT"
] | 76 |
2020-03-27T21:53:04.000Z
|
2020-03-30T20:27:43.000Z
|
backend/apps/iamstudent/migrations/0015_custom_migration_timestamps.py
|
match4healthcare/match4healthcare
|
acf69e3b781d715f0a947c2a9df6646e94f1ca6b
|
[
"MIT"
] | null | null | null |
import logging
from django.db import migrations, transaction
logger = logging.getLogger("django")
def update_emails(apps, schema_editor):
EmailToSend = apps.get_model('iamstudent', 'EmailToSend')
EmailToHospital = apps.get_model('iamstudent', 'EmailToHospital')
for email in EmailToSend.objects.all():
if email.send_date:
            logger.warning("email has send_date")
continue
if email.was_sent:
email.send_date = email.registration_date
email.save()
for email in EmailToHospital.objects.all():
if email.send_date:
            logger.warning("email has send_date")
continue
email.send_date = email.registration_date
email.save()
class Migration(migrations.Migration):
atomic = False
dependencies = [
('iamstudent', '0015_auto_20200409_1620'),
]
operations = [
migrations.RunPython(update_emails),
]
| 23.170732 | 69 | 0.647368 |
7fe0c2a6b8a10de2335d04924b2ab5a369ba3558
| 3,739 |
py
|
Python
|
src/server/app/endpoints/standby/profile/controllers.py
|
MatthiasRiener/DigiPen
|
9b4aff4a1c431e06d73733dc3dd3f3f3d4631704
|
[
"MIT"
] | 6 |
2020-12-15T18:57:53.000Z
|
2022-02-06T18:54:35.000Z
|
src/server/app/endpoints/standby/profile/controllers.py
|
MatthiasRiener/DigiPen
|
9b4aff4a1c431e06d73733dc3dd3f3f3d4631704
|
[
"MIT"
] | 186 |
2020-11-17T10:18:17.000Z
|
2022-03-02T07:19:22.000Z
|
src/server/app/endpoints/standby/profile/controllers.py
|
MatthiasRiener/DigiPen
|
9b4aff4a1c431e06d73733dc3dd3f3f3d4631704
|
[
"MIT"
] | 1 |
2020-12-14T19:37:30.000Z
|
2020-12-14T19:37:30.000Z
|
from ...db.settings import db, oidc
from flask import Flask, Blueprint, render_template, abort, g, request
from oauth2client.client import OAuth2Credentials
from flask_jwt_extended import (create_access_token, create_refresh_token, jwt_required, jwt_refresh_token_required, get_jwt_identity, get_raw_jwt, set_access_cookies, get_jti,
set_refresh_cookies, unset_jwt_cookies, decode_token)
import time
from PIL import Image
from ...models.User import User
from ...repository.AuthenticationRepository import AuthenticationRepository
from ...repository.WorkspaceRepository import WorkspaceRepository
from ...repository.TaskRepository import TaskRepository
from ...repository.PresentationRepository import PresentationRepository
from ...repository.LocationRepository import LocationRepository
import json
import string
import re
from os import path, mkdir
from binascii import a2b_base64
profile = Blueprint("profile", __name__,
static_folder="static", template_folder="templates")
repo = AuthenticationRepository(testing=False)
wRepo = WorkspaceRepository(testing=False)
taskRepo = TaskRepository(testing=False)
presRepo = PresentationRepository(testing=False)
locRepo = LocationRepository(testing=False)
@profile.route('/')
def index():
return render_template('/profile/index.html')
@profile.route('/user')
@jwt_required
def getUserData():
cur_user = get_jwt_identity()
user = repo.retrieveUserWithOutTimeChange(user_id=cur_user)
user.update({"workspaces": wRepo.getRepoCounter(u_id=cur_user)})
user.update({"location": locRepo.getUsersLocation(user_id=cur_user)})
return json.dumps(user)
@profile.route('/getActivityInfo')
@jwt_required
def getActivityRoute():
u_id = get_jwt_identity()
response = dict()
response["presentations"] = presRepo.getPresentationCount(user_id=u_id)
response["tasks"] = taskRepo.getTasksCount(u_id=u_id)
response["organizations"] = wRepo.getRepoCounter(u_id=u_id)
return json.dumps({"res": response})
@profile.route('/getUpComingTasks')
@jwt_required
def getUsersTasksRoute():
u_id = get_jwt_identity()
res = taskRepo.getUpcomingTasks(u_id=u_id)
return json.dumps({"res": res})
@profile.route('/getPresentationCount')
@jwt_required
def getUsersPresentationCountRoute():
u_id = get_jwt_identity()
response = presRepo.getPresentationCount(user_id=u_id)
return json.dumps({"res": response})
@profile.route('/saveDesc', methods=["POST"])
@jwt_required
def updateUsersDescriptionRoute():
u_id = get_jwt_identity()
data = request.form
desc = data["desc"]
res = repo.updateUserDesc(user_id=u_id, desc=desc)
return json.dumps({"res": res})
import os
import urllib.request  # `import urllib` alone does not reliably expose urllib.request
@profile.route('/uploadImage', methods=["POST"])
@jwt_required
def uploadUserImageRoute():
u_id = get_jwt_identity()
data = request.form
name = data["name"]
img_data = data["img"]
last_modified = data["lm"]
file_name = last_modified + "_" + name
## check if directory for user exists, otherwise create
cur_dir = os.getcwd()
cur_dir = cur_dir + "/app/files/static/profile/img"
if not path.exists(cur_dir + "/images/" + u_id):
mkdir(cur_dir + "/images/" + u_id + "/")
with open(cur_dir + "/images/" + u_id + "/" + file_name, 'wb') as img:
print(type(img_data))
print("=====")
response = urllib.request.urlopen(img_data)
img.write(response.file.read())
img.close()
repo.updateUserImg(user_id=u_id, file_name=file_name)
print(img)
return json.dumps({"res": repo.retrieveUserWithOutTimeChange(user_id=u_id)})
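# Decoding note (added): img_data is expected to be a data URL such as
# "data:image/png;base64,....". urllib's data: handler decodes it inside
# urlopen(); an equivalent manual decode with the already-imported a2b_base64
# would be (hypothetical sketch, assuming a well-formed data URL):
#   header, b64 = img_data.split(",", 1)
#   img.write(a2b_base64(b64))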
| 27.902985 | 176 | 0.72506 |
187eecd48f84c8f1b492fed02beda070c3344b73
| 312 |
py
|
Python
|
randtext.py
|
two-doges/tpf
|
a98f68fba40b3f07ce0ef14f6ca982e915ee3e07
|
[
"MIT"
] | 2 |
2018-03-04T13:36:22.000Z
|
2018-03-04T13:36:33.000Z
|
randtext.py
|
two-doges/tpf
|
a98f68fba40b3f07ce0ef14f6ca982e915ee3e07
|
[
"MIT"
] | null | null | null |
randtext.py
|
two-doges/tpf
|
a98f68fba40b3f07ce0ef14f6ca982e915ee3e07
|
[
"MIT"
] | null | null | null |
import random
import sys
sys.path.append("..")
def get_randtext(length=50):  # param renamed from `len`, which shadowed the builtin
    ans = ""
    chars = 'AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPpQqRrSsTtUuVvWwXxYyZz0123456789'
    for i in range(length):
        e = random.randint(0, len(chars) - 1)
        ans += chars[e]
    return ans
if __name__=="__main__":
print(get_randtext())
| 22.285714 | 76 | 0.666667 |
a101cc0cc7d2f3e30ced2bd7fea990a1dccc4ca4
| 2,744 |
py
|
Python
|
games/rpg/dqn_controller.py
|
etigerstudio/zilong-on-fire
|
5144a471b2d39ea38a47d394e648de00dd13cd8b
|
[
"MIT"
] | 2 |
2021-01-07T01:10:49.000Z
|
2022-01-21T09:37:16.000Z
|
games/rpg/dqn_controller.py
|
etigerstudio/zilong-on-fire
|
5144a471b2d39ea38a47d394e648de00dd13cd8b
|
[
"MIT"
] | null | null | null |
games/rpg/dqn_controller.py
|
etigerstudio/zilong-on-fire
|
5144a471b2d39ea38a47d394e648de00dd13cd8b
|
[
"MIT"
] | null | null | null |
from envs.rpg.rpg import RPGEnvironment
from envs.rpg.levels.large_tutorial import LargeTutorial
from envs.rpg.levels.small_tutorial import SmallTutorial
from envs.rpg.levels.slash_spike import SlashSpike
import json
import numpy as np
import random
import tensorflow as tf
np.random.seed(1)
random.seed(1)
model_save_path = 'destroy_reward=0.125 vain=-0.125'
json_save_path = '/data.json'
def adjust_matrix(matrix):
return np.flip(np.transpose(matrix), axis=0)
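# e.g. (added) adjust_matrix(np.array([[1, 2, 3], [4, 5, 6]])) transposes to
# [[1, 4], [2, 5], [3, 6]] and then flips along axis 0, giving
# [[3, 6], [2, 5], [1, 4]] -- a 90-degree rotation so the grid origin ends up
# at the bottom-left for printing.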
actions_repre = {
0: ["LEFTWARD", "WALK"],
1: ["FORWARD", "WALK"],
2: ["RIGHTWARD", "WALK"],
3: ["BACKWARD", "WALK"],
4: ["LEFTWARD", "JUMP"],
5: ["FORWARD", "JUMP"],
6: ["RIGHTWARD", "JUMP"],
7: ["BACKWARD", "JUMP"],
8: ["LEFTWARD", "SlASH"],
9: ["FORWARD", "SlASH"],
10: ["RIGHTWARD", "SlASH"],
11: ["BACKWARD", "SlASH"],
}
entities_repre = {
1: "SPIKE",
2: "BARRIER",
3: "TREASURE",
4: "ACTOR"
}
def generate_actions_list(actions_history):
actions_list = []
for action in actions_history:
actions_list.append(actions_repre[action])
return actions_list
def generate_entities_list(entities):
entities_list = []
for entity in entities:
entity_data = {"x": entity.position[0],
"y": entity.position[1],
"type": entities_repre[entity.representation]}
entities_list.append(entity_data)
return entities_list
if __name__ == "__main__":
env = RPGEnvironment(SlashSpike)
state = env.reset()
entities = env.world.entities
width = env.world.level_width
height = env.world.level_height
entities_list = generate_entities_list(entities)
print(adjust_matrix(state))
model = tf.keras.models.load_model('../../model/rpg/' + model_save_path)
actions_history = []
game_over = False
while not game_over:
state = tf.expand_dims(tf.constant(state, dtype=tf.float32), -1)
state = state[tf.newaxis, ...]
action = tf.argmax(model.predict(state), axis=1)
actions_history.append(int(action))
new_state, reward, game_over = env.step(RPGEnvironment.Action(int(action)))
state = new_state
print(adjust_matrix(new_state))
print(f'reward: {reward}, game_over: {game_over} time_elapsed: {env.world.time_elapsed}')
print(f'Game is over, reason: {env.world.status}')
print('action history: ', actions_history)
actions_list = generate_actions_list(actions_history)
save_data = {"level": {"width": width, "height": height, "entities": entities_list},
"action": actions_list}
with open('../../model/rpg/' + model_save_path + json_save_path, 'w') as file_obj:
json.dump(save_data, file_obj)
| 30.831461 | 97 | 0.653426 |
a1035620203a965acaa36f37842d4f5e586660cd
| 16,144 |
py
|
Python
|
RDS/circle3_central_services/token_storage/tests/lib/test_2_token_storage.py
|
Sciebo-RDS/Sciebo-RDS
|
d71cf449ed045a2a7a049e2cb77c99fd5a9195bd
|
[
"MIT"
] | 10 |
2020-06-24T08:22:24.000Z
|
2022-01-13T16:17:36.000Z
|
RDS/circle3_central_services/token_storage/tests/lib/test_2_token_storage.py
|
Sciebo-RDS/Sciebo-RDS
|
d71cf449ed045a2a7a049e2cb77c99fd5a9195bd
|
[
"MIT"
] | 78 |
2020-01-23T14:32:06.000Z
|
2022-03-07T14:11:16.000Z
|
RDS/circle3_central_services/token_storage/tests/lib/test_2_token_storage.py
|
Sciebo-RDS/Sciebo-RDS
|
d71cf449ed045a2a7a049e2cb77c99fd5a9195bd
|
[
"MIT"
] | 1 |
2020-06-24T08:33:48.000Z
|
2020-06-24T08:33:48.000Z
|
import unittest
import pytest
from lib.Storage import Storage
from RDS import Token, OAuth2Token, User, LoginService, OAuth2Service, Util
from lib.Exceptions.StorageException import (
UserExistsAlreadyError,
UserHasTokenAlreadyError,
UserNotExistsError,
)
from RDS.ServiceException import (
ServiceNotExistsError,
ServiceExistsAlreadyError,
)
from fakeredis import FakeStrictRedis
def make_test_case(use_redis=False):
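    # The whole TestCase class is built inside this factory so the same suite
    # can run against two backends: get_opts() below selects a FakeStrictRedis
    # (redis code path without a server) or the in-memory dict fallback.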
def get_opts():
if use_redis:
return {
"rc": FakeStrictRedis(decode_responses=True),
"use_in_memory_on_failure": False,
}
return {"use_in_memory_on_failure": True}
class Test_TokenStorage(unittest.TestCase):
def setUp(self):
Util.monkeypatch()
self.empty_storage = Storage(**get_opts())
self.user1 = User("Max Mustermann")
self.user2 = User("Mimi Mimikri")
self.service1 = LoginService(
servicename="MusterService", implements=["metadata"])
self.service2 = LoginService(
servicename="FahrService", implements=["metadata"])
self.oauthservice1 = OAuth2Service(
servicename="BetonService",
implements=["metadata"],
authorize_url="http://localhost/oauth/authorize",
refresh_url="http://localhost/oauth/token",
client_id="MNO",
client_secret="UVW",
)
self.oauthservice2 = OAuth2Service(
servicename="FlugService",
implements=["metadata"],
authorize_url="http://localhost21/oauth/authorize",
refresh_url="http://localhost21/oauth/token",
client_id="XCA",
client_secret="BCXY",
)
self.empty_storage.addService(self.service1)
self.empty_storage.addService(self.oauthservice1)
self.empty_storage.addService(self.oauthservice2)
self.token1 = Token(self.user1, self.service1, "ABC")
self.token_like_token1 = Token(self.user1, self.service1, "DEF")
self.token2 = Token(self.user1, self.oauthservice1, "XYZ")
self.token3 = Token(self.user2, self.service2, "XASD")
self.token4 = Token(self.user2, self.service1, "IOAJSD")
self.oauthtoken1 = OAuth2Token(
self.user1, self.oauthservice1, "ABC", "X_ABC"
)
self.oauthtoken_like_token1 = OAuth2Token(
self.user1, self.oauthservice1, "ABC", "X_DEF"
)
self.oauthtoken2 = OAuth2Token(
self.user1, self.oauthservice1, "XYZ", "X_XYZ"
)
self.oauthtoken3 = OAuth2Token(
self.user1, self.oauthservice2, "XYZ", "X_XYZ"
)
def test_storage_listUser(self):
empty_storage = Storage(**get_opts())
self.assertEqual(empty_storage.getUsers(), [])
empty_storage.addUser(self.user1)
self.assertEqual(empty_storage.getUsers(), [self.user1])
empty_storage.addUser(self.user2)
self.assertEqual(empty_storage.getUsers(),
[self.user1, self.user2])
            # should raise an exception if the user is already there
with self.assertRaises(
UserExistsAlreadyError, msg=f"Storage {empty_storage}"
):
empty_storage.addUser(self.user1)
def test_tokenstorage_add_service(self):
empty_storage = Storage(**get_opts())
empty_storage.addUser(self.user1)
            # adding a token for an unregistered service must raise
with self.assertRaises(ServiceNotExistsError):
empty_storage.addTokenToUser(self.token1, self.user1)
# now should work
empty_storage.addService(self.service1)
empty_storage.addTokenToUser(self.token1, self.user1)
self.assertEqual(empty_storage.getTokens(
self.user1), [self.token1])
with self.assertRaises(ServiceExistsAlreadyError):
self.empty_storage.addService(self.service1)
def test_storage_getUser_getToken(self):
empty_storage = Storage(**get_opts())
with self.assertRaises(UserNotExistsError):
empty_storage.getUser(self.user1.username)
with self.assertRaises(UserNotExistsError):
empty_storage.getTokens(self.user1.username)
empty_storage.addUser(self.user1)
empty_storage.addService(self.service1)
empty_storage.addTokenToUser(self.token1, self.user1)
self.assertEqual(empty_storage.getUser(
self.user1.username), self.user1)
self.assertEqual(
empty_storage.getTokens(self.user1.username), [self.token1]
)
self.assertEqual(
empty_storage.getToken(self.user1.username, 0), self.token1
)
self.assertEqual(empty_storage.getTokens(
self.user1), [self.token1])
empty_storage.addUser(self.user2)
empty_storage.addService(self.service2)
empty_storage.addTokenToUser(self.token3, self.user2)
self.assertEqual(empty_storage.getUser(
self.user2.username), self.user2)
self.assertEqual(empty_storage.getUser(
self.user1.username), self.user1)
self.assertEqual(
empty_storage.getToken(self.user2.username, 0), self.token3
)
self.assertEqual(
empty_storage.getToken(
self.user1.username, self.token1.servicename),
self.token1,
)
self.assertEqual(
empty_storage.getToken(
self.user2.username, self.token3.servicename),
self.token3,
)
empty_storage.addTokenToUser(self.token4, self.user2)
self.assertEqual(
empty_storage.getToken(
self.user2.username, self.token4.servicename),
self.token4,
)
def test_tokenstorage_add_user(self):
            # raises an exception if the token's user does not exist
with self.assertRaises(
UserNotExistsError, msg=f"Storage {self.empty_storage}"
):
self.empty_storage.addTokenToUser(self.token1, self.user1)
            # add one user, so the storage should contain exactly one
expected = {"Max Mustermann": {"data": self.user1, "tokens": []}}
self.empty_storage.addUser(self.user1)
self.assertEqual(
self.empty_storage._storage,
expected,
msg=f"Storage {self.empty_storage}",
)
            # should raise an exception if the user is already there
with self.assertRaises(
UserExistsAlreadyError, msg=f"Storage {self.empty_storage}"
):
self.empty_storage.addUser(self.user1)
# add token to user
expected[self.user1.username]["tokens"].append(self.token1)
self.empty_storage.addTokenToUser(self.token1, self.user1)
self.assertEqual(
self.empty_storage._storage,
expected,
msg=f"Storage {self.empty_storage}",
)
            # raises an exception if the token is already there
with self.assertRaises(
UserHasTokenAlreadyError, msg=f"Storage {self.empty_storage}"
):
self.empty_storage.addTokenToUser(self.token1, self.user1)
def setUpRemove(self):
# setUp
self.empty_storage.addUser(self.user1)
self.empty_storage.addUser(self.user2)
def test_tokenstorage_remove_user(self):
self.setUpRemove()
expected = {}
expected[self.user1.username] = {"data": self.user1, "tokens": []}
expected[self.user2.username] = {"data": self.user2, "tokens": []}
# remove user
self.empty_storage.removeUser(self.user1)
del expected[self.user1.username]
self.assertEqual(self.empty_storage._storage, expected)
with self.assertRaises(UserNotExistsError):
self.empty_storage.removeUser(self.user1)
self.empty_storage.removeUser(self.user2)
del expected[self.user2.username]
self.assertEqual(self.empty_storage._storage, expected)
# storage now empty
self.assertEqual(self.empty_storage.getUsers(), [])
def test_tokenstorage_add_token_force(self):
            # add a token to a not-yet-existing user with Force=True
expected = {"Max Mustermann": {
"data": self.user1, "tokens": [self.token1]}}
self.empty_storage.addTokenToUser(
self.token1, self.user1, Force=True)
self.assertEqual(
self.empty_storage._storage,
expected,
msg=f"Storage {self.empty_storage}",
)
            # now overwrite the already existing token with Force=True
expected[self.user1.username]["tokens"][0] = self.token_like_token1
self.empty_storage.addTokenToUser(
self.token_like_token1, self.user1, Force=True
)
self.assertEqual(
self.empty_storage._storage,
expected,
msg=f"Storage {self.empty_storage}",
)
def test_tokenstorage_oauthtokens_add_user(self):
# empty storage
self.assertEqual(self.empty_storage._storage, {})
            # raises an exception if the token's user does not exist
with self.assertRaises(
UserNotExistsError, msg=f"Storage {self.empty_storage}"
):
self.empty_storage.addTokenToUser(self.oauthtoken1, self.user1)
            # add one user, so the storage should contain exactly one
expected = {"Max Mustermann": {"data": self.user1, "tokens": []}}
self.empty_storage.addUser(self.user1)
self.assertEqual(
self.empty_storage._storage,
expected,
msg=f"Storage {self.empty_storage}",
)
# should raise an Exception, if user already there
with self.assertRaises(
UserExistsAlreadyError, msg=f"Storage {self.empty_storage}"
):
self.empty_storage.addUser(self.user1)
# add token to user
expected[self.user1.username]["tokens"].append(self.oauthtoken1)
self.empty_storage.addTokenToUser(self.oauthtoken1, self.user1)
self.assertEqual(
self.empty_storage._storage,
expected,
msg=f"Storage {self.empty_storage}",
)
# raise an exception, if token already there
with self.assertRaises(
UserHasTokenAlreadyError, msg=f"Storage {self.empty_storage}"
):
self.empty_storage.addTokenToUser(self.oauthtoken1, self.user1)
def test_tokenstorage_oauthtokens_add_token_force(self):
# add Token to not existing user with force
expected = {
"Max Mustermann": {"data": self.user1, "tokens": [self.oauthtoken1]}
}
self.empty_storage.addTokenToUser(
self.oauthtoken1, self.user1, Force=True)
self.assertEqual(
self.empty_storage._storage,
expected,
msg=f"Storage {self.empty_storage}",
)
# now overwrite the already existing token with force
expected[self.user1.username]["tokens"][0] = self.oauthtoken_like_token1
self.empty_storage.addTokenToUser(
self.oauthtoken_like_token1, self.user1, Force=True
)
self.assertEqual(
self.empty_storage._storage,
expected,
msg=f"\nStorage: {self.empty_storage._storage}\n expected: {expected}",
)
def test_tokenstorage_tokens_under_user(self):
oauthtoken1 = OAuth2Token(
self.user1, self.oauthservice1, "ABC", "X_ABC")
self.empty_storage.addTokenToUser(
oauthtoken1, self.user1, Force=True)
oauthtoken2 = OAuth2Token(
self.user1, self.oauthservice2, "XYZ", "X_XYZ")
self.empty_storage.addTokenToUser(
oauthtoken2, self.user1, Force=True)
token1 = Token(self.user1, self.service2, "ISADF")
with self.assertRaises(ServiceNotExistsError):
self.empty_storage.addTokenToUser(
token1, self.user1, Force=True)
self.empty_storage.addTokenToUser(
self.token1, self.user1, Force=True)
def test_tokenstorage_service_implementstype(self):
empty_storage = Storage(**get_opts())
service = LoginService(servicename="longname", implements=[
"fileStorage", "metadata"])
empty_storage.addUser(self.user1)
token1 = Token(self.user1, service, "ISADF")
# test the exception raise
with self.assertRaises(ServiceNotExistsError):
empty_storage.addTokenToUser(token1, self.user1)
# now should work
self.assertTrue(empty_storage.addService(service))
self.assertTrue(empty_storage.addTokenToUser(token1, self.user1))
self.assertEqual(empty_storage.getTokens(self.user1), [token1])
with self.assertRaises(ServiceExistsAlreadyError):
empty_storage.addService(service)
def test_tokenstorage_remove_mastertoken(self):
expected = {
self.user1.username: {"data": self.user1,
"tokens": [self.oauthtoken1]}
}
self.empty_storage.addTokenToUser(
self.oauthtoken1, self.user1, Force=True)
self.assertEqual(
self.empty_storage._storage,
expected,
msg=f"Storage {self.empty_storage}",
)
expected[self.user1.username]["tokens"].append(self.oauthtoken3)
self.empty_storage.addTokenToUser(self.oauthtoken3, self.user1)
self.assertEqual(
self.empty_storage._storage,
expected,
msg=f"Storage {self.empty_storage}",
)
self.empty_storage.removeToken(self.user1, self.oauthtoken1)
self.assertEqual(self.empty_storage.storage, {})
def test_tokenstorage_remove_token(self):
expected = {
self.user1.username: {"data": self.user1,
"tokens": [self.oauthtoken1]}
}
self.empty_storage.addTokenToUser(
self.oauthtoken1, self.user1, Force=True)
self.assertEqual(
self.empty_storage._storage,
expected,
msg=f"Storage {self.empty_storage}",
)
expected[self.user1.username]["tokens"].append(self.oauthtoken3)
self.empty_storage.addTokenToUser(self.oauthtoken3, self.user1)
self.assertEqual(
self.empty_storage._storage,
expected,
msg=f"Storage {self.empty_storage}",
)
del expected[self.user1.username]["tokens"][1]
self.empty_storage.removeToken(self.user1, self.oauthtoken3)
self.assertEqual(self.empty_storage.storage, expected)
return Test_TokenStorage
class StorageTestCase(make_test_case()):
pass
class StorageRedisBackedTestCase(make_test_case(use_redis=True)):
pass
| 37.457077 | 87 | 0.579286 |
b820b1c606e6a2cb29de80253c10073661d546a3
| 9,231 |
py
|
Python
|
wz/backend/interface_template_fields.py
|
gradgrind/WZ
|
672d93a3c9d7806194d16d6d5b9175e4046bd068
|
[
"Apache-2.0"
] | null | null | null |
wz/backend/interface_template_fields.py
|
gradgrind/WZ
|
672d93a3c9d7806194d16d6d5b9175e4046bd068
|
[
"Apache-2.0"
] | null | null | null |
wz/backend/interface_template_fields.py
|
gradgrind/WZ
|
672d93a3c9d7806194d16d6d5b9175e4046bd068
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
backend/interface_template_fields.py - last updated 2021-05-22
Controller/dispatcher for the template-filler module.
==============================
Copyright 2021 Michael Towers
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
### Messages
_NO_SUBSTITUTIONS = "Keine Felder werden ersetzt: Keine Datei wird erstellt."
_DONE_ODT = "Neue Datei erstellt:\n {fodt}"
_DONE_PDF = "Neue Dateien erstellt:\n {fodt}\n {fpdf}"
_DONE_SHOW = "Zwischendateien gelöscht"
################################################
import os
from core.base import Dates, DataError
from core.pupils import PUPILS
from local.field_handlers import FieldMap, FieldHandlerError, \
EmptyField
from template_engine.template_sub import Template, TemplateError
### +++++
class Template_Filler:
template = None
#
@staticmethod
def get_classes():
pupils = PUPILS(SCHOOLYEAR)
class_list = pupils.classes()
class_list.reverse() # start with the highest classes
CALLBACK('template_SET_CLASSES', classes = class_list)
return True
#
@staticmethod
def set_class(klass):
pupils = PUPILS(SCHOOLYEAR)
plist = [('', '–––')] + [(pdata['PID'], pupils.name(pdata))
for pdata in pupils.class_pupils(klass)]
CALLBACK('template_SET_PUPILS', pupil_list = plist)
return True
#
@classmethod
def force_template(cls, path):
"""FOR TESTING: preselect a template.
<path> is relative to the templates folder, e.g. 'Noten/SekI'.
"""
fpath = os.path.join(RESOURCES, 'templates', *path.split('/'))
cls.set_template(fpath)
return True
#
@staticmethod
def get_template_dir():
"""Fetch the path to a template file.
The RESOURCES folder is searched to find suitable files.
These have certain metadata fields set:
'title': must start with 'WZ-template'
'subject': short description
This allows a somewhat documented list/tree of available templates
to be shown.
"""
startpath = os.path.join(RESOURCES, 'templates')
data = []
for (root, dirs, files) in os.walk(startpath):
tfiles = []
for f in files:
if f.endswith('.odt'):
try:
t = Template(os.path.join(root, f), full_path = True)
except:
# Not a template file
continue
_meta = t.metadata()
try:
title = _meta['title']
except KeyError:
continue
if title.startswith('WZ-template'):
tfiles.append('%s:: %s' % (f, _meta['subject']))
if tfiles:
tfiles.sort()
data.append((root, tfiles))
data.sort()
CALLBACK('template_CHOOSE_TEMPLATE', templates = data)
return True
#
@classmethod
def set_template(cls, template_path):
cls.template = Template(template_path, full_path = True)
### Get template fields: [(field, style or <None>), ...]
fields_style = cls.template.fields()
# The fields are in order of appearance in the template file,
# keys may be present more than once!
# The style is only present for fields which are alone within a
# paragraph. This is a prerequisite for an entry with multiple
# lines – if an entry has line-breaks but no style, the generator
# will raise an Exception (TemplateError).
### Count number of appearances, reduce to single entries
_fields = {}
for f, s in fields_style:
try:
_fields[f] += 1
except KeyError:
_fields[f] = 1
### Get field information from the template file.
# This tells us how to handle certain fields.
# There can be "selections", for example, a list of permissible
# values for a particular template field.
try:
cls.field_map = FieldMap({})
cls.field_map.add_handlers(
cls.template.metadata().get('FIELD_INFO'))
# Order the fields so that dependencies come before the fields
# that need them:
cls.fields, deps = cls.field_map.sort_dependencies(_fields)
# One problem now is to distinguish between dependent fields
# with only internal dependencies and those with (also)
# external dependencies. The former are non-editable, for
# the latter an editor must be provided.
field_info = []
selects = {} # collect used "selects" with python list values
for field in cls.fields:
text, n = field, _fields[field]
if n != 1:
text += f' (*{n})'
sel = cls.field_map.selection(field)
if sel == 'LINE':
validation = 'LINE'
elif sel == 'DATE':
validation = 'DATE'
elif sel == 'TEXT':
validation = 'TEXT'
elif not sel:
# Field not writeable
continue
else:
# It must be a selection list/map
validation, slist = sel
if validation not in selects:
selects[validation] = slist
cls.field_map[field] = NONE
field_info.append((field, text, validation))
CALLBACK('template_SET_FIELDS', path = cls.template.template_path,
fields = field_info, selects = selects)
except FieldHandlerError as e:
REPORT('ERROR', str(e))
return False
return True
#
@classmethod
def renew(cls, klass, pid):
### Initial fields
field_values = {
'SCHOOLYEAR': SCHOOLYEAR,
'SCHOOL': SCHOOL_DATA['SCHOOL_NAME']
}
if klass:
field_values['CLASS'] = klass
if pid:
# This could (perhaps ...) change CLASS
field_values.update(PUPILS(SCHOOLYEAR)[pid])
for f in cls.field_map:
# Collect internal values for all editable fields
cls.field_map[f] = field_values.get(f) or ''
CALLBACK('template_RENEW', field_values = cls.field_map)
return True
#
@classmethod
def value_changed(cls, field, value):
cls.field_map[field] = value
CALLBACK('template_NEW_VALUE', field = field, value = value)
return True
#
@classmethod
def all_fields(cls, null_empty):
"""Prepare all fields for entry into the template.
The <exec_> methods are called (so far as they exist) to
perform all necessary processing.
If <null_empty> is true, empty fields will not be substituted,
allowing for partial template filling.
"""
fmap = {}
for f in cls.fields:
try:
val = cls.field_map.exec_(f,
value = cls.field_map.get(f) or '',
trap_empty = null_empty)
except EmptyField:
continue
except FieldHandlerError as e:
REPORT('ERROR', str(e))
continue
fmap[f] = val
# A tweak to handle '|' in last-names ...
try:
fmap['LASTNAME'] = fmap['LASTNAME'].replace('|', ' ')
except:
pass
return fmap
#
@classmethod
def gen_doc(cls, filename, null_empty):
fieldmap = cls.all_fields(null_empty)
if fieldmap:
cls.template.show(fieldmap, filename)
return True
REPORT('WARN', _NO_SUBSTITUTIONS)
return False
#
@classmethod
def show(cls):
cls.template.show({})
return True
########################################################################
def init():
FUNCTIONS['TEMPLATE_get_classes'] = Template_Filler.get_classes
FUNCTIONS['TEMPLATE_set_class'] = Template_Filler.set_class
FUNCTIONS['TEMPLATE_get_template_dir'] = Template_Filler.get_template_dir
FUNCTIONS['TEMPLATE_force_template'] = Template_Filler.force_template
FUNCTIONS['TEMPLATE_set_template'] = Template_Filler.set_template
FUNCTIONS['TEMPLATE_renew'] = Template_Filler.renew
FUNCTIONS['TEMPLATE_gen_doc'] = Template_Filler.gen_doc
FUNCTIONS['TEMPLATE_show'] = Template_Filler.show
FUNCTIONS['TEMPLATE_value_changed'] = Template_Filler.value_changed
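# --- Hedged usage sketch (not part of the original module) ---
# FUNCTIONS, CALLBACK, SCHOOLYEAR etc. are globals injected elsewhere in this
# code base. Assuming FUNCTIONS is a plain dict and a callback has been
# installed, the dispatcher set up by init() could be exercised like this
# (the class name '10A' is hypothetical):
#
#   FUNCTIONS = {}
#   init()
#   FUNCTIONS['TEMPLATE_get_classes']()      # triggers template_SET_CLASSES
#   FUNCTIONS['TEMPLATE_set_class']('10A')   # triggers template_SET_PUPILS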
| 37.677551 | 78 | 0.573936 |
c5131f90d418026cde9ec65d15c9fa1a69478a75
| 6,034 |
py
|
Python
|
oneflow/python/framework/local_blob.py
|
caishenghang/oneflow
|
db239cc9f98e551823bf6ce2d4395bd5c339b1c5
|
[
"Apache-2.0"
] | 2 |
2021-09-10T00:19:49.000Z
|
2021-11-16T11:27:20.000Z
|
oneflow/python/framework/local_blob.py
|
duijiudanggecl/oneflow
|
d2096ae14cf847509394a3b717021e2bd1d72f62
|
[
"Apache-2.0"
] | null | null | null |
oneflow/python/framework/local_blob.py
|
duijiudanggecl/oneflow
|
d2096ae14cf847509394a3b717021e2bd1d72f62
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import numpy as np
import oneflow.python.framework.remote_blob as remote_blob_util
import traceback
class LocalMirroredTensor(object):
def __init__(self, ndarray_list, is_dynamic, concat_axis=None):
self.ndarray_list_ = ndarray_list
self.is_dynamic_ = is_dynamic
self.concat_axis_ = concat_axis
self.ndarray_ = None
if not is_dynamic:
if len(self.ndarray_list_) == 1:
self.ndarray_ = self.ndarray_list_[0]
elif concat_axis is not None:
self.ndarray_ = np.concatenate(self.ndarray_list_, axis=concat_axis)
else:
# do nothing
pass
@property
def is_dynamic(self):
return self.is_dynamic_
def ndarray_list(self):
print(
"WARNING:",
"LocalMirroredTensor.ndarray_list is deprecated, please use LocalMirroredTensor.numpy_list\n",
traceback.format_stack()[-2],
)
return self.numpy_list()
def numpy_list(self):
return self.ndarray_list_
def ndarray(self):
print(
"WARNING:",
"LocalMirroredTensor.ndarray is deprecated, please use LocalMirroredTensor.numpy\n",
traceback.format_stack()[-2],
)
return self.numpy()
def numpy(self, parallel_id=None):
if parallel_id is None:
assert self.ndarray_ is not None
return self.ndarray_
else:
assert parallel_id >= 0
assert len(self.ndarray_list_) > parallel_id
return self.ndarray_list_[parallel_id]
def parallel_num(self):
return len(self.ndarray_list_)
def __getattr__(self, attr):
return getattr(self.numpy(), attr)
class LocalMirroredTensorList(object):
def __init__(self, ndarray_lists=None):
assert isinstance(ndarray_lists, (list, tuple))
for ndarray_list in ndarray_lists:
assert isinstance(ndarray_list, (list, tuple))
assert all(isinstance(ndarray, np.ndarray) for ndarray in ndarray_list)
self.ndarray_lists_ = ndarray_lists
def ndarray_lists(self):
print(
"WARNING:",
"LocalMirroredTensorList.ndarray_lists is deprecated, please use LocalMirroredTensorList.numpy_lists",
)
return self.numpy_lists()
def numpy_lists(self):
return self.ndarray_lists_
def numpy_list(self, parallel_id=None):
        if parallel_id is None:
            # indexing [0] requires exactly one entry, so the length check
            # must be == 1, not == 0
            assert len(self.ndarray_lists_) == 1
            return self.ndarray_lists_[0]
else:
assert parallel_id >= 0
assert len(self.ndarray_lists_) > parallel_id
return self.ndarray_lists_[parallel_id]
    def parallel_num(self):  # `self` was missing, leaving the method uncallable
        return len(self.ndarray_lists_)
def MakeLocalBlob(ndarray_lists, consistent_blob):
assert isinstance(consistent_blob, remote_blob_util.ConsistentBlob), type(
consistent_blob
)
if consistent_blob.is_tensor_list:
return LocalMirroredTensorList(ndarray_lists)
assert len(ndarray_lists) == 1
return LocalMirroredTensor(
ndarray_lists[0],
is_dynamic=consistent_blob.is_dynamic,
concat_axis=consistent_blob.split_axis,
)
def MergeLocalBlobs(local_blob_list, mirrored_blob):
assert isinstance(mirrored_blob, remote_blob_util.MirroredBlob)
if mirrored_blob.is_tensor_list:
for local_blob in local_blob_list:
assert type(local_blob) is LocalMirroredTensorList
return LocalMirroredTensorList([x.numpy_lists()[0] for x in local_blob_list])
return LocalMirroredTensor(
[x.numpy_list()[0] for x in local_blob_list],
is_dynamic=mirrored_blob.is_dynamic,
concat_axis=mirrored_blob.batch_axis,
)
def MakeLocalBlob4EagerBlob(eager_blob):
assert isinstance(eager_blob, remote_blob_util.EagerBlobTrait)
if eager_blob.is_tensor_list:
return LocalMirroredTensorList(eager_blob.numpy_list())
elif isinstance(eager_blob, remote_blob_util.EagerMirroredBlob):
return LocalMirroredTensor(
[eager_blob.numpy(i) for i in range(eager_blob.numpy_size())],
is_dynamic=eager_blob.is_dynamic,
concat_axis=eager_blob.batch_axis,
)
elif isinstance(eager_blob, remote_blob_util.EagerConsistentBlob):
return LocalMirroredTensor(
[eager_blob.numpy()], is_dynamic=False, concat_axis=0
)
else:
raise NotImplementedError
non_override_field = set(
[
"__class__",
"__doc__",
"__new__",
"__init__",
"__del__",
"__call__",
"__getattr__",
"__getattribute__",
"__setattr__",
"__delattr__",
"__dir__",
"__get__",
"__set__",
"__delete__",
]
)
def MakeBlobMethod(field_name):
def ConvertOtherArgs(args):
return [x.numpy() if isinstance(x, LocalMirroredTensor) else x for x in args]
return lambda self, *args: getattr(self.numpy(), field_name)(
*ConvertOtherArgs(args)
)
for field_name in dir(np.ndarray):
if field_name.startswith("__") == False:
continue
if field_name in non_override_field:
continue
if hasattr(LocalMirroredTensor, field_name) == False:
setattr(LocalMirroredTensor, field_name, MakeBlobMethod(field_name))
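# --- Hedged usage sketch (not part of the original module) ---
# Illustrates the dunder-method delegation installed above: arithmetic on a
# non-dynamic LocalMirroredTensor is forwarded to the wrapped numpy array via
# MakeBlobMethod. Runs only where the oneflow imports at the top resolve.
if __name__ == "__main__":
    _t = LocalMirroredTensor([np.ones((2, 2), dtype=np.float32)], is_dynamic=False)
    # __add__ was attached by the setattr loop; both operands are unwrapped
    # to plain ndarrays before numpy's __add__ is called.
    print(_t + _t)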
| 31.925926 | 114 | 0.668876 |
c5232a4d920d5c1791cb0ad922e8bc20c21d5189
| 180 |
py
|
Python
|
exercises/pt/test_03_14_02.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 2,085 |
2019-04-17T13:10:40.000Z
|
2022-03-30T21:51:46.000Z
|
exercises/pt/test_03_14_02.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 79 |
2019-04-18T14:42:55.000Z
|
2022-03-07T08:15:43.000Z
|
exercises/pt/test_03_14_02.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 361 |
2019-04-17T13:34:32.000Z
|
2022-03-28T04:42:45.000Z
|
def test():
assert (
"docs = list(nlp.pipe(TEXTS))" in __solution__
), "Você está usando nlp.pipe envolvido em uma lista (list)?"
__msg__.good("Bom trabalho!")
| 30 | 65 | 0.627778 |
c548c530afdc4e3d2859ad0c7fcd997ddf2e643f
| 294 |
py
|
Python
|
exercises/de/solution_02_02_01.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | 2 |
2020-07-07T01:46:37.000Z
|
2021-04-20T03:19:43.000Z
|
exercises/de/solution_02_02_01.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | null | null | null |
exercises/de/solution_02_02_01.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | null | null | null |
import spacy
nlp = spacy.blank("de")
doc = nlp("Ich habe eine Katze")
# Schlage den Hash für das Wort "Katze" nach
katze_hash = nlp.vocab.strings["Katze"]
print(katze_hash)
# Schlage katze_hash nach, um den String zu erhalten
katze_string = nlp.vocab.strings[katze_hash]
print(katze_string)
| 22.615385 | 52 | 0.755102 |
b8e16f929bbe355b7be5c0f425ce37c137ec3151
| 198 |
py
|
Python
|
turngen/ai_base.py
|
amrohendawi/AlphaZero-implementation
|
42103e63308ba256208b6dd6ddcbef2e797e9932
|
[
"MIT"
] | null | null | null |
turngen/ai_base.py
|
amrohendawi/AlphaZero-implementation
|
42103e63308ba256208b6dd6ddcbef2e797e9932
|
[
"MIT"
] | null | null | null |
turngen/ai_base.py
|
amrohendawi/AlphaZero-implementation
|
42103e63308ba256208b6dd6ddcbef2e797e9932
|
[
"MIT"
] | null | null | null |
from turn import Turn
class ai_base:
# color is 0 for white or 1 for black
def __init__(self, player):
self.player = player
def calculateTurn(self, board):
return Turn(0,0,1,1,True)
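# --- Hedged example (not part of the original file) ---
# A minimal subclass sketch showing how a concrete AI would override
# calculateTurn; the Turn(x1, y1, x2, y2, flag) signature is inferred from
# the base class above, and the board handling is purely illustrative.
class random_ai(ai_base):
    def calculateTurn(self, board):
        # A real implementation would inspect `board` for self.player here.
        return Turn(1, 1, 2, 2, False)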
| 16.5 | 38 | 0.691919 |
626d9d546eff99796b7917e2bf088b26e0bd4636
| 1,541 |
py
|
Python
|
source/cloudsync.py
|
marctrommen/bloggenerator
|
5b4fed4cd12a899829261d492328dfc39a78153d
|
[
"MIT"
] | null | null | null |
source/cloudsync.py
|
marctrommen/bloggenerator
|
5b4fed4cd12a899829261d492328dfc39a78153d
|
[
"MIT"
] | null | null | null |
source/cloudsync.py
|
marctrommen/bloggenerator
|
5b4fed4cd12a899829261d492328dfc39a78153d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import filecmp
import subprocess
import os
import shutil
class CloudSync(object):
def __init__(self):
self.__cloud_mountpoint = '/media/marco/gmx'
def __mount_cloud(self):
command = ['mount', self.__cloud_mountpoint]
error_code = -1
with subprocess.Popen(command) as process:
error_code = process.wait()
return error_code == 0
def __unmount_cloud(self):
command = ['umount', self.__cloud_mountpoint]
error_code = -1
with subprocess.Popen(command) as process:
error_code = process.wait()
return error_code == 0
def sync(self, rel_cloud_dir, abs_local_dir, from_cloud_to_local = True):
"""synchronise """
has_changes = False
if self.__mount_cloud():
cloud_dir = os.path.join(self.__cloud_mountpoint, rel_cloud_dir)
if from_cloud_to_local:
from_dir = cloud_dir
to_dir = abs_local_dir
else:
from_dir = abs_local_dir
to_dir = cloud_dir
result = filecmp.dircmp(from_dir, to_dir)
# copy all new files from from_dir to to_dir
for new_file in result.left_only:
has_changes = True
from_file = os.path.join(from_dir, new_file)
shutil.copy(from_file, to_dir)
# copy all changed files from cloud_dir to local_dir
for changed_file in result.diff_files:
has_changes = True
from_file = os.path.join(from_dir, changed_file)
shutil.copy(from_file, to_dir)
            # `has_changed` was a typo that silently discarded the result
            has_changes = self.__unmount_cloud() and has_changes
else:
self.__unmount_cloud()
return has_changes
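# --- Hedged usage sketch (not part of the original module) ---
# Both directory arguments below are hypothetical; sync() mounts the cloud,
# copies new/changed files in the chosen direction, then unmounts.
if __name__ == "__main__":
    syncer = CloudSync()
    changed = syncer.sync("blog", "/home/marco/blog", from_cloud_to_local=True)
    print("changes pulled from cloud:", changed)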
| 22.661765 | 74 | 0.707982 |
b23874081a22d246c0909a594345b7af818fd9b2
| 2,050 |
py
|
Python
|
tools/pythonpkg/tests/fast/arrow/test_arrow_replacement_scan.py
|
AldoMyrtaj/duckdb
|
3aa4978a2ceab8df25e4b20c388bcd7629de73ed
|
[
"MIT"
] | 2,816 |
2018-06-26T18:52:52.000Z
|
2021-04-06T10:39:15.000Z
|
tools/pythonpkg/tests/fast/arrow/test_arrow_replacement_scan.py
|
AldoMyrtaj/duckdb
|
3aa4978a2ceab8df25e4b20c388bcd7629de73ed
|
[
"MIT"
] | 1,310 |
2021-04-06T16:04:52.000Z
|
2022-03-31T13:52:53.000Z
|
tools/pythonpkg/tests/fast/arrow/test_arrow_replacement_scan.py
|
AldoMyrtaj/duckdb
|
3aa4978a2ceab8df25e4b20c388bcd7629de73ed
|
[
"MIT"
] | 270 |
2021-04-09T06:18:28.000Z
|
2022-03-31T11:55:37.000Z
|
import duckdb
import pytest
import os
import pandas as pd
try:
import pyarrow.parquet as pq
import pyarrow.dataset as ds
can_run = True
except:
can_run = False
class TestArrowReplacementScan(object):
def test_arrow_table_replacement_scan(self, duckdb_cursor):
if not can_run:
return
parquet_filename = os.path.join(os.path.dirname(os.path.realpath(__file__)),'data','userdata1.parquet')
userdata_parquet_table = pq.read_table(parquet_filename)
df = userdata_parquet_table.to_pandas()
con = duckdb.connect()
for i in range (5):
assert con.execute("select count(*) from userdata_parquet_table").fetchone() == (1000,)
assert con.execute("select count(*) from df").fetchone() == (1000,)
def test_arrow_table_replacement_scan_view(self, duckdb_cursor):
if not can_run:
return
parquet_filename = os.path.join(os.path.dirname(os.path.realpath(__file__)),'data','userdata1.parquet')
userdata_parquet_table = pq.read_table(parquet_filename)
con = duckdb.connect()
con.execute("create view x as select * from userdata_parquet_table")
del userdata_parquet_table
with pytest.raises(Exception):
assert con.execute("select count(*) from x").fetchone()
def test_arrow_dataset_replacement_scan(self, duckdb_cursor):
if not can_run:
return
parquet_filename = os.path.join(os.path.dirname(os.path.realpath(__file__)),'data','userdata1.parquet')
userdata_parquet_table = pq.read_table(parquet_filename)
userdata_parquet_dataset= ds.dataset(parquet_filename)
con = duckdb.connect()
assert con.execute("select count(*) from userdata_parquet_dataset").fetchone() == (1000,)
    def test_replacement_scan_fail(self, duckdb_cursor):
        # `con` was used without being created, so a NameError (rather than
        # the intended replacement-scan failure) satisfied pytest.raises
        con = duckdb.connect()
        random_object = "I love salmiak rondos"
        with pytest.raises(Exception):
            con.execute("select count(*) from random_object").fetchone()
| 37.962963 | 111 | 0.677073 |
b27d485d469b532a016ac931b3c92df6632581ed
| 123 |
py
|
Python
|
group3-module1-tobechukwu/Tasks/Python Image resize/hello.py
|
cornelia247/cil-internship-cohort-01
|
b8184337056d378eab16d26b40b26ed58cd177bb
|
[
"MIT"
] | null | null | null |
group3-module1-tobechukwu/Tasks/Python Image resize/hello.py
|
cornelia247/cil-internship-cohort-01
|
b8184337056d378eab16d26b40b26ed58cd177bb
|
[
"MIT"
] | null | null | null |
group3-module1-tobechukwu/Tasks/Python Image resize/hello.py
|
cornelia247/cil-internship-cohort-01
|
b8184337056d378eab16d26b40b26ed58cd177bb
|
[
"MIT"
] | null | null | null |
from PIL import Image
img = Image.open("img_resize.jpg")
re_img = img.resize((700,300))
re_img.show()
#img.show()
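# A small hedged extension: resize() returns a new image, so persisting the
# result needs an explicit save; the output filename here is hypothetical.
re_img.save("img_resized.jpg")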
| 15.375 | 35 | 0.658537 |
a74123f1653a55720b5e0e3853a156d6a77cb057
| 420 |
py
|
Python
|
Project Euler Qusetions 51 - 60/Project Euler Question 53.py
|
Clayton-Threm/Coding-Practice
|
6671e8a15f9e797338caa617dae45093f4157bc1
|
[
"MIT"
] | 1 |
2020-02-11T02:03:02.000Z
|
2020-02-11T02:03:02.000Z
|
Project Euler Qusetions 51 - 60/Project Euler Question 53.py
|
Clayton-Threm/Coding-Practice
|
6671e8a15f9e797338caa617dae45093f4157bc1
|
[
"MIT"
] | null | null | null |
Project Euler Qusetions 51 - 60/Project Euler Question 53.py
|
Clayton-Threm/Coding-Practice
|
6671e8a15f9e797338caa617dae45093f4157bc1
|
[
"MIT"
] | null | null | null |
#Project Euler Question 53
#Combinatoric selections
def factorial(x):
if x > 1:
return x * factorial(x - 1)
else:
return 1
comb_list = []
for x in range(1, 101):
for y in range(1, 101):
if y > x:
break
combinations = int(factorial(x) / (factorial(y) * factorial(x - y)))
if combinations > 1000000:
comb_list.append(x)
print (len(comb_list))
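# --- Hedged cross-check (not part of the original solution) ---
# On Python 3.8+, math.comb avoids the recursive factorial entirely; this
# recomputes the same count as the loop above.
import math
print(sum(1 for n in range(1, 101) for r in range(1, n + 1)
          if math.comb(n, r) > 1000000))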
| 21 | 76 | 0.566667 |
a7ae8f8227992cf8efc320728d75748c7ccf1b57
| 1,777 |
py
|
Python
|
stphohapp/views.py
|
itechtian/stphoh
|
5a778deece3a690e0ec92c799b0b87ba26f1892c
|
[
"MIT"
] | null | null | null |
stphohapp/views.py
|
itechtian/stphoh
|
5a778deece3a690e0ec92c799b0b87ba26f1892c
|
[
"MIT"
] | null | null | null |
stphohapp/views.py
|
itechtian/stphoh
|
5a778deece3a690e0ec92c799b0b87ba26f1892c
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render, reverse
from django.http import HttpResponseRedirect
from .models import Patient, Result
def admin_view(request):
context = {'Patient':Patient.objects.all()}
return render(request, "adminbase.html", context)
def hiv_test(request):
if request.method == "POST":
first = request.POST.get("first")
last = request.POST.get("last")
age = request.POST.get("age")
sex = request.POST.get("sex")
phone = request.POST.get("phone")
state = request.POST.get("state")
        # `instance=result` is not a valid keyword for a Django model and
        # would raise a TypeError; save the patient on its own and link the
        # Result later via add_result (assumption: Result holds the relation
        # to Patient, matching the patient.result access in patientinfo).
        patient = Patient(first=first, last=last, state=state, age=age,
                          sex=sex, phone=phone)
        patient.save()
return render(request, "hivtest.html")
return render(request, "hivtest.html")
def add_result(request, patient_id, first, last):
patient = Patient.objects.get(pk=patient_id, first=first, last=last)
if request.method == 'POST':
hepatitis_B_test = request.POST.get('hepatitisBtest')
hiv_aid_test = request.POST.get('hivaidtest')
        # Link the result to this patient (assumption: Result has a
        # one-to-one relation to Patient, exposed as patient.result).
        result = Result(patient=patient, hepatitis_B_test=hepatitis_B_test,
                        hiv_aid_test=hiv_aid_test)
        result.save()
return HttpResponseRedirect(reverse("adminview"))
return render(request, "add_result.html",{'patient':patient})
def patientinfo(request, patient_id):
patient = Patient.objects.get(pk=patient_id)
result = patient.result
return render(request, "patientinfo.html",{'patient':patient, 'result':result})
def delete(request, patient_id):
patient = Patient.objects.get(pk=patient_id)
if request.method == "POST":
obj = Patient.objects.get(id=patient_id).delete()
return HttpResponseRedirect(reverse("adminview"))
return render(request, "delete.html")
| 37.020833 | 107 | 0.683174 |
a7b5a5aa0086bf94663363f1507118c810efe76d
| 2,446 |
py
|
Python
|
server/weather/RestWeatherSource.py
|
EveryOtherUsernameWasAlreadyTaken/BIS
|
e132ce42dcc74e634231398dfecb08834d478cba
|
[
"MIT"
] | 3 |
2019-07-09T08:51:20.000Z
|
2019-09-16T17:27:54.000Z
|
server/weather/RestWeatherSource.py
|
thomasw-mitutoyo-ctl/BIS
|
08525cc12164902dfe968ae41beb6de0cd5bc411
|
[
"MIT"
] | 24 |
2019-06-17T12:33:35.000Z
|
2020-03-27T08:17:35.000Z
|
server/weather/RestWeatherSource.py
|
EveryOtherUsernameWasAlreadyTaken/BIS
|
e132ce42dcc74e634231398dfecb08834d478cba
|
[
"MIT"
] | 1 |
2020-03-24T17:54:07.000Z
|
2020-03-24T17:54:07.000Z
|
import logging
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from WeatherSource import WeatherSource
log = logging.getLogger(__name__)
class RestWeatherSource(WeatherSource):
"""
    This WeatherSource runs a simple HTTP server that accepts GET requests.
    When a GET request arrives, the repository gets updated.
"""
def __init__(self, repository, address, port):
WeatherSource.__init__(self)
self.port = port
self.address = address
self.repository = repository
def run(self):
server = HTTPServer((self.address, self.port), self.request_handler)
server.serve_forever()
def request_handler(self, *args):
HTTPRequestHandler(self.repository, *args)
class HTTPRequestHandler(BaseHTTPRequestHandler):
"""
Request handler for the RestWeatherSource
"""
def __init__(self, repository, *args):
self.repository = repository
BaseHTTPRequestHandler.__init__(self, *args)
# noinspection PyPep8Naming
def do_GET(self):
"""
Processes a GET request
:return:
"""
city = self.headers.get('city')
# Extract the weather data from the request header
weather_data = {'latitude': self.headers.get('latitude'),
'longitude': self.headers.get('longitude'),
'barometer': self.headers.get('barometer'),
'temperature': self.headers.get('temperature'),
'temperature_min': self.headers.get('temperature_min'),
'temperature_max': self.headers.get('temperature_max'),
'humidity': self.headers.get('humidity'),
'precipitation': self.headers.get('precipitation'),
'wind_speed': self.headers.get('wind_speed'),
'sunrise': self.headers.get('sunrise'),
'sunset': self.headers.get('sunset'),
'icon': '0'}
weather_data = dict((k, v) for k, v in weather_data.iteritems() if v is not None)
if city is not None and len(weather_data) > 0:
log.debug("Got new weather data for " + city + ": " + str(weather_data))
self.repository.put_data_for_city("RestWeatherSource", city, weather_data)
self.send_response(200)
else:
self.send_response(400)
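# --- Hedged usage sketch (not part of the original module) ---
# `repository` must provide put_data_for_city(source, city, data) as used in
# do_GET above; the address/port values are illustrative.
#
#   source = RestWeatherSource(repository, '0.0.0.0', 8080)
#   source.start()  # WeatherSource presumably runs run() in a thread
#
# A client would then send a GET with 'city' plus weather fields as headers.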
| 35.970588 | 112 | 0.605478 |
a7be27c27fecb3715f48a3e9f5c0137614000958
| 3,898 |
py
|
Python
|
tests/test_ausschreibungslos.py
|
bo4e/BO4E-python
|
28b12f853c8a496d14b133759b7aa2d6661f79a0
|
[
"MIT"
] | 1 |
2022-03-02T12:49:44.000Z
|
2022-03-02T12:49:44.000Z
|
tests/test_ausschreibungslos.py
|
bo4e/BO4E-python
|
28b12f853c8a496d14b133759b7aa2d6661f79a0
|
[
"MIT"
] | 21 |
2022-02-04T07:38:46.000Z
|
2022-03-28T14:01:53.000Z
|
tests/test_ausschreibungslos.py
|
bo4e/BO4E-python
|
28b12f853c8a496d14b133759b7aa2d6661f79a0
|
[
"MIT"
] | null | null | null |
import pytest # type:ignore[import]
from bo4e.com.ausschreibungslos import Ausschreibungslos, AusschreibungslosSchema
from bo4e.enum.preismodell import Preismodell
from bo4e.enum.rechnungslegung import Rechnungslegung
from bo4e.enum.sparte import Sparte
from bo4e.enum.vertragsform import Vertragsform
from tests.serialization_helper import assert_serialization_roundtrip # type:ignore[import]
from tests.test_ausschreibungsdetail import ( # type:ignore[import]
example_ausschreibungsdetail,
example_ausschreibungsdetail_dict,
)
from tests.test_menge import example_menge, example_menge_dict # type:ignore[import]
from tests.test_zeitraum import example_zeitraum, example_zeitraum_dict # type:ignore[import]
example_ausschreibungslos = Ausschreibungslos(
losnummer="foo",
bezeichnung="bar",
bemerkung="asd",
preismodell=Preismodell.FESTPREIS,
energieart=Sparte.STROM,
wunsch_rechnungslegung=Rechnungslegung.MONATSRECHN,
wunsch_vertragsform=Vertragsform.DIREKT,
betreut_durch="Max Mustermann",
anzahl_lieferstellen=17,
lieferstellen=[example_ausschreibungsdetail],
gesamt_menge=example_menge,
wunsch_mindestmenge=example_menge,
wunsch_maximalmenge=example_menge,
lieferzeitraum=example_zeitraum,
wunsch_kuendingungsfrist=example_zeitraum,
wunsch_zahlungsziel=example_zeitraum,
wiederholungsintervall=example_zeitraum,
)
class TestAusschreibungslos:
@pytest.mark.parametrize(
"ausschreibungslos, expected_json_dict",
[
pytest.param(
example_ausschreibungslos,
{
"lieferzeitraum": example_zeitraum_dict,
"preismodell": "FESTPREIS",
"energieart": "STROM",
"wiederholungsintervall": example_zeitraum_dict,
"bemerkung": "asd",
"bezeichnung": "bar",
"losnummer": "foo",
"anzahlLieferstellen": 17,
"lieferstellen": [example_ausschreibungsdetail_dict],
"wunschKuendingungsfrist": example_zeitraum_dict,
"wunschZahlungsziel": example_zeitraum_dict,
"gesamtMenge": example_menge_dict,
"wunschVertragsform": "DIREKT",
"wunschMaximalmenge": example_menge_dict,
"wunschRechnungslegung": "MONATSRECHN",
"wunschMindestmenge": example_menge_dict,
"betreutDurch": "Max Mustermann",
},
id="maximal attributes",
),
],
)
def test_serialization_roundtrip(self, ausschreibungslos, expected_json_dict):
"""
Test de-/serialisation of Ausschreibungslos
"""
assert_serialization_roundtrip(ausschreibungslos, AusschreibungslosSchema(), expected_json_dict)
def test_ausschreibungslos_lieferstellen_required(self):
with pytest.raises(ValueError) as excinfo:
_ = Ausschreibungslos(
losnummer="foo",
bezeichnung="bar",
bemerkung="asd",
preismodell=Preismodell.FESTPREIS,
energieart=Sparte.STROM,
wunsch_rechnungslegung=Rechnungslegung.MONATSRECHN,
wunsch_vertragsform=Vertragsform.DIREKT,
betreut_durch="Max Mustermann",
anzahl_lieferstellen=17,
lieferzeitraum=example_zeitraum,
## ^^ above is just clutter
lieferstellen=[], # the important line
)
assert "List lieferstellen must not be empty." in str(excinfo.value)
def test_missing_required_attribute(self):
with pytest.raises(TypeError) as excinfo:
_ = Ausschreibungslos()
assert "missing 10 required" in str(excinfo.value)
| 41.031579 | 104 | 0.655721 |
3ba582fb1c60a969e56afd0b5185358be55c3ea3
| 4,550 |
py
|
Python
|
scripts/dataset_statistics.py
|
yokuyuki/Enrichr
|
ed8e4b5639b73a740f2d1e99b068049b114955ca
|
[
"Apache-2.0"
] | 1 |
2017-09-19T01:36:47.000Z
|
2017-09-19T01:36:47.000Z
|
scripts/dataset_statistics.py
|
yokuyuki/Enrichr
|
ed8e4b5639b73a740f2d1e99b068049b114955ca
|
[
"Apache-2.0"
] | 3 |
2015-05-09T06:19:08.000Z
|
2016-11-03T06:15:03.000Z
|
scripts/dataset_statistics.py
|
yokuyuki/Enrichr
|
ed8e4b5639b73a740f2d1e99b068049b114955ca
|
[
"Apache-2.0"
] | null | null | null |
import json
import os
gmt_lookup = dict()
gmt_lookup['BioCarta.gmt'] = ('BioCarta', 'http://pid.nci.nih.gov/download.shtml')
gmt_lookup['Cancer_Cell_Line_Encyclopedia.gmt'] = ('Cancer Cell Line Encyclopedia', 'http://www.broadinstitute.org/ccle/data/browseData')
gmt_lookup['ChEA.gmt'] = ('ChEA', 'http://amp.pharm.mssm.edu/lib/cheadownload.jsp')
gmt_lookup['CORUM.gmt'] = ('CORUM', 'http://mips.helmholtz-muenchen.de/genre/proj/corum/')
gmt_lookup['Chromosome_Location.gmt'] = ('Chromosome Location', 'http://hgdownload.cse.ucsc.edu/downloads.html')
gmt_lookup['Down-regulated_CMAP.gmt'] = ('Down-regulated CMAP', 'http://www.broadinstitute.org/cmap/')
gmt_lookup['ENCODE_TF_ChIP-seq.gmt'] = ('ENCODE TF ChIP-seq', 'http://genome.ucsc.edu/ENCODE/downloads.html')
gmt_lookup['GO_Biological_Process.gmt'] = ('GO Biological Process', 'http://www.geneontology.org/GO.downloads.annotations.shtml')
gmt_lookup['GO_Cellular_Component.gmt'] = ('GO Cellular Component', 'http://www.geneontology.org/GO.downloads.annotations.shtml')
gmt_lookup['GO_Molecular_Function.gmt'] = ('GO Molecular Function', 'http://www.geneontology.org/GO.downloads.annotations.shtml')
gmt_lookup['GeneSigDB.gmt'] = ('GeneSigDB', 'http://compbio.dfci.harvard.edu/genesigdb/downloadall.jsp')
gmt_lookup['Genome_Browser_PWMs.gmt'] = ('Genome Browser PWMs', 'http://hgdownload.cse.ucsc.edu/goldenPath/hg18/database/')
gmt_lookup['Human_Endogenous_Complexome.gmt'] = ('Human Endogenous Complexome', 'http://www.sciencedirect.com/science/article/pii/S0092867411005320')
gmt_lookup['Histone_Modifications_ChIP-seq.gmt'] = ('Histone Modifications ChIP-seq', 'http://www.ncbi.nlm.nih.gov/geo/roadmap/epigenomics/')
gmt_lookup['HMDB_Metabolites.gmt'] = ('HMDB Metabolites', 'http://www.hmdb.ca/downloads')
gmt_lookup['Human_Gene_Atlas.gmt'] = ('Human Gene Atlas', 'http://biogps.org/downloads/')
gmt_lookup['KEA.gmt'] = ('KEA', 'http://amp.pharm.mssm.edu/lib/keacommandline.jsp')
gmt_lookup['KEGG.gmt'] = ('KEGG', 'http://www.kegg.jp/kegg/download/')
gmt_lookup['MGI_MP_top3.gmt'] = ('MGI Mammalian Phenotype Top 3', 'ftp://ftp.informatics.jax.org/pub/reports/index.html#pheno')
gmt_lookup['MGI_Mammalian_Phenotype.gmt'] = ('MGI Mammalian Phenotype Top 4', 'ftp://ftp.informatics.jax.org/pub/reports/index.html#pheno')
gmt_lookup['microRNA.gmt'] = ('microRNA', 'http://www.targetscan.org/cgi-bin/targetscan/data_download.cgi?db=vert_61')
gmt_lookup['MSigDB_Computational.gmt'] = ('MSigDB Computational', 'http://www.broadinstitute.org/gsea/msigdb/collections.jsp')
gmt_lookup['MSigDB_Oncogenic_Signatures.gmt'] = ('MSigDB Oncogenic Signatures', 'http://www.broadinstitute.org/gsea/msigdb/collections.jsp')
gmt_lookup['Mouse_Gene_Atlas.gmt'] = ('Mouse Gene Atlas', 'http://biogps.org/downloads/')
gmt_lookup['NCI-60_Cancer_Cell_Lines.gmt'] = ('NCI-60 Cancer Cell Lines', 'http://biogps.org/downloads/')
gmt_lookup['OMIM_Disease.gmt'] = ('OMIM Disease', 'http://www.omim.org/downloads')
gmt_lookup['OMIM_Expanded.gmt'] = ('OMIM Expanded', 'http://www.omim.org/downloads')
gmt_lookup['Pfam_InterPro_Domains.gmt'] = ('Pfam InterPro Domains', 'ftp://ftp.ebi.ac.uk/pub/databases/interpro/')
gmt_lookup['PPI_Hub_Proteins.gmt'] = ('PPI Hub Proteins', 'http://amp.pharm.mssm.edu/genes2networks/')
gmt_lookup['Reactome.gmt'] = ('Reactome', 'http://www.reactome.org/download/index.html')
gmt_lookup['SILAC_Phosphoproteomics.gmt'] = ('SILAC Phosphoproteomics', 'http://amp.pharm.mssm.edu/lib/keacommandline.jsp')
gmt_lookup['TRANSFAC_and_JASPAR_PWMs.gmt'] = ('TRANSFAC/JASPAR PWMs', 'http://jaspar.genereg.net/html/DOWNLOAD/')
gmt_lookup['Up-regulated_CMAP.gmt'] = ('Up-regulated CMAP', 'http://www.broadinstitute.org/cmap/')
gmt_lookup['VirusMINT.gmt'] = ('VirusMINT', 'http://mint.bio.uniroma2.it/virusmint/download.do')
gmt_lookup['WikiPathways.gmt'] = ('WikiPathways', 'http://www.wikipathways.org/index.php/Download_Pathways')
datasets = []
for file in os.listdir('../src/main/resources'):
if file.endswith('.gmt'):
with open('../src/main/resources/' + file) as sig_file:
terms = 0
average_genes = 0
unique_genes = set()
for line in sig_file:
terms += 1
split_line = line.rstrip().split('\t')
average_genes += len(split_line[2:])
unique_genes = unique_genes.union(set(split_line[2:]))
average_genes /= float(terms)
datasets.append([gmt_lookup[file][0], terms, len(unique_genes), average_genes, gmt_lookup[file][1]])
with open('dataset_statistics.json', 'w') as out:
out.write(json.dumps(datasets))
| 77.118644 | 150 | 0.732308 |
5a551b8b3524c83268270b2b2bb3e85255659053
| 6,175 |
py
|
Python
|
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/modules/network/icx/test_icx_logging.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/modules/network/icx/test_icx_logging.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/modules/network/icx/test_icx_logging.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible_collections.community.general.tests.unit.compat.mock import patch
from ansible_collections.community.general.plugins.modules.network.icx import icx_logging
from ansible_collections.community.general.tests.unit.modules.utils import set_module_args
from ..icx_module import TestICXModule, load_fixture
class TestICXLoggingModule(TestICXModule):
module = icx_logging
def setUp(self):
super(TestICXLoggingModule, self).setUp()
self.mock_get_config = patch('ansible_collections.community.general.plugins.modules.network.icx.icx_logging.get_config')
self.get_config = self.mock_get_config.start()
self.mock_load_config = patch('ansible_collections.community.general.plugins.modules.network.icx.icx_logging.load_config')
self.load_config = self.mock_load_config.start()
self.mock_exec_command = patch('ansible_collections.community.general.plugins.modules.network.icx.icx_logging.exec_command')
self.exec_command = self.mock_exec_command.start()
self.set_running_config()
def tearDown(self):
super(TestICXLoggingModule, self).tearDown()
self.mock_get_config.stop()
self.mock_load_config.stop()
self.mock_exec_command.stop()
def load_fixtures(self, commands=None):
compares = None
def load_file(*args, **kwargs):
module = args
for arg in args:
if arg.params['check_running_config'] is True:
return load_fixture('icx_logging_config.cfg').strip()
else:
return ''
self.get_config.side_effect = load_file
self.load_config.return_value = None
def test_icx_logging_set_host(self):
set_module_args(dict(dest='host', name='172.16.10.15'))
if not self.ENV_ICX_USE_DIFF:
commands = ['logging host 172.16.10.15']
self.execute_module(changed=True, commands=commands)
else:
commands = ['logging host 172.16.10.15']
self.execute_module(changed=True, commands=commands)
    def test_icx_logging_set_ipv6_host(self):
        # Note: unlike the sibling tests, this one only builds the expected
        # command list and never calls execute_module, so nothing is asserted.
        set_module_args(dict(dest='host', name='2001:db8::1'))
        if not self.ENV_ICX_USE_DIFF:
            commands = ['logging host 2001:db8::1']
        else:
            commands = ['logging host 2001:db8::1']
def test_icx_logging_set_host_udp_port(self):
set_module_args(dict(dest='host', name='172.16.10.15', udp_port=2500))
if not self.ENV_ICX_USE_DIFF:
commands = ['logging host 172.16.10.15 udp-port 2500']
self.execute_module(changed=True, commands=commands)
else:
commands = ['logging host 172.16.10.15 udp-port 2500']
self.execute_module(changed=True, commands=commands)
def test_icx_logging_remove_console(self):
set_module_args(dict(dest='console', state='absent'))
if not self.ENV_ICX_USE_DIFF:
commands = ['no logging console']
self.execute_module(changed=True, commands=commands)
else:
commands = ['no logging console']
self.execute_module(changed=True, commands=commands)
    def test_icx_logging_remove_on(self):
        set_module_args(dict(dest='on', state='absent'))
        if not self.ENV_ICX_USE_DIFF:
            commands = ['no logging on']
            # execute_module (not the exec_command mock) actually runs the
            # module and asserts on the generated commands
            self.execute_module(changed=True, commands=commands)
        else:
            commands = ['no logging on']
            self.execute_module(changed=True, commands=commands)
def test_icx_logging_set_aggregate(self):
aggregate = [
dict(dest='host', name='172.16.10.16', udp_port=2500, facility='local0'),
dict(dest='host', name='2001:db8::1', udp_port=5000)
]
set_module_args(dict(aggregate=aggregate, state='present'))
if not self.ENV_ICX_USE_DIFF:
result = self.execute_module(changed=True)
expected_commands = [
'logging facility local0',
'logging host 172.16.10.16 udp-port 2500',
'logging host ipv6 2001:db8::1 udp-port 5000'
]
self.assertEqual(result['commands'], expected_commands)
else:
result = self.execute_module(changed=True)
expected_commands = [
'logging facility local0',
'logging host 172.16.10.16 udp-port 2500',
'logging host ipv6 2001:db8::1 udp-port 5000'
]
self.assertEqual(result['commands'], expected_commands)
def test_icx_logging_set_aggregate_remove(self):
aggregate = [
dict(dest='host', name='172.16.10.55', udp_port=2500, facility='local0'),
dict(dest='host', name='2001:db8::1', udp_port=5500)
]
set_module_args(dict(aggregate=aggregate, state='absent'))
if not self.ENV_ICX_USE_DIFF:
result = self.execute_module(changed=True)
expected_commands = [
'no logging facility',
'no logging host 172.16.10.55 udp-port 2500',
'no logging host ipv6 2001:db8::1 udp-port 5500'
]
self.assertEqual(result['commands'], expected_commands)
else:
result = self.execute_module(changed=True)
expected_commands = [
'no logging facility',
'no logging host 172.16.10.55 udp-port 2500',
'no logging host ipv6 2001:db8::1 udp-port 5500'
]
self.assertEqual(result['commands'], expected_commands)
def test_icx_logging_compare(self):
set_module_args(dict(dest='host', name='172.16.10.21', check_running_config=True))
if self.get_running_config(compare=True):
if not self.ENV_ICX_USE_DIFF:
self.execute_module(changed=False)
else:
self.execute_module(changed=False)
| 41.166667 | 132 | 0.636599 |
ce49bc415deed892a71cf036b2fcc9e9e8866a9e
| 2,974 |
py
|
Python
|
experiments/nets/Research/case04/sumoenv.py
|
june6723/sumo-rl-offset
|
775cddc8d168fb7c4959610a96a791d746fa0afd
|
[
"MIT"
] | 4 |
2020-10-11T01:30:13.000Z
|
2021-04-27T16:03:41.000Z
|
experiments/nets/Research/case04/sumoenv.py
|
june6723/sumo-rl-offset
|
775cddc8d168fb7c4959610a96a791d746fa0afd
|
[
"MIT"
] | null | null | null |
experiments/nets/Research/case04/sumoenv.py
|
june6723/sumo-rl-offset
|
775cddc8d168fb7c4959610a96a791d746fa0afd
|
[
"MIT"
] | null | null | null |
import os
import sys
import traci
import numpy as np
class SumoEnv:
place_len = 7.5
place_offset = 8.50
lane_len = 10
lane_ids = ['-gneE0_0', '-gneE0_1', '-gneE1_0', '-gneE1_1', '-gneE2_0', '-gneE2_1', '-gneE3_0', '-gneE3_1']
def __init__(self, label='default', gui_f=False):
self.label = label
self.wt_last = 0.
self.ncars = 0
if 'SUMO_HOME' in os.environ:
tools = os.path.join(os.environ['SUMO_HOME'], 'tools')
sys.path.append(tools)
else:
sys.exit("please declare environment variable 'SUMO_HOME'")
exe = 'sumo-gui.exe' if gui_f else 'sumo.exe'
sumoBinary = os.path.join(os.environ['SUMO_HOME'], 'bin', exe)
self.sumoCmd = [sumoBinary, '-c', 'intersection.sumocfg']
return
def get_state_d(self):
state = np.zeros(self.lane_len * 8 + 4, dtype=np.float32)
for ilane in range(0, 8):
lane_id = self.lane_ids[ilane]
ncars = traci.lane.getLastStepVehicleNumber(lane_id)
cars = traci.lane.getLastStepVehicleIDs(lane_id)
for icar in cars:
xcar, ycar = traci.vehicle.getPosition(icar)
if ilane < 2:
pos = (ycar - self.place_offset) / self.place_len
elif ilane < 4:
pos = (xcar - self.place_offset) / self.place_len
elif ilane < 6:
pos = (-ycar - self.place_offset) / self.place_len
else:
pos = (-xcar - self.place_offset) / self.place_len
if pos > self.lane_len - 1.:
continue
pos = np.clip(pos, 0., self.lane_len - 1. - 1e-6)
ipos = int(pos)
state[int(ilane * self.lane_len + ipos)] += 1. - pos + ipos
state[int(ilane * self.lane_len + ipos + 1)] += pos - ipos
state[self.lane_len * 8:self.lane_len * 8+4] = np.eye(4)[traci.trafficlight.getPhase('gneJ00')]
return state
def step_d(self, action):
done = False
# traci.switch(self.label)
action = np.squeeze(action)
traci.trafficlight.setPhase('gneJ00', action)
traci.simulationStep()
traci.simulationStep()
self.ncars += traci.simulation.getDepartedNumber()
state = self.get_state_d()
wt = 0
for ilane in range(0, 8):
lane_id = self.lane_ids[ilane]
wt += traci.lane.getWaitingTime(lane_id)
reward = - (wt - self.wt_last)*0.004
if self.ncars > 250:
done = True
return state, reward, done, np.array([[reward]])
def reset(self):
self.wt_last = 0.
self.ncars = 0
traci.start(self.sumoCmd, label=self.label)
traci.trafficlight.setProgram('gneJ00', '0')
traci.simulationStep()
return self.get_state_d()
def close(self):
traci.close()
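# --- Hedged usage sketch (not part of the original module) ---
# Requires a local SUMO install plus the intersection.sumocfg scenario; the
# fixed phase-0 action below is illustrative only.
if __name__ == "__main__":
    env = SumoEnv(gui_f=False)
    state = env.reset()
    done = False
    while not done:
        state, reward, done, _ = env.step_d(0)  # always pick phase 0
    env.close()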
| 32.681319 | 111 | 0.549092 |
0c8c3ee7e181a0905622a2e6245a44e031a14663
| 2,377 |
py
|
Python
|
pycsw/pycsw/core/formats/fmt_json.py
|
Geosoft2/Geosoftware-II-AALLH
|
bdb61d9a1111b9082ec2b9f309998c5f2166975e
|
[
"MIT"
] | 118 |
2015-01-07T00:24:09.000Z
|
2022-03-19T15:35:43.000Z
|
pycsw/pycsw/core/formats/fmt_json.py
|
Geosoft2/Geosoftware-II-AALLH
|
bdb61d9a1111b9082ec2b9f309998c5f2166975e
|
[
"MIT"
] | 319 |
2015-01-06T23:51:46.000Z
|
2022-03-20T11:22:57.000Z
|
pycsw/pycsw/core/formats/fmt_json.py
|
Geosoft2/Geosoftware-II-AALLH
|
bdb61d9a1111b9082ec2b9f309998c5f2166975e
|
[
"MIT"
] | 113 |
2015-01-07T00:42:23.000Z
|
2022-02-19T18:05:08.000Z
|
# -*- coding: utf-8 -*-
# =================================================================
#
# Authors: Tom Kralidis <[email protected]>
# Ricardo Garcia Silva <[email protected]>
#
# Copyright (c) 2015 Tom Kralidis
# Copyright (c) 2017 Ricardo Garcia Silva
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
import json
import xmltodict
def xml2dict(xml_string, namespaces):
"""Convert an xml document to a dictionary.
Parameters
----------
xml_string: str
XML representation to convert to a dictionary.
namespaces: dict
Namespaces used in the ``xml_string`` parameter
Returns
-------
ordereddict
An ordered dictionary with the contents of the xml data
"""
namespaces_reverse = dict((v, k) for k, v in namespaces.items())
return xmltodict.parse(xml_string, process_namespaces=True,
namespaces=namespaces_reverse)
def xml2json(xml_string, namespaces, pretty_print=False):
"""Convert an xml string to JSON"""
separators = (',', ': ')
if pretty_print:
return json.dumps(xml2dict(xml_string, namespaces),
indent=4, separators=separators)
return json.dumps(xml2dict(xml_string, namespaces), separators=separators)
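# --- Hedged usage sketch (not part of the original module) ---
# A minimal round-trip with a made-up namespace mapping; namespace URIs in
# the parsed keys are replaced by the prefixes given in `ns`.
if __name__ == "__main__":
    ns = {"csw": "http://www.opengis.net/cat/csw/2.0.2"}
    xml = ('<csw:Record xmlns:csw="http://www.opengis.net/cat/csw/2.0.2">'
           'x</csw:Record>')
    print(xml2json(xml, ns, pretty_print=True))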
| 33.957143 | 78 | 0.671435 |
0b7ceb2cccaccabf04b4842293aa7cdbe1be8244
| 6,654 |
py
|
Python
|
checks/load_in_browser.py
|
thegreenwebfoundation/green-spider
|
68f22886178bbe5b476a4591a6812ee25cb5651b
|
[
"Apache-2.0"
] | 19 |
2018-04-20T11:03:41.000Z
|
2022-01-12T20:58:56.000Z
|
checks/load_in_browser.py
|
thegreenwebfoundation/green-spider
|
68f22886178bbe5b476a4591a6812ee25cb5651b
|
[
"Apache-2.0"
] | 160 |
2018-04-05T16:12:59.000Z
|
2022-03-01T13:01:27.000Z
|
checks/load_in_browser.py
|
thegreenwebfoundation/green-spider
|
68f22886178bbe5b476a4591a6812ee25cb5651b
|
[
"Apache-2.0"
] | 8 |
2018-11-05T13:07:57.000Z
|
2021-06-11T11:46:43.000Z
|
"""
Collects information by loading pages in a browser.
Information includes:
- whether the document width adapts well to viewports as little as 360 pixels wide
- whether javascript errors or errors from missing resources occur
- what CSS font-family properties are in use
- what cookies are set during loading the page
"""
import logging
import math
import shutil
import time
import sqlite3
from selenium import webdriver
from selenium.common.exceptions import StaleElementReferenceException
from selenium.common.exceptions import TimeoutException
import tenacity
from checks.abstract_checker import AbstractChecker
class Checker(AbstractChecker):
page_load_timeout = 30
# sizes we check for (width, height)
sizes = (
(360, 640), # rather old smartphone
(768, 1024), # older tablet or newer smartphone
(1024, 768), # older desktop or horiz. tablet
(1920, 1080), # Full HD horizontal
)
def __init__(self, config, previous_results=None):
super().__init__(config, previous_results)
# Our selenium user agent using Chrome headless as an engine
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--disable-gpu')
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--disable-extensions')
# path where to get cookies from
chrome_options.add_argument("--user-data-dir=/opt/chrome-userdir")
# empty /opt/chrome-userdir
shutil.rmtree('/opt/chrome-userdir', ignore_errors=True)
self.driver = webdriver.Chrome(options=chrome_options)
self.driver.set_page_load_timeout(self.page_load_timeout)
def run(self):
results = {}
for url in self.config.urls:
results[url] = {
'cookies': None,
'sizes': None,
'min_document_width': None,
'logs': None,
'font_families': None,
}
# responsive check
try:
sizes = self.check_responsiveness(url)
results[url] = {
'sizes': sizes,
'min_document_width': min([s['document_width'] for s in sizes]),
'logs': self.capture_log(),
}
except TimeoutException as e:
logging.warn("TimeoutException when checking responsiveness for %s: %s" % (url, e))
pass
except tenacity.RetryError as re:
logging.warn("RetryError when checking responsiveness for %s: %s" % (url, re))
pass
try:
self.scroll_to_bottom()
except TimeoutException as e:
logging.warn("TimeoutException in scroll_to_bottom for %s: %s" % (url, e))
pass
except tenacity.RetryError as re:
logging.warn("RetryError in scroll_to_bottom for %s: %s" % (url, re))
pass
# CSS collection
font_families = None
try:
elements = self.driver.find_elements_by_xpath("//*")
font_families = set()
for element in elements:
try:
font_family = element.value_of_css_property('font-family')
if font_family is None:
continue
font_families.add(font_family.lower())
except StaleElementReferenceException as e:
logging.warn("StaleElementReferenceException when collecting CSS properties for %s: %s" % (url, e))
continue
results[url]['font_families'] = sorted(list(font_families))
except TimeoutException as e:
logging.warn("TimeoutException when collecting CSS elements for %s: %s" % (url, e))
pass
try:
results[url]['cookies'] = self.get_cookies()
except TimeoutException as e:
logging.warn("TimeoutException when collecting cookies %s: %s" % (url, e))
pass
except tenacity.RetryError as re:
logging.warn("RetryError when collecting cookies for %s: %s" % (url, re))
pass
self.driver.quit()
return results
def get_cookies(self):
# read cookie DB to get 3rd party cookies, too
cookies = []
db = sqlite3.connect('/opt/chrome-userdir/Default/Cookies')
db.row_factory = sqlite3.Row
c = db.cursor()
c.execute("SELECT creation_utc, host_key, name, path, expires_utc, is_secure, is_httponly, has_expires, is_persistent, firstpartyonly FROM cookies")
for row in c.fetchall():
cookies.append(dict(row))
c.close()
db.close()
return cookies
@tenacity.retry(stop=tenacity.stop_after_attempt(3),
retry=tenacity.retry_if_exception_type(TimeoutException))
def check_responsiveness(self, url):
result = []
# set window to the first size initially
self.driver.set_window_size(self.sizes[0][0], self.sizes[0][1])
self.driver.get(url)
for (width, height) in self.sizes:
self.driver.set_window_size(width, height)
# wait for re-render/re-flow
time.sleep(1.0)
doc_width = self.driver.execute_script("return document.body.scrollWidth")
result.append({
'viewport_width': width,
'document_width': int(doc_width),
})
return result
def capture_log(self):
"""
Returns log elements with level "SEVERE" or "WARNING"
"""
entries = []
for entry in self.driver.get_log('browser'):
if entry['level'] in ('WARNING', 'SEVERE'):
entries.append(entry)
return entries
@tenacity.retry(stop=tenacity.stop_after_attempt(3),
retry=tenacity.retry_if_exception_type(TimeoutException))
def scroll_to_bottom(self):
"""
Scroll through the entire page once to trigger loading of all resources
"""
height = self.driver.execute_script("return document.body.scrollHeight")
height = int(height)
pages = math.floor(height / 1000)
for _ in range(0, pages):
self.driver.execute_script("window.scrollBy(0,1000)")
time.sleep(0.2)
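# --- Hedged usage sketch (not part of the original module) ---
# `config` only needs a `.urls` iterable as used in run(); a real invocation
# also needs Chrome plus a matching chromedriver on the PATH.
#
#   class _Config:
#       urls = ['https://example.org/']
#
#   results = Checker(_Config()).run()
#   print(results['https://example.org/']['min_document_width'])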
| 35.582888 | 156 | 0.581755 |
0bbfec64a1343471f7dadb8a14b31ce2058aa0ad
| 1,401 |
py
|
Python
|
torch/fx/experimental/fx2trt/converters/maxpool.py
|
vuanvin/pytorch
|
9267fd8d7395074001ad7cf2a8f28082dbff6b0b
|
[
"Intel"
] | 183 |
2018-04-06T21:10:36.000Z
|
2022-03-30T15:05:24.000Z
|
torch/fx/experimental/fx2trt/converters/maxpool.py
|
vuanvin/pytorch
|
9267fd8d7395074001ad7cf2a8f28082dbff6b0b
|
[
"Intel"
] | 631 |
2018-06-05T16:59:11.000Z
|
2022-03-31T16:26:57.000Z
|
torch/fx/experimental/fx2trt/converters/maxpool.py
|
vuanvin/pytorch
|
9267fd8d7395074001ad7cf2a8f28082dbff6b0b
|
[
"Intel"
] | 58 |
2018-06-05T16:40:18.000Z
|
2022-03-16T15:37:29.000Z
|
import torch
import tensorrt as trt
from torch.fx.experimental.fx2trt.converter_registry import tensorrt_converter
from .converter_utils import mark_as_int8_layer, extend_mod_attr_to_tuple
def common_maxpool(network, mod, dimension, input_val, layer_name):
kernel_size = extend_mod_attr_to_tuple(mod, "kernel_size", dimension)
stride = extend_mod_attr_to_tuple(mod, "stride", dimension)
padding = extend_mod_attr_to_tuple(mod, "padding", dimension)
layer = network.add_pooling(
input=input_val, type=trt.PoolingType.MAX, window_size=kernel_size)
layer.stride = stride
layer.padding = padding
layer.name = layer_name
if mod.ceil_mode:
layer.padding_mode = trt.PaddingMode.EXPLICIT_ROUND_UP
if input_val.dynamic_range:
mark_as_int8_layer(layer, input_val.dynamic_range)
return layer.get_output(0)
@tensorrt_converter(torch.nn.modules.pooling.MaxPool2d)
def maxpool2d(network, submod, args, kwargs, layer_name):
# args/kwargs should have already been normalized to kwargs
assert len(args) == 0
input_val = kwargs["input"]
if not isinstance(input_val, trt.tensorrt.ITensor):
raise RuntimeError(f"MaxPool2d received input {input_val} that is not part "
"of the TensorRT region!")
return common_maxpool(network, submod, dimension=2, input_val=input_val, layer_name=layer_name)
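# --- Hedged sketch (appended; not part of the original converter) ---
# @tensorrt_converter registers maxpool2d under torch.nn.MaxPool2d, so the
# fx2trt interpreter can dispatch on the module class while lowering the
# graph. The registry boils down to a decorator-filled dict, roughly:
_DEMO_CONVERTERS = {}  # hypothetical stand-in for the real converter registry

def _demo_register(key):
    def wrapper(fn):
        _DEMO_CONVERTERS[key] = fn  # map the module class to its converter
        return fn
    return wrapper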
| 35.923077 | 99 | 0.747323 |
b2f937927ed6b42dc28c2ea20854db281f7c64e3
| 6,078 |
py
|
Python
|
src/config_db/tests/test_processing_block.py
|
ska-telescope/sdp-prototype
|
8c6cbda04a83b0e16987019406ed6ec7e1058a31
|
[
"BSD-3-Clause"
] | 2 |
2019-07-15T09:49:34.000Z
|
2019-10-14T16:04:17.000Z
|
src/config_db/tests/test_processing_block.py
|
ska-telescope/sdp-prototype
|
8c6cbda04a83b0e16987019406ed6ec7e1058a31
|
[
"BSD-3-Clause"
] | 17 |
2019-07-15T14:51:50.000Z
|
2021-06-02T00:29:43.000Z
|
src/config_db/tests/test_processing_block.py
|
ska-telescope/sdp-configuration-prototype
|
8c6cbda04a83b0e16987019406ed6ec7e1058a31
|
[
"BSD-3-Clause"
] | 1 |
2019-10-10T08:16:48.000Z
|
2019-10-10T08:16:48.000Z
|
"""High-level API tests on processing blocks."""
import os
import pytest
from ska_sdp_config import config, entity, backend
# pylint: disable=missing-docstring,redefined-outer-name
PREFIX = "/__test_pb"
WORKFLOW = {
'id': 'test_rt_workflow',
'version': '0.0.1',
'type': 'realtime'
}
# pylint: disable=W0212
@pytest.fixture(scope="session")
def cfg():
host = os.getenv('SDP_TEST_HOST', '127.0.0.1')
with config.Config(global_prefix=PREFIX, host=host) as cfg:
cfg._backend.delete(PREFIX, must_exist=False, recursive=True)
yield cfg
cfg._backend.delete(PREFIX, must_exist=False, recursive=True)
def test_simple_pb():
for missing in ['id', 'version', 'type']:
with pytest.raises(ValueError, match="Workflow must"):
workflow = dict(WORKFLOW)
del workflow[missing]
entity.ProcessingBlock('foo-bar', None, workflow)
with pytest.raises(ValueError, match="Processing block ID"):
entity.ProcessingBlock('asd_htb', None, WORKFLOW)
with pytest.raises(ValueError, match="Processing block ID"):
entity.ProcessingBlock('foo/bar', None, WORKFLOW)
pb = entity.ProcessingBlock('foo-bar', None, WORKFLOW)
# pylint: disable=W0123,W0611
from ska_sdp_config.entity import ProcessingBlock
assert pb == eval(repr(pb))
def test_create_pb(cfg):
# Create 3 processing blocks
for txn in cfg.txn():
pb1_id = txn.new_processing_block_id(WORKFLOW['type'])
pb1 = entity.ProcessingBlock(pb1_id, None, WORKFLOW)
assert txn.get_processing_block(pb1_id) is None
txn.create_processing_block(pb1)
with pytest.raises(backend.Collision):
txn.create_processing_block(pb1)
assert txn.get_processing_block(pb1_id).pb_id == pb1_id
pb2_id = txn.new_processing_block_id(WORKFLOW['type'])
pb2 = entity.ProcessingBlock(pb2_id, None, WORKFLOW)
txn.create_processing_block(pb2)
pb_ids = txn.list_processing_blocks()
assert(pb_ids == [pb1_id, pb2_id])
# Make sure that it stuck
for txn in cfg.txn():
pb_ids = txn.list_processing_blocks()
assert(pb_ids == [pb1_id, pb2_id])
# Make sure we can update them
for txn in cfg.txn():
pb1.parameters['test'] = 'test'
pb1.scan_parameters['12345'] = {
'test_scan': 'asd'
}
txn.update_processing_block(pb1)
# Check that update worked
for txn in cfg.txn():
pb1x = txn.get_processing_block(pb1.pb_id)
assert pb1x.sbi_id is None
assert pb1x.parameters == pb1.parameters
assert pb1x.scan_parameters == pb1.scan_parameters
def test_take_pb(cfg):
workflow2 = dict(WORKFLOW)
workflow2['id'] += "-take"
# Create another processing block
for txn in cfg.txn():
pb_id = txn.new_processing_block_id(workflow2['type'])
pb = entity.ProcessingBlock(pb_id, None, workflow2)
txn.create_processing_block(pb)
with cfg.lease() as lease:
for txn in cfg.txn():
txn.take_processing_block(pb_id, lease)
for txn in cfg.txn():
assert txn.get_processing_block_owner(pb_id) == cfg.owner
assert txn.is_processing_block_owner(pb_id)
for txn in cfg.txn():
assert txn.get_processing_block_owner(pb_id) is None
assert not txn.is_processing_block_owner(pb_id)
# Check that asking for a non-existing workflow doesn't work
for txn in cfg.txn():
workflow3 = dict(WORKFLOW)
workflow3['id'] += "-take-doesnt-exist"
assert txn.take_processing_block_by_workflow(workflow3, lease) is None
# Test that we can find the processing block by workflow
with cfg.lease() as lease:
for txn in cfg.txn():
pb2 = txn.take_processing_block_by_workflow(workflow2, lease)
assert pb2.pb_id == pb_id
for txn in cfg.txn():
assert txn.get_processing_block_owner(pb_id) is None
assert not txn.is_processing_block_owner(pb_id)
# Check that we can re-claim it using client lease
for txn in cfg.txn():
pb2 = txn.take_processing_block_by_workflow(workflow2,
cfg.client_lease)
assert pb2.pb_id == pb_id
for txn in cfg.txn():
assert txn.get_processing_block_owner(pb_id) == cfg.owner
assert txn.is_processing_block_owner(pb_id)
def test_pb_state(cfg):
pb_id = 'teststate-00000000-0000'
state1 = {
"state": "executing",
"subarray": "ON",
"obsState": "SCANNING",
"receiveAddresses": {
"1": {
"1": ["0.0.0.0", 1024]
}
}
}
state2 = {
"state": "failed",
"subarray": "ON",
"obsState": "SCANNING",
"receiveAddresses": {
"1": {
"1": ["0.0.0.0", 1024]
}
}
}
# Create processing block
for txn in cfg.txn():
pb = entity.ProcessingBlock(pb_id, None, WORKFLOW)
txn.create_processing_block(pb)
# Check PB state is None
for txn in cfg.txn():
state_out = txn.get_processing_block_state(pb_id)
assert state_out is None
# Create PB state as state1
for txn in cfg.txn():
txn.create_processing_block_state(pb_id, state1)
# Read PB state and check it matches state1
for txn in cfg.txn():
state_out = txn.get_processing_block_state(pb_id)
assert state_out == state1
# Try to create PB state again and check it raises a collision exception
for txn in cfg.txn():
with pytest.raises(backend.Collision):
txn.create_processing_block_state(pb_id, state1)
# Update PB state to state2
for txn in cfg.txn():
txn.update_processing_block_state(pb_id, state2)
# Read PB state and check it now matches state2
for txn in cfg.txn():
state_out = txn.get_processing_block_state(pb_id)
assert state_out == state2
if __name__ == '__main__':
pytest.main()
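# --- Hedged sketch (added for illustration; not part of the original tests) ---
# The `for txn in cfg.txn()` idiom above re-runs the loop body until the
# transaction commits without a conflict, so the bodies must be idempotent.
# A rough shape of such a retry wrapper, with begin/commit as assumed hooks:
def _txn_loop(begin, commit):
    while True:
        txn = begin()          # fresh transaction per attempt
        yield txn              # caller's loop body runs here
        if commit(txn):        # stop once the commit goes through
            return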
| 30.542714 | 78 | 0.634913 |
65083a601c220863de10788eeab61c1a10edd19d
| 791 |
py
|
Python
|
THC/2021/crypto/Rsa_internal_attacker/chall.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | 1 |
2021-11-02T20:53:58.000Z
|
2021-11-02T20:53:58.000Z
|
THC/2021/crypto/Rsa_internal_attacker/chall.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | null | null | null |
THC/2021/crypto/Rsa_internal_attacker/chall.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from Crypto.Util.number import getPrime, inverse, bytes_to_long
import random
from math import gcd
def init():
p = getPrime(1024)
q = getPrime(1024)
return p, q
def new_user(p, q):
phi = (p - 1) * (q - 1)
while True:
e = random.randint(2, 100000)
if gcd(e, phi) == 1:
break
d = inverse(e, phi)
return e, d
def encrypt(m, e, n):
return pow(m, e, n)
p, q = init()
n = p * q
e_a, d_a = new_user(p, q)
e_b, d_b = new_user(p, q)
FLAG = b"THC2021{??????????????????????????????????????}"
c = encrypt(bytes_to_long(FLAG), e_b, n)
print(f"The public modulus : {hex(n)}")
print(f"Your key pair : ({hex(e_a)}, {hex(d_a)})")
print(f"Your boss public key : {hex(e_b)}")
print(f"Intercepted message : {hex(c)}")
| 20.815789 | 63 | 0.561315 |
041c268d8b5bc4823437c97b7ba8c2541ac8e1e8
| 1,638 |
py
|
Python
|
scripts/fabsp/vim.py
|
swoiow/dsc
|
5860e6bfaa70b700e025533c406a6bc52d4ab74b
|
[
"MIT"
] | null | null | null |
scripts/fabsp/vim.py
|
swoiow/dsc
|
5860e6bfaa70b700e025533c406a6bc52d4ab74b
|
[
"MIT"
] | null | null | null |
scripts/fabsp/vim.py
|
swoiow/dsc
|
5860e6bfaa70b700e025533c406a6bc52d4ab74b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*
from os import environ
from _internal_utils import exec_bash, lines, pf
from fabric.api import cd, settings, sudo
if not environ.get("VIM"):
environ["VIM"] = "8.1.0630"
def depend_debian():
"""
apt-get install -y libncurses5-dev libncursesw5-dev
"""
for line in lines(depend_debian):
sudo(line)
def depend_redhat():
"""
yum install -y ncurses-devel
"""
for line in lines(depend_redhat):
sudo(line)
def depend():
depend_map = [
("debian", depend_debian),
("redhat", depend_redhat),
]
dict(depend_map)[pf()]()
def download():
"""
curl -sL https://github.com/vim/vim/archive/v{var}.tar.gz | tar -xz
"""
with cd("/usr/src/"), settings(warn_only=True):
for line in lines(download):
sudo(line.format(var=environ["VIM"]))
def install():
"""
./configure --enable-python3interp --enable-luainterp --enable-cscope --prefix=/usr/local/vim/
make -s -j2
make install
ln -sf /usr/local/vim/bin/vim /usr/bin/vim
"""
depend()
download()
with cd("/usr/src/vim-{var}".format(var=environ["VIM"])), settings(warn_only=True):
for line in lines(install):
sudo(line)
@exec_bash
def install_plug():
"""
#yum install install -y cmake gcc-c++ make cmake3
#git clone https://github.com/VundleVim/Vundle.vim.git ~/.vim/bundle/Vundle.vim
#curl -sL -o ~/.vimrc https://github.com/swoiow/dsc/raw/master/config-vim/.vimrc
vim +PluginInstall +qall
cd ~/.vim/bundle/YouCompleteMe && python3 install.py
"""
| 21.84 | 98 | 0.612943 |
0443b0eb724be8c1428c443397d11e4a2b9ffec6
| 582 |
py
|
Python
|
admin-tutorial/AdminActions/actions/admin.py
|
zhengtong0898/django-decode
|
69680853a4a5b07f6a9c4b65c7d86b2d401a92b1
|
[
"MIT"
] | 5 |
2020-07-14T07:48:10.000Z
|
2021-12-20T21:20:10.000Z
|
admin-tutorial/AdminActions/actions/admin.py
|
zhengtong0898/django-decode
|
69680853a4a5b07f6a9c4b65c7d86b2d401a92b1
|
[
"MIT"
] | 7 |
2021-03-26T03:13:38.000Z
|
2022-03-12T00:42:03.000Z
|
admin-tutorial/AdminActions/actions/admin.py
|
zhengtong0898/django-decode
|
69680853a4a5b07f6a9c4b65c7d86b2d401a92b1
|
[
"MIT"
] | 1 |
2021-02-16T07:04:25.000Z
|
2021-02-16T07:04:25.000Z
|
from django.contrib import admin
from .models import Article
# Register your models here.
def make_published(modeladmin, request, queryset):
    queryset.update(status='p')  # queryset is a collection object; update() applies the change to all selected rows in one bulk query.
make_published.short_description = "Mark selected stories as published"
class ArticleAdmin(admin.ModelAdmin):
list_display = ['title', 'status']
    ordering = ['title']  # sort order
    actions = [make_published]  # bulk actions (run the make_published function: publish the selected articles in bulk)
admin.site.register(Article, ArticleAdmin)
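# --- Hedged sketch (appended; assumes Django >= 3.2, which this file does
# not pin): the same bulk action written with the @admin.action decorator,
# the modern replacement for setting short_description by hand.
@admin.action(description="Mark selected stories as published")
def make_published_modern(modeladmin, request, queryset):
    queryset.update(status='p')  # same bulk update as make_published above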
| 30.631579 | 92 | 0.668385 |
4a1982e3f5eb9d2a758adeed5e499a650df907ef
| 645 |
py
|
Python
|
profiles/util.py
|
Thames1990/BadBatBets
|
8dffb69561668b8991bf4103919e4b254d4ca56a
|
[
"MIT"
] | null | null | null |
profiles/util.py
|
Thames1990/BadBatBets
|
8dffb69561668b8991bf4103919e4b254d4ca56a
|
[
"MIT"
] | null | null | null |
profiles/util.py
|
Thames1990/BadBatBets
|
8dffb69561668b8991bf4103919e4b254d4ca56a
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.models import User, AnonymousUser
def user_authenticated(user):
"""
Checks if a user is authenticated.
:param user: User to check
    :return: True, if the user is verified and accepted the general terms and conditions as well as the privacy policy;
False otherwise.
"""
if isinstance(user, User):
return \
user.is_authenticated and \
user.profile.verified and \
user.profile.accepted_general_terms_and_conditions and \
user.profile.accepted_privacy_policy
    elif isinstance(user, AnonymousUser):
        return False
    else:
        # fail closed: unknown user types count as not authenticated
        # instead of silently returning None
        return False
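# --- Hedged usage sketch (appended; not part of the original module) ---
# The check above plugs straight into Django's user_passes_test decorator to
# guard views; the decorated view name would be project-specific.
from django.contrib.auth.decorators import user_passes_test

authenticated_required = user_passes_test(user_authenticated)

# @authenticated_required
# def place_bet(request): ...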
| 30.714286 | 114 | 0.669767 |
4f8bb60b2f154c13d32c76dd341c90acef2b72e3
| 2,166 |
py
|
Python
|
research/hpc/deepbsde/train.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
research/hpc/deepbsde/train.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
research/hpc/deepbsde/train.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""DeepBSDE train script"""
import os
from mindspore import dtype as mstype
from mindspore import context, Tensor, Model
from mindspore import nn
from mindspore.nn.dynamic_lr import piecewise_constant_lr
from mindspore.train.callback import TimeMonitor, LossMonitor
from src.net import DeepBSDE, WithLossCell
from src.config import config
from src.equation import get_bsde, create_dataset
from src.eval_utils import EvalCallBack
if __name__ == '__main__':
context.set_context(mode=context.GRAPH_MODE, device_target=config.device_target)
if not os.path.exists(config.log_dir):
os.mkdir(config.log_dir)
config.ckpt_path = os.path.join(config.log_dir, "deepbsde_{}_{}.ckpt".format(config.eqn_name, "{}"))
bsde = get_bsde(config)
dataset = create_dataset(bsde)
print('Begin to solve', config.eqn_name)
net = DeepBSDE(config, bsde)
net_with_loss = WithLossCell(net)
config.lr_boundaries.append(config.num_iterations)
lr = Tensor(piecewise_constant_lr(config.lr_boundaries, config.lr_values), dtype=mstype.float32)
opt = nn.Adam(net.trainable_params(), lr)
model = Model(net_with_loss, optimizer=opt)
eval_param = {"model": net_with_loss, "valid_data": bsde.sample(config.valid_size)}
cb = [LossMonitor(), TimeMonitor(), EvalCallBack(eval_param, config.ckpt_path, config.logging_frequency)]
epoch = dataset.get_dataset_size() // config.logging_frequency
model.train(epoch, dataset, callbacks=cb, sink_size=config.logging_frequency)
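    # Hedged illustration (appended; values are made up): piecewise_constant_lr
    # expands boundary/value pairs into one learning rate per step, e.g.
    #   piecewise_constant_lr([2, 4], [0.1, 0.01]) -> [0.1, 0.1, 0.01, 0.01]
    # so the Adam optimizer above sees a step-indexed schedule, not a scalar.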
| 48.133333 | 109 | 0.740074 |
4f9a77c08c39fe4fdecd29ebfc1afd8ffadc5731
| 3,771 |
py
|
Python
|
website/apps/fileupload/models.py
|
stahlnow/stahlnow
|
265dd46c54f68173071d1c86218201d6e618ceeb
|
[
"MIT"
] | 1 |
2017-03-14T08:08:31.000Z
|
2017-03-14T08:08:31.000Z
|
website/apps/fileupload/models.py
|
stahlnow/stahlnow
|
265dd46c54f68173071d1c86218201d6e618ceeb
|
[
"MIT"
] | null | null | null |
website/apps/fileupload/models.py
|
stahlnow/stahlnow
|
265dd46c54f68173071d1c86218201d6e618ceeb
|
[
"MIT"
] | null | null | null |
# encoding: utf-8
import os
from django.utils.translation import ugettext_lazy as _
from django.db import models
from django.conf import settings
from django.core.files.storage import FileSystemStorage
from django_extensions.db.fields import *
from django.db.models.signals import post_delete, pre_save
from django.dispatch import receiver
from taggit.managers import TaggableManager
from fileupload.image import pillow_backend as backend
file_store = FileSystemStorage(
location=settings.FILES_ROOT, base_url=settings.FILES_URL)
def generate_file_path(obj, file):
path = "%s%s" % (obj.uuid, os.path.splitext(file)[1])
return path.replace('-', '/')
class File(models.Model):
uuid = UUIDField()
created = CreationDateTimeField()
updated = ModificationDateTimeField()
file = models.FileField(upload_to=generate_file_path, storage=file_store)
tags = TaggableManager(blank=True)
class Meta:
verbose_name = _('file')
verbose_name_plural = _('files')
db_table = 'fileupload_files'
ordering = ('-created',)
get_latest_by = 'created'
def __unicode__(self):
return self.file.name
@models.permalink
def get_absolute_url(self):
return 'upload_new'
def save(self, *args, **kwargs):
super(File, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
self.file.delete(False)
super(File, self).delete(*args, **kwargs)
def file_tag(self):
if (is_image(self.file.url)):
return u'<img src="%s" />' % (self.file.url)
elif (is_movie(self.file.url)):
return u'<video autoplay loop muted src="%s" />' % (self.file.url)
def file_tag_thumb(self):
if (is_image(self.file.url)):
if os.path.isfile(unicode('{0}_thumb{1}').format(*os.path.splitext(self.file.url))):
return u'<a href="%s"><img src="%s" /></a>' % (self.pk, unicode('{0}_thumb{1}').format(*os.path.splitext(self.file.url)))
else:
return u'<a href="%s"><img src="%s" width=150px /></a>' % (self.pk, self.file.url)
elif (is_movie(self.file.url)):
return u'<a href="%s"><video width="300px" autoplay loop muted src="%s" /></a>' % (self.pk, self.file.url)
file_tag.short_description = 'Content'
file_tag.allow_tags = True
file_tag_thumb.short_description = 'Preview'
file_tag_thumb.allow_tags = True
def is_image(path):
ext = path.split('.')[-1].lower()
return ext in ['jpg', 'jpeg', 'png', 'gif', 'svg']
def is_movie(path):
ext = path.split('.')[-1].lower()
return ext in ['mp4', 'webm']
@receiver(models.signals.post_delete, sender=File)
def auto_delete_file_on_delete(sender, instance, **kwargs):
"""Deletes file from filesystem
when corresponding 'File' object is deleted.
"""
if instance.file:
if os.path.isfile(instance.file.path):
os.remove(instance.file.path)
@receiver(models.signals.pre_save, sender=File)
def auto_delete_file_on_change(sender, instance, **kwargs):
"""Deletes file from filesystem
when corresponding 'File' object is changed.
"""
if not instance.pk:
return False
try:
old_file = File.objects.get(pk=instance.pk).file
except File.DoesNotExist:
return False
new_file = instance.file
if not old_file == new_file:
if os.path.isfile(old_file.path):
os.remove(old_file.path)
@receiver(models.signals.post_save, sender=File)
def generate_thumbnail(sender, instance, **kwargs):
if instance.file:
if os.path.isfile(instance.file.path):
if backend.should_create_thumbnail(instance.file.path):
backend.create_thumbnail(instance.file.path)
| 32.791304 | 137 | 0.656325 |
8c19af40a9282fa2530c691e3187ae478de40130
| 3,009 |
py
|
Python
|
pocketthrone/entities/unit.py
|
herrschr/pocket-throne
|
819ebae250f45b0a4b15a8320e2836c0b5113528
|
[
"BSD-2-Clause"
] | 4 |
2016-06-05T16:48:04.000Z
|
2020-03-23T20:06:06.000Z
|
pocketthrone/entities/unit.py
|
herrschr/pocket-throne
|
819ebae250f45b0a4b15a8320e2836c0b5113528
|
[
"BSD-2-Clause"
] | null | null | null |
pocketthrone/entities/unit.py
|
herrschr/pocket-throne
|
819ebae250f45b0a4b15a8320e2836c0b5113528
|
[
"BSD-2-Clause"
] | null | null | null |
from pocketthrone.entities.enum import UnitType, UnitCategory, SkillLevel
from pocketthrone.entities.weapon import Weapon
# Unit class for std-units and heroes
class Unit:
# system properties
_id = -1
basename = None
_instanciated = False
_possible_moves = []
# engine properties
name = ""
name_de = ""
image_path = None
image_override = None
# unit properties
city = None
health = 4
movement = 2
# unit's weapon
weapon = None
# unit category & type
category = UnitCategory.UNITCAT_INFANTRY
unit_type = UnitType.UNITTYPE_SOLIDER
# unit flags
is_disabled = False
is_owned_by_nature = False
max_per_player = -1
max_per_map = -1
# requirements
requirements = []
required_building = None
required_fraction = None
# costs
cost_turns = 5
cost_gold = 10
# unit's owner
player_num = -1
	# changeable unit variables
hp = -1
mp = movement
# position
pos_x = -1
pos_y = -1
# experience level
experience = 0
def __init__(self, unit_type):
self.basename = unit_type
def __repr__(self):
'''returns an xml like representation of this unit'''
return "<Unit player=" + str(self.player_num) + " type=" + self.name + \
" pos=" + str(self.get_position()) + " hp=" + str(self.hp) + " mp=" + \
str(self.mp) + ">"
	# load values from json skeleton
	def loadFromJson(self, json_path):
		pass
def get_name(self):
'''returns the english name of this unit'''
return self.name
def get_basename(self):
'''returns the type (basename) of this unit'''
return self.basename
def get_type(self):
'''returns unit type'''
return self.unit_type
def give_weapon(self, weapon):
'''gives the unit a weapon object'''
self.weapon = weapon
	def get_id(self):
		'''returns the unit id or None (get_id avoids shadowing the _id attribute)'''
		if self._id != -1:
			return self._id
		else:
			return None
def get_player_num(self):
'''returns number of the owner of this unit'''
return self.player_num
def get_player(self):
'''returns owner of this unit'''
return None
	def set_position(self, position):
		'''sets absolute unit position; expects an (x, y) tuple'''
		pos_x, pos_y = position
		self.pos_x = pos_x
		self.pos_y = pos_y
def get_position(self):
'''returns absolute unit position'''
return (self.pos_x, self.pos_y)
def get_required_fraction(self):
'''returns the basename of the required fraction of this unit or None'''
return self.required_fraction
def get_required_building(self):
		'''returns required building for unit recruitment in a city or None'''
return self.required_building
def reset_mps(self):
'''reset mp on turn change'''
self.mp = self.movement
def damage(self, damage):
'''damages this unit (decrease hp)'''
self.hp = self.hp - damage
def heal(self, heal_hp):
'''heal this unit (increase hp)'''
self.hp = self.hp + heal_hp
def get_image_path(self):
'''returns path of image file of this units texture'''
if self.image_override != None:
return self.image_override
return "unit_" + self.get_basename()
def get_category(self):
'''returns category of this unit'''
return self.category
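# --- Hedged usage sketch (appended; not part of the original module) ---
# Builds a soldier, places it and applies one point of damage; the values
# printed are whatever the class defaults above produce.
if __name__ == "__main__":
	u = Unit(UnitType.UNITTYPE_SOLIDER)
	u.set_position((3, 4))
	u.damage(1)
	print(u)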
| 21.340426 | 75 | 0.695248 |
50b2593959249682c0de283b6178725d2aa3d205
| 16,223 |
py
|
Python
|
suds/properties.py
|
koocieyu/interactive-tutorials
|
873851b37f0a13b6218ba1e656d51169010981fe
|
[
"Apache-2.0"
] | 46 |
2018-10-22T23:34:03.000Z
|
2022-03-31T09:31:34.000Z
|
suds/properties.py
|
koocieyu/interactive-tutorials
|
873851b37f0a13b6218ba1e656d51169010981fe
|
[
"Apache-2.0"
] | 317 |
2018-10-05T23:51:48.000Z
|
2022-03-22T17:38:52.000Z
|
suds/properties.py
|
koocieyu/interactive-tutorials
|
873851b37f0a13b6218ba1e656d51169010981fe
|
[
"Apache-2.0"
] | 46 |
2018-10-10T18:55:00.000Z
|
2022-03-28T07:27:04.000Z
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )
"""
Properties classes.
"""
from logging import getLogger
log = getLogger(__name__)
class AutoLinker(object):
"""
Base class, provides interface for I{automatic} link
management between a L{Properties} object and the L{Properties}
contained within I{values}.
"""
def updated(self, properties, prev, next):
"""
Notification that a values was updated and the linkage
between the I{properties} contained with I{prev} need to
be relinked to the L{Properties} contained within the
I{next} value.
"""
pass
class Link(object):
"""
Property link object.
@ivar endpoints: A tuple of the (2) endpoints of the link.
@type endpoints: tuple(2)
"""
def __init__(self, a, b):
"""
@param a: Property (A) to link.
@type a: L{Property}
@param b: Property (B) to link.
@type b: L{Property}
"""
pA = Endpoint(self, a)
pB = Endpoint(self, b)
self.endpoints = (pA, pB)
self.validate(a, b)
a.links.append(pB)
b.links.append(pA)
def validate(self, pA, pB):
"""
Validate that the two properties may be linked.
@param pA: Endpoint (A) to link.
@type pA: L{Endpoint}
@param pB: Endpoint (B) to link.
@type pB: L{Endpoint}
@return: self
@rtype: L{Link}
"""
if pA in pB.links or \
pB in pA.links:
            raise Exception('Already linked')
dA = pA.domains()
dB = pB.domains()
for d in dA:
if d in dB:
                raise Exception('Duplicate domain "%s" found' % d)
for d in dB:
if d in dA:
                raise Exception('Duplicate domain "%s" found' % d)
kA = pA.keys()
kB = pB.keys()
for k in kA:
if k in kB:
                raise Exception('Duplicate key %s found' % k)
for k in kB:
if k in kA:
                raise Exception('Duplicate key %s found' % k)
return self
def teardown(self):
"""
Teardown the link.
Removes endpoints from properties I{links} collection.
@return: self
@rtype: L{Link}
"""
pA, pB = self.endpoints
if pA in pB.links:
pB.links.remove(pA)
if pB in pA.links:
pA.links.remove(pB)
return self
class Endpoint(object):
"""
Link endpoint (wrapper).
@ivar link: The associated link.
@type link: L{Link}
@ivar target: The properties object.
@type target: L{Property}
"""
def __init__(self, link, target):
self.link = link
self.target = target
def teardown(self):
return self.link.teardown()
def __eq__(self, rhs):
return ( self.target == rhs )
def __hash__(self):
return hash(self.target)
def __getattr__(self, name):
return getattr(self.target, name)
class Definition:
"""
Property definition.
@ivar name: The property name.
@type name: str
@ivar classes: The (class) list of permitted values
@type classes: tuple
    @ivar default: The default value.
    @type default: any
"""
def __init__(self, name, classes, default, linker=AutoLinker()):
"""
@param name: The property name.
@type name: str
@param classes: The (class) list of permitted values
@type classes: tuple
@param default: The default value.
@type default: any
"""
if not isinstance(classes, (list, tuple)):
classes = (classes,)
self.name = name
self.classes = classes
self.default = default
self.linker = linker
def nvl(self, value=None):
"""
Convert the I{value} into the default when I{None}.
@param value: The proposed value.
@type value: any
@return: The I{default} when I{value} is I{None}, else I{value}.
@rtype: any
"""
if value is None:
return self.default
else:
return value
def validate(self, value):
"""
Validate the I{value} is of the correct class.
@param value: The value to validate.
@type value: any
@raise AttributeError: When I{value} is invalid.
"""
if value is None:
return
if len(self.classes) and \
not isinstance(value, self.classes):
msg = '"%s" must be: %s' % (self.name, self.classes)
            raise AttributeError(msg)
def __repr__(self):
return '%s: %s' % (self.name, str(self))
def __str__(self):
s = []
if len(self.classes):
s.append('classes=%s' % str(self.classes))
else:
s.append('classes=*')
s.append("default=%s" % str(self.default))
return ', '.join(s)
class Properties:
"""
Represents basic application properties.
Provides basic type validation, default values and
link/synchronization behavior.
@ivar domain: The domain name.
@type domain: str
@ivar definitions: A table of property definitions.
@type definitions: {name: L{Definition}}
@ivar links: A list of linked property objects used to create
a network of properties.
@type links: [L{Property},..]
@ivar defined: A dict of property values.
@type defined: dict
"""
def __init__(self, domain, definitions, kwargs):
"""
@param domain: The property domain name.
@type domain: str
@param definitions: A table of property definitions.
@type definitions: {name: L{Definition}}
@param kwargs: A list of property name/values to set.
@type kwargs: dict
"""
self.definitions = {}
for d in definitions:
self.definitions[d.name] = d
self.domain = domain
self.links = []
self.defined = {}
self.modified = set()
self.prime()
self.update(kwargs)
def definition(self, name):
"""
Get the definition for the property I{name}.
@param name: The property I{name} to find the definition for.
@type name: str
@return: The property definition
@rtype: L{Definition}
@raise AttributeError: On not found.
"""
d = self.definitions.get(name)
if d is None:
raise AttributeError(name)
return d
def update(self, other):
"""
Update the property values as specified by keyword/value.
@param other: An object to update from.
@type other: (dict|L{Properties})
@return: self
@rtype: L{Properties}
"""
if isinstance(other, Properties):
other = other.defined
for n,v in other.items():
self.set(n, v)
return self
def notset(self, name):
"""
Get whether a property has never been set by I{name}.
@param name: A property name.
@type name: str
@return: True if never been set.
@rtype: bool
"""
        return self.provider(name).__notset(name)
def set(self, name, value):
"""
Set the I{value} of a property by I{name}.
The value is validated against the definition and set
to the default when I{value} is None.
@param name: The property name.
@type name: str
@param value: The new property value.
@type value: any
@return: self
@rtype: L{Properties}
"""
self.provider(name).__set(name, value)
return self
def unset(self, name):
"""
Unset a property by I{name}.
@param name: A property name.
@type name: str
@return: self
@rtype: L{Properties}
"""
self.provider(name).__set(name, None)
return self
def get(self, name, *df):
"""
Get the value of a property by I{name}.
@param name: The property name.
@type name: str
@param df: An optional value to be returned when the value
is not set
@type df: [1].
@return: The stored value, or I{df[0]} if not set.
@rtype: any
"""
return self.provider(name).__get(name, *df)
def link(self, other):
"""
        Link (associate) this object with an I{other} properties object
to create a network of properties. Links are bidirectional.
@param other: The object to link.
@type other: L{Properties}
@return: self
@rtype: L{Properties}
"""
Link(self, other)
return self
def unlink(self, *others):
"""
Unlink (disassociate) the specified properties object.
@param others: The list object to unlink. Unspecified means unlink all.
@type others: [L{Properties},..]
@return: self
@rtype: L{Properties}
"""
if not len(others):
others = self.links[:]
for p in self.links[:]:
if p in others:
p.teardown()
return self
def provider(self, name, history=None):
"""
Find the provider of the property by I{name}.
@param name: The property name.
@type name: str
@param history: A history of nodes checked to prevent
circular hunting.
@type history: [L{Properties},..]
@return: The provider when found. Otherwise, None (when nested)
and I{self} when not nested.
@rtype: L{Properties}
"""
if history is None:
history = []
history.append(self)
if name in self.definitions:
return self
for x in self.links:
if x in history:
continue
provider = x.provider(name, history)
if provider is not None:
return provider
history.remove(self)
if len(history):
return None
return self
def keys(self, history=None):
"""
Get the set of I{all} property names.
@param history: A history of nodes checked to prevent
circular hunting.
@type history: [L{Properties},..]
@return: A set of property names.
@rtype: list
"""
if history is None:
history = []
history.append(self)
keys = set()
keys.update(self.definitions.keys())
for x in self.links:
if x in history:
continue
keys.update(x.keys(history))
history.remove(self)
return keys
def domains(self, history=None):
"""
Get the set of I{all} domain names.
@param history: A history of nodes checked to prevent
circular hunting.
@type history: [L{Properties},..]
@return: A set of domain names.
@rtype: list
"""
if history is None:
history = []
history.append(self)
domains = set()
domains.add(self.domain)
for x in self.links:
if x in history:
continue
domains.update(x.domains(history))
history.remove(self)
return domains
def prime(self):
"""
Prime the stored values based on default values
found in property definitions.
@return: self
@rtype: L{Properties}
"""
for d in self.definitions.values():
self.defined[d.name] = d.default
return self
def __notset(self, name):
return not (name in self.modified)
def __set(self, name, value):
d = self.definition(name)
d.validate(value)
value = d.nvl(value)
prev = self.defined[name]
self.defined[name] = value
self.modified.add(name)
d.linker.updated(self, prev, value)
def __get(self, name, *df):
d = self.definition(name)
value = self.defined.get(name)
if value == d.default and len(df):
value = df[0]
return value
def str(self, history):
s = []
s.append('Definitions:')
for d in self.definitions.values():
s.append('\t%s' % repr(d))
s.append('Content:')
for d in self.defined.items():
s.append('\t%s' % str(d))
if self not in history:
history.append(self)
s.append('Linked:')
for x in self.links:
s.append(x.str(history))
history.remove(self)
return '\n'.join(s)
def __repr__(self):
return str(self)
def __str__(self):
return self.str([])
class Skin(object):
"""
The meta-programming I{skin} around the L{Properties} object.
@ivar __pts__: The wrapped object.
@type __pts__: L{Properties}.
"""
def __init__(self, domain, definitions, kwargs):
self.__pts__ = Properties(domain, definitions, kwargs)
def __setattr__(self, name, value):
builtin = name.startswith('__') and name.endswith('__')
if builtin:
self.__dict__[name] = value
return
self.__pts__.set(name, value)
def __getattr__(self, name):
return self.__pts__.get(name)
def __repr__(self):
return str(self)
def __str__(self):
return str(self.__pts__)
class Unskin(object):
def __new__(self, *args, **kwargs):
return args[0].__pts__
class Inspector:
"""
Wrapper inspector.
"""
def __init__(self, options):
self.properties = options.__pts__
def get(self, name, *df):
"""
Get the value of a property by I{name}.
@param name: The property name.
@type name: str
@param df: An optional value to be returned when the value
is not set
@type df: [1].
@return: The stored value, or I{df[0]} if not set.
@rtype: any
"""
return self.properties.get(name, *df)
def update(self, **kwargs):
"""
Update the property values as specified by keyword/value.
@param kwargs: A list of property name/values to set.
@type kwargs: dict
@return: self
@rtype: L{Properties}
"""
        # Properties.update() takes a single dict argument, not keywords
        return self.properties.update(kwargs)
def link(self, other):
"""
        Link (associate) this object with an I{other} properties object
to create a network of properties. Links are bidirectional.
@param other: The object to link.
@type other: L{Properties}
@return: self
@rtype: L{Properties}
"""
p = other.__pts__
return self.properties.link(p)
def unlink(self, other):
"""
Unlink (disassociate) the specified properties object.
@param other: The object to unlink.
@type other: L{Properties}
@return: self
@rtype: L{Properties}
"""
p = other.__pts__
return self.properties.unlink(p)
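# --- Hedged usage sketch (appended; not part of suds itself) ---
# Definition plus Skin give attribute-style access with type checking and
# defaults; the option name and values below are illustrative only.
if __name__ == '__main__':
    defs = [Definition('timeout', int, 90)]
    options = Skin('demo', defs, {})
    print(options.timeout)   # -> 90 (the default)
    options.timeout = 30     # validated against the int definition
    print(options.timeout)   # -> 30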
| 29.821691 | 80 | 0.551501 |
e876deef02ebb383ab43fb4de4d2a018fef93267
| 2,265 |
py
|
Python
|
python/en/archive/dropbox/udacity-ds_and_algos/python_basics/test_zip.py
|
aimldl/coding
|
70ddbfaa454ab92fd072ee8dc614ecc330b34a70
|
[
"MIT"
] | null | null | null |
python/en/archive/dropbox/udacity-ds_and_algos/python_basics/test_zip.py
|
aimldl/coding
|
70ddbfaa454ab92fd072ee8dc614ecc330b34a70
|
[
"MIT"
] | null | null | null |
python/en/archive/dropbox/udacity-ds_and_algos/python_basics/test_zip.py
|
aimldl/coding
|
70ddbfaa454ab92fd072ee8dc614ecc330b34a70
|
[
"MIT"
] | null | null | null |
# zip() in Python
# https://www.geeksforgeeks.org/zip-in-python/
# 1. zip() in Python
name = ["Manjeet", "Nikhil","Shambhavi", "Astha"];
roll_no = [4,1,3,2]
marks = [40,50,60,70]
mapped = zip(name,roll_no,marks)
print(mapped)
# <zip object at 0x7f7aaebc28c8>
mapped = set(mapped)
print("The zipped results is: ", end="")
print(mapped)
print("\n")
# The zipped results is: {('Nikhil', 1, 50), ('Astha', 2, 70), ('Manjeet', 4, 40), ('Shambhavi', 3, 60)}
# 2. How to unzip?
name = ["Manjeet","Nikhil","Shambhavi","Astha"]
roll_no = [4,1,3,2]
#marks = [40, 50, 60, 70]
marks = [40, 50, 60, 70,80] # Note the extra element of 80 won't be zipped!
mapped = zip(name,roll_no, marks)
print(mapped)
#<zip object at 0x7f7aae3aff08>
mapped = list(mapped)
print("The zipped results i: ", end="")
print(mapped)
print("\n")
# The zipped results is: [('Manjeet', 4, 40), ('Nikhil', 1, 50), ('Shambhavi', 3, 60), ('Astha', 2, 70)]
# Note the difference between set and list is:
# {('Nikhil', 1, 50), ('Astha', 2, 70), ('Manjeet', 4, 40), ('Shambhavi', 3, 60)}
# [('Manjeet', 4, 40), ('Nikhil', 1, 50), ('Shambhavi', 3, 60), ('Astha', 2, 70)]
#
# The set starts and ends with {( and )} whereas
# the list starts and ends with [( and )].
#
# The former is presented in alphabetical order whereas
# the latter is in the order presented.
# Unzipping the values
list_of_name, list_of_roll_no, list_of_marks = zip(*mapped)
print("The unzippped results:")
print("list_of_name: ", end="")
print(list_of_name)
print("list_of_roll_no: ", end="")
print(list_of_roll_no)
print("list_of_marks: ", end="")
print(list_of_marks)
print("\n")
#The unzipped results:
#list_of_name: ('Manjeet', 'Nikhil', 'Shambhavi', 'Astha')
#list_of_roll_no: (4, 1, 3, 2)
#list_of_marks: (40, 50, 60, 70)
# 3. Practical Applications
#A small example of a scorecard is demonstrated below.
#There are many possible applications that can be implemented using zip,
#be it a student database, a scorecard, or any other utility that requires mapping of groups.
players = ["Sachin","Sehwag","Gambhir","Dravid","Raina"]
scores = [100,15,17,28,43]
print("players\tscores")
for pl, sc in zip(players, scores):
print(pl,"\t",sc)
| 31.027397 | 105 | 0.642384 |
ad233b78df4ffce0ea8d961a0ed80427b394c655
| 1,218 |
py
|
Python
|
Packs/CommonScripts/Scripts/ParseWordDoc/ParseWordDoc.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/CommonScripts/Scripts/ParseWordDoc/ParseWordDoc.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/CommonScripts/Scripts/ParseWordDoc/ParseWordDoc.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
from docx import Document
from docx.opc.exceptions import PackageNotFoundError
def parse_word_doc(entry_id):
res = []
errEntry = {
"Type": entryTypes["error"],
"ContentsFormat": formats["text"],
"Contents": ""
}
try:
cmd_res = demisto.getFilePath(entry_id)
file_path = cmd_res.get('path')
document = Document(file_path)
file_data = '\n'.join([para.text for para in document.paragraphs])
file_name = cmd_res.get('name')
output_file_name = file_name[0:file_name.rfind('.')] + '.txt'
res = fileResult(output_file_name, file_data.encode('utf8'))
except PackageNotFoundError:
errEntry["Contents"] = "Input file is not a valid docx/doc file."
demisto.results(errEntry)
except BaseException as e:
errEntry["Contents"] = "Error occurred while parsing input file.\nException info: " + str(e)
demisto.results(errEntry)
demisto.results(res)
def main():
entry_id = demisto.args()['entryID']
parse_word_doc(entry_id)
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
| 30.45 | 100 | 0.65353 |
7e2f029aedae91d03a294725ee7059e1c30971b3
| 1,676 |
py
|
Python
|
crm/tests/test_forms.py
|
baniasbaabe/happy-qr
|
bf44ac19306ea6405cc7c9a100e6f83afca125b4
|
[
"MIT"
] | 1 |
2021-01-23T21:42:10.000Z
|
2021-01-23T21:42:10.000Z
|
crm/tests/test_forms.py
|
baniasbaabe/happy-qr
|
bf44ac19306ea6405cc7c9a100e6f83afca125b4
|
[
"MIT"
] | null | null | null |
crm/tests/test_forms.py
|
baniasbaabe/happy-qr
|
bf44ac19306ea6405cc7c9a100e6f83afca125b4
|
[
"MIT"
] | null | null | null |
from django.test import TestCase, Client
from crm.forms import *
from django.contrib.auth.models import User, Permission, Group
class TestForms(TestCase):
def setUp(self):
self.user = User.objects.create_superuser(username="user1",email="[email protected]",password="Hallo12345")
self.client = Client()
group_name = "mitarbeiter"
self.group = Group(name=group_name)
self.group.save()
def test_kunde_form_valid_data(self):
self.user.groups.add(self.group)
self.user.save()
self.client.login(username="user1", password="Hallo12345")
form = KundeForm(data={
'vorname':'VornameKunde',
'nachname':'NachnameKunde',
'email':'[email protected]',
'telefon':'+4917666666666',
'web':'kunde.kunde.de',
'notiz':'Notizen',
'template':'Template 1'})
self.assertTrue(form.is_valid())
def test_kunde_form_invalid_data(self):
self.user.groups.add(self.group)
self.user.save()
self.client.login(username="user1", password="Hallo12345")
form = KundeForm(data={
})
self.assertFalse(form.is_valid())
def test_mitarbeiter_form_valid_data(self):
form = MitarbeiterForm(data={
'vorname':'VornameMA',
'nachname':'NachnameMA',
'email':'[email protected]',
'telefon':'+4917666666666'
})
self.assertTrue(form.is_valid())
def test_mitarbeiter_form_invalid_data(self):
        form = MitarbeiterForm(data={
})
self.assertFalse(form.is_valid())
| 29.403509 | 114 | 0.606802 |
7e586d1c64061f10f5ec95ea69ade113ae96fcb4
| 98,000 |
py
|
Python
|
tests/test_isp.py
|
MedPhyDO/pygqa
|
580b2c6028d2299790a38262b795b8409cbfcc37
|
[
"MIT"
] | 3 |
2021-02-25T13:19:52.000Z
|
2021-03-03T03:46:46.000Z
|
tests/test_isp.py
|
MedPhyDO/app-skeleton
|
1161736ccf356c704c6c13b17fa11aca64b17dac
|
[
"MIT"
] | null | null | null |
tests/test_isp.py
|
MedPhyDO/app-skeleton
|
1161736ccf356c704c6c13b17fa11aca64b17dac
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Check all classes and functions contained in isp.
All runtime error messages during the test run are intentional.
After the run, OK is printed at the end if all tests were executed.
If any of the checks fail, the output ends with::
======================================================================
FAIL:
.......
FAILED (failures=x)
"""
import os
from os import path as osp
# make the modules reachable from the console as well
ABSPATH = os.path.dirname( os.path.abspath( __file__) )
path = osp.join( ABSPATH , "..")
import sys
sys.path.insert(0, path)
import shutil
from shutil import copyfile
#print(sys.path)
import unittest
import json
import time
from datetime import datetime
import warnings
warnings.filterwarnings("ignore")
import pandas as pd
import io
import matplotlib.pyplot as plt
from skimage import io as img_io
from skimage.util import compare_images
import numpy as np
from flask import Response
import dotmap
import threading
from safrs import jsonapi_rpc
from isp.config import ispConfig, dict_merge
from isp.webapp import ispBaseWebApp
from isp.safrs import db, system, ispSAFRSModel, ispSAFRSDummy, iso2date, isoDateType, isoDateTimeType
from isp.mpdf import PdfGenerator
from isp.plot import plotClass, rcParams
from sqlalchemy import MetaData
import logging
logger = logging.getLogger()
# folder test/files
files_path = os.path.join( ABSPATH, 'files')
if not os.path.exists( files_path ):
try:
os.makedirs( files_path )
except IOError as e:
print("Unable to create dir.", e)
# weasyprint logging
wp_log_file = os.path.join(files_path, 'weasyprint.log')
if os.path.exists( wp_log_file ):
os.remove( wp_log_file )
wp_logger = logging.getLogger('weasyprint')
wp_logger.addHandler( logging.FileHandler( wp_log_file ) )
wp_logger.setLevel( logging.CRITICAL ) # WARNING, CRITICAL
class dummy( ispSAFRSDummy ):
"""
    description: Tests - tests for ispSAFRSDummy
---
"""
__tablename__ = "dummy"
_database_key = ""
config = None
metadata = MetaData()
@classmethod
def init(self, kwargs:dict={} ):
"""
        Called by the jsonapi_rpc functions
Parameters
----------
kwargs : dict, optional
DESCRIPTION. The default is {}.
Returns
-------
kwargs : TYPE
DESCRIPTION.
"""
return kwargs
@jsonapi_rpc( http_methods=['GET'] )
def api_list(cls, **kwargs):
"""
        summary : all entries
        description: all entries
parameters:
- name : _ispcp
type: OrderedMap
in : query
default : {}
              description : additional parameters
----
{'data': [{
'attributes': { }
'id': '1',
'links': {'self': 'http://localhost/api/dbtests/1/'},
'type': 'dbtests'
}]
'included': [],
'jsonapi': {'version': '1.0'},
'links': {'self': 'http://localhost/api/dbtests/?page[offset]=0&page[limit]=250'},
'meta': {'count': 7, 'limit': 250, 'offset': 0},
'App-Error': [],
'App-Info': []
}
        actual:
{'data': [{
'function': 'api_list',
'kwargs': {'_ispcp': {}}
}],
'included': [],
'jsonapi': {'version': '1.0'},
'meta': {'count': 0, 'limit': 250, 'offset': 0},
'App-Error': [],
'App-Info': [{'message': 'safrs', 'info': 'Funktion: __main__.dummy.api_list()'}, {'message': 'kwargs', 'info': {'_ispcp': {}}}]}
        missing:
links
"""
#print("dummy.api_list")
cls.appInfo("kwargs", kwargs )
_result = [ {
"attributes": { "function": "api_list", "kwargs" : kwargs },
"id":"12",
"links": {"self": "http://localhost/api/dummy/12/"}, # autom. erzeugen
"type": "dummy" # autom. erzeugen
} ]
return cls._int_json_response( { "data": _result } )
@jsonapi_rpc( http_methods=['GET'] )
def api_get(cls, **kwargs):
"""
        summary : a single entry
        description: a single entry
parameters:
- name : Id
in : path
type: integer
required : true
              description : id of the record
- name : _ispcp
type: OrderedMap
in : query
default : {}
              description : additional parameters
----
{'data': {
'attributes': {},
'id': '7',
'links': {'self': 'http://localhost/api/dbtests/7/'},
'type': 'dbtests'
},
'included': [],
'jsonapi': {'version': '1.0'},
'links': {'self': 'http://localhost/api/dbtests/7/'},
'meta': {'count': 1, 'instance_meta': {}, 'limit': 250, 'offset': 0},
'App-Error': [],
'App-Info': []
}
"""
#print("dummy.api_get")
# log.warning("gqa.api_get: {} id:{}".format( json.dumps(kwargs), cls.object_id ) )
cls.appInfo("kwargs", kwargs )
        # normally there is no record in the database
if kwargs[cls._s_object_id] == "gibtsnicht":
_result = cls._int_get_empty_record( {"attributes": {cls._s_object_id : kwargs[cls._s_object_id] } })
else:
_result = {
"attributes": {cls._s_object_id : kwargs[cls._s_object_id] },
"id": 12,
"links": {"self": "http://localhost/api/{}/{}/".format(cls.__name__, 12)}, # autom. erzeugen
"type": cls.__name__ # autom. erzeugen
}
return cls._int_json_response( { "data": _result } )
@classmethod
@jsonapi_rpc( http_methods=['GET'] )
def test( cls, **kwargs ):
"""
        description: test of api functions and parameters
parameters:
- name : _ispcp
in : query
default : {}
              description : additional parameters
type: object
- name : zahl
in : query
required : true
              description : A number
type: number
- name : bool
in : query
required : false
default : false
              description : A boolean value
type: boolean
- name : text
in : query
required : false
default : typenlos
              description : An untyped value with a default
----
"""
#import sqlalchemy
cls.appInfo("kwargs", kwargs )
_result = kwargs
        # different return values
if kwargs["zahl"] == 1:
            # empty list
result = []
elif kwargs["zahl"] == 2:
            # list with one element
result = [ {"a":1, "b":2} ]
elif kwargs["zahl"] == 3:
            # json response without a result list
result = cls._int_json_response( "kein result" )
elif kwargs["zahl"] == 4:
            # internal checks
cls._int_add_meta( info= "{\"is\":\"dict\"}" )
result = []
elif kwargs["zahl"] == 5:
cls._int_parse_args( )
result = []
elif kwargs["zahl"] == 6:
result = cls._int_query( [ { "A":1 }, { "B":2 } ] )
elif kwargs["zahl"] == 7:
result = cls._int_groupby_query( cls._s_query, { "A":1, "B":2 } )
elif kwargs["zahl"] == 8:
result = []
db = cls.access_cls( "nicht da" )
result.append( {"nicht da": ""} )
db = cls.access_cls( "BigInteger" )
result.append( {"sqlalchemy.BigInteger": ""} )
elif kwargs["zahl"] == 9:
result = [
{'test=None': iso2date(None) },
{'20180415=2018-04-15': iso2date('20180415', True) },
{'2018-04-15=2018-04-15': iso2date('2018-04-15', True) },
{'2018-04-15 14:36:25=2018-04-15': iso2date('2018-04-15 14:36:25', True) },
{'2018-04-15=18-04-15 00:00:00': iso2date('2018-04-15') },
{'2018-04-15 14:36:25=2018-04-15 14:36:25': iso2date('2018-04-15 14:36:25') },
{'20180415 14:36:25=2018-04-15 14:36:25': iso2date('20180415 14:36:25') },
{'20180415 14:36=2018-04-15 14:36:00': iso2date('20180415 14:36') },
{'201A0415 14:36:25=None': iso2date('201A0415 14:36:25') },
{'201A0415 14:36=None': iso2date('201A0415 14:36') },
{'201A0415=None': iso2date('201A0415') },
]
else:
# dict
result = cls._int_json_response( { "data": _result } )
return result
@classmethod
@jsonapi_rpc( http_methods=['GET'] )
def pdf( cls, **kwargs ):
'''
        description: test of pdf functions and parameters
parameters:
- name : _ispcp
in : query
default : {}
              description : additional JSON parameters
type: object
- name : name
in : query
required : false
default : nofile
              description : name of the PDF file; determines how the pdf is generated
----
'''
cls.appInfo("kwargs", kwargs )
mimetype='text/html'
status = 200
        # different return values
if kwargs["name"] == "nofile":
status = 400
result = "Keine PDF Datei ({}.pdf) gefunden".format( kwargs["name"] )
cls.appError( "dummy/pdf", result)
            # return the error instead of an empty list
return Response(result, status=status, mimetype=mimetype)
pdfFile = "{}.pdf".format(kwargs["name"])
variables = {
"Klinik" : "MedPhyDO",
"Abteilung" : "App Skeleton",
"logo": "logo.png",
"Datenausgabe" : "16.03.2020",
"Titel" : "unittest",
"Betreff" : "PdfGenerator",
"Auswertung" : "mpdf Test auch mit langem Text",
"Erstelldatum": "",
"Erstellt_von": "",
"Geprüft_von": "",
"Gültig_ab": "",
"Freigegeben_von": "",
"tip": "mpdf test tip für die Erstellung eines Unittest mit verschiedenen Elementen und PDF Rückgabe ",
"Version" : "",
"path": osp.join( ABSPATH , "files", "pdf"),
}
# print(pdfFile)
        # prepare the content
        # test files
test_resources = osp.join( ABSPATH , "resources" )
test_files = {
"alpha" : osp.join( test_resources, 'alphachannel.svg' ),
"python" : osp.join( test_resources, 'python.svg' ),
"text" : osp.join( test_resources, 'test_text.txt' ),
"markdown" : osp.join( test_resources, 'test_markdown.md' ),
"markdown1" : osp.join( test_resources, 'test_markdown1.md' ),
"markdown2" : osp.join( test_resources, 'test_markdown2.md' ),
"logo" : 'logo.png', # immer aus den normalen resources
}
# text
text = """
<h1>Lorem ipsum</h1>
Lorem ipsum dolor sit amet, consectetuer adipiscing elit, sed diam nonummy nibh euismod tincidunt ut laoreet dolore magna aliquam erat volutpat. Ut wisi enim ad minim veniam, quis nostrud exerci tation ullamcorper suscipit lobortis nisl ut aliquip ex ea commodo consequat.
<br>
<b>kleiner Blindtext</b>
Hallo. Ich bin ein kleiner Blindtext. Und zwar schon so lange ich denken kann. Es war nicht leicht zu verstehen, was es bedeutet, ein blinder Text zu sein: Man ergibt keinen Sinn. Wirklich keinen Sinn. Man wird zusammenhangslos eingeschoben und rumgedreht – und oftmals gar nicht erst gelesen.
Aber bin ich allein deshalb ein schlechterer Text als andere?
<br>
"""
# data
        # use pandas data
data = {
"A" : { "A" : 1, "B": 1.5, "C": "test", "D":-0.2 },
"B" : { "A" : 2, "B": 2.6, "C": "", "D": 1.2 },
"C" : { "A" : 3, "B": 3.2, "C": "test", "D": 0.4 },
"D" : { "A" : 4, "B": 4.1, "C": "", "D": -0.6 }
}
data_frame = pd.DataFrame(data)
        # transpose rows and columns, then sort by C
data_frame = data_frame.transpose().sort_values(by="C", ascending=False)
        # for the tests set the font size to 10, otherwise 20 would be used
rcParams["font.size"] = 10
# rcParams["figure.figsize"] = (6.4, 4.8)
        # set the plt defaults
plt.rcParams.update( rcParams )
        # display the plot with pandas
data_frame.plot(kind='bar', title='Rating');
        # optimize the layout
plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
#plt.show()
image_data = io.BytesIO()
plt.savefig( image_data, format='png' )
#
# weasyprint
#
        # first with config settings only
pdf = PdfGenerator( config=ispConfig( mqttlevel=logging.WARNING ) )
        # now with the other arguments; the config is loaded internally
pdf = PdfGenerator( filename=pdfFile, variables=variables )
from isp.mpdf import DEFAULT_TEMPLATES
        # set the default templates again so config changes are not used for the test
        # provide the styles
pdf.PAGE_STYLE = DEFAULT_TEMPLATES["PAGE_STYLE"]
pdf.OVERLAY_STYLE = DEFAULT_TEMPLATES["OVERLAY_STYLE"]
        # provide the html skeleton
pdf.header_html = DEFAULT_TEMPLATES["header_html"]
pdf.footer_html = DEFAULT_TEMPLATES["footer_html"]
if kwargs["name"] == "test-1":
            # create an empty pdf
            # only run update metadata to check coverage
#pdf.updateMetadata( )
pass
elif kwargs["name"] == "test-2":
            # render plain text
pdf.textFile( test_files["text"], { "width": 80 })
            # tests the HTML output
pdf.html( '<b>HTML Test</b>', attrs={ "font-size":"9px" } )
            # render Markdown
pdf.textFile( test_files["markdown"], { "width": 80 } )
elif kwargs["name"] == "test-2a":
            # like test 2 but markdown first and then text
            # render Markdown
pdf.textFile( test_files["markdown"], { "width": 80 } )
            # tests the HTML output
pdf.html( '<b>HTML Test</b>', attrs={ "font-size":"9px" } )
            # render plain text
pdf.textFile( test_files["text"], { "width": 80 })
elif kwargs["name"] == "test-3":
            # page creation
c1 = pdf.setContentName("Seite 1")
pdf.text( "Inhalt 1" )
            # new content / new page
pdf.setContentName("Seite 2")
pdf.text( "Inhalt 2" )
pdf.newPage()
pdf.text( "Inhalt 3" )
pdf.newPage()
pdf.text( "<h2>Seite 4</h2>" )
pdf.text( "Inhalt 4" )
            # finally, insert something into content 1 on the first page
pdf.setContentName(c1, False)
pdf.text( "Inhalt 5 auf Seite 1" )
elif kwargs["name"] == "test-4":
icon_data = [
{ "acceptance": "True (5)", "icon": pdf.resultIcon( acceptance=True, iconOnly=True ) },
{ "acceptance": "False (1)", "icon": pdf.resultIcon( acceptance=False, iconOnly=True ) },
{ "acceptance": "1", "icon": pdf.resultIcon( acceptance=1, iconOnly=True ) },
{ "acceptance": "2", "icon": pdf.resultIcon( acceptance=2, iconOnly=True ) },
{ "acceptance": "3", "icon": pdf.resultIcon( acceptance=3, iconOnly=True ) },
{ "acceptance": "4", "icon": pdf.resultIcon( acceptance=4, iconOnly=True ) },
{ "acceptance": "5", "icon": pdf.resultIcon( acceptance=5, iconOnly=True ) },
{ "acceptance": "falsch", "icon": pdf.resultIcon( acceptance="falsch", iconOnly=True ) },
]
icon_frame = pd.DataFrame( icon_data )
            # render text
pdf.text( text, { "width": 80 }, attrs={"border":"1px solid #FF0000"})
            # use text from a file that does not exist
pdf.textFile( "gibtsnicht.md", { "width": 80 } )
            # use text from an existing file
pdf.textFile( test_files["text"], { "width": 40, "top": 130 }, attrs={"border":"1px solid #FF0000"} )
#
            # display the given image (svg)
pdf.image( test_files["alpha"], { "width": 50, "top":125, "left":60 }, attrs={"border":"1px solid #FF0000"} )
            # image from the resources (png)
pdf.image( test_files["logo"] , { "width": 30, "top":55, "left":95 }, attrs={"border":"1px solid #FF0000"} )
            # image of a data_frame.plot, height determined automatically by the content
img = '<div style="float:right;">'
img += pdf.image( image_data, { "width": 60 }, render=False)
img += "</div>"
pdf.html( img, { "width": 80, "top":80, "left":10 }, attrs={"border":"1px solid #FF0000"} )
            # pandas dataframe as a table
html = (
data_frame.round(2).style
.set_uuid( "test_pandas_" )
.set_table_attributes('class="alayout-fill-width"') \
.format( { 'A':'{0:.1f}', 'B':'{0:.1f}', 'D':'{0:.3f}'} )
.hide_index()
.highlight_max(subset=["D"], color='yellow', axis=0)
.render()
)
pdf.html( html, attrs={ "font-size":"9px", "margin-left": "10px" } )
            # without arguments (nothing happens)
pdf.pandas()
            # empty dataframe (nothing happens)
pdf.pandas( pd.DataFrame() )
            # render pandas immediately, without an id
pdf.pandas( data_frame,
area={ "width": 50, "top": 180 },
attrs={ "id": "test", "class":"unittest" }, # id des dataframe
fields=[
{ "field": "gibtsnicht" },
{ "field": "A", "label":"is A", "format":"{}", "style": [('text-align', 'center')] },
{ "field": "D", "format":"{0:.3f}", "style": [('text-align', 'right')] }
]
)
pdf.pandas( icon_frame,
area={ "width": 50, "top": 5, "right": 0 },
# attrs={ "id": "test", "class":"unittest" }, # id des dataframe
)
            # render pandas immediately, with an id
pdf.pandas( data_frame,
area={ "width": 50, "top": 180, "left": 60 },
fields=[
{ "field": "B", "label":"is B" },
{ "field": "D" }
]
)
            # pandas without matching fields
pdf.pandas( data_frame,
area={ "width": 50, "top": 180, "left": 120 },
fields=[
{ "field": "gibtsnicht" },
]
)
pdf.resultIcon( 1 )
            # new contentName (creates a page break)
pdf.setContentName("Seite 3")
            # use text from an existing file
pdf.textFile( test_files["markdown2"], { "width": 160 } )
            # insert empty text
pdf.text( )
            # insert text internally
pdf.text( 12 )
            # insert markdown internally
pdf.markdown( "* Markdown **List** Element" )
            # always force a page break
pdf.newPage()
pdf.resultIcon( 5 )
            # new page
pdf.text( "Seite 3" )
# ohne Angaben (nicht passiert)
pdf.pandasPlot()
# mit Angaben in der Angegebenen größe plotten
pdf.pandasPlot( data_frame, area={ "width": 100, "top": 30, "left": 20 }, kind='line', rot=75 )
# Text und TeX Formel nach SVG mit mathtext
pdf.mathtext( r"$a/b$" )
# nur den htmlcode für eine neue Seite erzeugen
pdf.newPage( False )
# einfach ein icon zum prüfen der fonts
pdf.icon( "mdi-paperclip", "x4")
# Plot Funktionen über die plotClass
# plot anlegen
plot = plotClass( )
fig, ax = plot.initPlot( )
# limits legende und grid
ax.set_ylim( [-2.0, 2.0] )
ax.grid( )
ax.legend( )
# als bar plot ausgeben
data_frame.plot( ax=ax, kind='bar', rot=75)
plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
# chart im PDF anzeigen
pdf.image( plot.getPlot(), area={ "width": 100, "top": 130, "left": 20 } )
# close all figures
plt.close('all')
# showPlot nur so für coverage durchführen
plot.showPlot()
if kwargs["name"] == "test-1":
#
# finish durchführen (coverage test)
#
# 1. nur pdf erzeugen
result = pdf.finish( )
pdf._variables["unittest"] = True
# 2. als unittest pdf und png erzeugen (wie render_pdf_and_png)
result = pdf.finish( )
else:
#
# pdf und png Datei erstellen
#
result = pdf.render_pdf_and_png( )
#
# pdf und png Datei erstellen
#
result = pdf.render_pdf_and_png( )
return cls._int_json_response( { "data": result } )
@classmethod
#@jsonapi_rpc( http_methods=['GET'] )
def norpc( cls, **kwargs ):
'''
Deliberately carries no jsonapi_rpc decorator; used to check that calling
a method without an rpc marker is rejected (see test_webapp_dummy_test).
'''
return ""
class dbtestsrel( ispSAFRSModel ):
"""
description: Tests - Test von ispSAFRSModel mit relationen
---
"""
__table_args__ = {'extend_existing': True}
__tablename__ = "dbtestsrel"
id = db.Column('id', db.Integer, primary_key=True, unique=True, autoincrement=True)
dbtests_id = db.Column( 'dbtests_id', db.Integer, db.ForeignKey("dbtests.id") )
rstring = db.Column('rstring', db.String, nullable=False) #
rdate = db.Column('rdate', db.Date, nullable=True) # YYYYMMDD
rinteger = db.Column('rinteger', db.Integer, nullable=True)
rdata = db.Column('rdata', db.JSON ) # .. todo::json type?
# relationen
dbtests = db.relationship("dbtests", back_populates="dbtestsrel", foreign_keys=[dbtests_id]) # one to many
class dbtests( ispSAFRSModel ):
"""
description: Tests - Test von ispSAFRSModel mit relationen
---
In der Datenbank wird immer komplett abgelegt
Specify 'extend_existing=True' to redefine options and columns on an existing Table object.
Numeric auch DECIMAL
precision=None,
scale=None,
decimal_return_scale=None,
asdecimal=True, - es wird ein formatierter string zurückgegeben (gerundet)
db.Float( precision=5, asdecimal=True, decimal_return_scale=4 )
"""
__table_args__ = {'extend_existing': True}
__tablename__ = "dbtests"
id = db.Column('id', db.Integer, primary_key=True, unique=True, autoincrement=True)
string = db.Column('string', db.String, nullable=False) #
date = db.Column('date', db.Date, nullable=True) # YYYYMMDD
isodatetime = db.Column('isodatetime', isoDateTimeType, nullable=True) # YYYY-MM-DD HH:mm:SS
isodate = db.Column('isodate', isoDateType, nullable=True) # YYYY-MM-DD
integer = db.Column('integer', db.Integer, nullable=True)
data = db.Column('data', db.JSON ) # .. todo::json type?
tags = db.Column('tags', db.String, nullable=True)
gruppe = db.Column('gruppe', db.String, nullable=True)
aktiv = db.Column('aktiv', db.Integer, nullable=False, default=True)
float = db.Column('float', db.Float( asdecimal=True ), nullable=False, default=0) # (5,True,4) gibt 0.3333 als str
decimal = db.Column('decimal', db.DECIMAL( 5, 2, 1, True ), nullable=False, default=0)
numeric = db.Column('numeric', db.Numeric( 5, 2, 3, False ), nullable=False, default=0 )
# relationen
dbtestsrel = db.relationship("dbtestsrel", back_populates="dbtests", foreign_keys=[dbtestsrel.dbtests_id], lazy="dynamic", cascade="delete") # one to many
def to_dict(self):
# with asdecimal=True a str is returned, so convert it back to float
result = ispSAFRSModel.to_dict(self)
result["decimal"] = float( result["decimal"] )
#print( result )
return result
@classmethod
@jsonapi_rpc( http_methods=['GET'] )
def test( cls, **kwargs ):
"""
description : Zusätzliche Funkton
parameters:
- name : _ispcp
in : query
default : {}
description : zusätzliche parameter
type: object
- name : zahl
in : query
required : true
description : Eine Zahl
type: number
- name : bool
in : query
required : false
default : false
description : Eine boolean Wert
type: boolean
- name : text
in : query
required : false
default : typenlos
description : Eine typenloser Wert mit default
----
"""
#print( cls.object_id )
cls.appDialog("dbtests", { "content" : " test Dialog", "dimensions" : [ 500, 200] })
result = []
#_result = kwargs
if kwargs["zahl"] == 8:
# Datenbank Klasse bestimmen
db = cls.access_cls( "dbtests" )
else:
result = cls._int_get_empty_record( {"tags": "defaulttag"} )
cls.appInfo("kwargs", kwargs, status_code=205 )
return cls._int_json_response( { "data": result } )
def run( config:dict={} ):
''' Starts ispBaseWebApp with additional config entries
Parameters
----------
config : dict, optional
Additional configuration entries. The default is {}.
Returns
-------
webApp : ispBaseWebApp
The started web application
'''
# Konfiguration öffnen
_config = ispConfig( config=config )
# _config.update( config )
#print( _config )
_apiConfig = {
"models": [ system, dummy, dbtests, dbtestsrel ],
}
_webconfig = {
# nur um update von webconfig zu testen
"name" : "test_isp",
}
# Webserver starten
webApp = ispBaseWebApp( _config, db, webconfig=_webconfig, apiconfig=_apiConfig )
return webApp
class testBase(unittest.TestCase):
'''
setUp(), tearDown(), and __init__() will be called once per test.
'''
@classmethod
def setUpClass(cls):
''' Called once when the test class is initialised
- provide the api
- load reference test results for comparison
'''
# This attribute controls the maximum length of diffs output by assert methods that report diffs on failure.
# It defaults to 80*8 characters
cls.maxDiff = None
files_path = os.path.join( ABSPATH, 'files')
pdf_path = os.path.join( ABSPATH, 'files', 'pdf')
config_path = os.path.join( ABSPATH, '..', 'config')
if not os.path.exists( files_path ):
os.mkdir( files_path )
# alte Datenbank löschen: über Pfad Angaben falls in der config nicht die testdatei steht
db_file = os.path.join( files_path, "tests.db" )
if os.path.exists( db_file ):
os.remove( db_file )
# alle erzeugten pdf und den Pfad pdf löschen
if os.path.exists( pdf_path ):
shutil.rmtree( pdf_path )
swagger_file = os.path.join( files_path, "swagger_test.json" )
if not os.path.exists( swagger_file ):
with open(swagger_file, 'w') as fp:
obj = {
"info": {
"title": "swagger test"
}
}
json.dump(obj, fp, indent=2)
# webapp mit unitest config
cls.webapp = run( {
"loglevel" :{
"safrs" : logging.DEBUG
#"webapp" : logging.INFO,
},
"server" : {
"webserver" : {
"name" : "swagger_test",
"port" : 5001,
"TESTING": True,
"reloader" : False
},
"api": {
"DBADMIN": True,
"custom_swagger_config": os.path.join( files_path, "swagger_test.json" )
}
},
"templates":{
"PDF-HEADER": None
},
"database": {
"main": "tests",
"tests" : {
"connection": "sqlite:///{{BASE_DIR}}/tests/files/tests.db"
}
}
} )
cls.app = cls.webapp.app
#print("setUpClass", cls.webapp.config.get() )
# Grunddaten in die Datenbank laden
data = {
"dbtests" : [
{ "string": "eins", "integer": 1, "gruppe":"A", "tags":"A,K", "aktiv":True },
{ "string": "zwei", "integer": 2, "gruppe":"B", "tags":"B,M", "aktiv":False },
{ "string": "drei", "integer": 3, "gruppe":"C", "tags":"M,K", "aktiv":True },
{ "string": "vier", "integer": 4, "gruppe":"C", "aktiv":False },
{ "string": "fünf", "integer": 5, "gruppe":"B", "tags":"A,K", "aktiv":True }
],
"dbtestsrel" : [
{ "dbtests_id": "1", "rstring": "r_eins", "rinteger": 11 },
{ "dbtests_id": "2", "rstring": "r_zwei", "rinteger": 12 },
{ "dbtests_id": "3", "rstring": "r_drei", "rinteger": 13 },
{ "dbtests_id": "4", "rstring": "r_vier", "rinteger": 14 },
{ "dbtests_id": "5", "rstring": "r_fünf", "rinteger": 15 }
]
}
for d in data["dbtests"]:
response = cls.app.post( "api/dbtests/", headers={'Content-Type': 'application/json'}, data=json.dumps({
"data": {
"attributes": d,
"type":"dbtests"
}
}))
for d in data["dbtestsrel"]:
response = cls.app.post( "api/dbtestsrel/", headers={'Content-Type': 'application/json'}, data=json.dumps({
"data": {
"attributes": d,
"type":"dbtestsrel"
}
}))
@classmethod
def tearDownClass(cls):
"""
config unittest file löschen
"""
#os.remove( cls.unitest_file )
pass
def setUp(self):
''' Called before every test
'''
pass
def tearDown(self):
''' Called after every test
Returns
-------
None.
'''
#self.app.
# close the browser window
#self.driver.quit()
pass
class ispTest( testBase ):
def test_config_mqtt(self):
'''Check isp.config ispConfig with the MQTTHandler (isp.mqtt); always run with a fresh kernel because of mqttInitLogging.
'''
# zuerst ohne parameter aufrufen
config = ispConfig( )
# __repr__ testen soll nicht die Klasse sondern die config selbst (dotmap) geben
self.assertEqual(
repr(config)[:7], 'DotMap(' , "Fehler beim laden __repr__")
# Magic Methods prüfen
self.assertEqual(
config.__dict__["_loadErrors"], [], "Fehler beim laden von _loadErrors")
self.assertEqual(
config._loadErrors, [], "__getitem__ Fehler bei vorhandenen _loadErrors im Object")
self.assertEqual(
type(config.test), dotmap.DotMap, "__getitem__ Fehler bei nicht vorhandenen in der config")
# __getattr__ wird bei nicht vorhandenen aufgerufen
self.assertEqual(
config._test, None, "__getitem__ Fehler bei nicht vorhandenen im Object")
# __getitem__
self.assertEqual(
config["_loadErrors"], [], "__getitem__ Fehler")
# __getitem__
self.assertEqual(
type(config["versions"]), dotmap.DotMap, "__getitem__ mit dotmap Fehler")
# __getattr__ mit dotmap (config Values)
self.assertEqual(
type(config.versions), dotmap.DotMap, "__getattr__ mit dotmap Fehler")
# __setitem__
config["_version"] = '2.unittest' # __setitem__
self.assertEqual(
config.__dict__["_version"], '2.unittest', "__setitem__ Fehler")
# __setitem__ mit dotmap (config Values)
config["unittest"] = '3.unittest' # __setitem__
self.assertEqual(
config.unittest, '3.unittest', "__setitem__ mit dotmap Fehler")
# __setattr__
config._version = '3.unittest' # __setattr__
self.assertEqual(
config.__dict__["_version"], '3.unittest', "__setattr__ Fehler")
# Zugiffe auf die config selbst
#
# komplette config als dict
self.assertEqual(
type( config.get() ), dict, "komplette config als dict")
# config get mit default
self.assertEqual(
config.get("gibtsnicht", "defaultValue"), 'defaultValue', "config get mit default")
# dotmap set oberste ebene
config._config["unittest"] = '4.unittest'
self.assertEqual(
config.get("unittest") , '4.unittest', "dotmap get auf erster ebene")
# dotmap set/get auf einer ebene
config._config.A.unittest = '4A.unittest'
self.assertEqual(
config.get("A.unittest") , '4A.unittest', "dotmap get auf zweiter ebene")
config._config.A.B.unittest = '4AB.unittest'
self.assertEqual(
config.get( ["A", "B", "unittest"] ) , '4AB.unittest', "dotmap get auf dritter ebene")
# dotmap set oberste ebene
config.set("5unittest", '5-unittest')
# dotmap get
self.assertEqual(
config.get("5unittest"), '5-unittest', "dotmap set auf erster ebene anlegen")
# dotmap set oberste ebene überschreiben
config.set("5unittest", '5a-unittest')
# dotmap get
self.assertEqual(
config.get("5unittest"), '5a-unittest', "dotmap set auf erster ebene ändern")
# dotmap set zweite ebene
config.set("B5.unittest", '5B-unittest')
# dotmap get
self.assertEqual(
config.get("B5.unittest"), '5B-unittest', "dotmap set auf zweiter ebene")
# dotmap set zweite ebene als list
config.set(["C5","unittest"], '5C-unittest')
# dotmap get
self.assertEqual(
config.get(["C5","unittest"]), '5C-unittest', "dotmap set/get auf zweiter ebene als list")
# dotmap set zweite ebene neues Element
config.set("B5.unittestA", '5B-unittest')
self.assertEqual(
config.get("B5").toDict(), {'unittest': '5B-unittest', 'unittestA': '5B-unittest'}, "dotmap set zweite ebene neues Element")
# hilfsfunktion dict_merge testen
a = {"A":1}
b = {"B":2}
c = dict_merge(a, b)
self.assertEqual(
c, {'A': 1, 'B': 2}, "dict_merge auch neue keys")
c = dict_merge(a, b, False)
self.assertEqual(
c, {'A': 1}, "dict_merge nur vorhandene keys")
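# From the two assertions above, dict_merge(a, b) behaves like a dict update;
# the optional third argument apparently controls whether keys missing in the
# first dict are added (True, the default) or silently dropped (False).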
# test in config setzen update prüfen
#
# time.strftime does not support %f (microseconds); use datetime instead
localtime = datetime.now().strftime("%Y%m%d %H:%M:%S.%f")
config.test = {"a":1, "time": localtime }
# a verändern
config.update( {
"test": {"a":2}
})
self.assertEqual(
config.test, {"a":2, "time": localtime }, "Fehler bei config update")
# ohne mqtt findet default logging statt (konsole)
# .. todo:: Konsole logger funktionen noch überprüfen
logger = logging.getLogger( "MQTT" )
logger.debug('logger.debug')
logger.info("logger.info")
logger.warning("logger.warning")
logger.error("logger.error")
# mqtt logging prüfen
#
if config.get("server.mqtt.host", "") == "":
print( "(MQTT) keine Angaben in config vorhanden. MQTT wird nicht getestet!")
return
# config mit anderem mqttLevel
config = ispConfig( mqttlevel=30 )
mqtt = config.mqttGetHandler()
self.assertIsNotNone(
mqtt, "kein MQTT handler vorhanden")
results = {}
mqtt_event = threading.Event()
mqttResult = None
def onMqtt( msg ):
nonlocal mqttResult
# store the received message in results
mqttResult = msg
results[ msg["topic"] ] = msg["payload"]
mqtt_event.set()
# funktion bei signal aufrufen
mqtt.signal.connect( onMqtt )
def publishThread( args ):
nonlocal mqttResult
mqttResult = None
mqtt_event.clear()
# run publish in a thread; the reply arrives asynchronously via the onMqtt signal
thread = threading.Thread( target=mqtt.publish, args=( args,) )
thread.start()
# wait at most 3 seconds, or until onMqtt sets mqtt_event
while not mqtt_event.wait( timeout=3 ):
mqtt_event.set()
return mqttResult
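# Note on the pattern above: mqtt.publish runs in a worker thread because the
# broker reply arrives asynchronously through the onMqtt signal handler, which
# stores the message and sets mqtt_event. wait(timeout=3) returns False on
# timeout; in that case the loop body sets the event itself, so the next wait()
# exits immediately and publishThread returns None instead of a reply.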
# die eigenen script infos
result = publishThread({
"topic": "cmnd/status"
} )
self.assertEqual(
result["topic"], "stat/status", "Fehler bei cmnd/status abfrage")
# python process vorhanden?
result = publishThread({
"topic": "cmnd/process",
"payload" : "python"
} )
#print("----------------------cmnd/process", result )
self.assertEqual(
result["topic"], "stat/process", "Fehler bei process abfrage")
# publish ohne topic - publish wird nicht aufgerufen
# hier wird in publishThread auf timeout gewartet
result = publishThread({
"payload": "publish ohne topic - publish wird nicht aufgerufen"
})
self.assertIsNone(
result, "Fehler bei process abfrage")
# publish ohne payload - publish wird mit leerem payload aufgerufen
result = publishThread({
"topic": "cmnd/test/leer"
})
self.assertEqual(
result["payload"], "", "Fehler bei leerem payload")
# payload mit object - publish wird mit leerem payload aufgerufen nur (str, bytearray, int, float) ist ok
result = publishThread({
"topic": "cmnd/test/object",
"payload": object()
})
self.assertEqual(
result["payload"], "", "Fehler bei object payload")
# payload als Text
result = publishThread({
"topic": "cmnd/test/string",
"payload": "payload als Text"
})
self.assertEqual(
result["payload"], "payload als Text", "Fehler bei text payload")
# payload als dict
result = publishThread({
"topic": "cmnd/test/dict",
"payload": {"text":"payload als dict"}
})
self.assertEqual(
result["payload"], {"text":"payload als dict"}, "Fehler bei dict payload")
# mqtt.client.subscribe( "gqa_dev/logging/#" )
# mqtt funktionen über logger
logger = logging.getLogger( "MQTT" )
logger.setLevel( logging.DEBUG )
logger.send()
logger.send("test/publish")
logger.progressStart( "test" )
logger.progress( "test", 50 )
logger.progressReady( "test" )
# test über mqtt anstatt über sofort über logger
mqtt.logging = True
mqtt.info("config.info")
mqtt.warning("config.warning")
mqtt.error("config.error")
# .. todo:: config ohne mqtt Ausgabe auf der Konsole
config.mqttCleanup()
mqtt.info("config.info nach cleanup")
mqtt.warning("config.warning nach cleanup")
mqtt.error("config.error nach cleanup")
# config mit falschen mqtt Angaben
#
config = ispConfig( )
port = config._config.server.mqtt.port
config._config.server.mqtt.port = 111111
config.mqttInitLogger( cleanup=True )
mqtt = config.mqttGetHandler()
self.assertIsNone(
mqtt, "Trotz init Fehler MQTT handler vorhanden")
#mqtt.info("config.info nach Fehler bei MQTT config")
config._config.server.mqtt.port = port
config.mqttInitLogger( cleanup=True )
time.sleep(4) # sleep for 4 seconds to receive mqtt messages
# hier gibt es keine Ausgaben, da mqtt nicht mehr da ist
logger.info("logger.info nach MQTT init Fehler")
logger.send("cmnd/test/publish", "nach MQTT init Fehler")
time.sleep(2) # sleep for 2 seconds to receive logger mqtt messages
#print( results )
self.assertIn(
"cmnd/test/publish", results, "Fehler nach MQTT init Fehler")
#mqtt.publish({
# "topic": "cmnd/status"
#})
# mqtt in config schließen
config.mqttCleanup( )
#print( results )
def test_config_files( self ):
# einfach config bereitstellen
config = ispConfig( )
temp_conf = {
"unittest": True,
"version" : "0.0.1",
"variables": {
"Version" : "0.0.1a",
},
"value": 0,
"content": "test"
}
config = ispConfig( config = temp_conf )
test = {
"value" : config.get("value"),
"content" : config.get("content"),
"info" : config.get("info")
}
self.assertDictEqual(test, {
"value" : 0,
"content" : "test",
"info" : None
}, "config Rückgabe stimmt nicht")
# Versions Angabe prüfen
# zusätzliche Dateien anlegen
unitest_json_file_00 = os.path.join( config.BASE_DIR, "config", "config-18200000.json")
with open(unitest_json_file_00, 'w') as f:
f.write( '{ "value": 0, "content": "test" }' )
unitest_json_file_01 = os.path.join( config.BASE_DIR, "config", "config-18200101.json")
with open(unitest_json_file_01, 'w') as f:
f.write( '{ "value": 1, "info": "info 18200101" }' )
unitest_json_file_05 = os.path.join( config.BASE_DIR, "config", "config-18200105.json")
with open(unitest_json_file_05, 'w') as f:
f.write( '{ "value": 5, "info": "info 18200105" }' )
config = ispConfig( )
test = {
"value" : config.get("value"),
"content" : config.get("content"),
"info" : config.get("info")
}
self.assertDictEqual(test, {
"value" : 5,
"content" : "test",
"info" : "info 18200105"
}, "config Rückgabe stimmt nicht")
config = ispConfig( lastOverlay="18200101" )
test = {
"value" : config.get("value"),
"content" : config.get("content"),
"info" : config.get("info")
}
self.assertDictEqual(test, {
"value" : 1,
"content" : "test",
"info" : "info 18200101"
}, "config Rückgabe stimmt nicht")
os.remove( unitest_json_file_00 )
os.remove( unitest_json_file_01 )
os.remove( unitest_json_file_05 )
# config-0000.json mit falschen Inhalt erzeugen,
# Fehler prüfen und Datei wieder löschen
#
error_json_file = os.path.join( config.BASE_DIR, "config", "config-0000.json")
with open(error_json_file, 'w') as f:
f.write( "#Falscher Inhalt" )
config = ispConfig()
self.assertEqual(
config._loadErrors, [ error_json_file ], "load error wurde nicht ausgelöst")
os.remove( error_json_file )
def test_config_jinja(self):
'''Test the jinja template functions of the config.
'''
# eine eigene config mit resources im tests Ordner
config = ispConfig( config={
"server": {
"webserver": {
"resources" : os.path.join( ABSPATH, "resources" )
}
}
})
# das aktuelle datum
datum = datetime.now().strftime('%d.%m.%Y')
result_A = """<ul>
<li>testuser</li>
</ul>
<ul>
<li>Datum aus Parameter <strong>datum</strong> :{{datum}}</li>
<li>Inhalt aus Parameter: {{user}}</li>
</ul>
Datum mit now: #datum#""".replace( "#datum#", datum )
result_B = """<ul>
<li>testuser</li>
</ul>
<ul>
<li>Datum aus Parameter <strong>datum</strong> :#datum#</li>
<li>Inhalt aus Parameter: testuser</li>
</ul>
Datum mit now: #datum#""".replace( "#datum#", datum )
meta = {
"user" : "testuser",
"datum": "{{ now.strftime('%d.%m.%Y') }}",
"name": "{{user}}"
}
tpl = """{% markdown %}
* {{ user }}
{% endmarkdown %}
{% include "test_template.tmpl" %}
Datum mit now: {{ now.strftime('%d.%m.%Y') }}"""
result = config.render_template( tpl, meta, deep_replace=False )
self.assertEqual(result, result_A, "template nicht OK")
result = config.render_template( tpl, meta, deep_replace=True )
self.assertEqual(result, result_B, "template nicht OK")
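# What the two assertions show: with deep_replace=False the template is
# rendered once, so placeholders injected through meta values (e.g. "{{datum}}",
# "{{user}}") survive in the output (result_A); with deep_replace=True the
# rendering is repeated until those nested placeholders are resolved (result_B).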
def test_webapp_base_system( self ):
''' Webapp calls to system functions
'''
response = self.app.get( "api/system" )
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
response = self.app.get( "api/system", query_string = { "format" : "html" } )
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
response = self.app.get( "api/system/test", query_string = { "zahl" : 12 } )
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertDictEqual(
response.json["data"],
{ "_ispcp": {}, "bool": False, "text": "typenlos", "zahl": 12.0},
"Response data nicht OK"
)
response = self.app.get( "api/system/15" )
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertDictEqual(
response.json["data"]["kwargs"],
{'format': 'html', 'info': 'kwargs', 'systemId': '15'},
"Response data nicht OK"
)
# print("test_webapp_base_system", response.json )
def test_webapp_base_statics( self ):
''' Webapp calls for static content
'''
# index auf zwei arten aufrufen
response = self.app.get( "/" )
#self.assertEqual(response.status_code, 200, "Api Status nicht 200")
index = response.data
response = self.app.get( "/render/index", query_string = {
"zahl":"012",
"bool":True,
"test":1,
"_ispcp": json.dumps( {"name":"B"} )
} )
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertEqual(index, response.data, "index und render/index nicht gleich")
# render on a template that does not exist in ui
response = self.app.get( "/render/keintemplate" )
self.assertEqual(response.status_code, 404, "render auf nicht auf nicht vorhandenes Template in ui")
# load auf nicht vorhandene Datei testen
response = self.app.get( "/globals/js/keinedatei" )
self.assertEqual(response.status_code, 404, "load auf nicht vorhandene Datei")
# in ui eine unittest_route.phtml erzeugen
route_file = os.path.join( ABSPATH , "..", "ui", "unittest_route.phtml")
with open(route_file, 'w') as f:
f.write( "value={{ value }}" )
# ohne parameter
response = self.app.get( "/unittest_route" )
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertEqual(response.data, b"value=None", "Inhalt ist nicht value=None;_ispcp=")
# zwei gleiche parameter (nur der erste wird verwendet)
response = self.app.get( "/unittest_route?value=12&value=1" )
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertEqual(response.data, b"value=12", "Inhalt ist nicht value=12;_ispcp= FirstValueURIParser")
# unittest_route.phtml in ui wieder entfernen
os.remove( route_file )
# in ui eine unittest_route_ispcp.phtml erzeugen
route_file1 = os.path.join( ABSPATH , "..", "ui", "unittest_route_ispcp.phtml")
with open(route_file1, 'w') as f:
f.write( "{{ params }}" )
# Parameter als dict
response = self.app.get( '/unittest_route_ispcp' , query_string = {
"name":"A",
"uuid":1,
"id":1,
"_ispcp": json.dumps( {"name":"B"} )
} )
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertDictEqual( json.loads( response.data.decode('utf-8') ), {"uuid": "1", "id": "1", "name": "B"}, "Inhalt ist nicht mit dict")
# unittest_route_ispcp.phtml in ui wieder entfernen
os.remove(route_file1)
#
# mit fehler bei _ispcp
response = self.app.get( "/render/index", query_string = {
"zahl":"012",
"bool":True,
"test":1,
"_ispcp": "name"
} )
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
def test_webapp_base_extras( self ):
''' Website calls for additional content
'''
# htmlcov laden geht nur wenn es schon erzeugt wurde
htmlcov_path = osp.join( ABSPATH , "..", ".htmlcov")
if osp.isdir( htmlcov_path ):
response = self.app.get( "/coverage" )
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
response = self.app.get( "/coverage/coverage.css" )
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
else:
print( "(coverage) Test erst nach dem Erstellen möglich." )
# über resources laden
response = self.app.get( "resources/logo.png" )
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
# über fonts laden aber mit Fehler für coverage
response = self.app.get( "fonts/irgendwas" )
self.assertEqual(response.status_code, 404, "Api Status nicht 404")
# über dbadminframe laden
response = self.app.get( "dbadminframe" )
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
# docs iframe laden
response = self.app.get( "/docs" )
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
# /docs/ wird zu /docs also auch iframe laden
response = self.app.get( "/docs/" )
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
# docs laden (beim ersten Aufruf erzeugen)
response = self.app.get( "/docs/index.html" )
# es kommt vor das erst beim 2. Aufruf alles erzeugt wird
if response.status_code == 404:
# 2. Versuch
response = self.app.get( "/docs/index.html" )
# jetzt OK
self.assertEqual(response.status_code, 200, "docs Aufruf Api Status nicht 200. Wurde docs erzeugt?")
# dbadmin laden
response = self.app.get( "/dbadmin" )
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
# new webapp without parameters
webapp = ispBaseWebApp( )
self.assertEqual(webapp._config.get("server.webserver.TESTING"), True, "Testing ist nicht True")
# new webapp with a dict containing only the TESTING entry
webapp = ispBaseWebApp( {"server" : {"webserver" : { "TESTING": True } } } )
self.assertEqual(webapp._config.get("server.webserver.TESTING"), True, "Testing ist nicht True")
def test_webapp_base_api( self ):
# check the swagger.json content against the file configured as custom_swagger_config
response = self.app.get( "api/swagger.json" )
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertEqual(
response.json["info"]["title"], "swagger test", "swagger file nicht ok")
self.assertEqual(
list( response.json["paths"].keys() ),
['/dbtests/', '/dbtests/groupby', '/dbtests/test', '/dbtests/undefined', '/dbtests/{dbtestsId}/', '/dbtests/{dbtestsId}/dbtestsrel',
'/dbtestsrel/', '/dbtestsrel/groupby', '/dbtestsrel/undefined', '/dbtestsrel/{dbtestsrelId}/', '/dbtestsrel/{dbtestsrelId}/dbtests',
'/dummy/', '/dummy/pdf', '/dummy/test', '/dummy/{dummyId}/',
'/system/', '/system/test', '/system/{systemId}/'
],
"Fehlerhafte paths Angaben in swagger.json")
response = self.app.get( "api/gibtsnicht" )
self.assertEqual(response.status_code, 404, "Fehlerhafter api Zugriff ist nicht 404")
def test_webapp_dummy_test( self ):
''' Run api calls
GET /api/dummy/
'''
# --- dummy Klasse abfragen
# dummy api_list abfragen
response = self.app.get( "api/dummy" )
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertEqual(
response.json["data"], [{
'attributes': {'function': 'api_list', 'kwargs': {'_ispcp': {}}},
'id': '12',
'links': {'self': 'http://localhost/api/dummy/12/'},
'type': 'dummy'
}],
"falsche api_list Rückgabe"
)
# dummy api_get abfragen wird dummyId mitgegeben
response = self.app.get( "api/dummy/12" )
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
#print(response.json["data"][0])
self.assertDictEqual(
response.json["data"], {
'attributes': {'dummyId': '12'},
'id': 12,
'links': {'self': 'http://localhost/api/dummy/12/'},
'type': 'dummy'
},
"falsche id Rückgabe"
)
#print( response.json )
# without the required zahl parameter the call must be rejected
response = self.app.get( "api/dummy/test" )
# print("api/dummy/test", response.json )
self.assertEqual(response.status_code, 400, "Api Status nicht 400")
self.assertDictEqual(
response.json,
{
"message": {
"zahl": "Eine Zahl"
}
},
"nicht abgelehnt ohne Pflichfeld Angabe"
)
# ohne text (hat default) mit test (nicht vorhanden)
# /api/system/test?zahl=012&bool=True&test=1&_ispcp={"name":"B"}
response = self.app.get( "api/dummy/test", query_string={
"zahl":"012",
"bool":True,
"test":1,
"_ispcp": json.dumps( {"name":"B"} )
} )
# kommen auch zusätzliche Angaben und werden unnötige ausgefiltert
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertDictEqual(
response.json["data"],
{
"_ispcp": {"name": "B"},
"bool": True,
"text": "typenlos",
"zahl": 12.0
},
"Parameter Auswertung falsch"
)
response = self.app.get( "api/dummy/undefined" )
# einen undefined holen
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertEqual(
response.json["data"],
[{'attributes': {}, 'id': 'undefined', 'type': 'dummy'}],
"undefined fehlerhaft"
)
# Dummy ohne funktion gibt undefined Datensatz
response = self.app.get( "api/dummy/gibtsnicht" )
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertEqual(
response.json["data"],
{ 'attributes': {}, 'id': 'undefined', 'type': 'dummy' },
"Dummy ohne funktion gibt keine undefined datensatz "
)
#
response = self.app.get( "api/dummy/test", query_string={ "zahl": 1 } )
self.assertEqual(response.status_code, 200, "Status nicht 200")
self.assertEqual(
response.json["data"],
[],
"Test leere Liste"
)
response = self.app.get( "api/dummy/test", query_string={ "zahl": 2 } )
self.assertEqual(response.status_code, 200, "Status nicht 200")
self.assertEqual(
response.json["data"],
[{"a": 1, "b": 2}],
"Test Liste mit einem Element"
)
# fehler bei der Umwandlung data bleibt leer
response = self.app.get( "api/dummy/test", query_string={ "zahl": 3 } )
self.assertEqual(response.status_code, 200, "Status nicht 200")
self.assertEqual(
response.json["data"],
[],
"fehler bei der Umwandlung data bleibt leer"
)
response = self.app.get( "api/dummy/test", query_string={ "zahl": 4 } )
self.assertEqual(response.status_code, 200, "Status nicht 200")
#print( response.json )
response = self.app.get( "api/dummy/test", query_string={ "zahl": 5, "_ispcp" : "{test}"} )
self.assertEqual(response.status_code, 200, "Status nicht 200")
self.assertEqual(
response.json['App-Error'],
[{'message': 'swagger Parameter Json Error', 'info': '_ispcp={test}'}],
"Parameter Json Error"
)
# _int_query selbst aufrufen
response = self.app.get( "api/dummy/test", query_string={ "zahl": 6 } )
self.assertEqual(response.status_code, 200, "Status nicht 200")
self.assertEqual(
response.json['data'],
[{'A': 1}, {'B': 2}],
"Parameter Json Error"
)
# _int_group_query selbst aufrufen
response = self.app.get( "api/dummy/test", query_string={ "zahl": 7 } )
self.assertEqual(response.status_code, 200, "Status nicht 200")
self.assertEqual(
response.json['App-Error'],
[],
# [{'message': 'Fehler bei _int_group', 'info': "'dummyQuery' object has no attribute 'group_by'"}],
"_int_group_query selbst aufrufen"
)
# access_cls selbst aufrufen
response = self.app.get( "api/dummy/test", query_string={ "zahl": 8 } )
self.assertEqual(response.status_code, 200, "Status nicht 200")
self.assertEqual(
response.json['data'],
[{'nicht da': ''}, {'sqlalchemy.BigInteger': ''}],
"access_cls selbst aufrufen"
)
# iso2date aufrufen
response = self.app.get( "api/dummy/test", query_string={ "zahl": 9 } )
self.assertEqual(response.status_code, 200, "Status nicht 200")
self.assertEqual(
response.json['data'],
[
{'test=None': None},
{'20180415=2018-04-15': '2018-04-15'},
{'2018-04-15=2018-04-15': '2018-04-15'},
{'2018-04-15 14:36:25=2018-04-15': '2018-04-15'},
{'2018-04-15=18-04-15 00:00:00': '2018-04-15 00:00:00'},
{'2018-04-15 14:36:25=2018-04-15 14:36:25': '2018-04-15 14:36:25'},
{'20180415 14:36:25=2018-04-15 14:36:25': '2018-04-15 14:36:25'},
{'20180415 14:36=2018-04-15 14:36:00': '2018-04-15 14:36:00'},
{'201A0415 14:36:25=None': None},
{'201A0415 14:36=None': None},
{'201A0415=None': None}
],
"iso2date aufrufen"
)
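# The expected pairs above suggest iso2date normalises 'YYYYMMDD' and
# 'YYYY-MM-DD' strings, with or without a time part, and returns None for
# unparsable input. A minimal sketch of such a helper (hypothetical, not the
# project implementation):
#
#   from datetime import datetime
#   def iso2date(value, with_time=False):
#       for fmt in ("%Y-%m-%d %H:%M:%S", "%Y%m%d %H:%M:%S",
#                   "%Y-%m-%d %H:%M", "%Y%m%d %H:%M",
#                   "%Y-%m-%d", "%Y%m%d"):
#           try:
#               dt = datetime.strptime(str(value), fmt)
#           except ValueError:
#               continue
#           return dt.strftime("%Y-%m-%d %H:%M:%S" if with_time else "%Y-%m-%d")
#       return None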
# versuchen eine vorhandene Funktion ohne rpc Kennung aufzurufen
response = self.app.get( "api/dummy/norpc" )
self.assertEqual(response.status_code, 400, "Status nicht 400")
self.assertEqual(
response.json,
{},
"versuchen eine vorhandene Funktion ohne rpc Kennung aufzurufen"
)
#print( response.json )
def test_webapp_db_tests_A( self ):
''' Run api calls
GET /api/dbtests/
'''
# zuerst den zugriff testen und prüfen ob die tabelle 5 datensätze hat
#
response = self.app.get( "api/dbtests/", query_string={})
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertEqual(
len(response.json["data"]), 5, "keine 5 Datensätze"
)
#
# einen Datensatz zusätzlich einfügen
#
response = self.app.post( "api/dbtests/", headers={'Content-Type': 'application/json'}, data=json.dumps({
"data" : {
"attributes": {
"string":"sechs", # Pflichtfeld
#"date":"2020-08-19",
"integer":6
},
"type":"dbtests"
}
}), follow_redirects=True)
self.assertEqual(response.status_code, 201, "Api Status nicht 201 (Created)")
self.assertEqual( response.json["data"]["id"], '6', "Datensatz id ist nicht 6")
# record merken
newRecord6 = response.json["data"]["attributes"]
id6 = response.json["data"]["id"]
link6 = response.json["data"]["links"]["self"]
#
# einen zweiten einfügen
#
response = self.app.post( "api/dbtests/", headers={'Content-Type': 'application/json'}, data=json.dumps({
"data" : {
"attributes": {
"string":"sieben", # Pflichtfeld
#"date":"2020-08-19",
"integer":7
},
"type":"dbtests"
}
}), follow_redirects=True)
self.assertEqual(response.status_code, 201, "Api Status nicht 201 (Created)")
self.assertEqual( response.json["data"]["id"], '7', "Datensatz id ist nicht 7")
# record merken
newRecord7 = response.json["data"]["attributes"]
id7 = response.json["data"]["id"]
link7 = response.json["data"]["links"]["self"]
#
# jetzt alle holen und prüfen
#
response = self.app.get( "api/dbtests/")
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertEqual( len(response.json["data"]), 7, "Datensatzanzahl ist nicht 7")
id = response.json["data"][5]["id"] # zählung ab 0 (5 ist record 6)
record = response.json["data"][5]["attributes"]
link = response.json["data"][5]["links"]["self"]
self.assertEqual( id, id6, "Datensatz id=6 vom ersten stimmt nicht")
self.assertEqual( record, newRecord6, "Datensatz Inhalt vom ersten stimmt nicht")
#
# den siebten Datensatz über den angegebenen link holen
#
response = self.app.get( link7 )
self.assertEqual( response.json["data"]["id"], '7', "Datensatz Id Rückgabe ist nicht 7")
self.assertEqual( type(response.json["data"]), dict, "Datensatz data ist kein dict")
# Inhalt vergleichen
self.assertEqual( response.json["data"]["attributes"], newRecord7, "Datensatz Inhalt stimmt nicht")
#
# siebten Datensatz ändern - die id muss in body und path angegeben werden
#
response = self.app.patch( link7, headers={'Content-Type': 'application/json'}, data=json.dumps({
"data" : {
"attributes": {
# "date":"2020-08-19 00:00", # 2020-08-20, 00:00
"string":"changed",
},
"id": '7',
"type":"dbtests"
}
}), follow_redirects=True)
# 200 - Request fulfilled, document follows
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
# Inhalt darf nicht mehr gleich sein
self.assertNotEqual( response.json["data"], newRecord7, "Datensatz Inhalt ist noch gleich")
#
# den zweiten Datensatz über den angegebenen link holen und Änderungen prüfen
#
response = self.app.get( link7 )
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertEqual( response.json["data"]["attributes"]["string"], "changed", "Feldinhalt ist nicht changed")
# alle holen
response = self.app.get( "api/dbtests/")
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
lastCount = len(response.json["data"] )
# Datensatz 6 und 7 löschen
response = self.app.delete( link6, headers={'Content-Type': 'application/json'} )
self.assertEqual(response.status_code, 204, "Api Status nicht 204")
# alle verbleibenden holen und Anzahl prüfen
response = self.app.get( "api/dbtests/")
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertEqual(len(response.json["data"] ), lastCount - 1 , "Api Status nicht {}".format( lastCount - 1 ))
# jetzt noch 7 löschen
response = self.app.delete( link7, headers={'Content-Type': 'application/json'} )
self.assertEqual(response.status_code, 204, "Api Status nicht 204")
# nach dem löschen Anzahl prüfen
response = self.app.get( "api/dbtests/", query_string={})
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertEqual(
len(response.json["data"]), 5, "keine 5 Datensätze nach dem löschen von 6 und 7"
)
# fehler bei falschem patch
response = self.app.patch( link7, headers={'Content-Type': 'application/json'}, data=json.dumps({
"data" : {
"attributes": {
"string_gibtsnicht":"changed",
},
"id": '99',
"type":"dbtests"
}
}), follow_redirects=True)
self.assertEqual(response.status_code, 500, "Api Status nicht 500")
self.assertEqual(
response.json["App-Error"],
[{'message': 'patch - unbekannter Fehler', 'info': '500'}],
"fehler bei falschem patch"
)
def test_webapp_db_tests_B( self ):
''' Run api calls
GET /api/dbtests/
'''
# einen undefined holen
response = self.app.get( "api/dbtests/undefined")
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertEqual(
response.json["data"],
[{'attributes': {
'aktiv': None,
'data': None,
'date': None,
'decimal': None,
'float': None,
'gruppe': None,
'integer': None,
'isodate': None,
'isodatetime': None,
'numeric': None,
'string': None,
'tags': None
}, 'id': 'undefined', 'type': 'dbtests'}],
"einen undefined holen"
)
# funktion test in dbtests aufrufen - gibt 205 als code
response = self.app.get( "api/dbtests/test", query_string={
"zahl" : 12 # Pflichfeld
})
#print(response.json["data"])
self.assertEqual(response.status_code, 205, "Api Status nicht 205")
self.assertDictEqual(
response.json["data"],
{'attributes': {
'aktiv': None,
'data': None,
'date': None,
'decimal': None,
'float': None,
'gruppe': None,
'integer': None,
'isodate': None,
'isodatetime': None,
'numeric': None,
'string': None,
'tags': 'defaulttag'
}, 'id': 'undefined', 'type': 'dbtests'},
"einen undefined holen"
)
# fehler bei falscher Filterangabe
response = self.app.get( "api/dbtests/", query_string={
"zahl" : 12, # Pflichfeld
"filter" : "eq(tid=1)"
})
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertEqual(
response.json["App-Error"],
[{
'message': '_int_filter',
'info': 'RQL Syntax error: (\'eq(tid=1)\', 6, \'Expected ")"\')'
}],
"fehler bei falscher Filterangabe "
)
# wird nur für htmlcov aufgerufen
response = self.app.get( "api/dbtests/test", query_string={
"dbtestsId" : 2, # mit cls.object_id
"zahl" : 12 # Pflichfeld
})
self.assertEqual(response.status_code, 205, "Api Status nicht 205")
def test_webapp_db_tests_C( self ):
# einen nicht vorhandenen Datensatz abrufen
# FIXME: Meldung auf der Konsole unterdrücken in method_wrapper vorher abfangen ?
response = self.app.get( "api/dbtests/100")
self.assertEqual(response.status_code, 404, "Api Status nicht 404 - notFound")
def test_webapp_db_relation( self ):
''' Api calls for related tables
api/dbtestsrel?filter=eq(dbtests_id,2)
[{'attributes': {'dbtests_id': 2, 'rdata': None, 'rdate': None, 'rgroup': 'B', 'rinteger': 12, 'rstring': 'r_zwei'}, 'id': '2', 'links': {'self': 'http://localhost/api/dbtestsrel/2/'}, 'relationships': {'dbtests': {'data': None, 'links': {'self': 'http://localhost/api/dbtestsrel/2/dbtests'}}}, 'type': 'dbtestsrel'}]
'''
# first check access and verify that both tables hold 5 records
#
response = self.app.get( "api/dbtests/")
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertEqual(
len( response.json["data"] ), 5, "keine 5 Datensätze"
)
response = self.app.get( "api/dbtestsrel/")
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertEqual(
len(response.json["data"]), 5, "keine 5 Datensätze"
)
# daten über path und filter müssen gleich sein nur die globale links Angabe unterscheidet sich
# http://127.0.0.1:5000/api/nutzung?_ispcp={%22_default%22:{%22ersatz_id%22:1754}}&filter=eq(ersatz_id,1754)&page[offset]=0&page[limit]=25
response = self.app.get( "api/dbtests/2/dbtestsrel")
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
reldata = response.json
response = self.app.get( "api/dbtestsrel", query_string={
"filter":"eq(dbtests_id,2)"
})
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertEqual(
reldata["data"], response.json["data"],
"Rückgaben sind nicht gleich"
)
def test_webapp_db_group( self ):
''' Api calls for grouping
# without a groups entry, fields is used
/api/<modul>/groupby?fields[<modul>]=<feld1>
# with groups
/api/<modul>/groupby?fields[<modul>]=<feld1,feld2>&groups=<feld1,feld2>
# with groups and delimiter
/api/<modul>/groupby?fields[<modul>]=<feld1,feld2>&groups[<modul>]=<feld1,feld2>&delimiter=,
# with a filter
/api/<modul>/groupby?fields[<modul>]=<feld1,feld2>&filter=eq(aktiv,true)
# with labels
/api/<modul>/groupby?fields[<modul>]=<feld1,feld2>&labels={"dbtests.gruppe": "Hallo"}
'''
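# The groupby endpoint presumably maps to an aggregation along the lines of
#   SELECT gruppe, COUNT(*) AS hasChildren FROM dbtests GROUP BY gruppe
# which is exactly what the assertions below verify against the five seed rows.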
# mit fields Angabe
response = self.app.get( "api/dbtests/groupby", query_string={
"fields[dbtests]":"gruppe"
})
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertEqual( response.json["data"],[
{'attributes': {'hasChildren': 1, 'gruppe': 'A'}, 'id': None, 'type': 'dbtests'},
{'attributes': {'hasChildren': 2, 'gruppe': 'B'}, 'id': None, 'type': 'dbtests'},
{'attributes': {'hasChildren': 2, 'gruppe': 'C'}, 'id': None, 'type': 'dbtests'}
], "groupby mit fields Angabe Rückgabe fehlerhaft " )
# mit groups Angabe
response = self.app.get( "api/dbtests/groupby", query_string={
"groups":"gruppe"
})
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertEqual( response.json["data"],[
{'attributes': {'hasChildren': 1, 'gruppe': 'A'}, 'id': None, 'type': 'dbtests'},
{'attributes': {'hasChildren': 2, 'gruppe': 'B'}, 'id': None, 'type': 'dbtests'},
{'attributes': {'hasChildren': 2, 'gruppe': 'C'}, 'id': None, 'type': 'dbtests'}
], "groupby mit groups Angabe Rückgabe fehlerhaft " )
# mit Filter und zwei Gruppierungs Feldern
response = self.app.get( "api/dbtests/groupby", query_string={
"groups[dbtests]":"gruppe,tags",
"filter":"eq(aktiv,true)"
})
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertEqual( response.json["data"],[
{'attributes': {'gruppe': 'A', 'hasChildren': 1, 'tags': 'A,K'}, 'id': None, 'type': 'dbtests'},
{'attributes': {'gruppe': 'B', 'hasChildren': 1, 'tags': 'A,K'}, 'id': None, 'type': 'dbtests'},
{'attributes': {'gruppe': 'C', 'hasChildren': 1, 'tags': 'M,K'}, 'id': None, 'type': 'dbtests'}
], "groupby mit Filter und zwei Gruppierungs Feldern fehlerhaft " )
# mit delimiter
response = self.app.get( "api/dbtests/groupby", query_string={
"groups":"tags",
"delimiter": ","
})
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertEqual( response.json["data"],[
{'attributes': {'tags': 'A'}},
{'attributes': {'tags': 'B'}},
{'attributes': {'tags': 'K'}},
{'attributes': {'tags': 'M'}}
], "groupby mit delimiter Rückgabe fehlerhaft " )
# groupby mit label testen
response = self.app.get( "api/dbtests/groupby", query_string={
"groups":"gruppe",
"labels": '{"dbtests.gruppe": "lGruppe"}'
})
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertEqual( response.json["data"],
[
{'attributes': {'hasChildren': 1, 'lGruppe': 'A'}, 'id': None, 'type': 'dbtests'},
{'attributes': {'hasChildren': 2, 'lGruppe': 'B'}, 'id': None, 'type': 'dbtests'},
{'attributes': {'hasChildren': 2, 'lGruppe': 'C'}, 'id': None, 'type': 'dbtests'}
]
, "groupby mit label fehlerhaft " )
# groupby mit zweifachen label testen
response = self.app.get( "api/dbtests/groupby", query_string={
"groups":"gruppe",
"labels": '{"dbtests.gruppe": ["lGruppeA", "lGruppeB"]}'
})
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertEqual( response.json["data"],
[
{'attributes': {'hasChildren': 1, 'lGruppeA': 'A', 'lGruppeB': 'A'}, 'id': None, 'type': 'dbtests'},
{'attributes': {'hasChildren': 2, 'lGruppeA': 'B', 'lGruppeB': 'B'}, 'id': None, 'type': 'dbtests'},
{'attributes': {'hasChildren': 2, 'lGruppeA': 'C', 'lGruppeB': 'C'}, 'id': None, 'type': 'dbtests'}
]
, "groupby mit label fehlerhaft " )
# groupby mit fields und label testen
response = self.app.get( "api/dbtests/groupby", query_string={
"fields[dbtests]":"gruppe",
"labels": '{"dbtests.gruppe": "lGruppe"}'
})
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertEqual(response.json["data"],
[
{'attributes': {'lGruppe': 'A', 'hasChildren': 1}, 'id': None, 'type': 'dbtests'},
{'attributes': {'lGruppe': 'B', 'hasChildren': 2}, 'id': None, 'type': 'dbtests'},
{'attributes': {'lGruppe': 'C', 'hasChildren': 2}, 'id': None, 'type': 'dbtests'}
]
, "groupby mit fields und label fehlerhaft" )
# groupby mit fields und zweifachen label testen
response = self.app.get( "api/dbtests/groupby", query_string={
"fields[dbtests]":"gruppe",
"labels": '{"dbtests.gruppe": ["lGruppeA", "lGruppeB"]}'
})
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertEqual( response.json["data"],
[
{'attributes': {'hasChildren': 1, 'lGruppeA': 'A', 'lGruppeB': 'A'}, 'id': None, 'type': 'dbtests'},
{'attributes': {'hasChildren': 2, 'lGruppeA': 'B', 'lGruppeB': 'B'}, 'id': None, 'type': 'dbtests'},
{'attributes': {'hasChildren': 2, 'lGruppeA': 'C', 'lGruppeB': 'C'}, 'id': None, 'type': 'dbtests'}
]
, "groupby mit fields und label fehlerhaft" )
# id als gruppe wird ausgefiltert
response = self.app.get( "api/dbtests/groupby", query_string={
"groups":"id"
})
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertEqual( response.json["data"], [
{'attributes': {'hasChildren': 1}, 'id': 1, 'type': 'dbtests'},
{'attributes': {'hasChildren': 1}, 'id': 2, 'type': 'dbtests'},
{'attributes': {'hasChildren': 1}, 'id': 3, 'type': 'dbtests'},
{'attributes': {'hasChildren': 1}, 'id': 4, 'type': 'dbtests'},
{'attributes': {'hasChildren': 1}, 'id': 5, 'type': 'dbtests'}
] , "id als gruppe wird ausgefiltert" )
def test_webapp_db_typen( self ):
''' Test the various field types
'''
# .. todo:: numeric fields
# date fields - date
# json fields - data
response = self.app.post( "api/dbtests/", headers={'Content-Type': 'application/json'}, data=json.dumps({
"data" : {
"attributes": {
"string":"sechs", # Pflichtfeld
"date":"2020-08-19",
"integer": 6,
"data": {"A":1},
"float": 1/3,
"decimal" : 1.2345, # soll nur 1.23 ergeben
"numeric" : 5.6789,
"isodate" :"2020-08-19",
"isodatetime" :"2020-08-19 14:37"
},
"type":"dbtests"
}
}), follow_redirects=True)
#print( response.json["data"] )
#self.assertEqual( response.status_code, 201, "Api Status nicht 201 (Created)")
#self.assertEqual( response.json["data"]["attributes"]["date"], '2020-08-19', "Datensatz datum ist nicht 2020-08-19")
#self.assertEqual( response.json["data"]["attributes"]["data"], {"A":1}, 'Datensatz data ist nicht {"A":1}')
#self.assertEqual( response.json["data"]["attributes"]["float"], 0.3333333333333333, 'Datensatz float ist nicht 0.3333333333333333')
response = self.app.post( "api/dbtests/", headers={'Content-Type': 'application/json'}, data=json.dumps({
"data" : {
"attributes": {
"string":"sechs", # Pflichtfeld
"date":"2020-08-19",
"integer": 6,
"data": {"A":1},
"float": 1/3,
"decimal" : 12345.3456, # soll nur 12345.35 ergeben
"numeric" : 5.6789,
"isodate" :"2020-08-19",
"isodatetime" :"2020-08-19 14:37"
},
"type":"dbtests"
}
}), follow_redirects=True)
#print( response.json["data"] )
def check_pdf_data( self, data, contents=-1, pages=-1, intern_check:bool=False ):
''' Check pdf data against previously stored reference data
Creates a 'check' dir below the unittest resources dir to hold the reference data.
If it already exists, the files found there are used for the comparison.
Parameters
----------
data : dict
- body: dict
- overlays: dict
- pages: int
- pdf_filename: string
- pdf_filepath: string
- png_filename: string
- png_filepath: string
contents : int
Number of pages in the content
pages : int
Number of pages in the PDF
intern_check:
If True, the check uses tests instead of the normal pdf location. Default is False.
Returns
-------
None.
'''
#print( data["content"] )
self.assertIn("pdf_filename", data,
"PDF data fehlerhaft filename fehlt"
)
self.assertIn("png_filepath", data,
"PNG data fehlerhaft filepath fehlt"
)
check = {}
if intern_check == True:
check_dir = osp.join( ABSPATH, "resources", "check" )
else:
check_dir = osp.join( os.path.dirname( data["pdf_filepath"] ), "check" )
# create the folder if it does not already exist
if not os.path.exists( check_dir ):
try:
os.makedirs( check_dir )
except IOError as e:
print("Unable to create dir.", e)
# Dateiname für den Inhalt festlegen
json_check_name = osp.join( check_dir, data["pdf_filename"] ) + ".json"
png_check_name = osp.join( check_dir, data["png_filename"] )
png_new_name = data["png_filepath"]
# save the current content
with open( data["pdf_filepath"] + ".json" , "w" ) as json_file:
json.dump( data["content"] , json_file, indent=2 )
# beim erstenmal pdfData content in unittest anlegen
if not os.path.exists( json_check_name ):
with open(json_check_name, "w" ) as json_file:
# print("save", json_check_name)
json.dump( data["content"] , json_file, indent=2 )
if intern_check == True:
pdf_check_name = osp.join( check_dir, data["pdf_filename"] )
# beim erstenmal pdf nach check kopieren
if not os.path.exists( pdf_check_name ):
# adding exception handling
try:
copyfile( data["pdf_filepath"], pdf_check_name)
except IOError as e:
print("Unable to copy file.", e)
# beim erstenmal png nach check kopieren
if not os.path.exists( png_check_name ):
# adding exception handling
try:
copyfile(png_new_name, png_check_name)
except IOError as e:
print("Unable to copy file.", e)
page_names = data["content"].keys()
#print(page_names)
# ggf Anzahl der Bereiche prüfen
if contents > -1:
self.assertEqual(
len( page_names ),
contents,
"Anzahl der content Bereiche in '{}' stimmt nicht.".format( data["pdf_filepath"] )
)
# ggf Anzahl der Seiten prüfen
if pages > -1:
self.assertEqual(
data["pages"],
pages,
"Anzahl der Seiten in '{}' stimmt nicht.".format( data["pdf_filepath"] )
)
# erzeugte png vergleichen und diff speichern
png_check = img_io.imread( png_check_name )
png_new = img_io.imread( png_new_name )
self.assertEqual(
png_check.shape,
png_new.shape,
"Die Bildgrößen in '{}' stimmen nicht.".format( data["pdf_filepath"] )
)
# create and save the image comparison
compare = compare_images(png_check, png_new, method='diff')
img_io.imsave( png_new_name + ".diff.png", compare )
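# compare_images(..., method='diff') - presumably skimage.util.compare_images -
# returns the pixel-wise absolute difference, so a completely black .diff.png
# means the rendered page is identical to the stored reference image.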
# passende check daten (json_check_name) laden
with open( json_check_name ) as json_file:
check = json.load( json_file )
# einige content Inhalte prüfen
from bs4 import BeautifulSoup
for page_name, content in data["content"].items():
bs_data = BeautifulSoup( content, 'html.parser')
bs_check = BeautifulSoup( check[ page_name ], 'html.parser')
# zuerst die texte
data_text = bs_data.find_all('div', {"class": "text"} )
check_text = bs_check.find_all('div', {"class": "text"} )
self.assertEqual(
data_text,
check_text,
"PDF content .text in '{}' ist fehlerhaft".format( data["pdf_filepath"] )
)
# gesamt check der Bilder
def check_mse(imageA, imageB):
# the 'Mean Squared Error' between the two images is the
# sum of the squared difference between the two images;
# NOTE: the two images must have the same dimension
err = np.sum((imageA.astype("float") - imageB.astype("float")) ** 2)
err /= float(imageA.shape[0] * imageA.shape[1])
# return the MSE, the lower the error, the more "similar"
# the two images are
return err
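# Worked example for check_mse (hypothetical 2x2 arrays, assuming numpy as np):
#   a = np.zeros((2, 2)); b = np.ones((2, 2))
#   check_mse(a, a)  # -> 0.0, identical images
#   check_mse(a, b)  # -> 1.0, squared diff sum 4 over 2*2 pixels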
# MeanCheck durchführen
try:
mse = check_mse( png_check, png_new )
except Exception:
mse = -1
#print( "Der PNG Vergleichsbild MSE von '{}' ist '{}'.".format( data["png_filepath"] + ".diff.png", mse ) )
#mse=0.0
self.assertEqual( 0.0, mse,
"Der PNG Vergleichsbild MSE stimmt nicht. Diff image '{}' prüfen. Test erneut durchführen.".format( data["png_filepath"] + ".diff.png" )
)
def test_isp_mpdf_fonts( self ):
"""Testet Fonts für die PDF Erstellung mit fc-list
Benötigte Fonts:
* DejaVuSerif
* Material Design Icons
Returns
-------
None.
"""
import subprocess
cmd = '/usr/bin/fc-list --format="%{family[0]}\n" | sort | uniq'
output = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE ).communicate()[0]
self.assertIn(
b"Material Design Icons",
output,
"Der Font 'Material Design Icons' fehlt im System"
)
self.assertIn(
b"DejaVu Serif",
output,
"Der Font 'DejaVuSerif' fehlt im System"
)
def test_isp_mpdf_base( self ):
''' Create a PDF document
'''
response = self.app.get( "api/dummy/pdf" )
self.assertEqual(response.status_code, 400, "Status nicht 400")
self.assertEqual(
response.data,
b"Keine PDF Datei (nofile.pdf) gefunden",
"Testet Fehler bei Rückgabe eine fehlenden PDF Datei "
)
# zuerst nur ein leeres PDF mit overlay
response = self.app.get( "api/dummy/pdf", query_string={
"name" : "test-1"
} )
self.assertEqual(response.status_code, 200, "Status nicht 200")
self.assertEqual( response.json["data"]["body"], "", "PDF body ist nicht leer" )
self.check_pdf_data( response.json["data"], contents=0, pages=1, intern_check=True )
# text und markdown mit Header (h2)
response = self.app.get( "api/dummy/pdf", query_string={
"name" : "test-2"
} )
self.assertEqual(response.status_code, 200, "Status nicht 200")
# kommt es hier zu einem Fehler stimmt die css Einbindung von weasyprint nicht
self.check_pdf_data( response.json["data"], contents=1, pages=1, intern_check=True )
# wie test 2 aber markdown zuerst
response = self.app.get( "api/dummy/pdf", query_string={
"name" : "test-2a"
} )
self.assertEqual(response.status_code, 200, "Status nicht 200")
#print( response.json["data"] )
self.check_pdf_data( response.json["data"], contents=1, pages=1, intern_check=True )
response = self.app.get( "api/dummy/pdf", query_string={
"name" : "test-3"
} )
self.assertEqual(response.status_code, 200, "Status nicht 200")
self.check_pdf_data( response.json["data"], contents=2, pages=4, intern_check=True )
response = self.app.get( "api/dummy/pdf", query_string={
"name" : "test-4"
} )
self.assertEqual(response.status_code, 200, "Status nicht 200")
# kommt es hier zu einem Fehler stimmt die font Einbindung von weasyprint nicht
self.check_pdf_data( response.json["data"], contents=2, pages=3, intern_check=True )
#print( response.json )
# .. todo:: rückgabe als pdf
def check_weasyprint( self ):
''' Create a PDF document with weasyprint
'''
# pdf weasyprint test
from weasyprint import HTML, CSS
from weasyprint.fonts import FontConfiguration
font_config = FontConfiguration()
from weasyprint import default_url_fetcher
files_loaded = []
def log_url_fetcher(url):
files_loaded.append( url )
return default_url_fetcher(url)
# HTML('<h1>foo') would be filename
base_dir = os.path.join( ABSPATH, "..", "resources" )
html = HTML(string='''
<h1>The title</h1>
<div class="blue-text">blauer Text</div>
<span>mdi-check-outline: </span><span><i class="mdi mdi-check-outline"></i></span><span> Oder?</span>
''')
css = CSS(string='''
@import url(mpdf_styles.css);
h1 { font-family: Arial,"Helvetica Neue",Helvetica,sans-serif }
''', font_config=font_config, url_fetcher=log_url_fetcher, base_url=base_dir )
pdf_file_name = os.path.join( ABSPATH, 'files', 'weasyprint.pdf')
html.write_pdf( pdf_file_name, stylesheets=[css], font_config=font_config)
# es sollten min. 5 Dateien eingelesen werden
self.assertGreaterEqual(len(files_loaded), 5, "Anzahl nicht >= 5")
# only test 4
response = self.app.get( "api/dummy/pdf", query_string={
"name" : "test-4"
} )
self.assertEqual(response.status_code, 200, "Status nicht 200")
# kommt es hier zu einem Fehler stimmt die font Einbindung von weasyprint nicht
self.check_pdf_data( response.json["data"], contents=2, pages=3, intern_check=True )
# print( files_loaded, len(files_loaded) )
def suite( testClass=None ):
'''Adds every function starting with test_ from the given class to the suite
Parameters
----------
testClass : unittest.TestCase
Class to test
Returns
-------
suite : unittest.TestSuite
'''
if not testClass:
testClass = ispTest
suite = unittest.TestSuite( )
logger.setLevel( logging.ERROR ) # ERROR DEBUG WARNING
if testClass:
#suite.addTest( testClass('test_config_jinja') )
#suite.addTest( testClass('check_weasyprint') )
#suite.addTest( testClass('test_webapp_db_tests_C') )
#suite.addTest( testClass('test_webapp_db_tests_B') )
#return suite
for m in dir( testClass ):
if m.startswith('test_config_'):
suite.addTest( testClass(m) )
elif m.startswith('test_webapp_base_'):
suite.addTest( testClass(m) )
elif m.startswith('test_webapp_dummy_'):
suite.addTest( testClass(m) )
elif m.startswith('test_webapp_db_'):
suite.addTest( testClass(m) )
elif m.startswith('test_isp_mpdf_'):
suite.addTest( testClass(m) )
return suite
# -----------------------------------------------------------------------------
if __name__ == '__main__':
'''
0 (quiet): you just get the total numbers of tests executed and the global result
1 (default): you get the same plus a dot for every successful test or a F for every failure
2 (verbose): you get the help string of every test and the result
'''
runner = unittest.TextTestRunner()
runner.run( suite( ispTest ) )
| 37.376049 | 329 | 0.518061 |
7ed44f341fa2054528408df6fb4e681538fe6935
| 5,249 |
py
|
Python
|
src/mitglieder/models.py
|
Sumarbrander/Stura-Mitgliederdatenbank
|
691dbd33683b2c2d408efe7a3eb28e083ebcd62a
|
[
"MIT"
] | 1 |
2021-12-20T20:15:26.000Z
|
2021-12-20T20:15:26.000Z
|
src/mitglieder/models.py
|
Sumarbrander/Stura-Mitgliederdatenbank
|
691dbd33683b2c2d408efe7a3eb28e083ebcd62a
|
[
"MIT"
] | null | null | null |
src/mitglieder/models.py
|
Sumarbrander/Stura-Mitgliederdatenbank
|
691dbd33683b2c2d408efe7a3eb28e083ebcd62a
|
[
"MIT"
] | null | null | null |
from datetime import date
from django.db import models
from django.db.models import Q
from simple_history.models import HistoricalRecords
from aemter.models import Funktion
class Mitglied(models.Model):
"""
    Database model for a member (Mitglied)
    Fields:
    * name: Last name of the member. Must not be null.
    * vorname: First name of the member. Must not be null.
    * spitzname: Nickname of the member. May be null.
    * strasse: Street of the member. May be null.
    * hausnr: House number of the member. May be null.
    * plz: Postal code of the member. May be null.
    * ort: City of the member. May be null.
    * tel_mobil: Phone number of the member. May be null.
    * tel_weitergabe: Whether the member's phone number may be passed on in an emergency. Defaults to False.
    * wahl_angenommen: Whether the member has already accepted their election. Defaults to False.
    * kenntnis_ordn: Whether the member has already submitted the acknowledgement of the regulations. Defaults to False.
    * verpfl_datengeheimnis: Whether the member has already accepted the data-secrecy obligation. Defaults to False.
    * stammdatenblatt: Whether the member has already submitted the master data sheet. Defaults to False.
    * history
"""
name = models.CharField(max_length=50, null=False)
vorname = models.CharField(max_length=50, null=False)
spitzname = models.CharField(max_length=50, null=True)
strasse = models.CharField(max_length=50, null=True)
hausnr = models.CharField(null=True, max_length=10)
plz = models.CharField(max_length=5, null=True)
ort = models.CharField(max_length=50, null=True)
tel_mobil = models.CharField(max_length=15, null=True)
tel_weitergabe = models.BooleanField(default=False, null=False)
wahl_angenommen = models.BooleanField(default=False, null=False)
kenntnis_ordn = models.BooleanField(default=False, null=False)
verpfl_datengeheimnis = models.BooleanField(default=False, null=False)
stammdatenblatt = models.BooleanField(default=False, null=False)
history = HistoricalRecords()
def __str__(self):
return self.vorname + " " + self.name
def curr_funktion_count(self):
"""
        Returns the number of functions the member currently holds.
"""
return self.mitgliedamt_set\
.filter(Q(amtszeit_ende__isnull=True) | Q(amtszeit_ende__gte=date.today()))\
.count()
def curr_funktion_first(self):
"""
        Returns the member's first current function,
        or None if the member holds no function.
"""
if self.mitgliedamt_set\
.filter(Q(amtszeit_ende__isnull=True) | Q(amtszeit_ende__gte=date.today())):
return self.mitgliedamt_set\
.filter(Q(amtszeit_ende__isnull=True) | Q(amtszeit_ende__gte=date.today()))\
.first()\
.funktion
else:
return None
def admission_data_complete(self):
"""
        Checks whether all data required for admission as a member is present.
"""
        return (self.wahl_angenommen and self.kenntnis_ordn
                and self.verpfl_datengeheimnis and self.stammdatenblatt)
class Meta:
verbose_name = "Mitglied"
verbose_name_plural = "Mitglieder"
class MitgliedAmt(models.Model):
"""
    Database model for the member-office assignment (Zuordnung Mitglied-Amt)
    Fields:
    * mitglied: References a member. Must not be null.
    * funktion: References a function. Must not be null.
    * amtszeit_beginn: Start date of the term of office. May be null.
    * amtszeit_ende: End date of the term of office. May be null.
    * history
    Note that this assignment is deleted when the member or the function is deleted. (Cascade)
"""
mitglied = models.ForeignKey(Mitglied, on_delete=models.CASCADE, null=False)
funktion = models.ForeignKey(Funktion, on_delete=models.CASCADE, null=False)
amtszeit_beginn = models.DateField(null=True)
amtszeit_ende = models.DateField(null=True)
history = HistoricalRecords()
def __str__(self):
return self.mitglied.__str__() + ", " + self.funktion.__str__()
class Meta:
verbose_name = "Zuordnung Mitglied-Amt"
verbose_name_plural = "Zuordnungen Mitglied-Amt"
class MitgliedMail(models.Model):
"""
    Database model for the member-mail assignment (Zuordnung Mitglied-Mail)
    Fields:
    * mitglied: References a member. Must not be null.
    * email: An e-mail address of the member. Must not be null.
    * history
    Note that this assignment is deleted when the member is deleted. (Cascade)
"""
mitglied = models.ForeignKey(Mitglied, on_delete=models.CASCADE, null=False)
email = models.CharField(max_length=50, null=False)
history = HistoricalRecords()
def __str__(self):
return self.email + " " + self.mitglied.__str__()
class Meta:
verbose_name = "Zuordnung Mitglied-Mail"
verbose_name_plural = "Zuordnungen Mitglied-Mail"
| 39.765152 | 137 | 0.701086 |
7eebabc1140cdbc9040d4a64ccc9a499542c1cdd
| 5,101 |
py
|
Python
|
official/cv/xception/eval.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
official/cv/xception/eval.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
official/cv/xception/eval.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""eval Xception."""
import time
import os
from mindspore import context, nn
from mindspore.train.model import Model
from mindspore.common import set_seed
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from src.Xception import xception
from src.dataset import create_dataset
from src.loss import CrossEntropySmooth
from src.model_utils.config import config as args_opt, config_gpu, config_ascend
from src.model_utils.moxing_adapter import moxing_wrapper
from src.model_utils.device_adapter import get_device_id, get_device_num
set_seed(1)
def modelarts_pre_process():
'''modelarts pre process function.'''
def unzip(zip_file, save_dir):
import zipfile
s_time = time.time()
if not os.path.exists(os.path.join(save_dir, args_opt.modelarts_dataset_unzip_name)):
zip_isexist = zipfile.is_zipfile(zip_file)
if zip_isexist:
fz = zipfile.ZipFile(zip_file, 'r')
data_num = len(fz.namelist())
print("Extract Start...")
print("Unzip file num: {}".format(data_num))
data_print = int(data_num / 100) if data_num > 100 else 1
i = 0
for file in fz.namelist():
if i % data_print == 0:
print("Unzip percent: {}%".format(int(i * 100 / data_num)), flush=True)
i += 1
fz.extract(file, save_dir)
print("Cost time: {}min:{}s.".format(int((time.time() - s_time) / 60),
int(int(time.time() - s_time) % 60)))
print("Extract Done.")
else:
print("This is not zip.")
else:
print("Zip has been extracted.")
if args_opt.modelarts_dataset_unzip_name:
zip_file_1 = os.path.join(args_opt.data_path, args_opt.modelarts_dataset_unzip_name + ".zip")
save_dir_1 = os.path.join(args_opt.data_path)
sync_lock = "/tmp/unzip_sync.lock"
        # Each server contains at most 8 devices.
if get_device_id() % min(get_device_num(), 8) == 0 and not os.path.exists(sync_lock):
print("Zip file path: ", zip_file_1)
print("Unzip file save dir: ", save_dir_1)
unzip(zip_file_1, save_dir_1)
print("===Finish extract data synchronization===")
try:
os.mknod(sync_lock)
except IOError:
pass
while True:
if os.path.exists(sync_lock):
break
time.sleep(1)
print("Device: {}, Finish sync unzip data from {} to {}.".format(get_device_id(), zip_file_1, save_dir_1))
args_opt.test_data_dir = args_opt.data_path
if args_opt.modelarts_dataset_unzip_name:
args_opt.test_data_dir = os.path.join(args_opt.test_data_dir, args_opt.folder_name_under_zip_file)
args_opt.checkpoint_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), args_opt.checkpoint_path)
@moxing_wrapper(pre_process=modelarts_pre_process)
def run_eval():
if args_opt.device_target == "Ascend":
config = config_ascend
elif args_opt.device_target == "GPU":
config = config_gpu
else:
raise ValueError("Unsupported device_target.")
context.set_context(device_id=args_opt.device_id)
context.set_context(mode=context.GRAPH_MODE, device_target=args_opt.device_target, save_graphs=False)
# create dataset
dataset = create_dataset(args_opt.test_data_dir, do_train=False, batch_size=config.batch_size, device_num=1, rank=0)
# step_size = dataset.get_dataset_size()
# define net
net = xception(class_num=config.class_num)
# load checkpoint
param_dict = load_checkpoint(args_opt.checkpoint_path)
load_param_into_net(net, param_dict)
net.set_train(False)
# define loss, model
loss = CrossEntropySmooth(smooth_factor=config.label_smooth_factor, num_classes=config.class_num)
# define model
eval_metrics = {'Loss': nn.Loss(),
'Top_1_Acc': nn.Top1CategoricalAccuracy(),
'Top_5_Acc': nn.Top5CategoricalAccuracy()}
model = Model(net, loss_fn=loss, metrics=eval_metrics)
# eval model
res = model.eval(dataset, dataset_sink_mode=True)
print("result:", res, "ckpt=", args_opt.checkpoint_path)
if __name__ == '__main__':
run_eval()
| 39.542636 | 120 | 0.649873 |
bc11199f000e7d5428e98fb11fb984a97f704c6b
| 233 |
py
|
Python
|
user/drivers/sequencer-linux/linux/gen_bytes.py
|
losfair/FlatMk-v0
|
6e78666e8982e41688c24828093ea6b73b76ea11
|
[
"MIT"
] | 5 |
2020-01-11T22:38:34.000Z
|
2021-06-01T13:40:55.000Z
|
user/drivers/sequencer-linux/linux/gen_bytes.py
|
losfair/FlatMk-v0
|
6e78666e8982e41688c24828093ea6b73b76ea11
|
[
"MIT"
] | null | null | null |
user/drivers/sequencer-linux/linux/gen_bytes.py
|
losfair/FlatMk-v0
|
6e78666e8982e41688c24828093ea6b73b76ea11
|
[
"MIT"
] | null | null | null |
import sys
with open(sys.argv[1], "rb") as f:
data = f.read()
bstr = []
for b in data:
bstr.append(hex(b))
print("const uint8_t LINUX_INIT_ELF_BYTES[{}] = {};\n".format(len(bstr), "{" + ",".join(bstr) + "}"))
| 29.125 | 105 | 0.540773 |
70b3d1c5ee575cb84f905181cfc006bc4ccc3918
| 637 |
py
|
Python
|
Main.py
|
PeerPressured/Covid_Genome_Scanner
|
a9579b9e862e4a46001d6eb7f8ad54aaf9d9dd41
|
[
"MIT"
] | null | null | null |
Main.py
|
PeerPressured/Covid_Genome_Scanner
|
a9579b9e862e4a46001d6eb7f8ad54aaf9d9dd41
|
[
"MIT"
] | null | null | null |
Main.py
|
PeerPressured/Covid_Genome_Scanner
|
a9579b9e862e4a46001d6eb7f8ad54aaf9d9dd41
|
[
"MIT"
] | null | null | null |
def sequenzenfinder(gen, seq):
copy = gen
displ = 0
indizes = []
while True:
if seq in copy:
index = copy.index(seq)
indizes.append(index+displ)
copy = copy[index+1:]
displ += index+1
else:
return indizes
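# Usage sketch: sequenzenfinder returns the start index of every occurrence
# of seq in gen, overlapping matches included, e.g.:
#   sequenzenfinder(gen="gattaca", seq="a")  # -> [1, 4, 6]
#   sequenzenfinder(gen="aaaa", seq="aa")    # -> [0, 1, 2]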
filename = 'LongCovid.txt'
file = open(filename,'r')
content = file.read().splitlines()
file.close()
genom = ''.join(content) # contents of the text file, tidied up
basensequenz = input('Paste the genome sequence here: ').lower()
if basensequenz in genom:
print(sequenzenfinder(gen = genom, seq = basensequenz))
else:
print(False)
| 24.5 | 67 | 0.613815 |
70f04531610aef6d7f7dd4e81208f71d9f91a43a
| 941 |
py
|
Python
|
src/milk2.py
|
xiaonanln/python-usaco
|
8f0fef19cb5f89232d985f79d955f0de5ef4e10d
|
[
"MIT"
] | null | null | null |
src/milk2.py
|
xiaonanln/python-usaco
|
8f0fef19cb5f89232d985f79d955f0de5ef4e10d
|
[
"MIT"
] | null | null | null |
src/milk2.py
|
xiaonanln/python-usaco
|
8f0fef19cb5f89232d985f79d955f0de5ef4e10d
|
[
"MIT"
] | null | null | null |
"""
ID: isaiahl1
LANG: PYTHON2
TASK: milk2
"""
import operator
fin = open ('milk2.in', 'r')
fout = open ('milk2.out', 'w')
def main():
"""
3
300 1000
700 1200
1500 2100
:return:
"""
N = int(fin.readline())
ranges = []
for _ in xrange(N):
a, b = map(int, fin.readline().strip().split())
ranges.append( (a, b) )
r1, r2 = solve(ranges)
print >>fout, r1, r2
def solve(ranges):
N = len(ranges)
ranges.sort(key=operator.itemgetter(0)) # sort by start time
# print 'ranges', ranges
beginMilkTime, stopMilkTime = ranges[0]
maxMilk = stopMilkTime - beginMilkTime
maxGap = 0
for i in xrange(1, N):
a, b = ranges[i]
if a <= stopMilkTime:
if b > stopMilkTime:
stopMilkTime = b
maxMilk = max(maxMilk, stopMilkTime - beginMilkTime)
else: # a > stopMilkTime
gap = a - stopMilkTime
maxGap = max(maxGap, gap)
beginMilkTime, stopMilkTime = a, b
return maxMilk, maxGap
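# Worked example (the sample from the docstring above): sorted by start,
# (300,1000) and (700,1200) merge into [300,1200], then (1500,2100) starts a
# new block, so the longest milking stretch is 900 and the longest gap is
# 1500 - 1200 = 300, giving "900 300".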
with fin:
with fout:
main()
| 17.109091 | 61 | 0.63762 |
387b0102da03f4155797036fdc325784fc4c950e
| 860 |
py
|
Python
|
spo/scripts/custom_scripts/leave_day_calculations.py
|
libracore/spo
|
efff6da53a776c4483f06d9ef1acc8a7aa96b28e
|
[
"MIT"
] | null | null | null |
spo/scripts/custom_scripts/leave_day_calculations.py
|
libracore/spo
|
efff6da53a776c4483f06d9ef1acc8a7aa96b28e
|
[
"MIT"
] | 6 |
2019-08-23T18:36:26.000Z
|
2019-11-12T13:12:12.000Z
|
spo/scripts/custom_scripts/leave_day_calculations.py
|
libracore/spo
|
efff6da53a776c4483f06d9ef1acc8a7aa96b28e
|
[
"MIT"
] | 1 |
2021-08-14T22:22:43.000Z
|
2021-08-14T22:22:43.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019, libracore and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils.data import nowdate, getdate
from frappe import _
@frappe.whitelist()
def get_leaves_taken(employee, leave_type):
now = nowdate()
first_of_year = getdate().strftime('%Y') + "-01-01"
leaves_taken_query = """SELECT SUM(`total_leave_days`)
FROM `tabLeave Application`
WHERE `employee` = '{employee}'
AND `leave_type` = '{leave_type}'
AND `status` = 'Approved'
AND `to_date` <= '{now}'
AND `from_date` >= '{first_of_year}'
AND `docstatus` = 1""".format(employee=employee, leave_type=leave_type, now=now, first_of_year=first_of_year)
return frappe.db.sql(leaves_taken_query, as_list=True)[0][0]
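    # A hedged alternative sketch of the same query using frappe's parameter
    # binding (frappe.db.sql accepts a dict of values for %(name)s
    # placeholders), which avoids interpolating inputs into the SQL string:
    #   leaves_taken_query = """SELECT SUM(`total_leave_days`)
    #       FROM `tabLeave Application`
    #       WHERE `employee` = %(employee)s
    #           AND `leave_type` = %(leave_type)s
    #           AND `status` = 'Approved'
    #           AND `to_date` <= %(now)s
    #           AND `from_date` >= %(first_of_year)s
    #           AND `docstatus` = 1"""
    #   return frappe.db.sql(leaves_taken_query, {
    #       "employee": employee, "leave_type": leave_type,
    #       "now": now, "first_of_year": first_of_year}, as_list=True)[0][0]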
| 39.090909 | 117 | 0.682558 |
2a4a40af96b3dbe0c7c6d60b31801973b1e4d514
| 702 |
py
|
Python
|
setup.py
|
mnes-io/chff
|
9f450183ae508a56bf6869f59e7ffce24f56b269
|
[
"MIT"
] | null | null | null |
setup.py
|
mnes-io/chff
|
9f450183ae508a56bf6869f59e7ffce24f56b269
|
[
"MIT"
] | null | null | null |
setup.py
|
mnes-io/chff
|
9f450183ae508a56bf6869f59e7ffce24f56b269
|
[
"MIT"
] | null | null | null |
from setuptools import setup
setup(name='chff',
version='0.1.5',
install_requires=[
"aiortc",
"nose",
],
description='Overlay Network Reference Implementation',
classifiers=[
'Development Status :: 1 - Planning',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Topic :: Security :: Cryptography',
],
url='http://github.com/mnes-io/chff',
author='Charles Perkins',
author_email='[email protected]',
license='MIT',
packages=['chff'],
scripts=['bin/chffline'],
test_suite='nose.collector',
tests_require=['nose'],
zip_safe=False)
| 28.08 | 61 | 0.578348 |
2d918125e046e92b5b2fcec1aaec79e936bbddfa
| 28,984 |
py
|
Python
|
test/test_npu/test_distributed.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | 1 |
2021-12-02T03:07:35.000Z
|
2021-12-02T03:07:35.000Z
|
test/test_npu/test_distributed.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | 1 |
2021-11-12T07:23:03.000Z
|
2021-11-12T08:28:13.000Z
|
test/test_npu/test_distributed.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2020 Huawei Technologies Co., Ltd
# Copyright (c) 2019, Facebook CORPORATION.
# All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import copy
import errno
import fcntl
import multiprocessing
import os
import six
import sys
import time
import unittest
from contextlib import contextmanager
from functools import wraps
from itertools import groupby
from functools import reduce
import operator
import torch
import torch.npu
from torch import nn
import torch.nn.functional as F
import torch.distributed as c10d
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import MultiProcessTestCase
from common_utils import TestCase, run_tests
DEFAULT_TIMEOUT = 100
BACKEND = "hccl"
TEMP_DIR = "/tmp"
INIT_METHOD = "env://"
os.environ['WORLD_SIZE'] = '8'
#change this to your IP
os.environ['MASTER_ADDR'] = 'IP'
os.environ['MASTER_PORT'] = '29501'
SKIP_IF_NO_NPU_EXIT_CODE = 75
SKIP_IF_BACKEND_UNAVAILABLE = 78
def get_timeout():
return DEFAULT_TIMEOUT
if not dist.is_available():
print("Distributed not available, skipping tests")
sys.exit(0)
def skip_if_no_npu_distributed(func):
""" Hccl multigpu tests requires at least 2 NPUS. Skip if this is not met"""
func.skip_if_no_npu = True
@wraps(func)
def wrapper(*args, **kwargs):
if not torch.npu.is_available():
sys.exit(SKIP_IF_NO_NPU_EXIT_CODE)
if torch.npu.device_count() < int(os.environ["WORLD_SIZE"]):
sys.exit(SKIP_IF_NO_NPU_EXIT_CODE)
return func(*args, **kwargs)
return wrapper
@contextmanager
def _lock():
lockfile = os.path.join(TEMP_DIR, "lockfile")
with open(lockfile, "w") as lf:
try:
fcntl.flock(lf.fileno(), fcntl.LOCK_EX)
yield
finally:
fcntl.flock(lf.fileno(), fcntl.LOCK_UN)
lf.close()
def _build_tensor(size, value=None):
if value is None:
value = size
return torch.FloatTensor(size, size, size).fill_(value)
class Barrier(object):
barrier_id = 0
@classmethod
def init(cls):
cls.barrier_id = 0
barrier_dir = os.path.join(TEMP_DIR, "barrier")
if not os.path.exists(barrier_dir):
os.makedirs(barrier_dir)
for f_name in os.listdir(barrier_dir):
os.unlink(os.path.join(barrier_dir, f_name))
@classmethod
def sync(cls, wait_for=None, timeout=5):
if wait_for is None:
wait_for = dist.get_world_size()
cls.barrier_id += 1
barrier_dir = os.path.join(TEMP_DIR, "barrier")
if not os.path.exists(barrier_dir):
os.makedirs(barrier_dir)
pid = str(os.getpid())
barrier_file = os.path.join(barrier_dir, pid)
with _lock():
with open(barrier_file, "w") as f:
f.write(str(cls.barrier_id))
start_time = time.time()
while True:
arrived = 0
with _lock():
for f_name in os.listdir(barrier_dir):
with open(os.path.join(barrier_dir, f_name), "r") as f:
data = f.read()
if int(data) >= cls.barrier_id:
arrived += 1
if arrived == wait_for:
break
if time.time() - start_time > timeout:
raise RuntimeError("barrier timeout")
time.sleep(0.1)
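# Usage sketch (hedged): call Barrier.init() once before spawning workers,
# then Barrier.sync() in every process; sync() blocks until `wait_for`
# processes have written the current barrier_id under TEMP_DIR/barrier and
# raises RuntimeError after `timeout` seconds, e.g.:
#   Barrier.init()                                # once, in the parent
#   Barrier.sync(wait_for=world_size, timeout=10) # in each worker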
class _DistTestBase(object):
def _barrier(self, *args, **kwargs):
Barrier.sync(*args, **kwargs)
def _init_global_test(self):
group = list(range(0, dist.get_world_size()))
group_id = dist.group.WORLD
rank = dist.get_rank()
return (group, group_id, rank)
# HELPER FOR MULTINPU TESTS
def _init_multinpu_helper(self):
"""Multigpu tests are designed to simulate the multi nodes with multi
GPUs on each node. Nccl backend requires equal #GPUs in each process.
On a single node, all visible GPUs are evenly
divided to subsets, each process only uses a subset.
"""
nNPUs = torch.npu.device_count()
world_size = dist.get_world_size()
visible_devices = range(nNPUs)
nNPUs_per_process = nNPUs // world_size
rank_to_NPU = {
i: list(
visible_devices[i * nNPUs_per_process: (i + 1) * nNPUs_per_process]
)
for i in range(world_size)
}
return rank_to_NPU
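    # Example (a sketch): with 8 visible NPUs and WORLD_SIZE=8 this yields
    # nNPUs_per_process = 1 and rank_to_NPU == {0: [0], 1: [1], ..., 7: [7]};
    # with WORLD_SIZE=4 it would be {0: [0, 1], 1: [2, 3], 2: [4, 5], 3: [6, 7]}.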
# ALL REDUCE
def _test_all_reduce_helper(
self,
group,
group_id,
rank,
op,
master_value,
worker_value,
expected_value,
rank_to_NPU=None,
):
for src in group:
if rank == src:
tensor = _build_tensor(src + 1).fill_(master_value)
device = "npu:" + str(rank_to_NPU[rank][0])
torch.npu.set_device(device)
tensor = tensor.to(device)
dist.all_reduce(tensor, op, group_id)
tensor = tensor.to("cpu")
self.assertEqual(tensor, _build_tensor(src + 1, expected_value))
else:
tensor = _build_tensor(src + 1).fill_(worker_value)
device = "npu:" + str(rank_to_NPU[rank][0])
torch.npu.set_device(device)
tensor = tensor.to(device)
dist.all_reduce(tensor, op, group_id)
tensor = tensor.to("cpu")
self.assertEqual(tensor, _build_tensor(src + 1, expected_value))
self._barrier()
@skip_if_no_npu_distributed
def test_all_reduce_sum_npu(self):
group, group_id, rank = self._init_global_test()
rank_to_NPU = self._init_multinpu_helper()
self._test_all_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.SUM,
2,
10,
2 + (10 * (len(group) - 1)),
rank_to_NPU,
)
# BROADCAST
def _test_broadcast_helper(self, group, group_id, rank, rank_to_NPU=None):
for ttype, value, requires_npu in [
("torch.FloatTensor", -1e-10, False),
("torch.DoubleTensor", -1e-100, False),
("torch.HalfTensor", -0.1, True),
("torch.CharTensor", -2, False),
("torch.ByteTensor", 129, False),
("torch.IntTensor", -1e5, False),
("torch.LongTensor", -1e15, False),
]:
if requires_npu:
continue
for src in group:
expected_tensor = _build_tensor(src + 1, value)
device = "npu:" + str(rank_to_NPU[rank][0])
torch.npu.set_device(device)
expected_tensor = expected_tensor.to(device)
if rank == src:
dist.broadcast(expected_tensor, src, group_id)
else:
tensor = _build_tensor(src + 1, -1)
device = "npu:" + str(rank_to_NPU[rank][0])
torch.npu.set_device(device)
tensor = tensor.to(device)
dist.broadcast(tensor, src, group_id)
tensor = tensor.to("cpu")
expected_tensor = expected_tensor.to("cpu")
self.assertEqual(tensor.size(), expected_tensor.size())
self.assertEqual(tensor.ne(expected_tensor).max(), torch.tensor(False))
self._barrier()
@skip_if_no_npu_distributed
def test_broadcast_npu(self):
group, group_id, rank = self._init_global_test()
rank_to_NPU = self._init_multinpu_helper()
self._test_broadcast_helper(group, group_id, rank, rank_to_NPU)
# ALL GATHER
def _test_all_gather_helper(self, group, group_id, rank, rank_to_NPU=None):
for dest in group:
#input
tensor = _build_tensor(dest + 1, rank)
device = "npu:" + str(rank_to_NPU[rank][0])
torch.npu.set_device(device)
tensor = tensor.to(device)
#output
tensors = [_build_tensor(dest + 1, -1) for i in group]
new_tensors = []
for t in tensors:
torch.npu.set_device(device)
t = t.to(device)
new_tensors.append(t)
# output_list, input, group
dist.all_gather(new_tensors, tensor, group_id)
#label
expected_tensors = [_build_tensor(dest + 1, i).to("cpu") for i in group]
new_tensors = [tensor.to("cpu") for tensor in new_tensors]
for t1, t2 in zip(new_tensors, expected_tensors):
self.assertEqual(t1, t2)
self._barrier()
@skip_if_no_npu_distributed
def test_all_gather_npu(self):
group, group_id, rank = self._init_global_test()
rank_to_NPU = self._init_multinpu_helper()
self._test_all_gather_helper(group, group_id, rank, rank_to_NPU)
# REDUCE_SCATTER
def _test_reduce_scatter_helper(self, group, group_id, rank, op, rank_to_NPU=None):
for dest in group:
#output
tensor = _build_tensor(dest + 1, -1)
device = "npu:" + str(rank_to_NPU[rank][0])
torch.npu.set_device(device)
tensor = tensor.to(device)
#input
tensors = [_build_tensor(dest + 1, i) for i in group]
new_tensors = []
for t in tensors:
                t = t.to(device)
new_tensors.append(t)
# output, input_list, op, group, async_op
dist.reduce_scatter(tensor, new_tensors, op, group_id)
#label
expected_tensor = _build_tensor(dest + 1, rank * len(group))
tensor = tensor.to("cpu")
self.assertEqual(tensor, expected_tensor)
self._barrier()
@skip_if_no_npu_distributed
def test_reduce_scatter_sum_npu(self):
group, group_id, rank = self._init_global_test()
rank_to_NPU = self._init_multinpu_helper()
self._test_reduce_scatter_helper(group, group_id, rank, dist.ReduceOp.SUM, rank_to_NPU)
# BARRIER
def _test_barrier_helper(
self, group, group_id, rank, rank_to_NPU=None):
WAIT_TIME = 0.3 # seconds
for dest in group:
expected_time = torch.FloatTensor(1).fill_(0.0)
device = "npu:" + str(rank_to_NPU[rank][0])
torch.npu.set_device(device)
expected_time = expected_time.to(device)
if dest == rank:
expected_time.fill_(time.time() + WAIT_TIME)
dist.broadcast(expected_time, dest, group_id)
time.sleep(WAIT_TIME + 0.1) # sleep a little bit longer
dist.barrier(group_id)
else:
dist.broadcast(expected_time, dest, group_id)
dist.barrier(group_id)
self.assertGreaterEqual(
float(time.time()),
float(expected_time[0]),
"destination rank: %d, my rank: %d" % (dest, rank) +
" (if you see this failure, please report in #14554)")
# Use higher timeout for the instance where the test runs
        # against a subgroup and uses an NPU tensor for expected time.
# The NPU initialization for the participating processes can
# take long enough for the barrier timeout to trigger on the
# process that doesn't participate in the group.
self._barrier(timeout=20)
def test_barrier_npu(self):
group, group_id, rank = self._init_global_test()
rank_to_NPU = self._init_multinpu_helper()
self._test_barrier_helper(group, group_id, rank, rank_to_NPU)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 50, bias=False)
self.fc3 = nn.Linear(50, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
x = self.fc3(x)
return F.softmax(x, dim=1)
class DistributedDataParallelTest(MultiProcessTestCase):
def setUp(self):
super(DistributedDataParallelTest, self).setUp()
self._fork_processes()
def tearDown(self):
try:
os.remove(self.file_name)
except OSError:
pass
@property
def world_size(self):
return 2
def _prepare_single_device_module(self, process_group, devices, device_ids, global_batch_size):
model = Net()
torch.npu.set_device(devices[0])
ddp_model = DistributedDataParallel(
copy.deepcopy(model).to(devices[0]),
device_ids=device_ids,
process_group=process_group,
bucket_cap_mb=0.001)
model.to(devices[0])
input = torch.randn(global_batch_size, 2).to(devices[0])
target = torch.randn(global_batch_size, 4).to(devices[0])
return model, ddp_model, input, target
def _test_ddp_with_process_group(self, process_group, devices, device_ids, multi_device=False):
local_batch_size = len(devices)
global_batch_size = self.world_size * local_batch_size
model, ddp_model, input, target = \
self._prepare_single_device_module(
process_group, devices, device_ids, global_batch_size)
def step_model(model, input, target):
model.train()
output = model(input)
loss = F.mse_loss(output, target.to(output.device))
loss.backward()
def update_parameters(model):
for param in model.parameters():
with torch.no_grad():
param -= param.grad
param.grad = None
# Forward
step_model(model, input, target)
step_model(ddp_model,
input[self.rank * local_batch_size: (self.rank + 1) * local_batch_size],
target[self.rank * local_batch_size: (self.rank + 1) * local_batch_size])
# Update weights
update_parameters(model)
update_parameters(ddp_model)
# Check result
ddp_model = ddp_model.to("cpu")
model = model.to("cpu")
self.assertEqual(len(list(model.parameters())), len(list(ddp_model.parameters())))
for i, j in zip(model.parameters(), ddp_model.parameters()):
self.assertEqual(i, j)
def _test_hccl_backend(self, devices, device_ids, multi_device=False):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupHCCL(store, self.rank, self.world_size)
self._test_ddp_with_process_group(process_group, devices, device_ids, multi_device)
def test_hccl_backend_whole_train_1step(self):
npu_rank = [[0],[1],[2],[3],[4],[5],[6],[7]]
int_devices = npu_rank[self.rank]
devices = list([torch.device('npu:' + str(i)) for i in int_devices])
self._test_hccl_backend(devices, int_devices)
class ReducerModule(nn.Module):
def __init__(self):
super(ReducerModule, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.fc3 = nn.Linear(4, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x, use_fc3=True):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
if use_fc3:
x = self.fc3(x)
return F.softmax(x, dim=1)
class ReducerTest(TestCase):
def setUp(self):
self.store = c10d.FileStore("/dev/null", 1)
self.process_group = c10d.ProcessGroupHCCL(self.store, 0, 1)
def _create_single_precision_model(self):
npu_loc = 'npu:0'
torch.npu.set_device(npu_loc)
model = ReducerModule().to(npu_loc)
return model
def _create_mixed_precision_model(self):
npu_loc = 'npu:0'
model = ReducerModule()
model.float()
model.fc1.double()
return model
def _create_reducer_for_models(self, models):
parameters = [list(model.parameters()) for model in models]
group_by_type = groupby(
range(len(parameters[0])),
key=lambda i: parameters[0][i].type())
buckets = [list(indices) for _, indices in group_by_type]
return dist.Reducer(parameters, buckets, self.process_group)
def test_single_dtype_single_bucket(self):
model = self._create_single_precision_model()
parameters = list(model.parameters())
buckets = [list(range(len(parameters)))]
dist.Reducer([parameters], buckets, self.process_group)
def test_multi_dtype_single_bucket(self):
model = self._create_mixed_precision_model()
# Raise if there are multiple types per bucket.
# In this case we create one bucket for all parameters.
with self.assertRaises(RuntimeError):
parameters = [list(model.parameters())]
buckets = [list(range(len(parameters[0])))]
dist.Reducer(parameters, buckets, self.process_group)
def test_multi_dtype_multi_bucket(self):
model = self._create_mixed_precision_model()
parameters = [list(model.parameters())]
group_by_type = groupby(
range(len(parameters[0])),
key=lambda i: parameters[0][i].type())
buckets = [list(indices) for _, indices in group_by_type]
dist.Reducer(parameters, buckets, self.process_group)
def test_forward_backward_unused_parameters(self):
batch_size = 10
model = self._create_single_precision_model()
reducer = self._create_reducer_for_models([model])
input = torch.rand([batch_size, 2])
target = torch.rand([batch_size, 4])
npu_loc = "npu:0"
input = input.to(npu_loc)
target = target.to(npu_loc)
output = F.mse_loss(model(input, use_fc3=False), target)
# Check that the grad of fc3 is not set.
self.assertEqual(None, model.fc3.weight.grad)
# Compute and accumulate gradients.
reducer.prepare_for_backward(output)
output.backward()
# The reducer will have marked the grad of fc3 as ready, because
# it doesn't show up in the autograd graph of `output`. Since fc3.weight
# is considered being globally unused, it will be kept untouched as None.
self.assertEqual(None, model.fc3.weight.grad)
def test_forward_backward_optimizer(self):
batch_size = 10
model = self._create_single_precision_model()
reducer = self._create_reducer_for_models([model])
optimizer = torch.optim.Adam(model.parameters())
for i in range(3):
input = torch.rand([batch_size, 2])
target = torch.rand([batch_size, 4])
npu_loc = "npu:0"
input = input.to(npu_loc)
target = target.to(npu_loc)
# The `zero_grad` function calls `detach_` and `zero_` on the grad
# tensors of model parameters. If we tried to set the grad tensors
# to a view of the reducer's bucket tensors, this would blow up.
optimizer.zero_grad()
# Unused parameter only in the first iteration.
output = F.mse_loss(model(input, use_fc3=(i > 0)), target)
reducer.prepare_for_backward(output)
output.backward()
optimizer.step()
class ComputeBucketAssignmentTest(TestCase):
def test_single_limit_single_dtype(self):
tensors = [
torch.empty([100], dtype=torch.float),
torch.empty([200], dtype=torch.float),
torch.empty([100], dtype=torch.float),
torch.empty([50], dtype=torch.float),
]
result = dist._compute_bucket_assignment_by_size(tensors, [400])
self.assertEqual([[0], [1], [2], [3]], result)
def test_single_limit_multi_dtype(self):
tensors = [
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
]
result = dist._compute_bucket_assignment_by_size(tensors, [400])
self.assertEqual([[0, 2], [1, 3], [4], [5]], result)
def test_multi_limit_single_dtype(self):
tensors = [
torch.empty([10], dtype=torch.float),
torch.empty([10], dtype=torch.float),
torch.empty([10], dtype=torch.float),
torch.empty([10], dtype=torch.float),
]
result = dist._compute_bucket_assignment_by_size(tensors, [40, 80])
self.assertEqual([[0], [1, 2], [3]], result)
def test_multi_limit_multi_dtype(self):
tensors = [
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
]
result = dist._compute_bucket_assignment_by_size(tensors, [200, 400])
self.assertEqual([[0], [1], [2, 4], [3, 5]], result)
class CommTest(MultiProcessTestCase):
def setUp(self):
super(CommTest, self).setUp()
self._fork_processes()
def tearDown(self):
super(CommTest, self).tearDown()
try:
os.remove(self.file_name)
except OSError:
pass
@property
def op_timeout_sec(self):
return 1
@property
def world_size(self):
return 2
def _test_broadcast_coalesced(self, process_group, device):
target = torch.arange(60, dtype=torch.float32).chunk(5)
target += torch.arange(60, dtype=torch.float32).chunk(5)
        # The tensors to pass to broadcast are identical to the target
# only on the process that is the root of the broadcast.
if self.rank == 0:
tensors = list(tensor.clone() for tensor in target)
torch.npu.set_device(device)
npu_tensors = []
for tensor in tensors:
npu_tensor = tensor.to(device)
npu_tensors.append(npu_tensor)
else:
tensors = list(torch.empty_like(tensor) for tensor in target)
torch.npu.set_device(device)
npu_tensors = []
for tensor in tensors:
npu_tensor = tensor.to(device)
npu_tensors.append(npu_tensor)
c10d._broadcast_coalesced(
process_group,
npu_tensors,
buffer_size=256)
tensors = []
for tensor in npu_tensors:
cpu_tensor = tensor.to("cpu")
tensors.append(cpu_tensor)
self.assertEqual(tensors, target)
def test_broadcast_coalesced_hccl(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupHCCL(store, self.rank, self.world_size)
device = torch.device('npu:%d' % self.rank)
self._test_broadcast_coalesced(process_group, device)
if BACKEND == "hccl":
WORLD_SIZE = os.environ["WORLD_SIZE"]
class TestDistBackend(TestCase, _DistTestBase):
MANAGER_PROCESS_RANK = -1
TEST_ERROR_EXIT_CODE = 10
@staticmethod
def manager_join(fn):
@wraps(fn)
def wrapper(self):
if self.rank == self.MANAGER_PROCESS_RANK:
#TODO
self._join_and_reduce(fn)
else:
fn(self)
return wrapper
@classmethod
def setUpClass(cls):
for attr in dir(cls):
if attr.startswith("test"):
fn = getattr(cls, attr)
if not getattr(fn, "__unittest_skip__", False):
setattr(cls, attr, cls.manager_join(fn))
def setUp(self):
super(TestDistBackend, self).setUp()
self.skip_return_code_checks = []
self.processes = []
self.rank = self.MANAGER_PROCESS_RANK
self.temporary_file = None
Barrier.init()
for rank in range(int(WORLD_SIZE)):
self.processes.append(self._spawn_process(rank))
def tearDown(self):
super(TestDistBackend, self).tearDown()
# Clean up temporary file if we used one.
if self.temporary_file:
try:
os.unlink(self.temporary_file.name)
except OSError as err:
# ENOENT is OK because the test is supposed to clean it up.
if err.errno != errno.ENOENT:
raise
for p in self.processes:
p.terminate()
def _spawn_process(self, rank):
os.environ["RANK"] = str(rank)
name = "process " + str(rank)
# test_distributed.py test suite does not work with spawn
# mode, so we enforce fork mode for now. In the long term, we should
# enable spawn mode and refactor this suite to inherit from
# common_distributed.MultiProcessTestCase.
if six.PY3:
# Note: explicitly specifying fork, as spawn is the default in
# py3.8+ on macos.
proc_handler = multiprocessing.get_context("fork").Process
else:
# fork is the default on Python 2
proc_handler = multiprocessing.Process
process = proc_handler(target=self._run, name=name, args=(rank,))
process.start()
return process
def _run(self, rank):
self.rank = rank
try:
dist.init_process_group(
init_method=INIT_METHOD,
backend=BACKEND,
world_size=int(WORLD_SIZE),
rank=self.rank
)
except RuntimeError as e:
if "recompile" in e.args[0]:
sys.exit(SKIP_IF_BACKEND_UNAVAILABLE)
# sys.exit(0)
raise
# Execute barrier prior to running test to ensure that every process
# has finished initialization and that the following test
# immediately exiting due to a skip doesn't cause flakiness.
self._barrier()
getattr(self, self.id().split(".")[2])()
self._barrier()
dist.destroy_process_group()
sys.exit(0)
def _join_and_reduce(self, fn):
skip_ok = (
getattr(fn, "skip_if_no_npu_distributed", False)
)
join_timeout = get_timeout()
for rank, process in enumerate(self.processes):
process.join(join_timeout)
self.assertFalse(
process.is_alive(),
"Timeout waiting for rank %d to terminate" % rank)
first_process = self.processes[0]
for p in self.processes:
self.assertEqual(p.exitcode, first_process.exitcode)
if first_process.exitcode == SKIP_IF_BACKEND_UNAVAILABLE:
raise unittest.SkipTest("Compiled without the " + BACKEND + " backend")
if skip_ok:
assert (
first_process.exitcode == 0 or
first_process.exitcode == SKIP_IF_NO_NPU_EXIT_CODE
)
if first_process.exitcode == SKIP_IF_NO_NPU_EXIT_CODE:
raise unittest.SkipTest(
"One unique gpu per process is not available"
)
self.assertEqual(first_process.exitcode, 0)
else:
print("backend is not hccl")
if __name__ == "__main__":
run_tests()
| 34.962606 | 99 | 0.5866 |
2dc58389fb563c1c108a32e8f11b2fd0b00e2563
| 8,683 |
py
|
Python
|
watchlist_app/api/views.py
|
attachemd/moviedb
|
c2e3b7bc3c1537c64834c2ee94492122dafdfcd4
|
[
"MIT"
] | null | null | null |
watchlist_app/api/views.py
|
attachemd/moviedb
|
c2e3b7bc3c1537c64834c2ee94492122dafdfcd4
|
[
"MIT"
] | null | null | null |
watchlist_app/api/views.py
|
attachemd/moviedb
|
c2e3b7bc3c1537c64834c2ee94492122dafdfcd4
|
[
"MIT"
] | null | null | null |
from django.shortcuts import get_object_or_404
from rest_framework import status, generics, mixins, viewsets
from rest_framework.decorators import api_view
from rest_framework.exceptions import ValidationError
from rest_framework.response import Response
from rest_framework.views import APIView
from watchlist_app.api.serializers import (
WatchListSerializer,
StreamPlatformSerializer,
ReviewSerializer
)
from core.models import WatchListModel, StreamPlatformModel, ReviewModel
class ReviewCreateView(
generics.CreateAPIView
):
serializer_class = ReviewSerializer
def get_queryset(self):
return ReviewModel.objects.all()
def perform_create(self, serializer):
pk = self.kwargs['pk']
watchlist = WatchListModel.objects.get(pk=pk)
user = self.request.user
review_queryset = ReviewModel.objects.filter(
watchlist=watchlist,
user=user
)
if review_queryset.exists():
raise ValidationError(
                'You have already reviewed this movie.'
)
if watchlist.num_rating == 0:
watchlist.avg_rating = serializer.validated_data['rating']
else:
watchlist.avg_rating = (watchlist.avg_rating + serializer.validated_data['rating']) / 2
watchlist.num_rating = watchlist.num_rating + 1
watchlist.save()
serializer.save(watchlist=watchlist, user=user)
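        # Note (a hedged aside): the else-branch above weights the newest
        # rating at 50% instead of keeping a true mean; an exact running
        # average would replace it with, e.g. (before num_rating is bumped):
        #   watchlist.avg_rating = (watchlist.avg_rating * watchlist.num_rating
        #       + serializer.validated_data['rating']) / (watchlist.num_rating + 1)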
class ReviewView(
generics.ListAPIView
):
# queryset = ReviewModel.objects.all()
serializer_class = ReviewSerializer
def get_queryset(self):
pk = self.kwargs['pk']
return ReviewModel.objects.filter(watchlist=pk)
class ReviewDetailView(
generics.RetrieveUpdateDestroyAPIView
):
queryset = ReviewModel.objects.all()
serializer_class = ReviewSerializer
# class ReviewDetailView(
# mixins.RetrieveModelMixin,
# generics.GenericAPIView
# ):
# queryset = ReviewModel.objects.all()
# serializer_class = ReviewSerializer
#
# def get(self, request, *args, **kwargs):
# return self.retrieve(request, *args, **kwargs)
# class ReviewView(
# mixins.ListModelMixin,
# mixins.CreateModelMixin,
# generics.GenericAPIView
# ):
# queryset = ReviewModel.objects.all()
# serializer_class = ReviewSerializer
#
# def get(self, request, *args, **kwargs):
# return self.list(request, *args, **kwargs)
#
# def post(self, request, *args, **kwargs):
# return self.create(request, *args, **kwargs)
class StreamPlatformView(viewsets.ModelViewSet):
queryset = StreamPlatformModel.objects.all()
serializer_class = StreamPlatformSerializer
# class StreamPlatformView(viewsets.ViewSet):
#
# def list(self, request):
# queryset = StreamPlatformModel.objects.all()
# serializer = StreamPlatformSerializer(queryset, many=True)
# return Response(serializer.data)
#
# def retrieve(self, request, pk=None):
# queryset = StreamPlatformModel.objects.all()
# watchlist = get_object_or_404(queryset, pk=pk)
# serializer = StreamPlatformSerializer(watchlist)
# return Response(serializer.data)
#
# def create(self, request):
# serializer = StreamPlatformSerializer(data=request.data)
# if not serializer.is_valid():
# return Response(serializer.errors)
# serializer.save()
# return Response(serializer.data)
# class StreamPlatformView(APIView):
# def get(self, request):
# stream_platform = StreamPlatformModel.objects.all()
# serializer = StreamPlatformSerializer(
# stream_platform,
# many=True
# )
# return Response(serializer.data)
#
# def post(self, request):
# serializer = StreamPlatformSerializer(data=request.data)
# if not serializer.is_valid():
# return Response(
# serializer.errors,
# status=status.HTTP_400_BAD_REQUEST
# )
# serializer.save()
# return Response(serializer.data)
#
#
# class StreamPlatformDetailView(APIView):
# def get(self, request, pk):
# try:
# stream_platform = StreamPlatformModel.objects.get(pk=pk)
# serializer = StreamPlatformSerializer(stream_platform)
# return Response(serializer.data)
# except StreamPlatformModel.DoesNotExist:
# return Response(
# {'error': 'Movie not found'},
# status=status.HTTP_404_NOT_FOUND
# )
#
# def put(self, request, pk):
# stream_platform = StreamPlatformModel.objects.get(pk=pk)
# serializer = StreamPlatformSerializer(
# stream_platform,
# data=request.data
# )
# if not serializer.is_valid():
# return Response(
# serializer.errors,
# status=status.HTTP_400_BAD_REQUEST
# )
#
# serializer.save()
# return Response(serializer.data)
#
# def delete(self, request, pk):
# stream_platform = StreamPlatformModel.objects.get(pk=pk)
# stream_platform.delete()
# return Response(
# {'message': 'File deleted'},
# status=status.HTTP_204_NO_CONTENT
# )
class WatchListView(APIView):
def get(self, request):
movies = WatchListModel.objects.all()
serializer = WatchListSerializer(
movies,
many=True
)
return Response(serializer.data)
def post(self, request):
serializer = WatchListSerializer(data=request.data)
if not serializer.is_valid():
return Response(
serializer.errors,
status=status.HTTP_400_BAD_REQUEST
)
serializer.save()
return Response(serializer.data)
class WatchDetail(APIView):
def get(self, request, pk):
try:
movie = WatchListModel.objects.get(pk=pk)
serializer = WatchListSerializer(movie)
return Response(serializer.data)
except WatchListModel.DoesNotExist:
return Response(
{'error': 'Movie not found'},
status=status.HTTP_404_NOT_FOUND
)
def put(self, request, pk):
movie = WatchListModel.objects.get(pk=pk)
serializer = WatchListSerializer(
movie,
data=request.data
)
if not serializer.is_valid():
return Response(
serializer.errors,
status=status.HTTP_400_BAD_REQUEST
)
serializer.save()
return Response(serializer.data)
def delete(self, request, pk):
movie = WatchListModel.objects.get(pk=pk)
movie.delete()
return Response(
{'message': 'File deleted'},
status=status.HTTP_204_NO_CONTENT
)
# @api_view(['GET', 'POST'])
# def movie_list(request):
# if request.method == 'GET':
# movies = Movie.objects.all()
# serializer = MovieSerializer(
# movies,
# many=True
# )
# return Response(serializer.data)
#
# if request.method == 'POST':
# serializer = MovieSerializer(data=request.data)
# if not serializer.is_valid():
# return Response(
# serializer.errors,
# status=status.HTTP_400_BAD_REQUEST
# )
# serializer.save()
# return Response(serializer.data)
#
#
# @api_view(['GET', 'PUT', 'DELETE'])
# def movie_details(request, pk):
# if request.method == 'GET':
# try:
# movie = Movie.objects.get(pk=pk)
# serializer = MovieSerializer(movie)
# return Response(serializer.data)
# except Movie.DoesNotExist:
# return Response(
# {'error': 'Movie not found'},
# status=status.HTTP_404_NOT_FOUND
# )
#
# if request.method == 'PUT':
# movie = Movie.objects.get(pk=pk)
# serializer = MovieSerializer(
# movie,
# data=request.data
# )
# if not serializer.is_valid():
# return Response(
# serializer.errors,
# status=status.HTTP_400_BAD_REQUEST
# )
#
# serializer.save()
# return Response(serializer.data)
#
# if request.method == 'DELETE':
# movie = Movie.objects.get(pk=pk)
# movie.delete()
# return Response(
# {'message': 'File deleted'},
# status=status.HTTP_204_NO_CONTENT
# )
| 30.254355 | 99 | 0.605897 |
932edf00d3009f4f8c524a59f380c5435549a3e4
| 375 |
py
|
Python
|
NeuralNet/Alex/main.py
|
alex-ta/Fontinator
|
7ca9effe3b61ded032176557520127e1d4b7a5ef
|
[
"Apache-2.0"
] | 6 |
2017-04-12T14:05:19.000Z
|
2021-01-29T11:23:50.000Z
|
NeuralNet/Alex/main.py
|
alex-ta/Fontinator
|
7ca9effe3b61ded032176557520127e1d4b7a5ef
|
[
"Apache-2.0"
] | null | null | null |
NeuralNet/Alex/main.py
|
alex-ta/Fontinator
|
7ca9effe3b61ded032176557520127e1d4b7a5ef
|
[
"Apache-2.0"
] | null | null | null |
# include data pipe
from modelpipe import pipe
import numpy as np
#create pipe
datapipe = pipe.Pipe(data_path = "../../images/Dataset_1", train_size=0.6)
#load data
datapipe.load_data(flatten=0, print_out=1)
#train model
for i in range(10):
np.random.seed(10)
result = datapipe.run(model_name = "models/model_0"+str(i), epochsize = 300, batch_size = 100)
print(result)
| 25 | 95 | 0.733333 |
e2124adc78315561fa6c4c434dfb3c2da235befc
| 12,429 |
py
|
Python
|
scripts/experimental_matching.py
|
tliu68/maggot_connectome
|
ef4bbd2011fa9e03da187fcca8c8c1ca79209a36
|
[
"MIT"
] | null | null | null |
scripts/experimental_matching.py
|
tliu68/maggot_connectome
|
ef4bbd2011fa9e03da187fcca8c8c1ca79209a36
|
[
"MIT"
] | null | null | null |
scripts/experimental_matching.py
|
tliu68/maggot_connectome
|
ef4bbd2011fa9e03da187fcca8c8c1ca79209a36
|
[
"MIT"
] | null | null | null |
#%% [markdown]
# # Hemisphere matching
#%% [markdown]
# ## Preliminaries
#%%
from pkg.utils import set_warnings
set_warnings()
import datetime
import pprint
import time
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from graspologic.plot import pairplot
from graspologic.utils import (
augment_diagonal,
binarize,
multigraph_lcc_intersection,
pass_to_ranks,
)
from pkg.data import load_maggot_graph
from pkg.io import savefig
from pkg.plot import set_theme
from pkg.utils import get_paired_inds, get_paired_subgraphs
from src.visualization import adjplot # TODO fix graspologic version and replace here
t0 = time.time()
def stashfig(name, **kwargs):
foldername = "hemisphere_matching"
savefig(name, foldername=foldername, **kwargs)
colors = sns.color_palette("Set1")
palette = dict(zip(["Left", "Right"], colors))
set_theme()
#%%
import operator
import numpy as np
from scipy._lib._util import check_random_state
from scipy.optimize import OptimizeResult, linear_sum_assignment
def _check_init_input(P0, n):
row_sum = np.sum(P0, axis=0)
col_sum = np.sum(P0, axis=1)
tol = 1e-3
msg = None
if P0.shape != (n, n):
msg = "`P0` matrix must have shape m' x m', where m'=n-m"
elif (
(~np.isclose(row_sum, 1, atol=tol)).any()
or (~np.isclose(col_sum, 1, atol=tol)).any()
or (P0 < 0).any()
):
msg = "`P0` matrix must be doubly stochastic"
if msg is not None:
raise ValueError(msg)
def _split_matrix(X, n):
# definitions according to Seeded Graph Matching [2].
upper, lower = X[:n], X[n:]
return upper[:, :n], upper[:, n:], lower[:, :n], lower[:, n:]
def _doubly_stochastic(P, tol=1e-3, max_iter=1000):
# Adapted from @btaba implementation
# https://github.com/btaba/sinkhorn_knopp
# of Sinkhorn-Knopp algorithm
# https://projecteuclid.org/euclid.pjm/1102992505
c = 1 / P.sum(axis=0)
r = 1 / (P @ c)
P_eps = P
for it in range(max_iter):
if it % 100 == 0: # only check every so often to speed up
if (np.abs(P_eps.sum(axis=1) - 1) < tol).all() and (
np.abs(P_eps.sum(axis=0) - 1) < tol
).all():
# All column/row sums ~= 1 within threshold
break
c = 1 / (r @ P)
r = 1 / (P @ c)
P_eps = r[:, None] * P * c
return P_eps
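# Sanity-check sketch (hypothetical input): after Sinkhorn balancing, the
# row and column sums of a random positive matrix should all be ~1:
#   P = _doubly_stochastic(np.random.rand(5, 5))
#   assert np.allclose(P.sum(axis=0), 1, atol=1e-3)
#   assert np.allclose(P.sum(axis=1), 1, atol=1e-3)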
def quadratic_assignment_ot(A, B, method="faq", options=None):
if options is None:
options = {}
method = method.lower()
methods = {"faq": _quadratic_assignment_faq_ot}
if method not in methods:
raise ValueError(f"method {method} must be in {methods}.")
res = methods[method](A, B, **options)
return res
def _calc_score(A, B, perm):
# equivalent to objective function but avoids matmul
return np.sum(A * B[perm][:, perm])
def _common_input_validation(A, B, partial_match):
A = np.atleast_2d(A)
B = np.atleast_2d(B)
if partial_match is None:
partial_match = np.array([[], []]).T
partial_match = np.atleast_2d(partial_match).astype(int)
msg = None
if A.shape[0] != A.shape[1]:
msg = "`A` must be square"
elif B.shape[0] != B.shape[1]:
msg = "`B` must be square"
elif A.shape != B.shape:
msg = "`A` and `B` matrices must be of equal size"
elif partial_match.shape[0] > A.shape[0]:
msg = "`partial_match` can have only as many seeds as there are nodes"
elif partial_match.shape[1] != 2:
msg = "`partial_match` must have two columns"
elif partial_match.ndim != 2:
msg = "`partial_match` must have exactly two dimensions"
elif (partial_match < 0).any():
msg = "`partial_match` must contain only positive indices"
elif (partial_match >= len(A)).any():
msg = "`partial_match` entries must be less than number of nodes"
elif not len(set(partial_match[:, 0])) == len(partial_match[:, 0]) or not len(
set(partial_match[:, 1])
) == len(partial_match[:, 1]):
msg = "`partial_match` column entries must be unique"
if msg is not None:
raise ValueError(msg)
return A, B, partial_match
import functools
def _layered_product(*args):
# l is the index of the layer, which remains fixed
# j is the index of summation in the matrix product
# i is the row index of A
# k is the col index of b
return functools.reduce(
lambda A, B: np.einsum("ijl,jkl->ikl", A, B, optimize=True), args
)
def _single_layered_product(A, B):
n_layers = max(A.shape[-1], B.shape[-1])
output = np.empty((A.shape[0], B.shape[1], n_layers))
if A.ndim == 2:
for layer in range(n_layers):
output[:, :, layer] = A @ B[:, :, layer]
elif B.ndim == 2:
for layer in range(n_layers):
output[:, :, layer] = A[:, :, layer] @ B
else:
for layer in range(n_layers):
output[:, :, layer] = A[:, :, layer] @ B[:, :, layer]
return output
def _layered_product(*args):
return functools.reduce(_single_layered_product, args)
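# Note: this reduce over _single_layered_product supersedes the einsum
# definition of _layered_product above. Equivalence sketch (hypothetical
# shapes): for A, B of shape (n, n, L) it performs one matmul per layer:
#   A, B = np.random.rand(4, 4, 2), np.random.rand(4, 4, 2)
#   C = _layered_product(A, B)
#   assert np.allclose(C[:, :, 0], A[:, :, 0] @ B[:, :, 0])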
def _transpose(A):
return np.transpose(A, axes=(1, 0, 2))
def _quadratic_assignment_faq_ot(
A,
B,
maximize=False,
partial_match=None,
rng=None,
P0="barycenter",
shuffle_input=False,
maxiter=30,
tol=0.03,
reg=100,
thr=5e-2,
ot=False,
):
maxiter = operator.index(maxiter)
rng = check_random_state(rng)
A, B, partial_match = _common_input_validation(A, B, partial_match)
n = A.shape[0] # number of vertices in graphs
n_seeds = partial_match.shape[0] # number of seeds
n_unseed = n - n_seeds
n_layers = A.shape[-1]
obj_func_scalar = 1
if maximize:
obj_func_scalar = -1
nonseed_B = np.setdiff1d(range(n), partial_match[:, 1])
if shuffle_input:
nonseed_B = rng.permutation(nonseed_B)
# shuffle_input to avoid results from inputs that were already matched
nonseed_A = np.setdiff1d(range(n), partial_match[:, 0])
perm_A = np.concatenate([partial_match[:, 0], nonseed_A])
perm_B = np.concatenate([partial_match[:, 1], nonseed_B])
# definitions according to Seeded Graph Matching [2].
A11, A12, A21, A22 = _split_matrix(A[perm_A][:, perm_A], n_seeds)
B11, B12, B21, B22 = _split_matrix(B[perm_B][:, perm_B], n_seeds)
# TODO also split contralaterals
# [1] Algorithm 1 Line 1 - choose initialization
if isinstance(P0, str):
# initialize J, a doubly stochastic barycenter
J = np.ones((n_unseed, n_unseed)) / n_unseed
if P0 == "barycenter":
P = J
elif P0 == "randomized":
# generate a nxn matrix where each entry is a random number [0, 1]
# would use rand, but Generators don't have it
# would use random, but old mtrand.RandomStates don't have it
K = rng.uniform(size=(n_unseed, n_unseed))
# Sinkhorn balancing
K = _doubly_stochastic(K)
P = J * 0.5 + K * 0.5
else:
P0 = np.atleast_2d(P0)
_check_init_input(P0, n_unseed)
P = P0
currtime = time.time()
const_sum = _layered_product(A21, _transpose(B21)) + _layered_product(
_transpose(A12), B12
)
print(f"{time.time() - currtime:.3f} seconds elapsed for const_sum.")
# [1] Algorithm 1 Line 2 - loop while stopping criteria not met
for n_iter in range(1, maxiter + 1):
# [1] Algorithm 1 Line 3 - compute the gradient of f(P) = -tr(APB^tP^t)
# TODO einsum
currtime = time.time()
# P = np.repeat(P, n_layers, axis=2)
# grad_fp = (
# const_sum
# + _layered_product(A22, P, _transpose(B22))
# + _layered_product(_transpose(A22), P, B22)
# )
grad_fp = const_sum
grad_fp += _layered_product(_layered_product(A22, P), _transpose(B22))
grad_fp += _layered_product(_layered_product(_transpose(A22), P), B22)
grad_fp = grad_fp.sum(axis=-1)
print(f"{time.time() - currtime:.3f} seconds elapsed for grad_fp.")
# [1] Algorithm 1 Line 4 - get direction Q by solving Eq. 8
currtime = time.time()
if ot:
Q = alap(grad_fp, n_unseed, maximize, reg, thr)
else:
_, cols = linear_sum_assignment(grad_fp, maximize=maximize)
Q = np.eye(n_unseed)[cols]
print(f"{time.time() - currtime:.3f} seconds elapsed.")
# Q = np.eye(n_unseed)[cols]
# [1] Algorithm 1 Line 5 - compute the step size
# Noting that e.g. trace(Ax) = trace(A)*x, expand and re-collect
# terms as ax**2 + bx + c. c does not affect location of minimum
# and can be ignored. Also, note that trace(A@B) = (A.T*B).sum();
# apply where possible for efficiency.
# TODO all einsums?
currtime = time.time()
R = P - Q
b21 = (_layered_product(R.T[..., None], A21) * B21).sum()
b12 = (
_layered_product(R.T[..., None], _transpose(A12)) * _transpose(B12)
).sum()
AR22 = _layered_product(_transpose(A22), R[..., None])
BR22 = _layered_product(B22, R.T[..., None])
b22a = (AR22 * (_layered_product(Q[..., None], _transpose(B22)))).sum()
b22b = (A22 * _layered_product(Q[..., None], BR22)).sum()
a = (_transpose(AR22) * BR22).sum()
b = b21 + b12 + b22a + b22b
print(f"{time.time() - currtime:.3f} seconds elapsed for quadradic terms.")
# critical point of ax^2 + bx + c is at x = -d/(2*e)
# if a * obj_func_scalar > 0, it is a minimum
# if minimum is not in [0, 1], only endpoints need to be considered
if a * obj_func_scalar > 0 and 0 <= -b / (2 * a) <= 1:
alpha = -b / (2 * a)
else:
alpha = np.argmin([0, (b + a) * obj_func_scalar])
# [1] Algorithm 1 Line 6 - Update P
P_i1 = alpha * P + (1 - alpha) * Q
if np.linalg.norm(P - P_i1) / np.sqrt(n_unseed) < tol:
P = P_i1
break
P = P_i1
# [1] Algorithm 1 Line 7 - end main loop
# [1] Algorithm 1 Line 8 - project onto the set of permutation matrices
# print(P)
_, col = linear_sum_assignment(-P)
perm = np.concatenate((np.arange(n_seeds), col + n_seeds))
unshuffled_perm = np.zeros(n, dtype=int)
unshuffled_perm[perm_A] = perm_B[perm]
score = _calc_score(A, B, unshuffled_perm)
res = {"col_ind": unshuffled_perm, "fun": score, "nit": n_iter}
return OptimizeResult(res)
# from numba import jit
# @jit(nopython=True)
def alap(P, n, maximize, reg, tol):
power = 1 if maximize else -1
lamb = reg / np.max(np.abs(P))
P = np.exp(lamb * power * P)
# ones = np.ones(n)
# P_eps = sinkhorn(ones, ones, P, power/lamb, stopInnerThr=5e-02) # * (P > np.log(1/n)/lamb)
P_eps = _doubly_stochastic(P, tol)
return P_eps
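# Reading (hedged): alap replaces the hard permutation from
# linear_sum_assignment with a doubly-stochastic relaxation; the gradient is
# exponentiated with strength `reg` and Sinkhorn-balanced, so larger `reg`
# pushes the result closer to a permutation matrix.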
from graspologic.simulations import er_corr
options = dict(maximize=True, shuffle_input=True, ot=False, maxiter=1)
# NOTE: left_adjs / right_adjs are not defined in this script as captured;
# they are assumed to come from an earlier maggot-graph loading step.
res = quadratic_assignment_ot(left_adjs, right_adjs, method="faq", options=options)
#%%
n = 20
p = 0.4
rs = [0.6, 0.7, 0.8, 0.9, 1.0]
n_sims = 1
options = dict(maximize=True, shuffle_input=True)
rows = []
arange = np.arange(n)
def compute_match_ratio(perm_inds):
return (perm_inds == arange).mean()
for r in rs:
for i in range(n_sims):
A1, B1 = er_corr(n, p, r)
A2, B2 = er_corr(n, p, r)
A = np.stack((A1, A2), axis=2)
B = np.stack((B1, B2), axis=2)
layer_res = quadratic_assignment_ot(A, B, options=options)
layer_res["method"] = "multilayer"
layer_res["rho"] = r
layer_res["match_ratio"] = compute_match_ratio(layer_res["col_ind"])
rows.append(layer_res)
# layer_perm_inds = layer_res["col_ind"]
A_sum = A.sum(axis=-1).reshape((n, n, 1))
B_sum = B.sum(axis=-1).reshape((n, n, 1))
flat_res = quadratic_assignment_ot(A_sum, B_sum, options=options)
flat_res["method"] = "flat"
flat_res["rho"] = r
flat_res["match_ratio"] = compute_match_ratio(flat_res["col_ind"])
rows.append(flat_res)
results = pd.DataFrame(rows)
fig, ax = plt.subplots(1, 1, figsize=(8, 6))
sns.lineplot(data=results, x="rho", y="match_ratio", hue="method")
stashfig("multilayer-er")
| 30.463235 | 100 | 0.605439 |
35a265077c70045abbf438cc7307e1011e7d9745
| 4,073 |
py
|
Python
|
MT-LSTM/train.py
|
baowj-678/TC
|
4c9bf6bf2202c9930616259d3f3e1a2b0529a6e6
|
[
"MIT"
] | null | null | null |
MT-LSTM/train.py
|
baowj-678/TC
|
4c9bf6bf2202c9930616259d3f3e1a2b0529a6e6
|
[
"MIT"
] | null | null | null |
MT-LSTM/train.py
|
baowj-678/TC
|
4c9bf6bf2202c9930616259d3f3e1a2b0529a6e6
|
[
"MIT"
] | null | null | null |
""" 训练文件
@Author: Bao Wenjie
@Email: [email protected]
@Date: 2020/10/31
"""
import pandas as pd
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence
import numpy as np
import torch
import sys
import os
abs_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(abs_path)
from dataset.vocab import Vocab
from dataset.dataset import DataSet
from dataset.dataloader import DataLoader
from model import Model
from seed_all import seed_all
if __name__ == '__main__':
    # Set the random seed
seed_all(42)
data_dir = '/home/baowenjie/TC/dataset/SST-1'
# data_dir = 'D:/NLP/TC/dataset/SST-1'
train_data_path = os.path.join(data_dir, 'train.tsv')
dev_data_path = os.path.join(data_dir, 'dev.tsv')
test_data_path = os.path.join(data_dir, 'test.tsv')
vocab_path = os.path.join(data_dir, 'vocab.txt')
save_path = 'output/MT_LSTM.pkl'
BATCH_SIZE = 32
max_length = 56
embedding_size = 128
hidden_size = 64
lr = 0.003
output_per_batchs = 10
test_per_batchs = 60
test_batchs = 10
groups = 3
    p = 0.1
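    # Cadence: log the running loss every output_per_batchs batches and run a
    # quick evaluation on test_batchs test batches every test_per_batchs batches.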
    # Load the vocabulary
vocab = Vocab(vocab_path)
    # Build the datasets
train_data_set = DataSet(train_data_path, vocab, max_length)
test_data_set = DataSet(test_data_path, vocab, max_length)
    # Build the data loaders
train_data_loader = DataLoader(train_data_set, shuffle=True, batch_size=BATCH_SIZE)
test_data_loader = DataLoader(test_data_set, shuffle=True, batch_size=BATCH_SIZE)
    # Use the GPU if available
if torch.cuda.is_available():
device = torch.device('cuda:0')
else:
device = torch.device('cpu')
    # Initialize the model
model = Model(vocab=vocab,
hidden_size=hidden_size,
device=device,
ave_length=19,
embedding_size=embedding_size,
num_class=5,
g=groups,
                  p=p)
    # Optimizer
optimizer = torch.optim.Adam(model.parameters(),
lr=lr,
weight_decay=1e-5)
    # Start training
    for i in range(100):
        print('='*8 + 'start training' + '='*8)
model.train()
loss_sum = 0
for epoch, data in enumerate(train_data_loader):
X, Y = data
optimizer.zero_grad()
loss = model(X, Y)
loss.backward()
optimizer.step()
loss_sum += loss.detach()
            # Log training progress
if((epoch + 1) % output_per_batchs == 0):
                print('iter: {}: epoch: {}/{} loss: {}'.format(i + 1, epoch + 1, len(train_data_set), loss_sum / output_per_batchs))
loss_sum = 0
            ############################### Testing ######################################
            if (epoch + 1) % test_per_batchs == 0:
                print('-'*8 + 'start testing' + '-'*8)
                with torch.no_grad():
                    accuracy = 0
                    n_eval = 0
                    model.eval()
                    for test_batch, data in enumerate(test_data_loader):
                        X, Y = data
                        Y = Y.to(device=device).squeeze(dim=1)
                        y = model(X).detach()
                        accuracy += torch.sum(y == Y).cpu()
                        n_eval += Y.size(0)
                        if (test_batch + 1) % test_batchs == 0:
                            break
                    print('correct: {}, evaluated: {}, test accuracy: {}'.format(accuracy, n_eval, float(accuracy) / n_eval))
                    torch.save(model.state_dict(), save_path)
                    model.train()
    ######################################## Final test #############################
    print('-'*8 + 'start testing' + '-'*8)
    with torch.no_grad():
        accuracy = 0
        model.eval()
        for epoch, data in enumerate(test_data_loader):
            X, Y = data
            Y = Y.to(device=device).squeeze(dim=1)
            y = model(X).detach()
            accuracy += torch.sum(y == Y).cpu()
        print('correct: {}, total: {}, test accuracy: {}'.format(accuracy, len(test_data_set), float(accuracy) / len(test_data_set)))
torch.save(model.state_dict(), save_path)
| 35.72807 | 133 | 0.534741 |
ea4cd17d154bc3605bd6ca146ad0c7082637a748
| 2,187 |
py
|
Python
|
aoc2020/cli.py
|
en0/aoc2020
|
a2f41b909dffe1f366682b3d03fd5fbdbc924ec0
|
[
"MIT"
] | null | null | null |
aoc2020/cli.py
|
en0/aoc2020
|
a2f41b909dffe1f366682b3d03fd5fbdbc924ec0
|
[
"MIT"
] | null | null | null |
aoc2020/cli.py
|
en0/aoc2020
|
a2f41b909dffe1f366682b3d03fd5fbdbc924ec0
|
[
"MIT"
] | null | null | null |
from argparse import ArgumentParser
from time import perf_counter
from importlib import import_module
from os.path import dirname, basename, join as join_path
from .errors import NoSolutionError
from .solution_abc import SolutionABC
def get_opts():
ap = ArgumentParser()
ap.add_argument("--test", action="store_true", default=False, help="Runs using test input.")
ap.add_argument("--check", action="store_true", default=False, help="Runs using test input and checks result with "
"expected.")
ap.add_argument("--args", nargs='*', help="Pass additional arguments to the solution.")
ap.add_argument("DAY", choices=[str(i) for i in range(1, 26)], help="The Day")
ap.add_argument("PART", choices=['1', '2'], help="The Part")
return ap.parse_args()
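# Example invocations (the `python -m aoc2020` entry point is an assumption):
#   python -m aoc2020 5 1           # solve day 5, part 1 on real input
#   python -m aoc2020 5 1 --check   # run on test input and verify the result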
def build_solution(day, part, testing=False, *args, **kwargs) -> SolutionABC:
module = import_module(f".day_{day.zfill(2)}.part_{part}", "aoc2020")
module_path = dirname(module.__file__)
solution = getattr(module, "Solution")
return solution(join_path(module_path, "resources"), testing, *args, **kwargs)
def main():
opts = get_opts()
args = [_ for _ in opts.args if "=" not in _] if opts.args else []
kwargs = {_.split('=')[0]: _.split('=')[1] for _ in opts.args if "=" in _} if opts.args else {}
solution = build_solution(opts.DAY, opts.PART, opts.test or opts.check, *args, **kwargs)
def show(result, detail=None):
print(f"AoC[2020.{opts.DAY}.{opts.PART}] -> {result}")
if detail:
print(f"[+], {detail}")
def run_check():
try:
success = solution.check()
show("PASS" if success else "FAILED")
if not success:
exit(1)
except Exception as ex:
show("FAILED", str(ex))
exit(2)
def run_solve():
try:
show(solution.solve())
except NoSolutionError:
show("Unable to find a solution.")
exit(1)
    t0 = perf_counter()
    if opts.check:
        run_check()
    else:
        run_solve()
    print(f"[+] Perf: {perf_counter() - t0}")
| 35.274194 | 119 | 0.600366 |
17fdcf6678bed3d9c6f85756d4e7f97ad803236f
| 1,799 |
py
|
Python
|
venv/Lib/site-packages/pynance/data/lab.py
|
LeonardoHMS/imobi
|
6b2b97a05df67ea7d493f7b601382f65c6629cc2
|
[
"MIT"
] | 35 |
2015-03-12T04:16:14.000Z
|
2020-12-17T18:10:15.000Z
|
venv/Lib/site-packages/pynance/data/lab.py
|
LeonardoHMS/imobi
|
6b2b97a05df67ea7d493f7b601382f65c6629cc2
|
[
"MIT"
] | 31 |
2015-03-16T21:31:04.000Z
|
2021-01-26T00:12:34.000Z
|
venv/Lib/site-packages/pynance/data/lab.py
|
LeonardoHMS/imobi
|
6b2b97a05df67ea7d493f7b601382f65c6629cc2
|
[
"MIT"
] | 18 |
2015-09-30T10:40:26.000Z
|
2021-01-25T21:20:44.000Z
|
"""
.. Copyright (c) 2014- Marshall Farrier
license http://opensource.org/licenses/MIT
Data - building labels (:mod:`pynance.data.lab`)
====================================================
.. currentmodule:: pynance.data.lab
These functions are intended to be used in conjunction
with :func:`functools.partial` to pass to
:func:`pynance.data.combine.labeledfeatures`.
For example::
>>> from functools import partial
>>> features, labels = pn.data.labeledfeatures(eqdata, 256,
... partial(pn.data.lab.growth, 32))
"""
import pandas as pd
def growth(interval, pricecol, eqdata):
"""
Retrieve growth labels.
Parameters
--------------
interval : int
Number of sessions over which growth is measured. For example, if
the value of 32 is passed for `interval`, the data returned will
show the growth 32 sessions ahead for each data point.
    pricecol : str
        Column of `eqdata` to be used for prices (normally 'Adj Close').
    eqdata : DataFrame
        Data for evaluating growth.
Returns
--------
labels : DataFrame
Growth labels for the specified period
skipatend : int
Number of rows skipped at the end of `eqdata` for the given labels.
Used to synchronize labels and features.
Examples
---------------
>>> from functools import partial
>>> features, labels = pn.data.labeledfeatures(eqdata, 256,
... partial(pn.data.lab.growth, 32, 'Adj Close'))
"""
size = len(eqdata.index)
labeldata = eqdata.loc[:, pricecol].values[interval:] /\
eqdata.loc[:, pricecol].values[:(size - interval)]
df = pd.DataFrame(data=labeldata, index=eqdata.index[:(size - interval)],
columns=['Growth'], dtype='float64')
return df
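if __name__ == '__main__':
    # Minimal self-check (illustrative, not part of the original module): with
    # a price series that grows 10% per session, every 2-session growth label
    # should be ~1.21.
    _eq = pd.DataFrame({'Adj Close': [100.0, 110.0, 121.0, 133.1]})
    print(growth(2, 'Adj Close', _eq))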
| 31.561404 | 77 | 0.622012 |
a4aca6cdfc51635e5bb0a033367ae85e9487e710
| 4,904 |
py
|
Python
|
test/test_npu/test_floor_divide.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | 1 |
2021-12-02T03:07:35.000Z
|
2021-12-02T03:07:35.000Z
|
test/test_npu/test_floor_divide.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | 1 |
2021-11-12T07:23:03.000Z
|
2021-11-12T08:28:13.000Z
|
test/test_npu/test_floor_divide.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2020, Huawei Technologies.All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
import sys
import copy
from common_utils import TestCase, run_tests
from common_device_type import dtypes, instantiate_device_type_tests
from util_test import create_common_tensor
import random
import math
class TestFloorDivide(TestCase):
# pylint: disable=unused-variable,unused-argument
def cpu_op_exec(self, input1, input2):
        output = torch.floor_divide(input1, input2)
output = output.numpy()
return output
def cpu_op_exec_fp16(self, input1, input2):
input1 = input1.to(torch.float32)
input2 = input2.to(torch.float32)
output = torch.floor_divide(input1, input2)
output = output.numpy()
output = output.astype(np.float16)
return output
def npu_op_exec(self, input1, input2):
input1 = input1.to("npu")
input2 = input2.to("npu")
        output = torch.floor_divide(input1, input2)
output = output.to("cpu")
output = output.numpy()
return output
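    # Each test below draws random CPU/NPU tensor pairs via create_common_tensor,
    # runs floor_divide on both backends, and checks the NPU output against the
    # CPU golden result.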
def test_floor_divide_common_shape_format(self, device):
shape_format = [
[[np.float32, -1, (4, 3, 3)]],
[[np.float32, -1, (4, 5, 5)]],
[[np.float32, -1, (3, 3, 3)]],
[[np.float32, -1, (4, 4, 4)]],
[[np.float32, -1, (2, 0, 2)]]
]
for item in shape_format:
cpu_input1, npu_input1 = create_common_tensor(item[0], 10, 100)
cpu_input2, npu_input2 = create_common_tensor(item[0], 10, 100)
cpu_output = self.cpu_op_exec(cpu_input1, cpu_input2)
npu_output = self.npu_op_exec(npu_input1, npu_input2)
self.assertRtolEqual(cpu_output, npu_output)
def test_floor_divide_float16_shape_format(self, device):
shape_format = [
[[np.float16, -1, (4, 2, 6, 6)]],
[[np.float16, -1, (4, 2, 8, 8)]],
]
for item in shape_format:
cpu_input1, npu_input1 = create_common_tensor(item[0], 1, 100)
cpu_input2, npu_input2 = create_common_tensor(item[0], 1, 100)
cpu_output = self.cpu_op_exec_fp16(cpu_input1, cpu_input2)
npu_output = self.npu_op_exec(npu_input1, npu_input2)
self.assertRtolEqual(cpu_output, npu_output)
def test_floor_divide_int32_shape_format(self, device):
shape_format = [
[[np.int32, -1, (4, 3)]],
[[np.int32, -1, (4, 5)]],
[[np.int32, -1, (3, 3)]],
[[np.int32, -1, (4, 4)]]
]
for item in shape_format:
cpu_input1, npu_input1 = create_common_tensor(item[0], 100, 1000)
cpu_input2, npu_input2 = create_common_tensor(item[0], 100, 1000)
cpu_output = self.cpu_op_exec(cpu_input1, cpu_input2)
npu_output = self.npu_op_exec(npu_input1, npu_input2)
self.assertRtolEqual(cpu_output, npu_output)
def test_floor_divide_int8_shape_format(self, device):
shape_format = [
[[np.int8, -1, (4, 8, 3)]],
[[np.int8, -1, (4, 7, 5)]],
[[np.int8, -1, (3, 6, 3)]],
[[np.int8, -1, (4, 5, 4)]]
]
for item in shape_format:
cpu_input1, npu_input1 = create_common_tensor(item[0], 10, 100)
cpu_input2, npu_input2 = create_common_tensor(item[0], 10, 100)
cpu_output = self.cpu_op_exec(cpu_input1, cpu_input2)
npu_output = self.npu_op_exec(npu_input1, npu_input2)
self.assertRtolEqual(cpu_output, npu_output)
def test_floor_divide_uint8_shape_format(self, device):
shape_format = [
[[np.uint8, -1, (4, 3, 3)]],
[[np.uint8, -1, (4, 5, 5)]],
[[np.uint8, -1, (3, 3, 3)]],
[[np.uint8, -1, (4, 4, 4)]]
]
for item in shape_format:
cpu_input1, npu_input1 = create_common_tensor(item[0], 10, 100)
cpu_input2, npu_input2 = create_common_tensor(item[0], 10, 100)
cpu_output = self.cpu_op_exec(cpu_input1, cpu_input2)
npu_output = self.npu_op_exec(npu_input1, npu_input2)
self.assertRtolEqual(cpu_output, npu_output)
instantiate_device_type_tests(TestFloorDivide, globals(), except_for='cpu')
if __name__ == '__main__':
run_tests()
| 40.528926 | 77 | 0.617659 |
5e0a20530c94cb511f529986c16deb5e62506b21
| 499 |
py
|
Python
|
Points.py
|
BogyMitutoyoCTL/Riesen-Tetris-3
|
349d7b6f12c2ddf0b66683c904406a5e8596228d
|
[
"MIT"
] | 1 |
2019-10-22T14:02:34.000Z
|
2019-10-22T14:02:34.000Z
|
Points.py
|
BogyMitutoyoCTL/Riesen-Tetris-3
|
349d7b6f12c2ddf0b66683c904406a5e8596228d
|
[
"MIT"
] | null | null | null |
Points.py
|
BogyMitutoyoCTL/Riesen-Tetris-3
|
349d7b6f12c2ddf0b66683c904406a5e8596228d
|
[
"MIT"
] | null | null | null |
class Points:
def __init__(self):
self.points = 0
def lines(self, i):
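        # Classic Tetris line-clear scoring: 1 line = 40, 2 = 100, 3 = 300, 4 = 1200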
        if i == 1:
            self.points += 40
        elif i == 2:
            self.points += 100
        elif i == 3:
            self.points += 300
        elif i == 4:
            self.points += 1200
    def traversed_lines(self, y):
        if y > 0:
            self.points += y
    def new_block(self):
        self.points += 2
| 22.681818 | 44 | 0.486974 |
52499dc53897fb3902fd3b2dc5e13c8e4c1f0086
| 16,905 |
py
|
Python
|
wz/ui_modules/tab_grade_editor_term.py
|
gradgrind/WZ
|
672d93a3c9d7806194d16d6d5b9175e4046bd068
|
[
"Apache-2.0"
] | null | null | null |
wz/ui_modules/tab_grade_editor_term.py
|
gradgrind/WZ
|
672d93a3c9d7806194d16d6d5b9175e4046bd068
|
[
"Apache-2.0"
] | null | null | null |
wz/ui_modules/tab_grade_editor_term.py
|
gradgrind/WZ
|
672d93a3c9d7806194d16d6d5b9175e4046bd068
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
ui/tab_grade_editor_term.py
Last updated: 2021-03-19
Editor for grades – manage grades for school terms.
=+LICENCE=============================
Copyright 2021 Michael Towers
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=-LICENCE========================================
"""
#TODO ...
### Messages
_MADE_REPORTS = "Notenzeugnisse erstellt"
_NO_REPORTS = "Keine Notenzeugnisse erstellt"
_NOT_INTERRUPTABLE = "+++ Der Prozess kann nicht unterbrochen werden +++"
_TITLE_TABLE_REPLACE = "Neue Tabelle speichern"
# Would need to be a bit different for individual pupils:
_TABLE_REPLACE = "Die neue Tabelle wird die alte ersetzen.\n" \
"Soll sie jetzt gespeichert werden?"
_NO_GRADE_FILES = "Keine Tabellen zur Aktualisierung"
_BAD_GRADE_FILE = "Ungültige Tabellendatei:\n {fpath}"
_UPDATED_GRADES = "Notentabelle aktualisiert: {n} Quelldatei(en)"
_GRADE_TABLE_MISMATCH = "{error}:\n Jahr: {year}, Gruppe: {group}," \
" Anlass: {term}"
### Labels, etc.
_EDIT_GRADES = "Noten verwalten"
_TERM = "Anlass:"
_GROUP = "Klasse/Gruppe:"
_SAVE = "Änderungen speichern"
_TABLE_XLSX = "Noteneingabe-Tabelle\nerstellen"
_TT_TABLE_XLSX = "Tabelle der unterrichteten Fächer als xlsx-Datei erstellen"
_TABLE_PDF = "Tabelle als PDF"
_REPORT_PDF = "Zeugnis(se) erstellen"
_TABLE_IN1 = "Notentabelle ersetzen,\n externe einlesen"
_TT_TABLE_IN1 = "Ersetze die Notentabelle durch die gewählte Datei" \
" (xlsx, ods, tsv)"
_TABLE_IN_DIR = "Noten aktualisieren,\n von externem Ordner"
_TT_TABLE_IN_DIR = "Aktualisiere die Notentabelle von den Dateien" \
" (xlsx, ods, tsv) im gewählten Ordner"
_FILESAVE = "Datei speichern"
_FILEOPEN = "Datei öffnen"
_DIROPEN = "Ordner öffnen"
_EXCEL_FILE = "Excel-Datei (*.xlsx)"
_TABLE_FILE = "Tabellendatei (*.xlsx *.ods *.tsv)"
#####################################################
import os, glob
from qtpy.QtWidgets import QHBoxLayout, QVBoxLayout, QLabel, \
        QPushButton, QFileDialog, QWidget
from ui.grid import GridView
from ui.grade_grid import GradeGrid
from ui.abitur_pupil_view import AbiPupilView
from ui.ui_support import VLine, KeySelect, TabPage, QuestionDialog
class ManageTerm(QWidget):
"""The controls for managing term grades.
"""
def __init__(self, gridview):
self.grid_view = gridview
super().__init__()
vbox = QVBoxLayout(self)
### Select group (might be just one entry ... perhaps even none)
self.group_select = KeySelect(changed_callback = self.group_changed)
vbox.addWidget(QLabel(_GROUP))
vbox.addWidget(self.group_select)
vbox.addSpacing(30)
self.pb_save = QPushButton(_SAVE)
vbox.addWidget(self.pb_save)
        self.pb_save.clicked.connect(self.save)
vbox.addStretch(1)
        pbTable = QPushButton(_TABLE_XLSX)
        pbTable.setToolTip(_TT_TABLE_XLSX)
        vbox.addWidget(pbTable)
        pbTable.clicked.connect(self.make_table)
        vbox.addSpacing(10)
        pbTableIn1 = QPushButton(_TABLE_IN1)
        pbTableIn1.setToolTip(_TT_TABLE_IN1)
        vbox.addWidget(pbTableIn1)
        pbTableIn1.clicked.connect(self.input_table)
        pbTableInDir = QPushButton(_TABLE_IN_DIR)
        pbTableInDir.setToolTip(_TT_TABLE_IN_DIR)
        vbox.addWidget(pbTableInDir)
        pbTableInDir.clicked.connect(self.input_tables)
        vbox.addSpacing(30)
        pbPdf = QPushButton(_TABLE_PDF)
        vbox.addWidget(pbPdf)
        pbPdf.clicked.connect(self.print_table)
        vbox.addSpacing(10)
        pbReport = QPushButton(_REPORT_PDF)
        vbox.addWidget(pbReport)
        pbReport.clicked.connect(self.make_reports)
### External notification (from data-view module)
gridview.changes_notification_handler(self.set_changed)
#
def set_changed(self, show):
"""Handler for change of "modification status".
<show> is true when the viewed data is different from the saved
version, false when the data is identical.
"""
self.pb_save.setEnabled(show)
#
def save(self):
#TODO: self.grade_scene?
BACKEND('GRADES_save', changes = self.grade_scene.change_values())
# -> redisplay of term/group
###
class GradeEdit(TabPage):
def __init__(self):
super().__init__(_EDIT_GRADES)
topbox = QHBoxLayout()
self.vbox.addLayout(topbox)
#*********** The "main" widget ***********
        self.gradeView = GridView()
self.grade_scene = None
topbox.addWidget(self.gradeView)
topbox.addWidget(VLine())
cbox = QVBoxLayout()
self.term_select = KeySelect(changed_callback = self.term_changed)
cbox.addWidget(QLabel(_TERM))
cbox.addWidget(self.term_select)
### Select group (might be just one entry ... perhaps even none)
self.group_select = KeySelect(changed_callback = self.group_changed)
cbox.addWidget(QLabel(_GROUP))
cbox.addWidget(self.group_select)
#TODO: Would a reworking of "special" reports for only individual pupils
# be sensible? (It would need a new input table ...)
# Would it ever be useful to have "special" reports for a whole group?
#TODO: Name tags for tests rather than dates?
# A "New" button to start a new entry? Is renaming possible? (It should
# be, surely?). A name field instead of the grade-date field?
### List of dates ("special" reports and "tests")
self.date_select = KeySelect(changed_callback = self.date_changed)
cbox.addWidget(self.date_select)
### List of pupils ("Abitur" only)
self.pselect = KeySelect(changed_callback = self.pupil_changed)
cbox.addWidget(self.pselect)
cbox.addSpacing(30)
self.gradeView.pbSave = QPushButton(_SAVE)
cbox.addWidget(self.gradeView.pbSave)
        # Connect via a lambda so that the button's <checked> argument
        # is not passed into <force> (see <self.save>)
        self.gradeView.pbSave.clicked.connect(lambda: self.save())
cbox.addStretch(1)
pbTable = QPushButton(_TABLE_XLSX)
pbTable.setToolTip(_TT_TABLE_XLSX)
cbox.addWidget(pbTable)
pbTable.clicked.connect(self.make_table)
cbox.addSpacing(10)
pbTableIn1 = QPushButton(_TABLE_IN1)
pbTableIn1.setToolTip(_TT_TABLE_IN1)
cbox.addWidget(pbTableIn1)
pbTableIn1.clicked.connect(self.input_table)
pbTableInDir = QPushButton(_TABLE_IN_DIR)
pbTableInDir.setToolTip(_TT_TABLE_IN_DIR)
cbox.addWidget(pbTableInDir)
pbTableInDir.clicked.connect(self.input_tables)
cbox.addSpacing(30)
pbPdf = QPushButton(_TABLE_PDF)
cbox.addWidget(pbPdf)
pbPdf.clicked.connect(self.print_table)
cbox.addSpacing(10)
pbReport = QPushButton(_REPORT_PDF)
cbox.addWidget(pbReport)
pbReport.clicked.connect(self.make_reports)
topbox.addLayout(cbox)
#
def clear(self):
"""Check for changes in the current "scene", allowing these to
be discarded if desired. If accepted (or no changes), clear the
"scene" and return <True>, otherwise leave the display unaffected
and return <False>.
"""
return self.gradeView.set_scene(None)
#
def year_change_ok(self):
return self.clear()
#
def enter(self):
BACKEND('GRADES_init')
#
def leave(self):
if self.clear():
# Drop the data structures associated with the grade view
self.grade_scene = None
return True
else:
return False
#
# def year_changed(self):
# if not self.clear():
# self.year_select.reset(ADMIN.current_year())
# return
# self.term_select.trigger()
#
def SET_TERMS(self, terms, term):
"""CALLBACK: Supplies the terms as a list of pairs:
[[key1, term1], [key2, term2], ...]
Also the selected term is passed. Set the term selection widget
and trigger a "change of term" signal.
"""
ix = 0
for t, tdisp in terms:
if term == t:
break
ix += 1
else:
ix = 0
self.term_select.set_items(terms, index = ix)
self.term_select.trigger()
return True
#
def term_changed(self, key):
if not self.clear():
return False
BACKEND('GRADES_set_term', term = key)
self.term = key
return True
#
#TODO: group to set?
def SET_GROUPS(self, groups):
glist = [(grp, grp) for grp in groups]
self.group_select.set_items(glist)
self.group_select.trigger()
#
    def group_changed(self, group):
        if not self.clear():
            return False
        if group:
            self.group = group
        BACKEND('GRADES_set_group', group = group)
        return True
#
def date_changed(self, date):
if not self.clear():
return False
BACKEND('GRADES_set_report_date', date = date)
return True
#
def SET_PUPILS(self, termx, group, pid_name_list, pid):
self.pselect.set_items(pid_name_list)
self.pselect.reset(pid)
#? self.pselect.trigger()
#
def SET_GRID(self, **parms):
self.grade_scene = GradeGrid(self.gradeView, **parms)
self.gradeView.set_scene(self.grade_scene)
#
def SET_GRADES(self, grades):
"""<grades> is a list: [[pid, sid, val], ... ]
"""
self.grade_scene.set_grades(grades)
#
def pupil_changed(self, pid):
"""A new pupil has been selected: reset the grid accordingly.
"""
if not self.clear():
return False
self.pid = pid
if pid:
if self.term == 'A':
self.grade_scene = AbiPupilView(self.gradeView)
self.gradeView.set_scene(self.grade_scene)
self.grade_scene.set_pupil(pid)
return True
if self.term[0] not in ('S', 'T'):
#TODO:
REPORT("TODO: Change pupil %s" % pid)
return True
self.group_changed(None)
return True
#
def abitur_INIT_CELLS(self, data):
self.grade_scene.init_cells(data)
#
def abitur_SET_CELLS(self, data):
self.grade_scene.set_cells(data)
#
#TODO ...
# Connected via a lambda (see the constructor) so that the button's
# <checked> argument is not passed into <force>:
#     button.clicked.connect(lambda: self.save())
    def save(self, force = True):
        if self.clear():    # TODO: honour <force> (should skip the question dialog)
if self.term[0] in ('S', 'T'):
pid = self.grade_scene.grade_table.term
self.group_changed(None)
#
def make_table(self):
"""Generate input table for the grades.
"""
self.save(force = False)
gtable = self.grade_scene.grade_table
qbytes = gtable.make_grade_table()
dir0 = ADMIN._savedir or os.path.expanduser('~')
filename = os.path.basename(GradeBase.table_path(
gtable.group, gtable.term)) + '.xlsx'
fpath = QFileDialog.getSaveFileName(self.gradeView, _FILESAVE,
os.path.join(dir0, filename), _EXCEL_FILE)[0]
if fpath:
ADMIN.set_savedir(os.path.dirname(fpath))
with open(fpath, 'wb') as fh:
fh.write(bytes(qbytes))
#
def input_table(self):
"""Import a single grade table, replacing the internal table.
"""
self.clear()
dir0 = ADMIN._loaddir or os.path.expanduser('~')
fpath = QFileDialog.getOpenFileName(self.gradeView, _FILEOPEN,
dir0, _TABLE_FILE)[0]
if fpath:
ADMIN.set_loaddir(os.path.dirname(fpath))
gtable = GradeTableFile(ADMIN.current_year(), fpath)
# Check that it matches the currently selected group/term
try:
self.grade_scene.grade_table.check_group_term(gtable)
# ... only returns if ok
except GradeTableError as e:
REPORT('ERROR', _GRADE_TABLE_MISMATCH.format(error = e,
year = gtable.schoolyear, group = gtable.group,
term = gtable.term))
else:
if QuestionDialog(_TITLE_TABLE_REPLACE, _TABLE_REPLACE):
gtable.save() # save table
# Redisplay table
self.grade_scene = GradeGrid(self.gradeView, ADMIN.current_year(),
self.group, self.term)
self.gradeView.set_scene(self.grade_scene)
#
def input_tables(self):
"""Import a folder of grade tables, collate the contents and
update the internal table.
Only non-empty cells in the imported tables are taken into
consideration and only one imported table may supply the
value for a given cell.
The "information" fields are not affected.
"""
self.clear()
dir0 = ADMIN._loaddir or os.path.expanduser('~')
dpath = QFileDialog.getExistingDirectory(self.gradeView,
_DIROPEN, dir0,
QFileDialog.ShowDirsOnly | QFileDialog.DontResolveSymlinks)
if dpath:
ADMIN.set_loaddir(dpath)
# Reload grade table, in case changes were not saved
grade_table = GradeTable(ADMIN.current_year(), self.group,
self.term, ok_new = True)
fn = _UpdateGrades(grade_table, dpath)
cc = REPORT('RUN', runme = fn)
# Redisplay table
self.grade_scene = GradeGrid(self.gradeView, ADMIN.current_year(),
self.group, self.term)
self.gradeView.set_scene(self.grade_scene)
#
def make_reports(self):
"""Generate the grade report(s).
"""
self.save(force = False)
greports = GradeReports(ADMIN.current_year(), self.group, self.term)
fn = _MakeReports(greports)
files = REPORT('RUN', runme = fn)
#
def print_table(self):
"""Output the table as pdf.
"""
self.save(force = False)
if self.grade_scene:
self.grade_scene.to_pdf()
###
class _MakeReports:#(ThreadFunction):
def __init__(self, grade_reports):
super().__init__()
self._grade_reports = grade_reports
#
def run(self):
files = self._grade_reports.makeReports()
if files:
REPORT('INFO', "%s:\n --> %s" % (_MADE_REPORTS,
'\n --> '.join(files)))
else:
REPORT('ERROR', _NO_REPORTS)
#
def terminate(self):
return False
###
class _UpdateGrades:#(ThreadFunction):
def __init__(self, grade_table, dpath):
super().__init__()
self.grade_table = grade_table
self.dpath = dpath
#
def run(self):
self._cc = 0
gtables = []
for f in os.listdir(self.dpath):
self.message("FILE: %s" % f)
if self._cc:
return -1
fpath = os.path.join(self.dpath, f)
try:
gtable = GradeTableFile(ADMIN.current_year(), fpath,
full_table = False)
except:
REPORT('WARN', _BAD_GRADE_FILE.format(fpath = fpath))
else:
# Check that it matches the currently selected group/term
try:
self.grade_table.check_group_term(gtable)
# ... only returns if ok
except GradeTableError as e:
REPORT('ERROR', _GRADE_TABLE_MISMATCH.format(error = e,
year = gtable.schoolyear, group = gtable.group,
term = gtable.term))
gtables.append(gtable)
if gtables:
self.grade_table.integrate_partial_data(*gtables)
REPORT('INFO', _UPDATED_GRADES.format(n = len(gtables)))
return len(gtables)
else:
REPORT('WARN', _NO_GRADE_FILES)
return 0
tab_grade_editor = GradeEdit()
TABS.append(tab_grade_editor)
FUNCTIONS['grades_SET_TERMS'] = tab_grade_editor.SET_TERMS
FUNCTIONS['grades_SET_GROUPS'] = tab_grade_editor.SET_GROUPS
FUNCTIONS['grades_SET_PUPILS'] = tab_grade_editor.SET_PUPILS
FUNCTIONS['grades_SET_GRADES'] = tab_grade_editor.SET_GRADES
FUNCTIONS['grades_SET_GRID'] = tab_grade_editor.SET_GRID
FUNCTIONS['abitur_INIT_CELLS'] = tab_grade_editor.abitur_INIT_CELLS
FUNCTIONS['abitur_SET_CELLS'] = tab_grade_editor.abitur_SET_CELLS
| 35.366109 | 78 | 0.625377 |
2173f0a79dff091b71860dab7d3bacdd5b8d0b20
| 3,601 |
py
|
Python
|
Algorithms/notes/recursion_practice.py
|
tobias-fyi/02_algorithms
|
ab1a8a07c3560ad66712992e3af906e8fd316fe2
|
[
"MIT"
] | null | null | null |
Algorithms/notes/recursion_practice.py
|
tobias-fyi/02_algorithms
|
ab1a8a07c3560ad66712992e3af906e8fd316fe2
|
[
"MIT"
] | 8 |
2020-03-24T17:47:23.000Z
|
2022-03-12T00:33:21.000Z
|
cs/lambda_cs/02_algorithms/Algorithms/notes/recursion_practice.py
|
tobias-fyi/vela
|
b0b3d3c6dc3fa397c8c7a492098a02cf75e0ff82
|
[
"MIT"
] | null | null | null |
"""
Algorithms :: Practice - recursion
"""
# %%
def foo1(x=3):
def foo2(y=2):
def foo3(z=1):
return z
return foo3() + y
return foo2() + x
print(foo1())
# %%
def summ(n: int) -> int:
if n == 1:
return n
else:
return n + summ(n - 1)
print(summ(4))
# %%
def factorial(n: int) -> int:
# Base case
if n == 1:
print(f"Base case frame: n = {n}")
return n
print(f"Pre-recursive: n = {n} | Calling `{n} * factorial({n - 1})`")
# Recursive case
r = factorial(n - 1)
print(f"Post-recursive: r = {r}")
return n * r
print(factorial(7))
# %%
def dec_to_bin(n):
if n == 0:
print(f"Base case frame: n == {n}")
return 0
else:
print(f"Pre-recursive: n = {n}")
r = 10 * dec_to_bin(int(n / 2))
print(f"Post-recursive: r = {r}")
return n % 2 + r
print(dec_to_bin(7))
# %%
# Greatest common denominator - iterative solution
def gcd_iter(a, b):
counter = 1
print(f"step = {counter}")
while b:
print(f"before: a = {a}, b = {b}, a % b = {a % b}")
a, b = b, a % b
print(f"after : a = {a}, b = {b}")
counter += 1
return a
print(gcd_iter(12, 20))
# %%
# Greatest common denominator - recursive solution
def gcd_recur(a, b):
if b == 0:
return a
else:
return gcd_recur(b, a % b)
print(gcd_recur(12, 20))
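# Call trace for gcd_recur(12, 20) (added for illustration):
#   gcd_recur(12, 20) -> gcd_recur(20, 12) -> gcd_recur(12, 8)
#   -> gcd_recur(8, 4) -> gcd_recur(4, 0) -> 4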
# %%
"""
Implement a recursive algorithm itos() that converts a number, digit by digit, to a string.
- Don’t convert the entire integer to a string and return it - that’s cheating!
- The final returned result should be a single string representing the entire number.
- For example, if we passed the integer 1234 to itos(), the function would return
'1234' such that type('1234') == str.
- You can break this problem down into three parts.
- How do you identify your base case?
- The pre-recursive work:
- How do you get to that base case?
- How do you need to seed your frames on the way to the base case?
- The post-recursive work:
- What would you add to the base case as it works its way back through the
recursed calls?
- Does the order of what is returned and what is added matter?
- Annotate your solution with print statements that show, at each frame:
- the state of the function, specifying what is being passed and
what is being returned
- a counter that tracks the frames as they are opened and closed
"""
# %%
frame = 1
print(f"At global frame = {frame}")
def itos(n: int) -> str:
global frame
frame += 1
# TODO: Base case
if n % 10 == n:
print(f"Base case frame = {frame}, n = {n}")
return str(n)
# TODO: Recursive case
print(f"pre-recursive: n = {n}")
digit = n % 10 # Extract last digit
print(f"Last digit: {digit}")
rest = n // 10 # `Pop` last digit off of number
print(f"Rest of number: {rest}")
r = itos(rest) # recurse into rest of number
frame -= 1
print(f"post-recursive: r = {r}")
return r + str(digit)
print(f"Result: {itos(1234)} (type of {type(itos(1234))})")
# %%
def itos(n: int) -> str:
# Base case: one digit number
if n % 10 == n: # Clever way of saying `if n < 10`
return str(n)
# Recursive case
digit = n % 10 # Extract last digit
rest = n // 10 # `Pop` last digit off of number
print(f"{n} -> itos({rest}) + str({digit})")
r = itos(rest) # recurse into rest of number
return r + str(digit)
result = itos(54321)
print(f"Result: {result} (type of {type(result)})")
# %%
| 22.936306 | 91 | 0.581783 |
1d3ce454d05b00409952f22337450b978253643b
| 1,049 |
py
|
Python
|
marthakarpeter/ex2.py
|
appfs/appfs
|
8cbbfa0e40e4d4a75a498ce8dd894bb2fbc3a9e3
|
[
"MIT"
] | 11 |
2017-04-21T11:39:55.000Z
|
2022-02-11T20:25:18.000Z
|
marthakarpeter/ex2.py
|
appfs/appfs
|
8cbbfa0e40e4d4a75a498ce8dd894bb2fbc3a9e3
|
[
"MIT"
] | 69 |
2017-04-26T09:30:38.000Z
|
2017-08-01T11:31:21.000Z
|
marthakarpeter/ex2.py
|
appfs/appfs
|
8cbbfa0e40e4d4a75a498ce8dd894bb2fbc3a9e3
|
[
"MIT"
] | 53 |
2017-04-20T16:16:11.000Z
|
2017-07-19T12:53:01.000Z
|
# ex2 APPFS Martha Karpeter 367847
import sys
import xml.etree.ElementTree as ET
file = sys.argv[-1]
tree = ET.parse(open(file, "r"))
root = tree.getroot()
def find(tree, node):
found = tree.find(node)
    if found is None:
print("No {} in file".format(node))
found = []
return found
csv = ""
for gasDay in root:
if str(gasDay.tag).split("}")[-1] == "gasDay":
date = gasDay.get("date")
startH = int(gasDay.get("gasDayStartHourInUTC"))
for bdyNode in gasDay:
if str(bdyNode.tag).split("}")[-1] == "boundaryNode":
for time in bdyNode:
if str(time.tag).split("}")[-1] == "time":
HH = int(time.get("hour"))%25
value = int(time.find("{http://gaslab.zib.de/kwpt/measured}amountOfPower").get("value"))
csv += "{}; {}; {}\n".format(date,HH+startH,value)
print(csv)
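# Each emitted row has the form "<date>; <hour>; <value>", e.g.
# "2017-01-01; 6; 512" (values here are purely illustrative).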
# gasDayStartHourInUTC -> during winter: 5, summer 4 o'clock
# HH in {1..25}
# amountOfPower max length = 1024
| 28.351351 | 112 | 0.551954 |
d5a0b13c631785f7d23a94a1fef11d921258f702
| 2,105 |
py
|
Python
|
__init__.py
|
JoanChirinos/IWasSick
|
f7ab65407e210a3e20b942596fe9a741207f8beb
|
[
"MIT"
] | null | null | null |
__init__.py
|
JoanChirinos/IWasSick
|
f7ab65407e210a3e20b942596fe9a741207f8beb
|
[
"MIT"
] | null | null | null |
__init__.py
|
JoanChirinos/IWasSick
|
f7ab65407e210a3e20b942596fe9a741207f8beb
|
[
"MIT"
] | null | null | null |
#! /usr/bin/python3
from flask import (Flask, render_template, redirect, url_for,
session, request, flash)
from util import db
app = Flask(__name__)
app.secret_key = 'DONT LOOK!!!!'
@app.route('/')
def home():
if 'email' in session:
return render_template('logged_in_index.html')
return render_template('index.html')
@app.route('/excuse')
def excuse():
    ex = db.getExcuse()
    # ex = 'Hehe i didn\'t feel like it'  # debug stub: uncomment to bypass the database
if 'email' in session:
return render_template('logged_in_results.html', excuse=ex,
email=session['email'])
return render_template('results.html', excuse=ex)
@app.route('/create_account')
def create_account():
return render_template('register.html')
@app.route('/register', methods=["POST"])
def register():
email = request.form['email']
password = request.form['password']
password_check = request.form['password_check']
success = db.register(email, password, password_check)
if success:
flash('Success!')
else:
flash('Couldn\'t create account')
return redirect(url_for('home'))
@app.route('/login', methods=["POST"])
def login():
email = request.form['email']
password = request.form['password']
success = db.login(email, password)
if success:
flash('Logged in!')
session['email'] = email
else:
flash('Incorrect credentials!')
return redirect(url_for('home'))
@app.route('/logout')
def logout():
if 'email' in session:
        session.pop('email')
return redirect(url_for('home'))
@app.route('/my_excuses')
def my_excuses():
if 'email' not in session:
return redirect(url_for('home'))
excuses = db.getMyExcuses(session['email'])
return render_template('excuses.html', excuses=excuses)
@app.route('/save/<excuse>')
def save_excuse(excuse):
if 'email' not in session:
return redirect(url_for('home'))
db.saveExcuse(session['email'], excuse)
return redirect(url_for('my_excuses'))
if __name__ == '__main__':
app.debug = True
app.run()
| 24.476744 | 67 | 0.63943 |
98a2e78a3c24d70f1ba3dd03d0ee0d5b875687e4
| 9,189 |
py
|
Python
|
Python/Buch_Python3_Das_umfassende_Praxisbuch/Kapitel_07_Sequenzen_Mengen_und_Generatoren/01_grundlagen.py
|
Apop85/Scripts
|
e71e1c18539e67543e3509c424c7f2d6528da654
|
[
"MIT"
] | null | null | null |
Python/Buch_Python3_Das_umfassende_Praxisbuch/Kapitel_07_Sequenzen_Mengen_und_Generatoren/01_grundlagen.py
|
Apop85/Scripts
|
e71e1c18539e67543e3509c424c7f2d6528da654
|
[
"MIT"
] | 6 |
2020-12-24T15:15:09.000Z
|
2022-01-13T01:58:35.000Z
|
Python/Buch_Python3_Das_umfassende_Praxisbuch/Kapitel_07_Sequenzen_Mengen_und_Generatoren/01_grundlagen.py
|
Apop85/Scripts
|
1d8dad316c55e1f1343526eac9e4b3d0909e4873
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
###
# File: 01_grundlagen.py
# Project: Kapitel_07_Sequenzen_Mengen_und_Generatoren
# Created Date: Tuesday 26.02.2019, 11:02
# Author: Apop85
# -----
# Last Modified: Tuesday 05.03.2019, 15:39
# -----
# Copyright (c) 2019 Apop85
# This software is published under the MIT license.
# Check http://www.opensource.org/licenses/MIT for further informations
# -----
# Description: Basic knowledge about sequences, quantities and generators.
###
import re
def output(title, string):
max_length=80
max_delta=20
string+=' '*max_length
print('╔'+'═'*max_length+'╗')
print('║'+title.center(max_length).upper()+'║')
print('╠'+'═'*max_length+'╣')
search_pattern=re.compile(r'(.{'+str(max_length-max_delta-10)+r','+str(max_length-10)+r'}[^\w"])')
reg_lines=search_pattern.findall(string)
for line in reg_lines:
print('║ '+line+' '*(max_length-len(line)-1)+'║')
print('╚'+'═'*max_length+'╝')
input()
output('Definition of sequences','Sequences are objects that consist of a linear arrangement of other objects, which are called the items or elements of the sequence.')
output('Sequence types','Python distinguishes between three different sequence types: list, tuple and string.')
output('Sequence properties','Sequences have special properties that distinguish them from other types. They all have a length, meaning all items are numbered and arranged in a particular order.')
output('Sequence operations','The following operations can be performed on sequences:')
output('Membership test','"x in sequence", "x not in sequence" to check whether the object x occurs in a sequence or not.')
output('Sequence concatenation and multiplication','"sequence+sequence", "sequence*integer" to join sequences or to repeat them.')
output('Accessing elements of a sequence','"sequence[integer]" to access a particular element of a sequence, or "sequence[integer:]", "sequence[:integer]", "sequence[integer:integer+n]" to obtain a slice of a sequence.')
output('Sequence functions','"len(sequence)" to determine the length of the sequence, "min(sequence)" and "max(sequence)" to find the smallest and the largest entry in the sequence, respectively.')
output('Sequence unpacking','If you know the length of a sequence you can "unpack" it, meaning all elements of a sequence are assigned to names in a single assignment. Example: liste=["hallo","du"] | a,b=liste so that a="hallo" and b="du". If the list length is unknown you can also use a,*b=liste; the first element is then assigned to a and the remaining elements to b.')
output('Tuples','Tuples are immutable sequences. Tuples are used when you want to represent a coherent object that consists of several elements, for example the first and last name of a person or the X and Y coordinates of an object.')
output('Lists','In contrast to tuples, lists are mutable. They can contain objects of any type, even freely mixed. Lists can be used to model complex data structures, from simple linear arrangements such as phone lists up to virtual worlds, for instance a building of corridors and rooms connected by doors and stairs.')
output('List functions','Because they are mutable, lists provide additional operations and methods for modifying them.')
output('Lists: replacing items','The operation liste[n]=x assigns the value x to the n-th element of the list. With liste[n:p]=liste2 the elements n to p are replaced by the entries of liste2.')
output('Lists: adding items','liste.append(x) appends the item x at the end of the list. liste.extend(liste2) extends the list by the contents of another list. liste.insert(n, x) inserts the object x at position n of the list.')
output('Lists: removing items','del liste[n] removes the n-th element of the list. del liste[n:p] removes the elements n to p. The function liste.pop() removes the last element of the list and returns its value. To remove an item with a particular value from the list, use liste.remove(x), which removes the first element that equals x.')
output('Lists: finding items','To find out at which position of the list the item x is located, use liste.index(x). To find out how often the element x occurs in the list, use liste.count(x).')
output('Lists: changing the order','To change the order of a list you can use either liste.reverse() to reverse the list or liste.sort() to sort it in ascending order. sort() also accepts a key, e.g. liste.sort(key=len), in this example to sort the contents of the list by the length of the items. The additional argument reverse=True can be passed to invert the sort order.')
output('Lists: shallow and deep copies','Lists can also be copied, although liste2=liste1 is not a copy; this expression means that the name liste2 refers to liste1 and is not a list of its own. If you want to copy a list whose items may still change, create a shallow copy with liste2=liste1[:]. This creates a new list named liste2 holding references to the contents of liste1. If you want an independent copy containing the values of liste1, use deepcopy() from the copy module.')
output('Generating lists by expression','Lists can also be generated by an expression. Example: liste=[i**2 for i in range(5)], or liste=[i**2 for i in range(100) if i%7==0] to obtain the squares of all numbers up to 100 that are divisible by seven. To compare two lists and output their common contents you can use liste=[i for i in liste1 if i in liste2]. To generate tuples from two lists you can use: liste=[(i,j) for i in liste1 for j in liste2]')
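# Quick demonstration of the comprehensions described above (added for
# illustration; liste1/liste2 are throwaway example names):
liste1, liste2 = [1, 2, 3], [2, 3, 4]
print([i ** 2 for i in range(5)])          # [0, 1, 4, 9, 16]
print([i for i in liste1 if i in liste2])  # [2, 3]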
output('Definition of generators','While sequences and sets store their data explicitly, generators are virtual collections whose contents are only produced on demand.')
output('Generator expressions','A simple generator is created with an expression, for example n_square=(i*i for i in range(10)) to enumerate the squares of the numbers 0 to 9.')
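# The generator expression just described, consumed lazily (illustration only):
n_square = (i * i for i in range(10))
print(next(n_square))  # 0
print(next(n_square))  # 1
print(list(n_square))  # the remaining squares 4 .. 81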
output('Reading generators','Generators cannot be printed directly with print. Their individual items can, however, be read with a for loop.')
output('Generator functions','Generator functions are functions in which the yield statement occurs.')
output('yield','With yield <return value> a generator can be suspended and the current value emitted.')
output('next()','The function next(generator_item) produces and returns the next value of the generator.')
output('Iterators','Iterators are special generators that control access to the elements of a collection.')
output('iter()','The function iter() takes a sequence and yields the next element of the sequence one at a time.')
output('min() / max()','If the function min() or max() is applied to a generator, the generator afterwards holds no more objects, since all of them have been consumed.')
output('Using generators','Generators use considerably less memory than static lists. They are also often used to define sets.')
output('Definition of sets','Sets are unordered data collections whose elements occur only once.')
output('Set types','For representing finite sets Python provides the types "set" and "frozenset".')
output('set()','Objects of type set() are like lists: their contents are mutable. As an alternative to set() the elements can be written in curly braces. Example: a={1,2,3,4,5}')
output('frozenset()','Objects of type frozenset() are like tuples or strings, i.e. immutable.')
output('Special properties of sets','If a list with several identical entries is passed to set() or frozenset(), the duplicate entries are removed. Also, only immutable elements may occur in a set, i.e. tuples, strings, frozenset objects and numbers, but no mutable objects.')
output('menge_1 | menge_2','The "|" operator forms the union of two sets.')
output('menge_1 & menge_2','The "&" operator yields the intersection of the two sets.')
output('menge_1 - menge_2','The "-" operator yields the difference of the two sets.')
output('menge_1 <= menge_2','The "<=" operator checks whether menge_1 is a subset of menge_2.')
output('menge_1 >= menge_2','The ">=" operator checks whether menge_2 is a subset of menge_1.')
output('Iterability of sets','Sets are also iterable, which is why they too can be read with a for loop.')
| 117.807692 | 585 | 0.78126 |
63ea9fde35c924cb3488b6987ca9bf50acb51aea
| 4,195 |
py
|
Python
|
nodes/ue02/moveTurtle_distance_LOESUNG.py
|
ProfJust/Ruhr-TurtleBot-Competition-RTC-
|
5c2425bee331b4d5033757a9425676932d111775
|
[
"Unlicense",
"MIT"
] | null | null | null |
nodes/ue02/moveTurtle_distance_LOESUNG.py
|
ProfJust/Ruhr-TurtleBot-Competition-RTC-
|
5c2425bee331b4d5033757a9425676932d111775
|
[
"Unlicense",
"MIT"
] | null | null | null |
nodes/ue02/moveTurtle_distance_LOESUNG.py
|
ProfJust/Ruhr-TurtleBot-Competition-RTC-
|
5c2425bee331b4d5033757a9425676932d111775
|
[
"Unlicense",
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# --- moveTurtle_distance_LOESUNG.py ------
# Version of 16.11.2020 by OJ
# without OOP / classes
# ----------------------------------
# Starting ROS and the TurtleSim
# $1 roscore
# $2 roslaunch turtlebot3_gazebo turtlebot3_house.launch
# $3 rosrun rtc moveTurtle_distance_gazebo.py
# (beforehand: catkin_make, then make the script executable with chmod +x)
# ------------------------------------------
import rospy
from math import pow, atan2, sqrt, pi
from geometry_msgs.msg import Twist
from turtlesim.msg import Pose
# --- Create a global pose variable ---
# Needed to get the pose out of the callback function
# (which we neither call ourselves nor receive a return value from)
pose = Pose()
def update_pose(data):
# Callback function which is called when a new message
# of type Pose is received by the subscriber.
# rospy.loginfo(rospy.get_caller_id() + "x %s y %s theta %s",
# data.x, data.y, data.theta)
    pose.x = round(data.x, 4)  # round to 4 digits
pose.y = round(data.y, 4)
pose.theta = round(data.theta, 4)
def move():
# Creates a node with name 'turtlebot_controller' and make sure it is a
# unique node (using anonymous=True).
rospy.init_node('turtlebot_controller', anonymous=True)
# Publisher which will publish to the topic '/turtle1/cmd_vel'.
velocity_publisher = rospy.Publisher('/turtle1/cmd_vel',
Twist,
queue_size=10)
# A subscriber to the topic '/turtle1/pose'. self.update_pose is called
# when a message of type Pose is received.
rospy.Subscriber('/turtle1/pose', Pose, update_pose)
rate = rospy.Rate(10)
# Get the input from the user.
    dist_x = float(input("Set your x dist: "))
    dist_y = float(input("Set your y dist: "))
    # Compute the distance and heading for the turtle
dist = sqrt(pow(dist_x, 2) + pow(dist_y, 2))
sollTheta = atan2(dist_y, dist_x)
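    # atan2 returns the heading of the (dist_x, dist_y) vector in [-pi, pi],
    # which is compared against the turtle's own theta in the turn loop below.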
# Get start pose of Turtle - meanwhile received?
start_x = pose.x
start_y = pose.y
    # Debug output
rospy.loginfo("Start Pose is %s %s", start_x, start_y)
rospy.loginfo("Angle to turn %s ", sollTheta)
rospy.loginfo("Still to turn %s ", abs(pose.theta - sollTheta))
    vel_msg = Twist()  # instantiate the Twist message
    # --- First rotate the turtle ---
tolerance = 0.015
while (abs(pose.theta - sollTheta) > tolerance):
        # clamp theta to the range [-pi, pi]
if pose.theta > pi:
pose.theta = pose.theta - 2 * pi
elif pose.theta < -pi:
pose.theta = pose.theta + 2 * pi
# set Angular velocity in the z-axis.
if pose.theta - sollTheta > 0:
vel_msg.angular.z = -0.1
else:
vel_msg.angular.z = 0.1
        # Debug output
rospy.loginfo("Pose is %s", pose.theta)
rospy.loginfo("Goal angle is %s", sollTheta)
rospy.loginfo("Still to turn %s ", abs(pose.theta - sollTheta))
velocity_publisher.publish(vel_msg) # Publishing our vel_msg
rate.sleep() # Publish at the desired rate
# Stopping our robot after the movement is over
vel_msg.linear.x = 0
vel_msg.angular.z = 0
velocity_publisher.publish(vel_msg)
    # --- Then drive the distance ---
while sqrt(pow((start_x - pose.x), 2)
+ pow((start_y - pose.y), 2)) < abs(dist):
# Linear velocity in the x-axis.
vel_msg.linear.x = 0.2
rospy.loginfo("Pose is %s %s", pose.x, pose.y)
rospy.loginfo("Still to Go %s ",
dist-sqrt(pow((start_x - pose.x), 2)
+ pow((start_y - pose.y), 2)))
# Publishing our vel_msg
velocity_publisher.publish(vel_msg)
# Publish at the desired rate.
rate.sleep()
# Stopping our robot after the movement is over.
rospy.loginfo("Reached aim - now stopping ")
    # Stop the turtle by publishing zero velocities
    vel_msg.linear.x = 0
    vel_msg.angular.z = 0
    velocity_publisher.publish(vel_msg)
    exit()
# If we press control + C, the node will stop.
# rospy.spin()
if __name__ == '__main__':
try:
move()
except rospy.ROSInterruptException:
pass
| 33.56 | 75 | 0.601907 |
f624b3d43b6d50614f6f8e8a181ccf0f111e7a60
| 2,544 |
py
|
Python
|
official/nlp/prophetnet/src/utils/eval_score.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
official/nlp/prophetnet/src/utils/eval_score.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
official/nlp/prophetnet/src/utils/eval_score.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Get score by given metric."""
from .ppl_score import ngram_ppl
from .rouge_score import rouge
def get_ppl_score(result):
"""
Calculate Perplexity(PPL) score.
    Args:
        result (List[Dict]): prediction, each example has 4 keys, "source",
            "target", "log_prob" and "length".
Returns:
Float, ppl score.
"""
log_probs = []
total_length = 0
for sample in result:
log_prob = sample['log_prob']
length = sample['length']
log_probs.extend(log_prob)
total_length += length
print(f" | log_prob:{log_prob}")
print(f" | length:{length}")
ppl = ngram_ppl(log_probs, total_length, log_softmax=True)
print(f" | final PPL={ppl}.")
return ppl
def get_rouge_score(result, vocab):
"""
Calculate ROUGE score.
    Args:
        result (List[Dict]): prediction, each example has 4 keys, "source",
            "target", "prediction" and "prediction_prob".
        vocab (Dictionary): dict instance.
return:
Str, rouge score.
"""
predictions = []
targets = []
for sample in result:
predictions.append(' '.join([vocab[t] for t in sample['prediction']]))
targets.append(' '.join([vocab[t] for t in sample['target']]))
print(f" | source: {' '.join([vocab[t] for t in sample['source']])}")
print(f" | target: {targets[-1]}")
return rouge(predictions, targets)
def get_score(result, vocab=None, metric='rouge'):
"""
Get eval score.
    Args:
        result (List[Dict]): prediction.
        vocab (Dictionary): dict instance.
        metric (Str): metric function, default is rouge.
Return:
Str, Score.
"""
score = None
if metric == 'rouge':
score = get_rouge_score(result, vocab)
elif metric == 'ppl':
score = get_ppl_score(result)
else:
print(f" |metric not in (rouge, ppl)")
return score
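# Typical usage (illustrative; `vocab` must map token ids to token strings):
#   rouge_scores = get_score(result, vocab, metric='rouge')
#   ppl = get_score(result, metric='ppl')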
| 27.354839 | 78 | 0.610456 |
2ec8c7af211c0b5d149b59315ba8d7953085f12a
| 3,231 |
py
|
Python
|
RFID_auslesen_LCD.py
|
bhemsen/bigBrother
|
260f927fdcf6b6d43ec79a7a7ea4ba61a44c5e6d
|
[
"MIT"
] | null | null | null |
RFID_auslesen_LCD.py
|
bhemsen/bigBrother
|
260f927fdcf6b6d43ec79a7a7ea4ba61a44c5e6d
|
[
"MIT"
] | null | null | null |
RFID_auslesen_LCD.py
|
bhemsen/bigBrother
|
260f927fdcf6b6d43ec79a7a7ea4ba61a44c5e6d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf8 -*-
import time
import RPi.GPIO as GPIO
import MFRC522
import signal
import busio
import board as board1
import adafruit_character_lcd.character_lcd_i2c as character_lcd
import Database
from Database import Database
import ast
# Define the LCD column and row counts.
lcd_columns = 16
lcd_rows = 2
# Initialize the I2C bus
i2c = busio.I2C(board1.SCL, board1.SDA)
# Store the LCD object in the variable lcd
lcd = character_lcd.Character_LCD_I2C(i2c, lcd_columns, lcd_rows, 0x21)
continue_reading = True
def entry(name):
    # start scripts, log data, etc.
lcd.backlight = True
    lcd.message = (name + ", you awesome guy\nGood luck!")
time.sleep(5.0)
lcd.backlight = False
lcd.clear()
MIFAREReader.MFRC522_StopCrypto1()
def noentry(name):
print ("Kein Zutritt!")
lcd.backlight = True
lcd.message = ("Kein Zutritt, "+ name + "\nFrag den Admin")
time.sleep(5.0)
lcd.backlight = False
lcd.clear()
MIFAREReader.MFRC522_StopCrypto1()
def unknown():
print("Karte nicht bekannt")
lcd.backlight = True
lcd.message = ("----!Hau ab!----\n----!SOFORT!----")
lcd.backlight = False
time.sleep(1.0)
for i in range(4):
lcd.backlight = True
time.sleep(.7)
lcd.backlight = False
time.sleep(.7)
lcd.clear()
MIFAREReader.MFRC522_StopCrypto1()
def compareKeyWithDatabaseKeys(key):
db = Database("localhost", "webadmin", "password", "sensoro")
result = db.getAllowdRFIDS()
for i in range(len(result)):
respondKey = ast.literal_eval(result[i][0])
allowedKey = respondKey[:9]
securityLevel = result[i][2]
name = result[i][1]
print(allowedKey)
if allowedKey == key:
if securityLevel == 2:
entry(name)
access = "granted"
db.logEntry(name , key, access)
return
else:
noentry(name)
access = "denied"
db.logEntry(name , key, access)
return
unknown()
name = "unknown"
access = "denied"
db.logEntry(name , key, access)
# ...
MIFAREReader = MFRC522.MFRC522()
# the first 9 bytes are the authentication code
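# Main polling loop: request a tag, read its UID, authenticate block 8 with the
# default key, then match the first 9 bytes of the block against the database.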
try:
while True:
# Scan for cards
(status,TagType) = MIFAREReader.MFRC522_Request(MIFAREReader.PICC_REQIDL)
# If a card is found
if status == MIFAREReader.MI_OK:
# Get the UID of the card
(status,uid) = MIFAREReader.MFRC522_Anticoll()
print(uid)
# This is the default key for authentication
key = [0xFF,0xFF,0xFF,0xFF,0xFF,0xFF]
# Select the scanned tag
MIFAREReader.MFRC522_SelectTag(uid)
# Authenticate
status = MIFAREReader.MFRC522_Auth(MIFAREReader.PICC_AUTHENT1A, 8, key, uid)
# Check if authenticated
if status == MIFAREReader.MI_OK:
# Read block 8
data = MIFAREReader.MFRC522_Read(8)
compareKeyWithDatabaseKeys(data[:9])
except KeyboardInterrupt:
print("Abbruch")
GPIO.cleanup()
| 28.095652 | 88 | 0.60848 |
2ef2f2004c51175ade2e82b408fd8dd4522ad343
| 682 |
py
|
Python
|
code/begin/Cookie.py
|
redxyb/Flask
|
4ee226501f16eb0fa5cb585dc6bf780005fa8a28
|
[
"MIT"
] | null | null | null |
code/begin/Cookie.py
|
redxyb/Flask
|
4ee226501f16eb0fa5cb585dc6bf780005fa8a28
|
[
"MIT"
] | null | null | null |
code/begin/Cookie.py
|
redxyb/Flask
|
4ee226501f16eb0fa5cb585dc6bf780005fa8a28
|
[
"MIT"
] | null | null | null |
'''
Author: xyb
Date: 2020-08-10 18:06:52
LastEditTime: 2020-08-10 18:34:28
'''
from flask import Flask, make_response, redirect, request
app = Flask(__name__)
# Set a cookie
@app.route('/set_cookie')
def set_cookie():
resp = make_response('set_cookie ok')
    resp.set_cookie('username', 'xyb', max_age=3600)  # max_age: lifetime in seconds
return resp
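# Quick check with curl (illustrative):
#   curl -i http://127.0.0.1:5000/set_cookie    (response carries Set-Cookie: username=xyb)
#   curl -b "username=xyb" http://127.0.0.1:5000/get_cookie    (prints xyb)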
# Read a cookie
@app.route('/get_cookie')
def get_cookie():
    resp = request.cookies.get('username', '')  # default avoids returning None
return resp
# Delete a cookie
@app.route('/delete_cookie')
def delete_cookie():
resp = make_response("hello,xyb")
resp.delete_cookie('username')
return resp
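# Try it out: visit /set_cookie, then /get_cookie, then /delete_cookie.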
if __name__ == "__main__":
app.run(host='', port=5000, debug=False)
| 18.944444 | 66 | 0.68915 |
2ef97251288ee2925580a4ac7956e57e50d63431
| 736 |
py
|
Python
|
Problems/Dynamic Programming/Easy/PascalTriangle/test_pascal_triangle.py
|
dolong2110/Algorithm-By-Problems-Python
|
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
|
[
"MIT"
] | 1 |
2021-08-16T14:52:05.000Z
|
2021-08-16T14:52:05.000Z
|
Problems/Dynamic Programming/Easy/PascalTriangle/test_pascal_triangle.py
|
dolong2110/Algorithm-By-Problems-Python
|
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
|
[
"MIT"
] | null | null | null |
Problems/Dynamic Programming/Easy/PascalTriangle/test_pascal_triangle.py
|
dolong2110/Algorithm-By-Problems-Python
|
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from pascal_triangle import generate, getRow
class Test(TestCase):
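    # Pascal's rule: each interior entry equals the sum of the two entries above it.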
    def test_generate(self):
        self.assertEqual(generate(1), [[1]])
        self.assertEqual(generate(2), [[1], [1, 1]])
        self.assertEqual(generate(3), [[1], [1, 1], [1, 2, 1]])
        self.assertEqual(generate(4), [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]])
        self.assertEqual(generate(5), [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]])
    def test_get_row(self):
        self.assertEqual(getRow(0), [1])
        self.assertEqual(getRow(1), [1, 1])
        self.assertEqual(getRow(2), [1, 2, 1])
        self.assertEqual(getRow(3), [1, 3, 3, 1])
        self.assertEqual(getRow(4), [1, 4, 6, 4, 1])
| 40.888889 | 95 | 0.53125 |
25ccf02f07ff391e152d0dc6435f9657af7dcbb0
| 289 |
py
|
Python
|
etc code/make_movie1.py
|
gusghrlrl101/Dance-Helper
|
e9f5f6168f1dc6944c25cc8ff7eb829791fe484c
|
[
"MIT"
] | 11 |
2019-06-17T02:59:01.000Z
|
2021-05-24T14:10:04.000Z
|
etc code/make_movie1.py
|
RabbitG29/Dance-Helper
|
e9f5f6168f1dc6944c25cc8ff7eb829791fe484c
|
[
"MIT"
] | null | null | null |
etc code/make_movie1.py
|
RabbitG29/Dance-Helper
|
e9f5f6168f1dc6944c25cc8ff7eb829791fe484c
|
[
"MIT"
] | 3 |
2019-06-17T02:59:32.000Z
|
2019-07-03T04:31:30.000Z
|
import cv2
from glob import glob
# Stitch the numbered PNG frames into a 24 fps AVI. Each frame must match
# the 1280x720 size given to VideoWriter, or OpenCV silently drops it.
fourcc = cv2.VideoWriter_fourcc(*'DIVX')
out = cv2.VideoWriter('result1.avi', fourcc, 24.0, (1280, 720))
img_paths = sorted(glob('*[0-9].png'))
for img_path in img_paths:
    img = cv2.imread(img_path)
    out.write(img)
out.release()
| 24.083333 | 63 | 0.719723 |
e2a35d584959ae99843bafb3656bd6fd7fb6b40f
| 2,783 |
py
|
Python
|
research/cv/gan/src/param_parse.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 1 |
2021-11-18T08:17:44.000Z
|
2021-11-18T08:17:44.000Z
|
research/cv/gan/src/param_parse.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | null | null | null |
research/cv/gan/src/param_parse.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 2 |
2019-09-01T06:17:04.000Z
|
2019-10-04T08:39:45.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
'''GAN parameter parser'''
import argparse
def parameter_parser():
'''parameter parser'''
parser = argparse.ArgumentParser()
parser.add_argument("--n_epochs", type=int, default=200, help="number of epochs of training")
parser.add_argument("--batch_size", type=int, default=64, help="size of the batches")
parser.add_argument("--lr", type=float, default=0.001, help="adam: learning rate")
parser.add_argument("--b1", type=float, default=0.5, help="adam: decay of first order momentum of gradient")
parser.add_argument("--b2", type=float, default=0.999, help="adam: decay of first order momentum of gradient")
parser.add_argument("--n_ascend", type=int, default=8,
help="number of ascend threads to use during batch generation")
parser.add_argument("--latent_dim", type=int, default=100, help="dimensionality of the latent space")
parser.add_argument("--img_size", type=int, default=28, help="size of each image dimension")
parser.add_argument("--channels", type=int, default=1, help="number of image channels")
parser.add_argument("--sample_interval", type=int, default=400, help="interval between image samples")
parser.add_argument("-d", default="mnist")
parser.add_argument("-l", type=float, default=1000)
parser.add_argument("-c", "--cross_val", default=10, type=int, help="Number of cross valiation folds")
parser.add_argument("--sigma_start", default=-1, type=float)
parser.add_argument("--sigma_end", default=0, type=float)
parser.add_argument("-s", "--sigma", default=None)
parser.add_argument("--batch_size_t", type=int, default=10, help="size of the test batches")
parser.add_argument("--batch_size_v", type=int, default=1000, help="size of the valid batches")
parser.add_argument('--device_id', type=int, default=0, help='device id of Ascend (Default: 0)')
parser.add_argument("--data_path", type=str, default="data/MNIST_Data/", help="dataset path")
parser.add_argument("--distribute", type=bool, default=False, help="Run distribute, default is false.")
return parser.parse_args()
| 59.212766 | 114 | 0.699605 |
39521d5ee9e97573f6ceb47868c517bb312c6928
| 330 |
py
|
Python
|
pusta1/config.py
|
EE/flexdb
|
08a80b9e56201e678ef055af27bdefa6d52bcbf5
|
[
"MIT"
] | null | null | null |
pusta1/config.py
|
EE/flexdb
|
08a80b9e56201e678ef055af27bdefa6d52bcbf5
|
[
"MIT"
] | null | null | null |
pusta1/config.py
|
EE/flexdb
|
08a80b9e56201e678ef055af27bdefa6d52bcbf5
|
[
"MIT"
] | null | null | null |
app_name = "pusta1"
prefix_url = "pusta1"
static_files = {
'js': {
'pusta1/js/': ['main.js', ]
},
'css': {
'pusta1/css/': ['main.css', ]
},
'html': {
'pusta1/html/': ['index.html', ]
}
}
permissions = {
"edit": "Editing actualy nothing.",
"sample1": "sample1longversion",
}
| 18.333333 | 40 | 0.487879 |
201b911da6e44fbbfb75076d67137a743cb199f1
| 874 |
py
|
Python
|
setup.py
|
stefaniuk/trading-bot
|
403abd2b53caf686a6d2456e7eab124c670e7340
|
[
"MIT"
] | 3 |
2018-05-10T13:51:42.000Z
|
2020-07-05T16:43:45.000Z
|
setup.py
|
stefaniuk/trading-bot
|
403abd2b53caf686a6d2456e7eab124c670e7340
|
[
"MIT"
] | null | null | null |
setup.py
|
stefaniuk/trading-bot
|
403abd2b53caf686a6d2456e7eab124c670e7340
|
[
"MIT"
] | 1 |
2020-04-22T09:06:17.000Z
|
2020-04-22T09:06:17.000Z
|
from setuptools import setup
setup(
name="tradingbot",
version="0.1a0",
packages=['tradingbot'],
install_requires=[
'trading212api'
],
zip_safe=False,
author="Federico Lolli",
author_email="[email protected]",
description="Package to invest Trading212",
license="MIT",
keywords="trading bot",
url="https://github.com/federico123579/TradingBot",
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development :: User Interfaces',
'Topic :: System :: Emulators'
]
)
| 30.137931 | 59 | 0.615561 |
b3b3247b69dd6d2c3fb39bdc65a4e36c66e47ade
| 5,831 |
py
|
Python
|
publ/cli.py
|
fluffy-critter/Publ
|
868c5a17c605707dde4d1a95d79405301bd12f05
|
[
"MIT"
] | 6 |
2018-03-29T02:07:44.000Z
|
2018-09-26T00:17:31.000Z
|
publ/cli.py
|
fluffy-critter/Publ
|
868c5a17c605707dde4d1a95d79405301bd12f05
|
[
"MIT"
] | 82 |
2018-04-01T08:53:59.000Z
|
2018-09-28T23:45:05.000Z
|
publ/cli.py
|
fluffy-critter/Publ
|
868c5a17c605707dde4d1a95d79405301bd12f05
|
[
"MIT"
] | null | null | null |
""" CLI utilities for Publ """
# pylint:disable=too-many-arguments
import itertools
import logging
import os.path
import re
import time
import arrow
import click
import slugify
from flask.cli import AppGroup, with_appcontext
from pony import orm
from . import queries
from .config import config
LOGGER = logging.getLogger(__name__)
publ_cli = AppGroup('publ', short_help="Publ-specific commands") # pylint:disable=invalid-name
@publ_cli.command('reindex', short_help="Reindex the content store")
@click.option('--quietly', '-q', 'quietly', is_flag=True, help="Quietly")
@click.option('--fresh', '-f', 'fresh', is_flag=True, help="Start with a fresh database")
@with_appcontext
def reindex_command(quietly, fresh):
""" Forces a reindex of the content store.
This is particularly useful to ensure that all content has been indexed
before performing another action, such as sending out notifications.
"""
from . import index, model
if fresh:
model.reset()
spinner = itertools.cycle('|/-\\')
index.scan_index(config.content_folder, False)
while index.in_progress():
if not quietly:
qlen = index.queue_size() or ''
print(f"\rIndexing... {next(spinner)} {qlen} ", end='', flush=True)
time.sleep(0.1)
if not quietly:
print("Done")
@publ_cli.command('token', short_help="Generate a bearer token")
@click.argument('identity')
@click.option('--scope', '-s', help="The token's permission scope")
@click.option('--lifetime', '-l', help="The token's lifetime (in seconds)", default=3600)
@with_appcontext
def token_command(identity, scope, lifetime):
""" Generates a bearer token for use with external applications. """
from . import tokens
print(tokens.get_token(identity, int(lifetime), scope))
@publ_cli.command('normalize', short_help="Normalize entry filenames")
@click.argument('category', nargs=-1)
@click.option('--recurse', '-r', 'recurse', is_flag=True,
help="Include subdirectories")
@click.option('--all', '-a', 'all_entries', is_flag=True,
help="Apply to all entries, not just reachable ones")
@click.option('--dry-run', '-n', 'dry_run', is_flag=True,
help="Show, but don't apply, changes")
@click.option('--format', '-f', 'format_str',
help="Filename format to use",
default="{date} {sid} {title}")
@click.option('--verbose', '-v', 'verbose', is_flag=True,
help="Show detailed actions")
@with_appcontext
@orm.db_session
def normalize_command(category, recurse, dry_run, format_str, verbose, all_entries):
""" Normalizes the filenames of content files based on a standardized format.
This will only normalize entries which are already in the content index.
If no categories are specified, it defaults to the root category. To include
the root category in a list of other categories, use an empty string parameter,
e.g.:
flask publ normalize '' blog
Available tokens for --format/-f:
{date} The entry's publish date, in YYYYMMDD format
{time} The entry's publish time, in HHMMSS format
{id} The entry's ID
{status} The entry's publish status
{sid} If the entry is reachable, the ID, otherwise the status
{title} The entry's title, normalized to filename-safe characters
{slug} The entry's slug text
{type} The entry's type
"""
# pylint:disable=too-many-locals
from .model import PublishStatus
entries = queries.build_query({
'category': category or '',
'recurse': recurse,
'_future': True,
'_all': all_entries,
})
fname_slugify = slugify.UniqueSlugify(max_length=100, safe_chars='-.', separator=' ')
for entry in entries:
path = os.path.dirname(entry.file_path)
basename, ext = os.path.splitext(os.path.basename(entry.file_path))
status = PublishStatus(entry.status)
eid = entry.id
if status == PublishStatus.DRAFT:
# Draft entries don't get a stable entry ID
eid = status.name
sid = entry.id if status in (PublishStatus.PUBLISHED,
PublishStatus.HIDDEN,
PublishStatus.SCHEDULED) else status.name
date = arrow.get(entry.local_date)
dest_basename = format_str.format(
date=date.format('YYYYMMDD'),
time=date.format('HHmmss'),
id=eid,
status=status.name,
sid=sid,
title=entry.title,
slug=entry.slug_text,
type=entry.entry_type).strip()
dest_basename = re.sub(r' +', ' ', dest_basename)
if dest_basename != basename:
while True:
# UniqueSlugify will bump the suffix until it doesn't collide
dest_path = os.path.join(path, fname_slugify(dest_basename) + ext)
if not os.path.exists(dest_path):
break
if verbose:
print(f'{entry.file_path} -> {dest_path}')
if not os.path.isfile(entry.file_path):
LOGGER.warning('File %s does not exist; is the index up-to-date?', entry.file_path)
elif os.path.exists(dest_path):
LOGGER.warning('File %s already exists', dest_path)
elif not dry_run:
try:
os.rename(entry.file_path, dest_path)
except OSError:
LOGGER.exception('Error moving %s to %s', entry.file_path, dest_path)
entry.file_path = dest_path
orm.commit()
def setup(app):
""" Register the CLI commands with the command parser """
app.cli.add_command(publ_cli)
| 33.705202 | 99 | 0.624936 |
923e246774c40f0baba2357271d7034cba423a4b
| 4,442 |
py
|
Python
|
mumath/context/chemistry.py
|
fourpoints/mumath
|
f1c36c4a5b3c32a3e7f8e7a922eafea8b7a14fd4
|
[
"MIT"
] | null | null | null |
mumath/context/chemistry.py
|
fourpoints/mumath
|
f1c36c4a5b3c32a3e7f8e7a922eafea8b7a14fd4
|
[
"MIT"
] | null | null | null |
mumath/context/chemistry.py
|
fourpoints/mumath
|
f1c36c4a5b3c32a3e7f8e7a922eafea8b7a14fd4
|
[
"MIT"
] | null | null | null |
def normal(): return dict(mathvariant="normal")
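# Entries are (atomic number, symbol, element name); H, D and T all share Z = 1.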
_ELEMENTS = [
(1, r"H", "Helium"), # ^1H
(1, r"D", "Deuterium"), # ^2H
(1, r"T", "Tritium"), # ^3H
(2, r"He", "Helium"),
(3, r"Li", "Lithium"),
(4, r"Be", "Beryllium"),
(5, r"B", "Boron"),
(6, r"C", "Carbon"),
(7, r"N", "Nitrogen"),
(8, r"O", "Oxygen"),
(9, r"F", "Fluorine"),
(10, r"Ne", "Neon"),
(11, r"Na", "Sodium"),
(12, r"Mg", "Magnesium"),
(13, r"Al", "Aluminium"),
(14, r"Si", "Silicon"),
(15, r"P", "Phosphorus"),
(16, r"S", "Sulfur"),
(17, r"Cl", "Chlorine"),
(18, r"Ar", "Argon"),
(19, r"K", "Potassium"),
(20, r"Ca", "Calcium"),
(21, r"Sc", "Scandium"),
(22, r"Ti", "Titanium"),
(23, r"V", "Vanadium"),
(24, r"Cr", "Chromium"),
(25, r"Mn", "Manganese"),
(26, r"Fe", "Iron"),
(27, r"Co", "Cobalt"),
(28, r"Ni", "Nickel"),
(29, r"Cu", "Copper"),
(30, r"Zn", "Zinc"),
(31, r"Ga", "Gallium"),
(32, r"Ge", "Germanium"),
(33, r"As", "Arsenic"),
(34, r"Se", "Selenium"),
(35, r"Br", "Bromine"),
(36, r"Kr", "Krypton"),
(37, r"Rb", "Rubidium"),
(38, r"Sr", "Strontium"),
(39, r"Y", "Yttrium"),
(40, r"Zr", "Zirconium"),
(41, r"Nb", "Niobium"),
(42, r"Mo", "Molybdenum"),
(43, r"Tc", "Technetium"),
(44, r"Ru", "Ruthenium"),
(45, r"Rh", "Rhodium"),
(46, r"Pd", "Palladium"),
(47, r"Ag", "Silver"),
(48, r"Cd", "Cadmium"),
(49, r"In", "Indium"),
(50, r"Sn", "Tin"),
(51, r"Sb", "Antimony"),
(52, r"Te", "Tellurium"),
(53, r"I", "Iodine"),
(54, r"Xe", "Xenon"),
(55, r"Cs", "Caesium"),
(56, r"Ba", "Barium"),
(57, r"La", "Lanthanum"),
(58, r"Ce", "Cerium"),
(59, r"Pr", "Praseodymium"),
(60, r"Nd", "Neodymium"),
(61, r"Pm", "Promethium"),
(62, r"Sm", "Samarium"),
(63, r"Eu", "Europium"),
(64, r"Gd", "Gadolinium"),
(65, r"Tb", "Terbium"),
(66, r"Dy", "Dysprosium"),
(67, r"Ho", "Holmium"),
(68, r"Er", "Erbium"),
(69, r"Tm", "Thulium"),
(70, r"Yb", "Ytterbium"),
(71, r"Lu", "Lutetium"),
(72, r"Hf", "Hafnium"),
(73, r"Ta", "Tantalum"),
(74, r"W", "Tungsten"),
(75, r"Re", "Rhenium"),
(76, r"Os", "Osmium"),
(77, r"Ir", "Iridium"),
(78, r"Pt", "Platinum"),
(79, r"Au", "Gold"),
(80, r"Hg", "Mercury"),
(81, r"Tl", "Thallium"),
(82, r"Pb", "Lead"),
(83, r"Bi", "Bismuth"),
(84, r"Po", "Polonium"),
(85, r"At", "Astatine"),
(86, r"Rn", "Radon"),
(87, r"Fr", "Francium"),
(88, r"Ra", "Radium"),
(89, r"Ac", "Actinium"),
(90, r"Th", "Thorium"),
(91, r"Pa", "Protactinium"),
(92, r"U", "Uranium"),
(93, r"Np", "Neptunium"),
(94, r"Pu", "Plutonium"),
(95, r"Am", "Americium"),
(96, r"Cm", "Curium"),
(97, r"Bk", "Berkelium"),
(98, r"Cf", "Californium"),
(99, r"Es", "Einsteinium"),
(100, r"Fm", "Fermium"),
(101, r"Md", "Mendelevium"),
(102, r"No", "Nobelium"),
(103, r"Lr", "Lawrencium"),
(104, r"Rf", "Rutherfordium"),
(105, r"Db", "Dubnium"),
(106, r"Sg", "Seaborgium"),
(107, r"Bh", "Bohrium"),
(108, r"Hs", "Hassium"),
(109, r"Mt", "Meitnerium"),
(110, r"Ds", "Darmstadtium"),
(111, r"Rg", "Roentgenium"),
(112, r"Cn", "Copernicium"),
(113, r"Nh", "Nihonium"),
(114, r"Fl", "Flerovium"),
(115, r"Mc", "Moscovium"),
(116, r"Lv", "Livermorium"),
(117, r"Ts", "Tennessine"),
(118, r"Og", "Oganesson"),
]
# We sort by symbol name length so He is captured before H
def sym(t): return len(t[1])
identifiers = {
r"\alembic": "⚗", # ⚗
r"\atom": "⚛", # ⚛
r"\radioactive": "☢", # ☢
r"\biohazard": "☣", # ☣
r"\poisonold": "☠", # ☠
r"\equilibrium": "⇌", # ⇌
r"\reverseequilibrium": "⇋", # ⇋
r"\biequation": "⇄", # ⇄
r"\requation": "→", # →
r"\Requation": "⟶", # ⟶
r"\lequation": "←", # ←
r"\Lequation": "⟵", # ⟵
r"\aqua": "q", # q
r"\liquid": "l", # l
r"\gas": "g", # g
r"\solid": "s", # s
r"\togas": "↑", # ↑
r"\tosolid": "↓", # ↓
}
for atomic_number, symbol, name in sorted(_ELEMENTS, key=sym, reverse=True):
identifiers[symbol] = identifiers[name] = (symbol, normal())
| 28.844156 | 76 | 0.463755 |
5b94b615ceff8d7df48ff33224494a96bc063d37
| 1,326 |
py
|
Python
|
Chrome_Dinosaur_Game.py
|
kapiljaingit02/Hacktoberfest2020
|
a9f496400d709f00b53e2309f6a1378121a1daca
|
[
"MIT"
] | null | null | null |
Chrome_Dinosaur_Game.py
|
kapiljaingit02/Hacktoberfest2020
|
a9f496400d709f00b53e2309f6a1378121a1daca
|
[
"MIT"
] | null | null | null |
Chrome_Dinosaur_Game.py
|
kapiljaingit02/Hacktoberfest2020
|
a9f496400d709f00b53e2309f6a1378121a1daca
|
[
"MIT"
] | null | null | null |
import pyautogui # pip install pyautogui
from PIL import Image, ImageGrab # pip install pillow
# from numpy import asarray
import time
def hit(key):
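    # Note: keyDown is never paired with keyUp, so the key stays logically
    # held; crude, but sufficient for this demo.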
pyautogui.keyDown(key)
return
def isCollide(data):
    # Scan the lower strip just ahead of the dinosaur for ground obstacles
    # (cacti) and jump over them.
    for i in range(725, 825):
        for j in range(280, 328):
            if data[i, j] > 140:
                hit("up")
                return
    # Scan the higher strip for birds and duck under them.
    for i in range(700, 805):
        for j in range(230, 275):
            if 140 < data[i, j] < 160:
                hit("down")
                return
    return
if __name__ == "__main__":
print("Hey.. Dinosour game about to start in 3 seconds")
time.sleep(3)
hit('up')
while True:
image = ImageGrab.grab().convert('L')
data = image.load()
isCollide(data)
        #### The code below tests detection by drawing dummy obstacle rectangles on the grabbed image.
# # Draw the rectangle for cactus
# for i in range(700, 715):
# for j in range(230, 275):
# data[i, j] = 140
# # Draw the rectangle for birds
# for i in range(710, 730):
# for j in range(280, 330):
# data[i, j] = 160
# image.show()
# break
| 27.625 | 92 | 0.500754 |