| seq_id | text | repo_name | sub_path | file_name | file_ext | file_size_in_byte | program_lang | lang | doc_type | stars | dataset | pt |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| stringlengths 7-11 | stringlengths 156-1.7M | stringlengths 7-125 | stringlengths 4-132 | stringlengths 4-77 | stringclasses 6 values | int64 156-1.7M | stringclasses 1 value | stringclasses 38 values | stringclasses 1 value | int64 0-24.2k ⌀ | stringclasses 1 value | stringclasses 1 value |
74606059388
|
from __future__ import unicode_literals
try:
from urllib2 import Request
except ImportError:
from urllib.request import Request
from mock import MagicMock, patch
from requests_kerberos import HTTPKerberosAuth
from grafana_dashboards.client.connection import (KerberosConnection,
BearerAuthConnection,
BasicAuthConnection,
SSLAuthConnection)
__author__ = 'Jakub Plichta <[email protected]>'
class Capture(object):
"""
Class for use in method call verification that captures call argument that can be tested later on.
"""
def __eq__(self, other):
"""
Captures argument and always returns true to make verification successful.
:return: True
"""
self.value = other
return True
def test_connection():
connection = BasicAuthConnection('username', 'password', 'https://host')
connection._opener = MagicMock()
# noinspection PyProtectedMember
connection._opener.open().read.return_value = '{"hello":"world"}'
assert connection.make_request('/uri', {'it\'s': 'alive'}) == {'hello': 'world'}
request = Request('https://host/uri',
'{"it\'s": "alive"}',
headers={
'Content-type': 'application/json',
'Accept': 'application/json',
'Authorization': b'Basic dXNlcm5hbWU6cGFzc3dvcmQ='
})
capture = Capture()
# noinspection PyProtectedMember
connection._opener.open.assert_called_with(capture)
assert request.get_full_url() == capture.value.get_full_url()
assert request.header_items() == capture.value.header_items()
assert request.get_method() == capture.value.get_method()
assert request.data.encode('utf-8') == capture.value.data
def test_connection_with_token():
connection = BearerAuthConnection('token', 'https://host')
connection._opener = MagicMock()
# noinspection PyProtectedMember
connection._opener.open().read.return_value = '{"hello":"world"}'
assert connection.make_request('/uri', {'it\'s': 'alive'}) == {'hello': 'world'}
request = Request('https://host/uri',
'{"it\'s": "alive"}',
headers={
'Content-type': 'application/json',
'Accept': 'application/json',
'Authorization': 'Bearer token'
})
capture = Capture()
# noinspection PyProtectedMember
connection._opener.open.assert_called_with(capture)
assert request.get_full_url() == capture.value.get_full_url()
assert request.header_items() == capture.value.header_items()
assert request.get_method() == capture.value.get_method()
assert request.data.encode('utf-8') == capture.value.data
@patch('requests.post')
def test_connection_with_kerberos(post):
connection = KerberosConnection('https://host')
post().json.return_value = {'hello': 'world'}
assert connection.make_request('/uri', {'it\'s': 'alive'}) == {'hello': 'world'}
capture = Capture()
post.assert_called_with('https://host/uri', auth=capture, json={"it's": 'alive'}, verify=False)
assert isinstance(capture.value, HTTPKerberosAuth)
@patch('requests.post')
def test_connection_with_sslauth(post):
connection = SSLAuthConnection('https://host', ('/fake/cert'))
post().json.return_value = {'hello': 'world'}
assert connection.make_request('/uri', {'it\'s': 'alive'}) == {'hello': 'world'}
post.assert_called_with('https://host/uri', json={"it's": 'alive'}, cert='/fake/cert')
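# Hedged aside (not part of the original test file): the Capture helper above is a
# generic trick for grabbing the argument a mock was called with, because its __eq__
# stores the compared value and reports equality. A minimal illustration:
#
#   m = MagicMock()
#   cap = Capture()
#   m('payload')
#   m.assert_called_with(cap)     # comparison succeeds and records the argument
#   assert cap.value == 'payload'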
| jakubplichta/grafana-dashboard-builder | tests/grafana_dashboards/client/test_connection.py | test_connection.py | py | 3,770 | python | en | code | 141 | github-code | 6 |
36031012834
|
# Recursive
def maxSum(n, arr):
# Write your code here.
def recurse(idx):
if idx == 0:
return arr[idx]
if idx < 0:
return 0
pick = arr[idx] + recurse(idx-2)
not_pick = recurse(idx-1)
return max(pick, not_pick)
return recurse(n-1)
# Memoisation
def maxSum2(n, arr):
# Write your code here.
def memoise(idx, dp):
if idx == 0:
return arr[idx]
if idx < 0:
return 0
if dp[idx] != -1:
return dp[idx]
pick = arr[idx] + memoise(idx-2, dp)
not_pick = memoise(idx-1, dp)
dp[idx] = max(pick, not_pick)
        return dp[idx]
dp = [-1] * (n)
return memoise(n-1, dp)
# Tabulation
def maxSum3(n, arr):
    def tabule(dp):
        dp[0] = arr[0]
        for i in range(1, n):
            if i > 1:
                pick = arr[i] + dp[i-2]
            else:
                pick = arr[i]
            not_pick = dp[i-1]
            dp[i] = max(pick, not_pick)
        return dp[n-1]
    dp = [-1] * n
    return tabule(dp)
# Space Optimisation
def maxSum4(n, arr):
prev2, prev = 0, arr[0]
for i in range(1, n):
take = arr[i]
if i > 1:
take += prev2
not_take = prev
curr = max(take, not_take)
prev2 = prev
prev = curr
return prev
print(maxSum(4, [2, 1, 1, 2]))
print(maxSum2(4, [2, 1, 1, 2]))
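# Hedged sanity check (not in the original file): the four variants implement the same
# "maximum sum of non-adjacent elements" recurrence, so they should agree on any input.
for _arr in ([2, 1, 4, 9], [1, 2, 3, 1, 3, 5, 8, 1, 9]):
    _n = len(_arr)
    _results = {maxSum(_n, _arr), maxSum2(_n, _arr), maxSum3(_n, _arr), maxSum4(_n, _arr)}
    assert len(_results) == 1, _results
    print(_arr, "->", _results.pop())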
| aad17/Striver-Dynamic-Programming | maxnonadjecentsum.py | maxnonadjecentsum.py | py | 1,479 | python | en | code | 0 | github-code | 6 |
2523947822
|
import argparse
import torch
from model import Pretrain_SegmentationNet, DPRAN
import os
from data.dataloader import create_dataloader
from train import net_Pretrain, DPRAN_Train
import segmentation_models_pytorch as smp
def main():
parser = argparse.ArgumentParser(description='DPRAN')
    parser.add_argument('--num_classes', default=1, type=int, help='Number of output classes [1]')
parser.add_argument('--num_channels', default=1, type=int, help='Dimension of the input CEUS frames')
parser.add_argument('--lr_pre', default=0.0002, type=float, help='Initial learning rate [0.0002]')
parser.add_argument('--lr', default=0.0002, type=float, help='Initial learning rate [0.0002]')
    parser.add_argument('--num_epochs', default=50, type=int, help='Number of total training epochs [50]')
    parser.add_argument('--num_epochs_pre', default=50, type=int, help='Number of total pre-training epochs [50]')
parser.add_argument('--dataset', default='data', type=str, help='Dataset folder name')
args = parser.parse_args()
save_path = os.path.join('checkpoint')
os.makedirs(save_path, exist_ok=True)
layers = [32, 32, 64, 128]
# data load and split
train_loader, val_loader, test_loader = create_dataloader(dataset=args.dataset, batch_size=1, is_pretraining=True)
# stage 1
net = Pretrain_SegmentationNet(n_channels=args.num_channels, n_classes=args.num_classes, layers=layers)
net.cuda()
criterion = smp.losses.DiceLoss('binary', classes=None, log_loss=False, from_logits=True, smooth=0.0,
ignore_index=None, eps=1e-07)
# Optimizer
optimizer = torch.optim.Adam(net.parameters(), lr=args.lr_pre)
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9862, last_epoch=-1)
# Parameters
epoch_start = 0
epoch_end = args.num_epochs_pre
print("Start net Pre-Training...")
net = net_Pretrain(net, criterion, optimizer, scheduler, epoch_start, epoch_end, train_loader, val_loader,
save_path)
# stage 2
print("Start DPRAN Training...")
model = DPRAN(n_channels=args.num_channels, n_classes=args.num_classes, layers=layers)
model.encoder_ceus.load_state_dict(net.encoder.state_dict())
model.cuda()
# Optimizer and loss
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9862, last_epoch=-1)
epoch_end = args.num_epochs
train_loader.dataset.is_pretraining = False
val_loader.dataset.is_pretraining = False
test_loader.dataset.is_pretraining = False
test_result, trained_model = DPRAN_Train(model, net, criterion, optimizer, scheduler,
epoch_start, epoch_end, train_loader, val_loader,
test_loader)
torch.save({'test_rec': test_result, 'DpRAN': trained_model, 'Pretrain_SegmentationNet': net.state_dict()},
os.path.join(save_path, 'DpRAN' + '.pt'))
if __name__ == "__main__":
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
SEED = 0
torch.manual_seed(SEED)
if torch.cuda.is_available():
torch.cuda.manual_seed(SEED)
main()
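# Hedged usage sketch (not part of the original script; the flag names come from the
# argparse setup above and the dataset folder name is just the default):
#
#   python main.py --dataset data --num_epochs 50 --num_epochs_pre 50 --lr 0.0002 --lr_pre 0.0002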
| wanpeng16/DpRAN | main.py | main.py | py | 3,242 | python | en | code | 1 | github-code | 6 |
19147556064
|
'''ONE-WAY ANALYSIS OF VARIANCE PROGRAM (means of n populations -- applies when the
variances are equal, e.g. as checked with Levene's test)'''
import scipy.stats as st
print(" H0 : miu sampe n sama semua")
print(" H1 : Ada miu yang tidak sama\n")
alfa = input("Tingkat Signifikansi : ")
jumlah_populasi = int(input("Jumlah Populasi : "))
data_populasi = [[17.5,16.9,15.8,18.6],[16.4,19.2,17.7,15.4],
[20.3,15.7,17.8,18.9],[14.6,16.7,20.8,18.9],
[17.5,19.2,16.5,20.5],[18.3,16.2,17.5,20.1]]
#data_populasi = [[1.06,0.79,0.82,0.89,1.05,0.95,0.65,1.15,1.12],
# [1.58,1.45,0.57,1.16,1.12,0.91,0.83,0.43],
# [0.29,0.06,0.44,0.55,0.61,0.43,0.51,0.10,0.53,
# 0.34,0.06,0.09,0.17,0.17,0.60]]
#data_populasi = [[42.5,39.3,39.6,39.9,42.9,43.6],
# [39.8,40.1,40.5,42.3,42.5,43.1],
# [40.2,40.5,41.3,43.4,44.9,45.1],
# [41.3,42.2,43.5,44.2,45.9,42.3]]
#data_populasi = []
ukuran_sampel = input("Ukuran sampel sama (Ya/Tidak) : ")
#for i in range(1,jumlah_populasi+1):
# populasi_i = []
# jumlah_data_i = int(input("Jumlah data populasi ke {0} : ".format(i)))
# for j in range(1,jumlah_data_i+1):
# data_j = float(input("Data ke {0} : ".format(j)))
# populasi_i.append(data_j)
# print("\n")
# data_populasi.append(populasi_i)
#JKT, JKK, JKG (total, column and error sums of squares) are used to determine F
#=============================================
x_kuadrat = 0
jumlah_nilai_T = []
k = jumlah_populasi
nilai_T_masing_kuadrat = []
N = 0
if ukuran_sampel == "Ya" :
n = len(data_populasi[0])
for i in range (0,jumlah_populasi):
for j in range (0,len(data_populasi[i])):
x_kuadrat += (data_populasi[i][j])**2
# print(x_kuadrat)
jumlah_nilai_i = sum (data_populasi[i])
jumlah_nilai_T.append(jumlah_nilai_i)
nilai_T_masing_kuadrat.append((jumlah_nilai_i)**2)
print("Jumlah nilai Ti. =",jumlah_nilai_T)
jumlah_nilai_T_kuadrat = (sum(jumlah_nilai_T))**2
JKT = x_kuadrat-((jumlah_nilai_T_kuadrat)/(n*k))
print("JKT = :",round(JKT,4))
JKK = (sum(nilai_T_masing_kuadrat)/n) - ((jumlah_nilai_T_kuadrat)/(n*k))
print("JKK = :",round(JKK,4))
print("JKT - JKK = :",round(JKT-JKK,4))
s1_2 = JKK/(k-1)
s2_2 = (JKT-JKK)/(k*(n-1))
print("\nRata-rata jumlah Kuadrat")
print("s1^2 = {0} s^2 = {1}".format(round(s1_2,4),round(s2_2,4)))
F = s1_2 / s2_2
print("F hitung =",round(F,4))
distribusi_f = st.f(k-1,k*(n-1))
x = distribusi_f.ppf(1-float(alfa))
print("F dengan alfa =",str(alfa),",dfn =",str(k-1),",dan dfd =",str(k*(n-1)),"adalah"
,str(round(x,4)))
print("\n")
print(" {0} {1} {2} {3} {4}"
.format("Sumber Variansi","Derajat Bebas",
"Jumlah Kuadrat", "RJK", "Statistik F"))
print("{0} {1} {2} {3} {4}"
.format("AntarMesin(Kolom)",k-1,round(JKK,4),
round(s1_2,4),round(F,4)))
print(" {0} {1} {2} {3}"
.format("Galat",k*(n-1),round(JKT-JKK,4),
round(s2_2,4)))
print(" {0} {1} {2}"
.format("Total",k*(n-1)+k-1,round(JKT,4)))
elif ukuran_sampel == "Tidak" :
for i in range (0,jumlah_populasi):
for j in range (0,len(data_populasi[i])):
x_kuadrat += (data_populasi[i][j])**2
# print(x_kuadrat)
jumlah_nilai_i = sum (data_populasi[i])
jumlah_nilai_T.append(jumlah_nilai_i)
nilai_T_masing_kuadrat.append(((jumlah_nilai_i)**2)/len(data_populasi[i]))
N_jumlah = len(data_populasi[i])
N += N_jumlah
print("Jumlah nilai Ti. =",jumlah_nilai_T)
jumlah_nilai_T_kuadrat = (sum(jumlah_nilai_T))**2
JKT = x_kuadrat-((jumlah_nilai_T_kuadrat)/(N))
print("JKT = :",round(JKT,4))
JKK = sum(nilai_T_masing_kuadrat) - ((jumlah_nilai_T_kuadrat)/(N))
print("JKK = :",round(JKK,4))
print("JKT - JKK = :",round(JKT-JKK,4))
s1_2 = JKK/(k-1)
s2_2 = (JKT-JKK)/(N-k)
print("\nRata-rata jumlah Kuadrat")
print("s1^2 = {0} s^2 = {1}".format(round(s1_2,4),round(s2_2,4)))
F = s1_2 / s2_2
print("F hitung =",round(F,4))
distribusi_f = st.f(k-1,N-k)
x = distribusi_f.ppf(1-float(alfa))
print("F dengan alfa =",str(alfa),",dfn =",str(k-1),",dan dfd =",str(N-k),"adalah"
,str(round(x,4)))
print("\n")
print(" {0} {1} {2} {3} {4}"
.format("Sumber Variansi","Derajat Bebas",
"Jumlah Kuadrat", "RJK", "Statistik F"))
print("{0} {1} {2} {3} {4}"
.format("AntarMesin(Kolom)",k-1,round(JKK,4),
round(s1_2,4),round(F,4)))
print(" {0} {1} {2} {3}"
.format("Galat",N-k,round(JKT-JKK,4),
round(s2_2,4)))
print(" {0} {1} {2}"
.format("Total",N-1,round(JKT,4)))
#print(st.f.sf(2,2,12)) #p_value langsung ini bisa
print("\n")
if F > float(round(x,4)) :
print("Karena F hitung > F tabel, H0 ditolak")
else :
print("Karena F hitung < F tabel, H0 tidak dapat ditolak")
'''RUMUS CEPAT'''
#print(st.f_oneway(data_populasi[0],data_populasi[1],data_populasi[2]))
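# Hedged aside (not part of the original script): scipy can produce the same one-way
# ANOVA F statistic, plus a p-value, directly from the samples defined above, e.g.:
#
#   f_stat, p_value = st.f_oneway(*data_populasi)
#   print(f_stat, p_value)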
| fstevenm/Project-Statistics | Statistic Method/Analisis Variansi Satu Arah.py | Analisis Variansi Satu Arah.py | py | 5,809 | python | en | code | 0 | github-code | 6 |
35841585660
|
# from time import sleep
import os
# from reply1024 import postreply1024
# import time
from datetime import datetime, timedelta
tday = datetime.now()+timedelta(hours = 8)
print(tday.hour)
tday.strftime("%Y-%m-%d %H:%M:%S")
print(tday)
if os.path.isdir("tmp") == 0:
    os.mkdir("tmp")
if not os.path.isfile("./tmp/test.txt"):
    # initialise the counter file so the "r+" open below does not fail on first run
    with open("./tmp/test.txt", "w") as f:
        f.write("0")
with open("./tmp/test.txt", "r+") as f:
    # f.write(f"{tday} successfully appended a line\n")
    con = f.read()
    a = 60
    f.seek(0)
    f.truncate()
    f.write(f"{int(con)+a}")
    # print(f"Is the file closed: {f.closed}")
with open("./tmp/test.txt") as f:
    con = f.read()
    # print(f"Is the file closed: {f.closed}")
print("File contents:", con)
| Mmingdev/reply-1024 | test.py | test.py | py | 630 | python | en | code | 0 | github-code | 6 |
8855989929
|
# Import Pillow
from PIL import Image
import glob
# Import pandas
import pandas as pd
import csv
# TIME
import time
import datetime
from time import gmtime, strftime
# Import pytesseract
import pytesseract
import os
path = "./output/media"
for root,dirs,files in os.walk(path):
for infile in [f for f in files if f.lower().endswith('.jpg')]:
file, ext = os.path.splitext(infile)
full_path = os.path.join(root,infile)
a = root[15:]
b = full_path[full_path.rfind("/")+1:]
print ("-------------------------------------------------------")
try:
img = Image.open(full_path)
texto = pytesseract.image_to_string(img)
            if len(texto) == 0:
c = 'none'
else:
txt = texto.replace("\n"," ")
c = txt
row = [a,b,c]
except:
print ("Lo siento, no es una imagen legible")
c = 'No legible'
row = [a,b,c]
with open('./output/media/data_ocr.csv', 'a') as csvFile:
writer = csv.writer(csvFile)
print (row)
writer.writerow(row)
csvFile.close()
df = pd.read_csv("./output/media/data_ocr.csv", sep=',')
print (df)
| bisite/Telegram-History-dump | telegram/img_ocr.py | img_ocr.py | py | 1,331 | python | en | code | 0 | github-code | 6 |
29790002949
|
users={}
name = input("What is your Name : ")
age = input("What is your age : ")
fav_movies = input("Enter your fav movies separated by , ").split(',')
fav_songs = input("Enter your fav songs separated by , ").split(',')
users['name']=name
users['age']=age
users['fav_movies']=fav_movies
users['fav_songs']=fav_songs
print(users)
#To print in different lines
for key, value in users.items():
print(f'{key} : {value}')
| chiragkuk/Learningpython | Chapter7/exercise2.py | exercise2.py | py | 424 | python | en | code | 1 | github-code | 6 |
28026928602
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 22 11:31:56 2020
@author: admin
"""
from __future__ import print_function
import os, sys, time, argparse
from sensapex import SensapexDevice, UMP, UMPError
parser = argparse.ArgumentParser(
description="Test for sensapex devices; prints position and status updates continuously.")
parser.add_argument('--group', type=int, default=0, help="Device group number")
args = parser.parse_args()
ump = UMP.get_ump(group=args.group)
devids = ump.list_devices()
devs = {i:SensapexDevice(i) for i in devids}
print("SDK version:", ump.sdk_version())
print("Found device IDs:", devids)
def print_pos(timeout=None):
line = ""
for i in devids:
dev = devs[i]
try:
pos = str(dev.get_pos(timeout=timeout))
except Exception as err:
pos = str(err.args[0])
pos = pos + " " * (30 - len(pos))
line += "%d: %s" % (i, pos)
print(line)
t = time.time()
while True:
t1 = time.time()
dt = t1 - t
t = t1
line = "%3f" % dt
for id in sorted(list(devs.keys())):
line += " %d: %s busy: %s" % (id, devs[id].get_pos(timeout=0), devs[id].is_busy())
line += " \r"
print(line, end=" ")
sys.stdout.flush()
time.sleep(0.01)
| bsbrl/Amey_microinjection | Sensapex_Manipulator/test.py | test.py | py | 1,293 | python | en | code | 0 | github-code | 6 |
35327101756
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 14 15:00:34 2017
@author: rsotoc
"""
import numpy as np
import pandas as pd
import time
import re
import nltk
from nltk.corpus import stopwords
from nltk.util import ngrams
from sklearn.model_selection import train_test_split
from bs4 import BeautifulSoup
from sklearn.naive_bayes import MultinomialNB
def document_features_ngrams(document, global_features):
features = [0] * len(global_features)
nd = len(document)
for elem in document:
if elem in global_features:
tf = document.freq(elem) / nd
index, idf = global_features[elem]
if ( idf > 0 ):
features[index] = tf * idf
return features
# ------------------------------------------------------------------
movies_reviews = pd.read_csv("Movies Reviews/labeledTrainData.tsv", sep='\t')
# Clean the documents. Keep only (alphabetic) words and convert to lowercase
movies_reviews.review = list(map(lambda row: re.sub("[^a-zA-Z]", " ",
BeautifulSoup(row, "lxml").get_text().lower()),
movies_reviews.review))
# Add a column converting each review into a list of words
# Without removing stop words
movies_reviews["words"] = list(map(lambda row: row.split(), movies_reviews.review))
corpus_len = len(movies_reviews.words)
# Add a column converting each review into a list of words
# Stop words are removed
stop_words = set(stopwords.words("english"))
most_common_words = nltk.FreqDist(w for wl in movies_reviews.words for w in wl)
#my_stop_words = [ w for (w,f) in most_common_words.most_common(15)]
movies_reviews["words"] = list(map(lambda row: [w for w in row.split() if not w in stop_words],
movies_reviews.review))
movies_reviews["bigrams"] = list(map(lambda row: list(ngrams(row,2)),
movies_reviews.words))
# Generate an array with the classification values
Sentiments = np.array([int(x) for x in movies_reviews.sentiment])
#movies_reviews["trigrams"] = list(map(lambda row: list(ngrams(row,3)),
# movies_reviews.words))
#words_frq = nltk.FreqDist(w.lower() for wl in movies_reviews.words for w in wl
# ).most_common(4000)
bigrams_frq = nltk.FreqDist(w for wl in movies_reviews.bigrams for w in wl
).most_common(4000)
print("empezando")
start_time = time.time()
bag_idf = {}
for i, (elem, f) in zip (range(len(bigrams_frq)), bigrams_frq):
nt = 0
for row in movies_reviews.bigrams:
if elem in row:
nt += 1
bag_idf[elem] = (i, np.log(corpus_len / nt))
featuresets_bigrams = [
document_features_ngrams(nltk.FreqDist(w for w in d),
bag_idf) for d in movies_reviews["bigrams"]]
#trigrams_frq = nltk.FreqDist(w for wl in movies_reviews.trigrams for w in wl
# ).most_common(4000)
#featuresets_words = [
# document_features_ngrams(d, words_frq) for d in movies_reviews["words"]]
#
#bag_dict = {}
#for i, (elem, f) in zip (range(len(bigrams_frq)), bigrams_frq):
# bag_dict[elem] = (i, float(f)/word_bag_len)
#featuresets_bigrams = [
# document_features_ngrams(nltk.FreqDist(d), bigrams_frq)
# for d in movies_reviews["bigrams"]]
#featuresets_trigrams = [
# document_features_ngrams(nltk.FreqDist(d), trigrams_frq)
# for d in movies_reviews["trigrams"]]
elapsed_time = time.time() - start_time
#for i in range(100):
# print(sum(x > 0 for x in featuresets_bigrams[i]))
bigrams_train, bigrams_test, biy_train, biy_test = train_test_split(
featuresets_bigrams, Sentiments, test_size=0.1)
# Training of a multinomial naive Bayes classifier
clfM = MultinomialNB()
clfM.fit(bigrams_train, biy_train)
print(elapsed_time)
# Tests of the classifier
predictions_train = clfM.predict(bigrams_train)
fails_train = sum(biy_train != predictions_train)
print("Puntos mal clasificados en el conjunto de entrenamiento: {} de {} ({}%)\n"
.format(fails_train, len(bigrams_train), 100*fails_train/len(bigrams_train)))
predictions_test = clfM.predict(bigrams_test)
fails_test = sum(biy_test != predictions_test)
print("Puntos mal clasificados en el conjunto de prueba: {} de {} ({}%)\n"
.format(fails_test, len(bigrams_test), 100*fails_test/len(bigrams_test)))
| rsotoc/pattern-recognition | Data sets/ngrams.py | ngrams.py | py | 4,516 | python | en | code | 14 | github-code | 6 |
72143905788
|
from typing import Dict, List, Union
import csv
import os
def load_csv(
file_path: str,
delimiter: str = ',',
has_header: bool = True,
try_casting: bool = True
) -> List[Dict]:
    '''
    This function loads a csv file from the given path. It accepts csv files both with and without headers.
    Args:
        file_path: (str) the path to the given csv file.
        delimiter: (str) the string delimiter between columns of the csv.
        has_header: (bool) flag to indicate if the file has a header row. [Default True]
        try_casting: (bool) flag to indicate if values should be cast to int/float when possible. [Default True]
    Output:
        Returns a List of dictionaries representing each row. The keys of each dictionary are the
        column names.
    Throws:
        - FileNotFoundError
    '''
if not os.path.exists(file_path):
        print(f'The path {file_path} does not exist!')
raise FileNotFoundError
results = []
with open(file_path, 'r') as f:
csv_reader = csv.reader(f, delimiter=delimiter)
        if has_header:
            headers = next(csv_reader)
        else:
            headers = None
        for row in csv_reader:
            if try_casting:
                mapped_row = list(map(lambda item: cast_to_num(item), row))
            else:
                mapped_row = row
            if headers is None:
                # no header row: fall back to positional column names ("0", "1", ...)
                headers = [str(i) for i in range(len(row))]
            new_row = {key: item for key, item in zip(headers, mapped_row)}
            results.append(new_row)
return results
def cast_to_num(value: str) -> Union[str, int, float]:
int_val = None
float_val = None
try:
int_val = int(value)
except ValueError:
try:
float_val = float(value)
except ValueError:
pass
if int_val is not None:
return int_val
if float_val is not None:
return float_val
return value
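# Hedged usage sketch (not part of the original module; the file name below is made up):
if __name__ == '__main__':
    with open('demo_songs.csv', 'w', newline='') as f:
        csv.writer(f).writerows([['name', 'year', 'rating'],
                                 ['Blue Train', '1957', '4.5']])
    print(load_csv('demo_songs.csv'))
    # expected: [{'name': 'Blue Train', 'year': 1957, 'rating': 4.5}]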
| levensworth/udesa-pc-tutorial | mini-proyectos/song_recommendation/text_processing.py | text_processing.py | py | 1,864 | python | en | code | 2 | github-code | 6 |
70756306748
|
__author__ = 'jacopobacchi'
import RPi.GPIO as GPIO
import time
import shlex
import subprocess
# Define Buttons
PREV = 4
NEXT = 17
PLAY = 27
REPEAT = 22
GPIO.setmode(GPIO.BCM)
GPIO.setup(PREV, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(NEXT, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(PLAY, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(REPEAT, GPIO.IN, pull_up_down=GPIO.PUD_UP)
prev_input = 0
next_input = 0
play_input = 0
repeat_input = 0
play = 1
while True:
#take a reading
input1 = GPIO.input(PREV)
#if the last reading was low and this one high, print
    if ((not prev_input) and input1):
print("Button PREV pressed")
cmd = "mpc prev"
args = shlex.split(cmd)
output = subprocess.Popen(args)
#update previous input
prev_input = input1
time.sleep(0.05)
#take a reading
input2 = GPIO.input(NEXT)
#if the last reading was low and this one high, print
    if ((not next_input) and input2):
print("Button NEXT pressed")
cmd = "mpc next"
args = shlex.split(cmd)
output = subprocess.Popen(args)
#update previous input
next_input = input2
time.sleep(0.05)
#take a reading
input3 = GPIO.input(PLAY)
#if the last reading was low and this one high, print
    if ((not play_input) and input3):
print("Button PLAY pressed")
print(play)
if (play == 1 ):
cmd = "mpc play"
args = shlex.split(cmd)
output = subprocess.Popen(args)
play = 0
else:
cmd = "mpc pause"
args = shlex.split(cmd)
output = subprocess.Popen(args)
play = 1
#update previous input
play_input = input3
time.sleep(0.05)
#take a reading
input4 = GPIO.input(REPEAT)
    if ((not repeat_input) and input4):
print("Button REPEAT pressed")
cmd = "mpc repeat"
args = shlex.split(cmd)
output = subprocess.Popen(args)
#update previous input
repeat_input = input4
#slight pause to debounce
time.sleep(0.05)
| jacopobac/PiPod | buttonsControl.py | buttonsControl.py | py | 1,926 | python | en | code | 0 | github-code | 6 |
3035685345
|
# find the minimum length of the subarray whose values sum
# to a value greater than or equal to a target. assume all
# array entries are positive
import numpy as np
vec = np.array([2,3,1,2,4,3])
target = 7
L=0
R=0
min_length = np.inf
cur_length = np.inf
cur_sum = 0
for R,n in enumerate(vec):
cur_sum += n
while cur_sum>=target:
cur_length = R-L+1
cur_sum -= vec[L]
L += 1
if (cur_length<min_length):
min_length=cur_length
if (min_length==np.inf):
print(0)
else:
print(min_length)
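# Hedged sketch (not in the original file): the same two-pointer idea wrapped in a
# reusable function, so it can be run against other arrays and targets.
def min_subarray_len(target, nums):
    left = 0
    cur_sum = 0
    best = np.inf
    for right, n in enumerate(nums):
        cur_sum += n
        while cur_sum >= target:              # shrink from the left while the window still meets the target
            best = min(best, right - left + 1)
            cur_sum -= nums[left]
            left += 1
    return 0 if best == np.inf else best

print(min_subarray_len(7, [2, 3, 1, 2, 4, 3]))   # 2, e.g. the subarray [4, 3]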
| estimatrixPipiatrix/decision-scientist | key_algos/moving_window.py | moving_window.py | py | 537 | python | en | code | 0 | github-code | 6 |
18200259066
|
''' . Isomorphic Strings
All occurrences of a character must be replaced with another character while preserving the order of characters. No two characters may map to the same character,
but a character may map to itself.
'''
# def reduce(s):
# result = []
# lookup = {}
# for i, c in enumerate(s):
# if c in lookup:
# result[lookup[c]].append(i)
# else:
# result.append([i])
# lookup[c] = len(result)-1
# return result
# class Solution(object):
# def isIsomorphic(self, s, t):
# return reduce(s) == reduce(t)
def isIsomorphic(s: str, t: str, second: bool = False) -> bool:
    morph = {}  # character map
    # just checking if morphs are all identical
    for i, first in enumerate(s):
        if first in morph:
            if t[i] != morph[first]:
                return False
        else:
            morph[first] = t[i]
    # check the other way round if it's not the second check
    return True if second else isIsomorphic(t, s, True)
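# Hedged usage sketch (not part of the original file):
print(isIsomorphic("egg", "add"))    # True:  e->a, g->d
print(isIsomorphic("foo", "bar"))    # False: 'o' would have to map to both 'a' and 'r'
print(isIsomorphic("badc", "baba"))  # False: 'b' and 'd' would both map to 'b'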
| rashmi-fit/100-daysOf-Python_challenge | Isomorphic_Strings.py | Isomorphic_Strings.py | py | 1,110 | python | en | code | 2 | github-code | 6 |
11812609282
|
"""empty message
Revision ID: 08084a992d8b
Revises:
Create Date: 2018-03-23 09:28:07.017990
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '08084a992d8b'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(), nullable=False),
sa.Column('email', sa.String(), nullable=False),
sa.Column('password', sa.String(), nullable=False),
sa.Column('hash_key', sa.String(), nullable=False),
sa.Column('activate', sa.String(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('email'),
sa.UniqueConstraint('hash_key'),
sa.UniqueConstraint('username')
)
op.create_table('businesses',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(), nullable=True),
sa.Column('logo', sa.String(), nullable=True),
sa.Column('location', sa.String(), nullable=True),
sa.Column('category', sa.String(), nullable=True),
sa.Column('bio', sa.String(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_businesses_category'), 'businesses', ['category'], unique=False)
op.create_index(op.f('ix_businesses_location'), 'businesses', ['location'], unique=False)
op.create_index(op.f('ix_businesses_name'), 'businesses', ['name'], unique=False)
op.create_table('reviews',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('title', sa.String(), nullable=True),
sa.Column('desc', sa.String(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('business_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['business_id'], ['businesses.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('reviews')
op.drop_index(op.f('ix_businesses_name'), table_name='businesses')
op.drop_index(op.f('ix_businesses_location'), table_name='businesses')
op.drop_index(op.f('ix_businesses_category'), table_name='businesses')
op.drop_table('businesses')
op.drop_table('users')
# ### end Alembic commands ###
| victorjambo/WeConnect | migrations/versions/08084a992d8b_.py | 08084a992d8b_.py | py | 2,922 | python | en | code | 2 | github-code | 6 |
31066666255
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.loginView, name='login'),
path('register/', views.registerView, name='register'),
path('logout/', views.logoutView, name='logout'),
path('akun/', views.update_akunView, name='update_akun'),
path('register/berhasil', views.berhasilView.as_view(aksi='register_berhasil'), name='register_berhasil'),
path('akun/berhasil', views.berhasilView.as_view(aksi='update_akun_berhasil'), name='update_akun_berhasil'),
]
| mugiwara35/smart-plant | akun/urls.py | urls.py | py | 512 | python | en | code | 0 | github-code | 6 |
25022233496
|
import sys
sys.stdin = open('input.txt')
T = int(input())
for t in range(T):
N, M, L = map(int, input().split())
tree = [0] * (N+1)
for i in range(M):
node, val = map(int, input().split())
tree[node] = val
for i in range(N, 0, -1):
if tree[i] == 0 and 2*i + 1 < N+1:
tree[i] = tree[2*i] + tree[2 * i + 1]
elif tree[i] == 0:
tree[i] = tree[2*i]
print(f'#{t+1}', tree[L])
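# Hedged sketch of the input.txt layout this script expects (values are made up):
#
#   1          <- T, number of test cases
#   5 3 2      <- N nodes, M leaves listed, L = node whose subtree sum is printed
#   4 1        <- node 4 holds value 1
#   5 2        <- node 5 holds value 2
#   3 3        <- node 3 holds value 3
#
# With that input the bottom-up pass gives tree[2] = tree[4] + tree[5] = 3,
# so the script prints "#1 3".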
| pepper999/TIL | algorithm_lecture/05_tree_Jiheon/5178_sumofnodes/sol.py | sol.py | py | 446 | python | en | code | 2 | github-code | 6 |
42934168844
|
from pyherc.ports import set_action_factory
from pyherc.data import Model
from pyherc.test.builders import ActionFactoryBuilder
from pyherc.test.cutesy import (Arrows, Bow, Club, Dagger, LeatherArmour,
PlateMail, Rune, ScaleMail, Sword, Warhammer,
LightBoots, HeavyBoots, IronBoots, SpeedBoots,
FlyingBoots)
def default_context(fn):
"""
Decorator to set up context
.. versionadded:: 0.8
"""
def context_setup(*args, **kwargs):
"""
Set up context
"""
context = args[0]
if not hasattr(context, 'model'):
context.model = Model()
if not hasattr(context, 'items'):
context.items = []
if not hasattr(context, 'characters'):
context.characters = []
if not hasattr(context, 'places'):
context.places = []
return fn(*args, **kwargs)
return context_setup
def with_action_factory(fn):
"""
Decorator to inject action factory
.. versionadded:: 0.8
"""
def action_factorize(*args, **kwargs):
"""
Inject action factory
"""
context = args[0]
if not hasattr(context, 'action_factory'):
context.action_factory = (ActionFactoryBuilder()
.with_drink_factory()
.with_inventory_factory()
.build())
set_action_factory(context.action_factory)
return fn(*args, **kwargs)
return action_factorize
def armour_list(fn):
"""
Decorator to set up armour list
.. versionadded:: 0.8
"""
def armour_setup(*args, **kwargs):
"""
Set up armour list
"""
context = args[0]
if not hasattr(context, 'armour_list'):
context.armour_list = {}
context.armour_list['leather armour'] = LeatherArmour
context.armour_list['scale mail'] = ScaleMail
context.armour_list['plate mail'] = PlateMail
return fn(*args, **kwargs)
return armour_setup
def boots_list(fn):
"""
Decorator to set up boots list
"""
def boots_setup(*args, **kwargs):
"""
Setup boots list
"""
context = args[0]
if not hasattr(context, 'boots_list'):
context.boots_list = {}
context.boots_list['light boots'] = LightBoots
context.boots_list['heavy boots'] = HeavyBoots
context.boots_list['iron boots'] = IronBoots
context.boots_list['speed boots'] = SpeedBoots
context.boots_list['flying boots'] = FlyingBoots
return fn(*args, **kwargs)
return boots_setup
def weapon_list(fn):
"""
Decorator to set up weapon list
.. versionadded:: 0.8
"""
def weapon_setup(*args, **kwargs):
"""
Setup weapon list
"""
context = args[0]
if not hasattr(context, 'weapon_list'):
context.weapon_list = {}
context.weapon_list['warhammer'] = Warhammer
context.weapon_list['sword'] = Sword
context.weapon_list['dagger'] = Dagger
context.weapon_list['club'] = Club
context.weapon_list['bow'] = Bow
context.weapon_list['arrows'] = Arrows
return fn(*args, **kwargs)
return weapon_setup
def misc_item_list(fn):
"""
Decorator to set up misc items list
.. versionadded:: 0.10
"""
def misc_item_setup(*args, **kwargs):
"""
Setup misc items list
"""
context = args[0]
if not hasattr(context, 'misc_item_list'):
context.misc_item_list = {}
context.misc_item_list['rune'] = Rune
return fn(*args, **kwargs)
return misc_item_setup
def get_character(context, character_name):
"""
Get character from context
:param context: context
:param character_name: name of character
:type character_name: string
.. versionadded:: 0.8
"""
characters = [x for x in context.characters
if x.name == character_name]
return characters[0]
def get_location(context, location_name):
"""
Get location from context
:param context: context
:param location_name: name of location
:type location_name: string
.. versionadded:: 0.8
"""
locations = [x for x in context.places
if ((hasattr(x, 'name') and x.name == location_name)
or (hasattr(x, 'keys') and x['name'] == location_name))]
return locations[0]
def get_item(context, item_name):
"""
Get item from context
:param context: context
:param item_name: name of item
:type item_name: string
.. versionadded:: 0.8
"""
items = [x for x in context.items
if x.name == item_name]
return items[0]
def get_entity(context, entity_name):
"""
Get entity from context
:param context: context
:param entity_name: name of entity
:type entity_name: string
.. versionadded:: 0.8
"""
entities = []
entities.extend(context.characters)
entities.extend(context.items)
entity = [x for x in entities
if x.name == entity_name]
if entity:
return entity[0]
else:
return get_location(context, entity_name)
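# Hedged usage sketch (not part of the original module): how a behave-style step
# implementation might combine these decorators; the step body below is made up.
#
#   @default_context
#   @armour_list
#   def impl_give_armour(context, character_name, armour_name):
#       armour_class = context.armour_list[armour_name]
#       character = get_character(context, character_name)
#       ...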
| tuturto/pyherc | src/pyherc/test/bdd/features/helpers/context.py | context.py | py | 5,476 | python | en | code | 43 | github-code | 6 |
33250585794
|
#Dependencies
import os
import csv
csvpath = os.path.join('..', 'Resources', 'election_data.csv')
# Open the file using "write" mode. Specify the variable to hold the contents
with open(csvpath, newline='') as csvfile:
#set variable
vote_count= []
candidatelist = []
unique_candidate= []
vote_percent= []
count=0
# Initialize csv.writer
csvreader = csv.reader(csvfile, delimiter=',')
csv_header = next(csvreader)
print(csvreader)
for row in csvreader:
vote_count.append(row[1])
candidatelist.append(row[2])
count= count + 1
    for x in set(candidatelist):
        unique_candidate.append(x)
        y = candidatelist.count(x)
        a = (y / count) * 100
        vote_percent.append(a)

    print("Election Results")
    print("-------------------")
    print(f"Total Votes: {count}")
    for i in range(len(unique_candidate)):
        print(unique_candidate[i])
        print(str(vote_percent[i]) + "%")

    text_file = open("main.txt", "w", newline='')
    text_file.write("Election Results\n")
    text_file.write("--------------------\n")
    text_file.write(f"Total Votes: {count}\n")
    for i in range(len(unique_candidate)):
        text_file.write(unique_candidate[i] + "\n")
        text_file.write(str(vote_percent[i]) + "%\n")
    text_file.close()
| beau-nguyen/Python_Challenge | PyPoll/Solved/main.py | main.py | py | 1,385 | python | en | code | 0 | github-code | 6 |
39914923603
|
import cv2
import pickle
import numpy as np
import random
import threading
import warnings
from ..utils.image import read_image_bgr
import numpy as np
from PIL import Image
from six import raise_from
import csv
import sys
import os.path
import keras
from ..utils.anchors import (
anchor_targets_bbox_centers,
anchor_targets_bbox,
anchors_for_shape,
guess_shapes
)
from ..utils.image import (
TransformParameters,
adjust_transform_for_image,
apply_transform,
preprocess_image,
resize_image,
)
from ..utils.transform import transform_aabb, random_transform_generator
class Centers_Generator(object):
def __init__(
self,
pairs,
BCS_path,
BoxCars_dataset,
BoxCars_images,
BCS_sessions=range(4),
no_centers=False,
fake_centers=False,
split_exclusion_function=None,
batch_size=1,
group_method='random', # one of 'none', 'random', 'ratio'
shuffle_groups=True,
image_min_side=400,
image_max_side=600,
transform_list=None,
transform_parameters=None,
compute_anchor_targets=anchor_targets_bbox_centers,
compute_shapes=guess_shapes,
preprocess_image=preprocess_image
):
""" Initialize Generator object.
Args
transform_generator : A generator used to randomly transform images and annotations.
batch_size : The size of the batches to generate.
group_method : Determines how images are grouped together (defaults to 'ratio', one of ('none', 'random', 'ratio')).
shuffle_groups : If True, shuffles the groups each epoch.
image_min_side : After resizing the minimum side of an image is equal to image_min_side.
image_max_side : If after resizing the maximum side is larger than image_max_side, scales down further so that the max side is equal to image_max_side.
transform_parameters : The transform parameters used for data augmentation.
compute_anchor_targets : Function handler for computing the targets of anchors for an image and its annotations.
compute_shapes : Function handler for computing the shapes of the pyramid for a given input.
preprocess_image : Function handler for preprocessing an image (scaling / normalizing) for passing through a network.
"""
self.image_names = []
self.image_data = {}
self.classes = {'car': 0}
# Take base_dir from annotations file if not explicitly specified.
self.labels = {}
for key, value in self.classes.items():
self.labels[value] = key
self.fake_centers = fake_centers
self.no_centers = no_centers
self.split_exclusion_function = split_exclusion_function
self.image_data = {}
self.transform_indices = []
for pair in pairs:
if BoxCars_dataset is not None:
self.dataset_name = 'BoxCars'
self.parse_BoxCars(BoxCars_dataset.format(pair), BoxCars_images.format(pair))
for i in BCS_sessions:
self.dataset_name = 'BCS'
ds_path = os.path.join(BCS_path.format(pair), 'dataset_{}.pkl'.format(i))
im_path = os.path.join(BCS_path.format(pair), 'images_{}'.format(i))
self.parse_BCS(dataset_path=ds_path, images_path=im_path)
print("Generator size: {}".format(self.size()))
# self.transform_generator = transform_generator
self.batch_size = int(batch_size)
self.group_method = group_method
self.shuffle_groups = shuffle_groups
self.image_min_side = image_min_side
self.image_max_side = image_max_side
        if transform_list is not None:
self.transform_list = transform_list
else:
self.transform_list = [
random_transform_generator(
min_translation=(-0.4, -0.4),
max_translation=(0.4, 0.4),
min_scaling=(0.9, 0.9),
max_scaling=(2.0, 2.0),
flip_x_chance=0.5
),
random_transform_generator(
min_translation=(-0.5, -0.5),
max_translation=(0.5, 0.5),
min_scaling=(0.03, 0.03),
max_scaling=(1.0, 1.0),
flip_x_chance=0.5
),
]
self.transform_parameters = transform_parameters or TransformParameters(fill_mode='constant')
if self.no_centers:
self.compute_anchor_targets = anchor_targets_bbox
else:
self.compute_anchor_targets = compute_anchor_targets
self.compute_shapes = compute_shapes
self.preprocess_image = preprocess_image
self.group_index = 0
self.lock = threading.Lock()
self.group_images()
def size(self):
""" Size of the dataset.
"""
return len(self.image_names)
def num_classes(self):
""" Number of classes in the dataset.
"""
return max(self.classes.values()) + 1
def name_to_label(self, name):
""" Map name to label.
"""
return self.classes[name]
def label_to_name(self, label):
""" Map label to name.
"""
return self.labels[label]
def image_path(self, image_index):
""" Returns the image path for image_index.
"""
return self.image_names[image_index]
def image_aspect_ratio(self, image_index):
""" Compute the aspect ratio for an image with image_index.
"""
# PIL is fast for metadata
image = Image.open(self.image_path(image_index))
return float(image.width) / float(image.height)
def load_image(self, image_index):
""" Load an image at the image_index.
"""
return read_image_bgr(self.image_path(image_index))
def load_annotations(self, image_index):
""" Load annotations for an image_index.
"""
path = self.image_names[image_index]
annots = self.image_data[path]
if self.no_centers:
boxes = np.zeros((len(annots), 5))
else:
boxes = np.zeros((len(annots), 6))
for idx, annot in enumerate(annots):
class_name = annot['class']
boxes[idx, 0] = float(annot['x1'])
boxes[idx, 1] = float(annot['y1'])
boxes[idx, 2] = float(annot['x2'])
boxes[idx, 3] = float(annot['y2'])
boxes[idx, 4] = self.name_to_label(class_name)
if not self.no_centers:
boxes[idx, 5] = float(annot['c'])
return boxes
def load_transform_indices(self, group):
return [self.transform_indices[index] for index in group]
def load_annotations_group(self, group):
""" Load annotations for all images in group.
"""
return [self.load_annotations(image_index) for image_index in group]
def filter_annotations(self, image_group, annotations_group, group):
""" Filter annotations by removing those that are outside of the image bounds or whose width/height < 0.
"""
# test all annotations
for index, (image, annotations) in enumerate(zip(image_group, annotations_group)):
assert (isinstance(annotations,
np.ndarray)), '\'load_annotations\' should return a list of numpy arrays, received: {}'.format(
type(annotations))
# test x2 < x1 | y2 < y1 | x1 < 0 | y1 < 0 | x2 <= 0 | y2 <= 0 | x2 >= image.shape[1] | y2 >= image.shape[0]
invalid_indices = np.where(
(annotations[:, 2] <= annotations[:, 0]) |
(annotations[:, 3] <= annotations[:, 1]) |
(annotations[:, 0] < 0) |
(annotations[:, 1] < 0) |
(annotations[:, 2] > image.shape[1]) |
(annotations[:, 3] > image.shape[0])
)[0]
# delete invalid indices
if len(invalid_indices):
# cv2.imwrite("ID_.png".format(group[index]), image)
# warnings.warn("Following warning happens in:{}".format(self.dataset_name))
# warnings.warn('Image with id {} (shape {}) contains the following invalid boxes: {}.'.format(
# group[index],
# image.shape,
# [annotations[invalid_index, :] for invalid_index in invalid_indices]
# ))
annotations_group[index] = np.delete(annotations, invalid_indices, axis=0)
return image_group, annotations_group
def load_image_group(self, group):
""" Load images for all images in a group.
"""
return [self.load_image(image_index) for image_index in group]
def random_transform_group_entry(self, image, annotations, transform_index):
""" Randomly transforms image and annotation.
"""
# randomly transform both image and annotations
transform_generator = self.transform_list[transform_index]
if transform_generator:
transform = adjust_transform_for_image(next(transform_generator), image,
self.transform_parameters.relative_translation)
image = apply_transform(transform, image, self.transform_parameters)
# Transform the bounding boxes in the annotations.
annotations = annotations.copy()
for index in range(annotations.shape[0]):
annotations[index, :4] = transform_aabb(transform, annotations[index, :4])
return image, annotations
def resize_image(self, image):
""" Resize an image using image_min_side and image_max_side.
"""
return resize_image(image, min_side=self.image_min_side, max_side=self.image_max_side)
def preprocess_group_entry(self, image, annotations, transform_index):
""" Preprocess image and its annotations.
"""
# preprocess the image
image = self.preprocess_image(image)
# randomly transform image and annotations
image, annotations = self.random_transform_group_entry(image, annotations, transform_index)
# resize image
image, image_scale = self.resize_image(image)
# apply resizing to annotations too
annotations[:, :4] *= image_scale
return image, annotations
def preprocess_group(self, image_group, annotations_group, transform_indices):
""" Preprocess each image and its annotations in its group.
"""
for index, (image, annotations, transform_index) in enumerate(
zip(image_group, annotations_group, transform_indices)):
# preprocess a single group entry
image, annotations = self.preprocess_group_entry(image, annotations, transform_index)
# copy processed data back to group
image_group[index] = image
annotations_group[index] = annotations
return image_group, annotations_group
def group_images(self):
""" Order the images according to self.order and makes groups of self.batch_size.
"""
# determine the order of the images
order = list(range(self.size()))
if self.group_method == 'random':
random.shuffle(order)
elif self.group_method == 'ratio':
order.sort(key=lambda x: self.image_aspect_ratio(x))
# divide into groups, one group = one batch
self.groups = [[order[x % len(order)] for x in range(i, i + self.batch_size)] for i in
range(0, len(order), self.batch_size)]
def compute_inputs(self, image_group):
""" Compute inputs for the network using an image_group.
"""
# get the max image shape
max_shape = tuple(max(image.shape[x] for image in image_group) for x in range(3))
# construct an image batch object
image_batch = np.zeros((self.batch_size,) + max_shape, dtype=keras.backend.floatx())
# copy all images to the upper left part of the image batch object
for image_index, image in enumerate(image_group):
image_batch[image_index, :image.shape[0], :image.shape[1], :image.shape[2]] = image
return image_batch
def generate_anchors(self, image_shape):
return anchors_for_shape(image_shape, shapes_callback=self.compute_shapes)
def compute_targets(self, image_group, annotations_group):
""" Compute target outputs for the network using images and their annotations.
"""
# get the max image shape
max_shape = tuple(max(image.shape[x] for image in image_group) for x in range(3))
anchors = self.generate_anchors(max_shape)
if self.no_centers:
labels_batch, regression_batch, _ = self.compute_anchor_targets(
anchors,
image_group,
annotations_group,
self.num_classes()
)
return [regression_batch, labels_batch]
else:
labels_batch, regression_batch, centers_batch, _ = self.compute_anchor_targets(
anchors,
image_group,
annotations_group,
self.num_classes()
)
return [regression_batch, labels_batch, centers_batch]
def compute_input_output(self, group):
""" Compute inputs and target outputs for the network.
"""
# load images and annotations
image_group = self.load_image_group(group)
annotations_group = self.load_annotations_group(group)
# check validity of annotations
image_group, annotations_group = self.filter_annotations(image_group, annotations_group, group)
# perform preprocessing steps
transform_indices = self.load_transform_indices(group)
image_group, annotations_group = self.preprocess_group(image_group, annotations_group, transform_indices)
# compute network inputs
inputs = self.compute_inputs(image_group)
# compute network targets
targets = self.compute_targets(image_group, annotations_group)
return inputs, targets
def __next__(self):
return self.next()
def next(self):
# advance the group index
with self.lock:
if self.group_index == 0 and self.shuffle_groups:
# shuffle groups at start of epoch
random.shuffle(self.groups)
group = self.groups[self.group_index]
self.group_index = (self.group_index + 1) % len(self.groups)
return self.compute_input_output(group)
def parse_BCS(self, dataset_path, images_path):
with open(dataset_path, "rb") as f:
ds = pickle.load(f, encoding='latin-1', fix_imports=True)
for i, entry in enumerate(ds):
filename = os.path.join(images_path, entry['filename'])
if self.split_exclusion_function is not None:
if self.split_exclusion_function(filename):
continue
if filename not in self.image_data:
self.image_data[filename] = []
self.image_names.append(filename)
self.transform_indices.append(0)
if self.no_centers:
for label in entry['labels']:
dict = {'x1': label['x_min'], 'x2': label['x_max'],
'y1': label['y_min'], 'y2': label['y_max'],
'class': 'car'}
self.image_data[filename].append(dict)
elif self.fake_centers:
for label in entry['labels']:
dict = {'x1': label['x_min'], 'x2': label['x_max'],
'y1': label['y_min'], 'y2': label['y_max'],
'c': 0.0, 'class': 'car'}
self.image_data[filename].append(dict)
else:
for label in entry['labels']:
dict = {'x1': label['x_min'], 'x2': label['x_max'],
'y1': label['y_min'], 'y2': label['y_max'],
'c': label['centery'], 'class': 'car'}
self.image_data[filename].append(dict)
def parse_BoxCars(self, dataset_path, images_path):
with open(dataset_path, "rb") as f:
ds = pickle.load(f, encoding='latin-1', fix_imports=True)
for sample in ds['samples']:
# to_camera = sample['to_camera']
for i_id, instance in enumerate(sample['instances']):
filename = os.path.join(images_path, instance['filename'])
if filename not in self.image_data:
self.image_data[filename] = []
self.image_names.append(filename)
self.transform_indices.append(1)
if self.no_centers:
dict = {'x1': instance['bb_out']['x_min'], 'x2': instance['bb_out']['x_max'],
'y1': instance['bb_out']['y_min'], 'y2': instance['bb_out']['y_max'],
'class': 'car'}
else:
if self.fake_centers:
centery = 0.0
else:
centery = (instance['bb_in']['y_min'] - instance['bb_out']['y_min']) / \
(instance['bb_out']['y_max'] - instance['bb_out']['y_min'])
dict = {'x1': instance['bb_out']['x_min'], 'x2': instance['bb_out']['x_max'],
'y1': instance['bb_out']['y_min'], 'y2': instance['bb_out']['y_max'],
'c': centery, 'class': 'car'}
self.image_data[filename].append(dict)
| kocurvik/retinanet_traffic_3D | keras_retinanet/preprocessing/centers_generator.py | centers_generator.py | py | 18,123 | python | en | code | 24 | github-code | 6 |
35395933423
|
import functools
import ipaddress
import re
import socket
from pathlib import Path, PurePath
from random import SystemRandom
from types import TracebackType
from typing import Any, AsyncContextManager, Awaitable, Callable, Dict
from typing import Generator, Generic, IO, Mapping, Optional, Sequence
from typing import Tuple, Type, TypeVar, Union, cast, overload
from typing_extensions import Literal, Protocol
from .constants import DEFAULT_LANG
from .constants import DISC_COMPRESSION_ERROR, DISC_CONNECTION_LOST
from .constants import DISC_HOST_KEY_NOT_VERIFIABLE, DISC_ILLEGAL_USER_NAME
from .constants import DISC_KEY_EXCHANGE_FAILED, DISC_MAC_ERROR
from .constants import DISC_NO_MORE_AUTH_METHODS_AVAILABLE
from .constants import DISC_PROTOCOL_ERROR, DISC_PROTOCOL_VERSION_NOT_SUPPORTED
from .constants import DISC_SERVICE_NOT_AVAILABLE
class _Hash(Protocol):
"""Protocol for hashing data"""
@property
def digest_size(self) -> int:
"""Return the hash digest size"""
@property
def block_size(self) -> int:
"""Return the hash block size"""
@property
def name(self) -> str:
"""Return the hash name"""
def digest(self) -> bytes:
"""Return the digest value as a bytes object"""
def hexdigest(self) -> str:
"""Return the digest value as a string of hexadecimal digits"""
def update(self, __data: bytes) -> None:
"""Update this hash object's state with the provided bytes"""
class HashType(Protocol):
"""Protocol for returning the type of a hash function"""
def __call__(self, __data: bytes = ...) -> _Hash:
"""Create a new hash object"""
class _SupportsWaitClosed(Protocol):
"""A class that supports async wait_closed"""
async def wait_closed(self) -> None:
"""Wait for transport to close"""
_T = TypeVar('_T')
DefTuple = Union[Tuple[()], _T]
MaybeAwait = Union[_T, Awaitable[_T]]
ExcInfo = Tuple[Type[BaseException], BaseException, TracebackType]
OptExcInfo = Union[ExcInfo, Tuple[None, None, None]]
BytesOrStr = Union[bytes, str]
FilePath = Union[str, PurePath]
HostPort = Tuple[str, int]
IPAddress = Union[ipaddress.IPv4Address, ipaddress.IPv6Address]
IPNetwork = Union[ipaddress.IPv4Network, ipaddress.IPv6Network]
SockAddr = Union[Tuple[str, int], Tuple[str, int, int, int]]
# Define a version of randrange which is based on SystemRandom(), so that
# we get back numbers suitable for cryptographic use.
_random = SystemRandom()
randrange = _random.randrange
_unit_pattern = re.compile(r'([A-Za-z])')
_byte_units = {'': 1, 'k': 1024, 'm': 1024*1024, 'g': 1024*1024*1024}
_time_units = {'': 1, 's': 1, 'm': 60, 'h': 60*60,
'd': 24*60*60, 'w': 7*24*60*60}
def hide_empty(value: object, prefix: str = ', ') -> str:
"""Return a string with optional prefix if value is non-empty"""
value = str(value)
return prefix + value if value else ''
def plural(length: int, label: str, suffix: str = 's') -> str:
"""Return a label with an optional plural suffix"""
return '%d %s%s' % (length, label, suffix if length != 1 else '')
def all_ints(seq: Sequence[object]) -> bool:
"""Return if a sequence contains all integers"""
return all(isinstance(i, int) for i in seq)
def get_symbol_names(symbols: Mapping[str, int], prefix: str,
strip_leading: int = 0) -> Mapping[int, str]:
"""Return a mapping from values to symbol names for logging"""
return {value: name[strip_leading:] for name, value in symbols.items()
if name.startswith(prefix)}
# Punctuation to map when creating handler names
_HANDLER_PUNCTUATION = (('@', '_at_'), ('.', '_dot_'), ('-', '_'))
def map_handler_name(name: str) -> str:
"""Map punctuation so a string can be used as a handler name"""
for old, new in _HANDLER_PUNCTUATION:
name = name.replace(old, new)
return name
def _normalize_scoped_ip(addr: str) -> str:
"""Normalize scoped IP address
The ipaddress module doesn't handle scoped addresses properly,
so we normalize scoped IP addresses using socket.getaddrinfo
before we pass them into ip_address/ip_network.
"""
try:
addrinfo = socket.getaddrinfo(addr, None, family=socket.AF_UNSPEC,
type=socket.SOCK_STREAM,
flags=socket.AI_NUMERICHOST)[0]
except socket.gaierror:
return addr
if addrinfo[0] == socket.AF_INET6:
sa = addrinfo[4]
addr = sa[0]
idx = addr.find('%')
if idx >= 0: # pragma: no cover
addr = addr[:idx]
ip = ipaddress.ip_address(addr)
if ip.is_link_local:
scope_id = cast(Tuple[str, int, int, int], sa)[3]
addr = str(ipaddress.ip_address(int(ip) | (scope_id << 96)))
return addr
def ip_address(addr: str) -> IPAddress:
"""Wrapper for ipaddress.ip_address which supports scoped addresses"""
return ipaddress.ip_address(_normalize_scoped_ip(addr))
def ip_network(addr: str) -> IPNetwork:
"""Wrapper for ipaddress.ip_network which supports scoped addresses"""
idx = addr.find('/')
if idx >= 0:
addr, mask = addr[:idx], addr[idx:]
else:
mask = ''
return ipaddress.ip_network(_normalize_scoped_ip(addr) + mask)
def open_file(filename: FilePath, mode: str, buffering: int = -1) -> IO[bytes]:
"""Open a file with home directory expansion"""
return open(Path(filename).expanduser(), mode, buffering=buffering)
@overload
def read_file(filename: FilePath) -> bytes:
"""Read from a binary file with home directory expansion"""
@overload
def read_file(filename: FilePath, mode: Literal['rb']) -> bytes:
"""Read from a binary file with home directory expansion"""
@overload
def read_file(filename: FilePath, mode: Literal['r']) -> str:
"""Read from a text file with home directory expansion"""
def read_file(filename, mode = 'rb'):
"""Read from a file with home directory expansion"""
with open_file(filename, mode) as f:
return f.read()
def write_file(filename: FilePath, data: bytes, mode: str = 'wb') -> int:
"""Write or append to a file with home directory expansion"""
with open_file(filename, mode) as f:
return f.write(data)
def _parse_units(value: str, suffixes: Mapping[str, int], label: str) -> float:
"""Parse a series of integers followed by unit suffixes"""
matches = _unit_pattern.split(value)
if matches[-1]:
matches.append('')
else:
matches.pop()
try:
return sum(float(matches[i]) * suffixes[matches[i+1].lower()]
for i in range(0, len(matches), 2))
except KeyError:
raise ValueError('Invalid ' + label) from None
def parse_byte_count(value: str) -> int:
"""Parse a byte count with optional k, m, or g suffixes"""
return int(_parse_units(value, _byte_units, 'byte count'))
def parse_time_interval(value: str) -> float:
"""Parse a time interval with optional s, m, h, d, or w suffixes"""
return _parse_units(value, _time_units, 'time interval')
_ACM = TypeVar('_ACM', bound=AsyncContextManager, covariant=True)
class _ACMWrapper(Generic[_ACM]):
"""Async context manager wrapper"""
def __init__(self, coro: Awaitable[_ACM]):
self._coro = coro
self._coro_result: Optional[_ACM] = None
def __await__(self) -> Generator[Any, None, _ACM]:
return self._coro.__await__()
async def __aenter__(self) -> _ACM:
self._coro_result = await self._coro
return await self._coro_result.__aenter__()
async def __aexit__(self, exc_type: Optional[Type[BaseException]],
exc_value: Optional[BaseException],
traceback: Optional[TracebackType]) -> Optional[bool]:
assert self._coro_result is not None
exit_result = await self._coro_result.__aexit__(
exc_type, exc_value, traceback)
self._coro_result = None
return exit_result
_ACMCoro = Callable[..., Awaitable[_ACM]]
_ACMWrapperFunc = Callable[..., _ACMWrapper[_ACM]]
def async_context_manager(coro: _ACMCoro[_ACM]) -> _ACMWrapperFunc[_ACM]:
"""Decorator for functions returning asynchronous context managers
This decorator can be used on functions which return objects
intended to be async context managers. The object returned by
the function should implement __aenter__ and __aexit__ methods
to run when the async context is entered and exited.
This wrapper also allows the use of "await" on the function being
decorated, to return the context manager without entering it.
"""
@functools.wraps(coro)
def context_wrapper(*args, **kwargs) -> _ACMWrapper[_ACM]:
"""Return an async context manager wrapper for this coroutine"""
return _ACMWrapper(coro(*args, **kwargs))
return context_wrapper
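# Illustrative sketch (editorial, not part of the library): a hypothetical
# coroutine decorated with async_context_manager. 'open_session' and
# 'Session' are made-up names; Session is assumed to implement
# __aenter__/__aexit__.
#
#     @async_context_manager
#     async def open_session(host):
#         return Session(host)
#
#     async with open_session('example.com') as session:   # enter directly
#         ...
#
#     session = await open_session('example.com')   # or await it to get the
#                                                    # manager without entering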
async def maybe_wait_closed(writer: '_SupportsWaitClosed') -> None:
"""Wait for a StreamWriter to close, if Python version supports it
Python 3.8 triggers a false error report about garbage collecting
an open stream if a close is in progress when a StreamWriter is
garbage collected. This can be avoided by calling wait_closed(),
but that method is not available in Python releases prior to 3.7.
This function wraps this call, ignoring the error if the method
is not available.
"""
try:
await writer.wait_closed()
except AttributeError: # pragma: no cover
pass
class Options:
"""Container for configuration options"""
kwargs: Dict[str, object]
def __init__(self, options: Optional['Options'] = None, **kwargs: object):
if options:
if not isinstance(options, type(self)):
raise TypeError('Invalid %s, got %s' %
(type(self).__name__, type(options).__name__))
self.kwargs = options.kwargs.copy()
else:
self.kwargs = {}
self.kwargs.update(kwargs)
self.prepare(**self.kwargs)
def prepare(self, **kwargs: object) -> None:
"""Pre-process configuration options"""
def update(self, kwargs: Dict[str, object]) -> None:
"""Update options based on keyword parameters passed in"""
self.kwargs.update(kwargs)
self.prepare(**self.kwargs)
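# Illustrative sketch (editorial): a hypothetical Options subclass which
# overrides prepare() to post-process its keyword arguments; the 'retries'
# option is made up for this example.
#
#     class _MyOptions(Options):
#         def prepare(self, retries: int = 3, **kwargs: object) -> None:
#             self.retries = retries
#
#     opts = _MyOptions(retries=5)      # opts.retries == 5
#     opts.update({'retries': 7})       # re-runs prepare(); opts.retries == 7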
class _RecordMeta(type):
"""Metaclass for general-purpose record type"""
def __new__(mcs: Type['_RecordMeta'], name: str, bases: Tuple[type, ...],
ns: Dict[str, object]) -> '_RecordMeta':
if name != 'Record':
fields = cast(Mapping[str, str],
ns.get('__annotations__', {})).keys()
defaults = {k: ns.get(k) for k in fields}
ns = {k: v for k, v in ns.items() if k not in fields}
ns['__slots__'] = defaults
return cast(_RecordMeta, super().__new__(mcs, name, bases, ns))
class Record(metaclass=_RecordMeta):
"""Generic Record class"""
__slots__: Mapping[str, object] = {}
def __init__(self, *args: object, **kwargs: object):
for k, v in self.__slots__.items():
setattr(self, k, v)
for k, v in zip(self.__slots__, args):
setattr(self, k, v)
for k, v in kwargs.items():
setattr(self, k, v)
def __repr__(self) -> str:
return '%s(%s)' % (type(self).__name__,
', '.join('%s=%r' % (k, getattr(self, k))
for k in self.__slots__))
def __str__(self) -> str:
values = ((k, self._format(k, getattr(self, k)))
for k in self.__slots__)
return ', '.join('%s: %s' % (k, v) for k, v in values if v is not None)
def _format(self, k: str, v: object) -> Optional[str]:
"""Format a field as a string"""
# pylint: disable=no-self-use,unused-argument
return str(v)
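# Illustrative sketch (editorial): a hypothetical Record subclass; the field
# names are made up. The metaclass turns the annotated class attributes into
# __slots__ with the given defaults.
#
#     class _Point(Record):
#         x: int = 0
#         y: int = 0
#
#     p = _Point(1, y=2)
#     repr(p)    # "_Point(x=1, y=2)"
#     str(p)     # "x: 1, y: 2"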
class Error(Exception):
"""General SSH error"""
def __init__(self, code: int, reason: str, lang: str = DEFAULT_LANG):
super().__init__(reason)
self.code = code
self.reason = reason
self.lang = lang
class DisconnectError(Error):
"""SSH disconnect error
This exception is raised when a serious error occurs which causes
the SSH connection to be disconnected. Exception codes should be
taken from :ref:`disconnect reason codes <DisconnectReasons>`.
See below for exception subclasses tied to specific disconnect
reasons if you want to customize your handling by reason.
:param code:
Disconnect reason, taken from :ref:`disconnect reason
codes <DisconnectReasons>`
:param reason:
A human-readable reason for the disconnect
:param lang: (optional)
The language the reason is in
:type code: `int`
:type reason: `str`
:type lang: `str`
"""
class CompressionError(DisconnectError):
"""SSH compression error
This exception is raised when an error occurs while compressing
or decompressing data sent on the SSH connection.
:param reason:
Details about the compression error
:param lang: (optional)
The language the reason is in
:type reason: `str`
:type lang: `str`
"""
def __init__(self, reason: str, lang: str = DEFAULT_LANG):
super().__init__(DISC_COMPRESSION_ERROR, reason, lang)
class ConnectionLost(DisconnectError):
"""SSH connection lost
This exception is raised when the SSH connection to the remote
system is unexpectedly lost. It can also occur as a result of
the remote system failing to respond to keepalive messages or
as a result of a login timeout, when those features are enabled.
:param reason:
Details about the connection failure
:param lang: (optional)
The language the reason is in
:type reason: `str`
:type lang: `str`
"""
def __init__(self, reason: str, lang: str = DEFAULT_LANG):
super().__init__(DISC_CONNECTION_LOST, reason, lang)
class HostKeyNotVerifiable(DisconnectError):
"""SSH host key not verifiable
This exception is raised when the SSH server's host key or
certificate is not verifiable.
:param reason:
Details about the host key verification failure
:param lang: (optional)
The language the reason is in
:type reason: `str`
:type lang: `str`
"""
def __init__(self, reason: str, lang: str = DEFAULT_LANG):
super().__init__(DISC_HOST_KEY_NOT_VERIFIABLE, reason, lang)
class IllegalUserName(DisconnectError):
"""SSH illegal user name
This exception is raised when an error occurs while processing
    the username sent during the SSH handshake.
:param reason:
Details about the illegal username
:param lang: (optional)
The language the reason is in
:type reason: `str`
:type lang: `str`
"""
def __init__(self, reason: str, lang: str = DEFAULT_LANG):
super().__init__(DISC_ILLEGAL_USER_NAME, reason, lang)
class KeyExchangeFailed(DisconnectError):
"""SSH key exchange failed
This exception is raised when the SSH key exchange fails.
:param reason:
Details about the connection failure
:param lang: (optional)
The language the reason is in
:type reason: `str`
:type lang: `str`
"""
def __init__(self, reason: str, lang: str = DEFAULT_LANG):
super().__init__(DISC_KEY_EXCHANGE_FAILED, reason, lang)
class MACError(DisconnectError):
"""SSH MAC error
This exception is raised when an error occurs while processing
the message authentication code (MAC) of a message on the SSH
connection.
:param reason:
Details about the MAC error
:param lang: (optional)
The language the reason is in
:type reason: `str`
:type lang: `str`
"""
def __init__(self, reason: str, lang: str = DEFAULT_LANG):
super().__init__(DISC_MAC_ERROR, reason, lang)
class PermissionDenied(DisconnectError):
"""SSH permission denied
This exception is raised when there are no authentication methods
remaining to complete SSH client authentication.
:param reason:
Details about the SSH protocol error detected
:param lang: (optional)
The language the reason is in
:type reason: `str`
:type lang: `str`
"""
def __init__(self, reason: str, lang: str = DEFAULT_LANG):
super().__init__(DISC_NO_MORE_AUTH_METHODS_AVAILABLE, reason, lang)
class ProtocolError(DisconnectError):
"""SSH protocol error
This exception is raised when the SSH connection is disconnected
due to an SSH protocol error being detected.
:param reason:
Details about the SSH protocol error detected
:param lang: (optional)
The language the reason is in
:type reason: `str`
:type lang: `str`
"""
def __init__(self, reason: str, lang: str = DEFAULT_LANG):
super().__init__(DISC_PROTOCOL_ERROR, reason, lang)
class ProtocolNotSupported(DisconnectError):
"""SSH protocol not supported
This exception is raised when the remote system sends an SSH
protocol version which is not supported.
:param reason:
Details about the unsupported SSH protocol version
:param lang: (optional)
The language the reason is in
:type reason: `str`
:type lang: `str`
"""
def __init__(self, reason: str, lang: str = DEFAULT_LANG):
super().__init__(DISC_PROTOCOL_ERROR, reason, lang)
class ServiceNotAvailable(DisconnectError):
"""SSH service not available
This exception is raised when an unexpected service name is
received during the SSH handshake.
:param reason:
Details about the unexpected SSH service
:param lang: (optional)
The language the reason is in
:type reason: `str`
:type lang: `str`
"""
def __init__(self, reason: str, lang: str = DEFAULT_LANG):
super().__init__(DISC_SERVICE_NOT_AVAILABLE, reason, lang)
class ChannelOpenError(Error):
"""SSH channel open error
This exception is raised by connection handlers to report
channel open failures.
:param code:
Channel open failure reason, taken from :ref:`channel open
failure reason codes <ChannelOpenFailureReasons>`
:param reason:
A human-readable reason for the channel open failure
:param lang:
The language the reason is in
:type code: `int`
:type reason: `str`
:type lang: `str`
"""
class ChannelListenError(Exception):
"""SSH channel listen error
This exception is raised to report failures in setting up
remote SSH connection listeners.
:param details:
Details of the listen failure
:type details: `str`
"""
class PasswordChangeRequired(Exception):
"""SSH password change required
This exception is raised during password validation on the
server to indicate that a password change is required. It
should be raised when the password provided is valid but
expired, to trigger the client to provide a new password.
:param prompt:
The prompt requesting that the user enter a new password
:param lang:
The language that the prompt is in
:type prompt: `str`
:type lang: `str`
"""
def __init__(self, prompt: str, lang: str = DEFAULT_LANG):
super().__init__('Password change required: %s' % prompt)
self.prompt = prompt
self.lang = lang
class BreakReceived(Exception):
"""SSH break request received
This exception is raised on an SSH server stdin stream when the
client sends a break on the channel.
:param msec:
The duration of the break in milliseconds
:type msec: `int`
"""
def __init__(self, msec: int):
super().__init__('Break for %s msec' % msec)
self.msec = msec
class SignalReceived(Exception):
"""SSH signal request received
This exception is raised on an SSH server stdin stream when the
client sends a signal on the channel.
:param signal:
The name of the signal sent by the client
:type signal: `str`
"""
def __init__(self, signal: str):
super().__init__('Signal: %s' % signal)
self.signal = signal
class SoftEOFReceived(Exception):
"""SSH soft EOF request received
This exception is raised on an SSH server stdin stream when the
client sends an EOF from within the line editor on the channel.
"""
def __init__(self) -> None:
super().__init__('Soft EOF')
class TerminalSizeChanged(Exception):
"""SSH terminal size change notification received
This exception is raised on an SSH server stdin stream when the
client sends a terminal size change on the channel.
:param width:
The new terminal width
:param height:
The new terminal height
:param pixwidth:
The new terminal width in pixels
:param pixheight:
The new terminal height in pixels
:type width: `int`
:type height: `int`
:type pixwidth: `int`
:type pixheight: `int`
"""
def __init__(self, width: int, height: int, pixwidth: int, pixheight: int):
super().__init__('Terminal size change: (%s, %s, %s, %s)' %
(width, height, pixwidth, pixheight))
self.width = width
self.height = height
self.pixwidth = pixwidth
self.pixheight = pixheight
_disc_error_map = {
DISC_PROTOCOL_ERROR: ProtocolError,
DISC_KEY_EXCHANGE_FAILED: KeyExchangeFailed,
DISC_MAC_ERROR: MACError,
DISC_COMPRESSION_ERROR: CompressionError,
DISC_SERVICE_NOT_AVAILABLE: ServiceNotAvailable,
DISC_PROTOCOL_VERSION_NOT_SUPPORTED: ProtocolNotSupported,
DISC_HOST_KEY_NOT_VERIFIABLE: HostKeyNotVerifiable,
DISC_CONNECTION_LOST: ConnectionLost,
DISC_NO_MORE_AUTH_METHODS_AVAILABLE: PermissionDenied,
DISC_ILLEGAL_USER_NAME: IllegalUserName
}
def construct_disc_error(code: int, reason: str, lang: str) -> DisconnectError:
"""Map disconnect error code to appropriate DisconnectError exception"""
try:
return _disc_error_map[code](reason, lang)
except KeyError:
return DisconnectError(code, '%s (error %d)' % (reason, code), lang)
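# Illustrative note (editorial): construct_disc_error() picks the subclass
# registered for the reason code and falls back to a plain DisconnectError
# for unknown codes, e.g.
#
#     construct_disc_error(DISC_MAC_ERROR, 'MAC mismatch', DEFAULT_LANG)
#         # -> MACError('MAC mismatch')
#     construct_disc_error(9999, 'odd failure', DEFAULT_LANG)
#         # -> DisconnectError(9999, 'odd failure (error 9999)')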
|
ronf/asyncssh
|
asyncssh/misc.py
|
misc.py
|
py
| 22,888 |
python
|
en
|
code
| 1,408 |
github-code
|
6
|
35841499580
|
from .gameunit import GameUnit
class Explosion(GameUnit):
def __init__(self):
super().__init__()
self.health = 1
def expand_calc(self):
factor = 1 + 0.01*(self.factor - self.health)
self.size[0] = int((self.origin_size[0])*factor)
self.size[1] = int((self.origin_size[1])*factor)
self.pos[0] = int(self.pos[0] - (self.size[0]*(factor-1)//2))
self.pos[1] = int(self.pos[1] - (self.size[1]*(factor-1)//2))
self.health -= 1
class ExplBig(Explosion):
def __init__(self):
super().__init__()
self.size = [self.ww//4, self.ww//4]
        self.origin_size = self.size.copy()  # copy, so expand_calc() scales from the original size
self.health = 10
self.factor = self.health
class ExplSmall(Explosion):
def __init__(self):
super().__init__()
self.size = [self.ww//10, self.ww//10]
        self.origin_size = self.size.copy()  # copy, so expand_calc() scales from the original size
self.health = 5
self.factor = self.health
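# Editorial note: with these values an ExplBig starts with health == factor
# == 10, so each expand_calc() call scales the sprite by
# 1 + 0.01 * (factor - health) -- 1.00, 1.01, ... 1.09 -- and shifts pos to
# keep the explosion centred, until health reaches 0.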
|
MMiirrkk/Galaxy_Shooter_I
|
objects/explosion.py
|
explosion.py
|
py
| 978 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19875373112
|
import sys
import os
from Model.Pairwise.Embedding import RelationEmbedding
from typing import List, Dict, Tuple
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(BASE_DIR)
class InputExample(object):
"""A single training/test example for simple sequence classification."""
relationEmbedding = RelationEmbedding()
haveRels = {}
noRels = {}
def __init__(self, guid, text_a, text_b=None, label=None, entitys = None, rels=[],\
answerType: str = '', answerStr: str = ''):
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
self.freebaseEntity = entitys
self.freebaseRels = rels
self.relsId = InputExample.relation2id(self.freebaseRels)
self.answerType = answerType
self.answerStr = answerStr
@staticmethod
def relation2id(freebaseRels):
relsId: List[int] = []
for rel in freebaseRels:
if(rel in InputExample.relationEmbedding.rel2id):
relsId.append(InputExample.relationEmbedding.rel2id[rel])
InputExample.haveRels[rel] = 1
else:
relsId.append(InputExample.relationEmbedding.rel2id['UNK'])
# print(rel)
InputExample.noRels[rel] = 1
while(len(relsId) < 2):
relsId.append(InputExample.relationEmbedding.rel2id['UNK'])
return relsId[0:2]
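# Editorial note: relation2id() maps each Freebase relation to its embedding
# id (falling back to the 'UNK' id and recording unknown relations in
# InputExample.noRels), then pads or truncates the result so every example
# carries exactly two relation ids.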
|
EnernityTwinkle/KBQA-QueryGraphSelection
|
RankingQueryGraphs/Model/common/InputExample.py
|
InputExample.py
|
py
| 1,494 |
python
|
en
|
code
| 5 |
github-code
|
6
|
35694943116
|
from enum import Enum, unique
@unique
class Move(Enum):
    THE_SAME = 0
    LEFT = 1
    TOP = 2
    RIGHT = 3
    DOWN = 4
    LEFT_TOP = 5
    RIGHT_TOP = 6
    LEFT_DOWN = 7
    RIGHT_DOWN = 8
class P:
Left = 0.2
Right = 0.4
Top = 0.3
Down = 0.2
Vertical_Same = 0.5
Horizontal_Same = 0.4
def classify_move(x_diff, y_diff):
if x_diff == 0 and y_diff == 0:
return Move.THE_SAME
if x_diff == -1 and y_diff == 0:
return Move.LEFT
if x_diff == 0 and y_diff == 1:
return Move.TOP
if x_diff == 1 and y_diff == 0:
return Move.RIGHT
if x_diff == 0 and y_diff == -1:
return Move.DOWN
if x_diff == -1 and y_diff == 1:
return Move.LEFT_TOP
if x_diff == 1 and y_diff == 1:
return Move.RIGHT_TOP
if x_diff == -1 and y_diff == -1:
return Move.LEFT_DOWN
if x_diff == 1 and y_diff == -1:
return Move.RIGHT_DOWN
def calculate_move_probability(move):
result = {
Move.THE_SAME: P.Vertical_Same * P.Horizontal_Same,
Move.LEFT: P.Left * P.Vertical_Same,
Move.TOP: P.Top * P.Horizontal_Same,
Move.RIGHT: P.Right * P.Vertical_Same,
Move.DOWN: P.Down * P.Horizontal_Same,
Move.LEFT_TOP: P.Left * P.Top,
Move.RIGHT_TOP: P.Right * P.Top,
Move.LEFT_DOWN: P.Left * P.Down,
Move.RIGHT_DOWN: P.Right * P.Down
}
return result[move]
def calculate_common_direction_probability(path):
p = 0
for move in path:
p += calculate_move_probability(move)
return p
def calculate_single_path_probability(path):
p = 1
for move in path:
p *= calculate_move_probability(move)
return p
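# Worked example (editorial): calculate_single_path_probability([Move.LEFT, Move.RIGHT])
#   = (P.Left * P.Vertical_Same) * (P.Right * P.Vertical_Same)
#   = (0.2 * 0.5) * (0.4 * 0.5)
#   = 0.02  (this is the value test2() checks)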
# This recursion can calculate probability without copying moves to paths to speed up process
# But this solution is more clear, easily testable and debuggable.
def calculate_paths(paths_success, paths_fail, moves, x0, y0, step):
if step == 5:
if x0 == 0 and y0 == 0:
paths_success.append(moves.copy())
else:
paths_fail.append(moves.copy())
elif step < 5:
for x in range(-1, 2):
for y in range(-1, 2):
move = classify_move(x, y)
moves.append(move)
calculate_paths(paths_success, paths_fail, moves, x0 + x, y0 + y, step + 1)
del moves[-1]
def calculate_multiple_paths_probability(paths):
p = 0
for path in paths:
p += calculate_single_path_probability(path)
return p
def test1():
p = calculate_common_direction_probability([Move.THE_SAME,
Move.LEFT,
Move.LEFT_DOWN,
Move.DOWN,
Move.RIGHT_DOWN,
Move.RIGHT,
Move.RIGHT_TOP,
Move.TOP,
Move.LEFT_TOP])
res = round(p, 0) == 1
print(res)
def test2():
p = calculate_single_path_probability([Move.LEFT, Move.RIGHT])
res = round(p, 2) == 0.02
print(res)
def test3():
paths_success = []
paths_fail = []
moves = []
calculate_paths(paths_success, paths_fail, moves, 0, 0, 0)
p_success = calculate_multiple_paths_probability(paths_success)
p_fail = calculate_multiple_paths_probability(paths_fail)
res = round(p_success + p_fail, 0) == 1
print(res)
def test():
test1()
test2()
test3()
def main():
paths_success = []
paths_fail = []
moves = []
calculate_paths(paths_success, paths_fail, moves, 0, 0, 0)
p_success = calculate_multiple_paths_probability(paths_success)
p_fail = calculate_multiple_paths_probability(paths_fail)
print(p_success)
print(p_fail)
#test()
main()
|
yuryybk/bulbacon_2019_task
|
Task.py
|
Task.py
|
py
| 4,012 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5960535728
|
#!/usr/bin/env python3
#
# File: tidal_perturbation_in_circular_binary.py
# Author: Timothy Van Reeth <[email protected]>
# License: GPL-3+
# Description: Calculating the observed flux variations of a tidally
# distorted g-mode pulsation in a circular, synchronised
# binary system
import os
import sys
import glob
import yaml
import numpy as np
import subprocess as sp
import astropy.units as au
from progress.bar import Bar
from binary import circular_binary
from pulsations import pmode_pulsation
def distort_pulsation(star, orb_phase=0., distortion_factor=1.):
"""
Routine to scale the g-mode visibility as a function of the location on
the stellar surface
Parameters:
star: stellar_model object
the star in which the pulsation is distorted
orb_phase: float; optional
current phase of the orbital cycle
(default = 0.)
distortion_factor: float; optional
the scaling factor of the mode visibility
across the stellar surface (default = 1)
Returns:
puls_scaling: numpy array
"""
basefun = 0.5 * (3.*(np.sin(star.theta)*np.cos(star.phi - 2.*np.pi*orb_phase))**2. - 1.)
    puls_scaling = (
        (2. * np.ones(star.theta.shape) / (distortion_factor + 1.))
        + ((basefun - np.nanmin(basefun))
           / (np.nanmax(basefun) - np.nanmin(basefun))
           * 2. * ((distortion_factor - 1.) / (distortion_factor + 1.)))
    )
return puls_scaling
def calculate_flux(binary, pulsation, distortion_factor=1., puls_phase=0., orb_phase=0.):
"""
Calculate the observed flux of a binary star with a perturbed g-mode pulsation at a
given pulsation and orbital phase, and indicate if the data point was simulated
during an eclipse.
Parameters:
binary: circular_binary object
the studied circular, synchronised binary system
pulsation: gmode_pulsation object
the g-mode pulsation that will be perturbed and evaluated
distortion_factor: float; optional
the scaling factor of the mode visibility
across the stellar surface (default = 1)
puls_phase: float; optional
current phase of the pulsation cycle (in the inertial frame)
(default = 0.)
orb_phase: float; optional
current phase of the orbital cycle
(default = 0.)
Returns:
bin_iflux: float
the simulated flux caused by (solely) the binarity
tot_iflux: float
the total simulated flux at the provided pulsational and
orbital phases
puls_iflux: float
the simulated flux caused by (solely) the distorted pulsation
ecl_iflag: int
flag indicating if the calculated data point occurs during an
eclipse
"""
ecl_maps = binary.eclipse_masks(orb_phase)
primary_vissurf_bool = binary.primary.theta_incl < np.pi/2.
secondary_vissurf_bool = binary.secondary.theta_incl < np.pi/2.
primary_mask = np.array(ecl_maps[0] & primary_vissurf_bool, dtype=float)
secondary_mask = np.array(ecl_maps[1] & secondary_vissurf_bool, dtype=float)
primary_vissurf = np.array(primary_vissurf_bool, dtype=float)
secondary_vissurf = np.array(secondary_vissurf_bool, dtype=float)
if(binary.primary.pulsating):
norm_puls = calculate_normalised_pulsations(binary.primary, pulsation, puls_phase)
# puls_scaling = distort_pulsation(binary.primary, orb_phase=orb_phase, distortion_factor=distortion_factor)
puls_scaling = 1.
primary_puls = 10.**(-0.0004 * pulsation.amplitude * norm_puls * puls_scaling)
secondary_puls = np.ones(binary.secondary.theta.shape)
else:
norm_puls = calculate_normalised_pulsations(binary.secondary, pulsation, puls_phase)
# puls_scaling = distort_pulsation(binary.secondary, orb_phase=orb_phase, distortion_factor=distortion_factor)
puls_scaling = 1.
secondary_puls = 10.**(-0.0004 * pulsation.amplitude * norm_puls * puls_scaling)
primary_puls = np.ones(binary.primary.theta.shape)
    primary_totflux = np.nansum(
        2. * np.cos(binary.primary.theta_incl) * binary.primary.cell_weight
        * binary.primary.limb_darkening() * primary_puls * primary_mask)
    secondary_totflux = np.nansum(
        2. * np.cos(binary.secondary.theta_incl) * binary.secondary.cell_weight
        * binary.secondary.limb_darkening() * secondary_puls * secondary_mask)
    primary_binflux = np.nansum(
        2. * np.cos(binary.primary.theta_incl) * binary.primary.cell_weight
        * binary.primary.limb_darkening() * primary_mask)
    secondary_binflux = np.nansum(
        2. * np.cos(binary.secondary.theta_incl) * binary.secondary.cell_weight
        * binary.secondary.limb_darkening() * secondary_mask)
    primary_refflux = np.nansum(
        2. * np.cos(binary.primary.theta_incl) * binary.primary.cell_weight
        * binary.primary.limb_darkening() * primary_vissurf)
    secondary_refflux = np.nansum(
        2. * np.cos(binary.secondary.theta_incl) * binary.secondary.cell_weight
        * binary.secondary.limb_darkening() * secondary_vissurf)
    tot_iflux = -2500. * np.log10(
        (binary.light_contribution1 * primary_totflux / primary_refflux)
        + (binary.light_contribution2 * secondary_totflux / secondary_refflux))
    bin_iflux = -2500. * np.log10(
        (binary.light_contribution1 * primary_binflux / primary_refflux)
        + (binary.light_contribution2 * secondary_binflux / secondary_refflux))
puls_iflux = tot_iflux - bin_iflux
if(ecl_maps[0].all() & ecl_maps[1].all()):
ecl_iflag = 0
else:
ecl_iflag = 1
return bin_iflux, tot_iflux, puls_iflux, ecl_iflag
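# Editorial note: the factors 10.**(-0.0004 * amplitude) and
# -2500.*np.log10(...) used above are inverse operations for amplitudes
# expressed in mmag: the usual magnitude-flux relation m = -2.5 log10(F)
# becomes m[mmag] = -2500 log10(F), i.e. F = 10**(-0.0004 * m[mmag]).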
def calculate_normalised_pulsations(star, pulsation, puls_phase):
if pulsation.mode_type == 'p':
return calculate_normalised_pmode_pulsations(star, pulsation, puls_phase)
else:
print('g modes are currently unsupported')
sys.exit()
def calculate_normalised_pmode_pulsations(star, pulsation, puls_phase):
"""
    Calculate the normalised surface variability of the simulated p-mode
    pulsation, as seen by the observer at the given pulsation phase.
    Parameters:
        star: stellar_model object
               the pulsating star
        pulsation: pmode_pulsation object
               the simulated p-mode pulsation
        puls_phase: float
               the current phase of the studied pulsation (as seen
               by the observer)
    Returns:
        norm_puls: numpy array
               normalised pulsation variability of the p mode at the
               stellar surface at the phase puls_phase
"""
if(pulsation.m < 0.):
sign = 1.
else:
sign = -1.
norm_puls = (pulsation.Lr * np.cos(pulsation.m*star.phi + 2.*np.pi*sign*puls_phase)) / np.nanmax(pulsation.Lr * np.cos(pulsation.m*star.phi + 2.*np.pi*sign*puls_phase))
return norm_puls
def read_inlist(inlist_filename='./inlist.yaml'):
"""
Read in the required variables to calculate r-mode visibilities,
following the methodology from Saio et al. (2018).
Parameters:
inlist_filename: string; optional (default: ./inlist.dat)
the path to the inlist
Returns:
maindir: string
main work directory
binary: circular binary object
the circular synchronised binary system that will be simulated
pulsation: gmode_pulsation object
the g-mode pulsation that will be simulated
distortion_factor: float
distortion factor of the simulated pulsation
N_forb_cycles: int
number of orbital cycles to be simulated
Nsample_per_cycle: int
number of simulated data points per orbital cycle
"""
with open(inlist_filename,'r') as f:
cfg = yaml.load(f, Loader=yaml.Loader)
f.close()
# collecting the given parameter values within the appropriate objects
binary = circular_binary(cfg)
if cfg['pulsation']['mode_type'] == 'p':
pulsation = pmode_pulsation(cfg, binary.freq_orb)
elif cfg['pulsation']['mode_type'] == 'g':
print('g modes are currently not supported')
sys.exit()
else:
print('Mode type {} not recognised. Exiting.'.format(cfg['pulsation']['mode_type']))
sys.exit()
if(cfg['pulsation']['pulsating_star'] == 'primary'):
binary.primary.pulsating = True
pulsation.calculate_puls_geometry(binary.primary)
elif(cfg['pulsation']['pulsating_star'] == 'secondary'):
binary.secondary.pulsating = True
pulsation.calculate_puls_geometry(binary.secondary)
return cfg['general']['main_dir'], binary, pulsation, \
cfg['pulsation']['distortion_factor'], \
cfg['simulation']['N_forb_cycles'], \
cfg['simulation']['Nsample_per_cycle']
def save_results(result_filename, time, orb_phases, puls_phases, total_flux, binary_flux, pulsation_flux, eclipse_flags):
"""
Saving the calculated visibilities of the distorted g-mode pulsations
Parameters:
result_filename: string
absolute path to the results output filename
time: astropy quantity array
the time stamps of the simulate data points
orb_phases: numpy array
orbital phases corresponding to the different time stamps
puls_phases: numpy array
pulsation phases at the different time stamps
total_flux: numpy array
the total observed flux variations at the different time stamps (unit: mmag)
binary_flux: numpy array
the flux variations from the binary motion at the different time stamps (unit: mmag)
pulsation_flux: numpy array
the flux variations from the pulsations at the different time stamps (unit: mmag)
eclipse_flags: numpy array of integers
flags indicating if the data point was taken during an eclipse (yes = 1; no = 0)
"""
file = open(result_filename, 'w')
headline = ' '*16 + 'time' + ' '*11 + 'orb_phase' + ' '*10 + 'puls_phase' + ' '*10 + 'total_flux' + ' '*14 + 'binary' + ' '*11 + 'pulsation' + ' '*13 + 'eclipse'
file.write(f'{headline}\n')
for itime, iorbphase, ipulsphase, iflux, ibin, ipuls, iflag in zip(time.to(au.d).value, orb_phases, puls_phases, total_flux, binary_flux, pulsation_flux, eclipse_flags):
data = f' {itime:18e} {iorbphase:18e} {ipulsphase:18e} {iflux:18e} {ibin:18e} {ipuls:18e} {iflag}\n'
file.write(data)
file.close()
return
if __name__ == "__main__":
# Reading the input parameters / variables
maindir, binary, pulsation, distortion_factor, \
N_forb_cycles, Nsample_per_cycle = read_inlist(sys.argv[1])
print('Inlist read')
# Setting the output directory and copying the used inlist
mass1 = binary.primary.Mstar.to(au.Msun).value
mass2 = binary.secondary.Mstar.to(au.Msun).value
freq_orb = binary.freq_orb.to(1./au.d).value
outdir = f'{maindir}binary_M{int(round(100.*mass1))}M{int(round(100.*mass2))}_forb{int(round(1000.*freq_orb))}_i{int(binary.incl_deg)}/'
if not os.path.exists(outdir):
os.makedirs(outdir)
print('Directory created')
print('Directory set')
computation_nr = f"{len(glob.glob(f'{outdir}inlist*.yaml')) + 1}".zfill(3)
sp.call(f'cp ./inlist.yaml {outdir}inlist{computation_nr}.yaml', shell=True) # warning: do not forget to adapt this line if the inlist filename changes!
# setting the time domain for tidal perturbation simulations
time = np.linspace(0., float(N_forb_cycles), N_forb_cycles*Nsample_per_cycle+1) * binary.period.to(au.d)
orb_phases = np.linspace(0.,float(N_forb_cycles),N_forb_cycles*Nsample_per_cycle+1) % 1.
puls_phases = np.array(pulsation.puls_freq.to(1/au.d)*time, dtype=float) % 1.
# time = np.loadtxt('tess_sector01_times.dat').T
# time *= au.day
# orb_phases = np.array( time * freq_orb, dtype=float) % 1.
# puls_phases = np.array(pulsation.puls_freq.to(1/au.d)*time, dtype=float) % 1.
print('Phase arrays constructed.')
# Calculating the mode visibilities and kinetic energy
binary_flux = []
total_flux = []
pulsation_flux = []
eclipse_flags = []
pbar = Bar('Calculating...', max=len(puls_phases))
for iph,puls_phase,orb_phase in zip(np.arange(len(puls_phases)), puls_phases, orb_phases):
# calculating the observed fluxes for (1) the binary + pulsation, (2) the binary, and (3) the pulsation, and provide (4) eclipse flags
bin_iflux, tot_iflux, puls_iflux, ecl_iflag = calculate_flux(binary, pulsation, puls_phase=puls_phase, orb_phase=orb_phase, distortion_factor=distortion_factor)
binary_flux.append(bin_iflux)
total_flux.append(tot_iflux)
pulsation_flux.append(puls_iflux)
eclipse_flags.append(int(ecl_iflag))
pbar.next()
pbar.finish()
binary_flux = np.array(binary_flux)
total_flux = np.array(total_flux)
pulsation_flux = np.array(pulsation_flux)
eclipse_flags = np.array(eclipse_flags, dtype=int)
# Saving the results
save_results(f'{outdir}pmode_f{int(np.round(pulsation.puls_freq.value*1000000))}_perturbed-visibilities_{computation_nr}.dat', time, orb_phases, puls_phases, total_flux, binary_flux, pulsation_flux, eclipse_flags)
|
colej/eb_mapping
|
run_model.py
|
run_model.py
|
py
| 14,587 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27973693497
|
import json
import urllib.request
# read the model into a variable
with open("../src/test/gene-filter-example-2.xml", "r") as f:
model=f.read()
# encode the job
job = {
"export": {
"network_type":"en",
"network_format":"sbml"
},
"filter": {
"species": ["h2o", "atp"],
"reactions": [],
"enzymes": ["gene_abc"],
"enzyme_complexes": ["a + b + c", "x + Y", "b_098 + r_abc"],
},
"file": model
}
# setup request
req = urllib.request.Request("https://gemtractor.bio.informatik.uni-rostock.de/api/execute")
req.add_header('Content-Type', 'application/json; charset=utf-8')
job_bytes = json.dumps(job).encode('utf-8')
req.add_header('Content-Length', len(job_bytes))
# fire the job
try:
response = urllib.request.urlopen(req, job_bytes)
# do whatever you want with the returned file:
    print(response.read())
except urllib.error.HTTPError as e:
# there was a problem...!?
    print("bad request: " + str(getattr(e, 'code', repr(e))) + getattr(e, 'message', repr(e)))
    print(e.readlines())
|
binfalse/GEMtractor
|
clients/PythonClient.py
|
PythonClient.py
|
py
| 1,081 |
python
|
en
|
code
| 2 |
github-code
|
6
|
73733113787
|
from schedule import Scheduler
from session.manager import SessionManager
class GlobalObjectClass:
def __init__(self):
self.text: str = ""
self.database: str = ""
self.session_manager: SessionManager | None = None
self.scheduler: Scheduler | None = None
globalObject = GlobalObjectClass()
|
fkxxyz/chatgpt-session
|
server/common.py
|
common.py
|
py
| 329 |
python
|
en
|
code
| 1 |
github-code
|
6
|
27466130779
|
# Dictionary
capitals = {
# "key": value,
"Canada": "Ottawa",
"France": "Paris",
"Kazakhstan": "Astana",
"Russia": "Moscow"
}
continents = {
"Canada": "North America",
"France": "Europe",
"Kazakhstan": "Asia",
"Russia": ["Asia", "Europe"] # Dictionary data type can contain another data type inside.
}
# Dictionaries differ from lists and tuples in that they are key-based rather than index-based.
# Dictionaries store key-value pairs, and the values can be updated in place.
# As the capital city of Kazakhstan has been renamed, let's update the particular dictionary.
capitals["Kazakhstan"] = "Nur-Sultan"
# Let's create a function to use the data provided in the dictionaries above simultaneously.
print("\nDICTIONARY DATA TYPE")
print("--------------\n") # just for formatting the output in order to keep it clean
# But zipping the dictionaries directly is not a proper action as it zips only the key names but not the values.
# zip(capitals, continents) would return [('Canada', 'Canada'), ('France', 'France'), ... ] which is not what we want.
# What we can do is to extract the items of dictionaries to lists using items() method.
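# For example (illustrative):
# list(zip(capitals, continents))[0] would give ('Canada', 'Canada'),
# while list(zip(capitals.items(), continents.items()))[0] gives
# (('Canada', 'Ottawa'), ('Canada', 'North America')).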
# A function below is meant for giving an information provided in the dictionaries above.
def country_information():
global capitals
global continents
message = input("Do you want to get information about any country? [Type yes/no]: ").lower().strip()
    # normalizing the input by lower-casing it and stripping surrounding whitespace
if message == "yes":
print("Available countries:")
        for number, country in enumerate(capitals, 1):  # loop over the dict keys to print a numbered country list
print("", str(number) + ")", country)
checking_country_raw = input("Insert the name of the country: ")
checking_country = checking_country_raw[0].upper() + checking_country_raw[1:].lower() # validating the input
if checking_country in capitals and checking_country in continents:
for (k1, v1), (k2, v2) in zip(capitals.items(), continents.items()): # looping the zipped dicts
if k1 == checking_country:
if isinstance(v2, str): # if only 1 continent
print("\n", k1, "is situated in", v2, "and its capital city is", v1 + ".")
elif isinstance(v2, list): # if two continents
print("\n", k1, "is situated in", v2[0], "and", v2[1] + ". Its capital city is", v1 + ".")
print("--------------\n") # just for formatting the output in order to keep it clean
else:
print("\n!!! Please, choose an available country!\n")
country_information()
elif message == "no":
print("--------------\n") # just for formatting the output in order to keep it clean
print(" Thank you for using this app! Good bye!")
return
else:
print("\n!!! Please, answer properly!\n")
country_information()
country_information() # calling the function to get information about a country
|
00009115/CSF.CW1.00009115
|
dictionaries/dictionaries.py
|
dictionaries.py
|
py
| 3,143 |
python
|
en
|
code
| 1 |
github-code
|
6
|
41383190209
|
import math
from abc import ABC, abstractmethod
from dataclasses import dataclass
from hvac import Quantity
from hvac.fluids import Fluid
from hvac.refrigerant_piping.copper_tubing import CopperTube
from hvac.fluid_flow import Pipe, Circular
Q_ = Quantity
@dataclass
class RefrigerantCycleData:
rfg: Fluid
T_eva: Quantity # saturated suction temperature = evaporating temperature
T_con: Quantity # saturated condensing temperature
ESH: Quantity # evaporator superheat
CSC: Quantity # condensor subcooling
CSH: Quantity # compressor superheat
def __post_init__(self):
self.T_eva_o = self.T_eva + self.ESH
self.T_con_i = self.T_con + self.CSH
self.T_con_o = self.T_con - self.CSC
self.P_eva = self.rfg(T=self.T_eva, x=Q_(0, 'frac')).P
self.P_con = self.rfg(T=self.T_con, x=Q_(0, 'frac')).P
self.rfg_con_i = self.rfg(P=self.P_con, T=self.T_con_i)
self.rfg_con_o = self.rfg(P=self.P_con, T=self.T_con_o)
self.rfg_eva_i = self.rfg(P=self.P_eva, h=self.rfg_con_o.h)
self.rfg_eva_o = self.rfg(P=self.P_eva, T=self.T_eva_o)
self.q_eva = self.rfg_eva_o.h - self.rfg_eva_i.h
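# Illustrative sketch (editorial, not part of the module): constructing the
# cycle data with made-up values; it assumes the hvac package lets a
# refrigerant be created as Fluid('R134a').
#
#     R134a = Fluid('R134a')
#     cycle = RefrigerantCycleData(
#         rfg=R134a,
#         T_eva=Q_(5, 'degC'), T_con=Q_(45, 'degC'),
#         ESH=Q_(5, 'K'), CSC=Q_(5, 'K'), CSH=Q_(10, 'K'),
#     )
#     # cycle.q_eva is then the refrigeration effect used to size the lines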
class RefrigerantLine(ABC):
vr_allow_max: Quantity = None
vr_allow_min: dict[str, Quantity] | None = None
def __init__(
self,
rfg_cycle_data: RefrigerantCycleData,
Q_eva_max: Quantity,
Q_eva_min: Quantity | None = None,
):
"""
Parameters
----------
rfg_cycle_data:
Instance of dataclass RefrigerantCycleData containing the
specifications of the refrigerant cycle.
Q_eva_max:
Maximum evaporator capacity. This will determine the maximum
flow velocity of the refrigerant.
Q_eva_min: optional, default None
Minimum evaporator capacity. This will determine the minimum
flow velocity of the refrigerant. Leave to default `None` in case of
an ON/OFF-controlled compressor.
"""
self.rcd = rfg_cycle_data
self.Q_eva_max = Q_eva_max
self.Q_eva_min = Q_eva_min or Q_eva_max
# if Q_eva_min is None, set self.Q_eva_min equal to Q_eva_max
self.mr_max = self._get_mr(self.Q_eva_max)
self.Vr_max = self._get_Vr(self.mr_max)
self.mr_min = self._get_mr(self.Q_eva_min)
self.Vr_min = self._get_Vr(self.mr_min)
def _get_mr(self, Q) -> Quantity:
"""Get mass flow rate of refrigerant in the system."""
return Q / self.rcd.q_eva
@abstractmethod
def _get_Vr(self, mr) -> Quantity:
"""Get volume flow rate of refrigerant."""
...
@abstractmethod
def get_vr(self, *args, **kwargs) -> tuple:
"""Get refrigerant velocity."""
...
def _check_vr_max(self, vr_max):
if vr_max.to('feet / min') < self.vr_allow_max:
r = 'OK'
else:
r = 'TOO HIGH'
return r
def _check_vr_min(self, vr_min, copper_tube, riser):
...
@abstractmethod
def get_dp(self, copper_tube: CopperTube, Leq: Quantity) -> Quantity:
"""Get pressure drop across refrigeration line."""
...
class VaporLine(RefrigerantLine):
@abstractmethod
def _get_Vr(self, mr) -> Quantity:
...
def get_vr(self, copper_tube: CopperTube, riser: bool = True) -> tuple:
"""
Get maximum and minimum flow velocity of refrigerant.
This method also checks if maximum and minimum velocity are within the
allowable upper and lower limits to avoid noise on one hand and to
ensure proper oil return on the other hand.
Parameters
----------
copper_tube:
Instance of dataclass CopperTube containing copper tube specs.
riser: default True
Indicate if the pipe is a vertical riser (True) or not (False).
Returns
-------
Tuple with 4 elements:
1. the flow velocity at maximum capacity
2. string that indicates if maximum flow velocity is 'OK' or 'TOO HIGH'
3. the flow velocity at minimum capacity
4. string that indicates if minimum flow velocity is 'OK', 'TOO LOW', or
'TOO HIGH'
"""
A = math.pi * (copper_tube.di ** 2) / 4
vr_max = self.Vr_max / A
r_max = self._check_vr_max(vr_max)
vr_min = self.Vr_min / A
r_min = self._check_vr_min(vr_min, copper_tube, riser)
return vr_max, r_max, vr_min, r_min
def _check_vr_max(self, vr_max):
if vr_max.to('feet / min') < self.vr_allow_max:
r = 'OK'
else:
r = 'TOO HIGH'
return r
def _check_vr_min(self, vr_min, copper_tube, riser):
vr_min = vr_min.to('feet / min')
vr_allow_min = self.vr_allow_min[copper_tube.dn]
# minimum allowable flow velocity to ensure oil return
if not riser:
vr_allow_min *= 0.75
if vr_allow_min <= vr_min < self.vr_allow_max:
r = 'OK'
elif vr_min < vr_allow_min:
r = 'TOO LOW'
else: # vr_min >= self.vr_allow_max:
r = 'TOO HIGH'
return r
@abstractmethod
def get_dp(self, copper_tube: CopperTube, Leq: Quantity) -> Quantity:
...
class SuctionLine(VaporLine):
vr_allow_max = Q_(4000, 'ft / min')
vr_allow_min = {
# minimum allowable flow velocity for proper oil return in riser @
# saturated suction temperature of 20 °F
k: Q_(v, 'feet / min') for k, v in
[
('3/8', 370), ('1/2', 460),
('5/8', 520), ('3/4', 560),
('7/8', 600), ('1 1/8', 700),
('1 3/8', 780), ('1 5/8', 840),
('2 1/8', 980), ('2 5/8', 1080),
('3 1/8', 1180), ('3 5/8', 1270),
('4 1/8', 1360)
]
}
def _get_Vr(self, mr):
# volume flow rate of refrigerant at evaporator outlet
return mr / self.rcd.rfg_eva_o.rho
def get_dp(self, copper_tube: CopperTube, Leq: Quantity) -> Quantity:
"""
Get pressure drop across suction line.
Parameters
----------
copper_tube:
Instance of dataclass CopperTube containing copper tube specs.
Leq:
Equivalent length of suction line (including equivalent length
of fittings).
"""
pipe = Pipe.create(
length=Leq,
wall_roughness=Q_(0.0015, 'mm'),
fluid=self.rcd.rfg(P=self.rcd.P_eva, x=Q_(1, 'frac')),
cross_section=Circular.create(copper_tube.di),
volume_flow_rate=self.Vr_max
)
return pipe.pressure_drop
class DischargeLine(VaporLine):
vr_allow_max = Q_(3500, 'ft / min')
vr_allow_min = {
# minimum allowable flow velocity for proper oil return in riser @
# saturated condensing temperature of 80 °F
k: Q_(v, 'feet / min') for k, v in
[
('5/16', 220), ('3/8', 250),
('1/2', 285), ('5/8', 315),
('3/4', 345), ('7/8', 375),
('1 1/8', 430), ('1 3/8', 480),
('1 5/8', 520), ('2 1/8', 600),
('2 5/8', 665), ('3 1/8', 730)
]
}
def _get_Vr(self, mr):
# volume flow rate of refrigerant at condenser inlet
return mr / self.rcd.rfg_con_i.rho
def get_dp(self, copper_tube: CopperTube, Leq: Quantity) -> Quantity:
"""
Get pressure drop across discharge line.
Parameters
----------
copper_tube:
Instance of dataclass CopperTube containing copper tube specs.
Leq:
Equivalent length of discharge line (including equivalent length
of fittings).
"""
pipe = Pipe.create(
length=Leq,
wall_roughness=Q_(0.0015, 'mm'),
fluid=self.rcd.rfg(P=self.rcd.P_con, x=Q_(1, 'frac')),
cross_section=Circular.create(copper_tube.di),
volume_flow_rate=self.Vr_max
)
return pipe.pressure_drop
class LiquidLine(RefrigerantLine):
vr_allow_max = Q_(600, 'ft / min')
def _get_Vr(self, mr) -> Quantity:
return mr / self.rcd.rfg_con_o.rho
def get_vr(self, copper_tube: CopperTube) -> tuple:
A = math.pi * (copper_tube.di ** 2) / 4
vr_max = self.Vr_max / A
r_max = self._check_vr_max(vr_max)
return vr_max, r_max
def get_dp(self, copper_tube: CopperTube, Leq: Quantity) -> Quantity:
"""
Get pressure drop across liquid line.
Parameters
----------
copper_tube:
Instance of dataclass CopperTube containing copper tube specs.
Leq:
Equivalent length of discharge line (including equivalent length
of fittings).
"""
pipe = Pipe.create(
length=Leq,
wall_roughness=Q_(0.0015, 'mm'),
fluid=self.rcd.rfg(P=self.rcd.P_con, x=Q_(0, 'frac')),
cross_section=Circular.create(copper_tube.di),
volume_flow_rate=self.Vr_max
)
return pipe.pressure_drop
def get_dT(self, dP: Quantity, H: Quantity) -> Quantity:
"""
Get equivalent change in saturated temperature.
Parameters
----------
dP:
Total pressure drop across liquid line including fittings
and accessoires.
H:
The elevation height between the outlet and inlet of the liquid
line (if the outlet is below the inlet, `H` is negative).
"""
rho = self.rcd.rfg_con_o.rho
g = Q_(9.81, 'm / s ** 2')
dP_elev = rho * g * H
dP_tot = dP + dP_elev
P_out = self.rcd.P_con - dP_tot # pressure at liquid line outlet = TXV inlet
T_sat = self.rcd.rfg(P=P_out, x=Q_(0, 'frac')).T # saturation temperature @ P_out
dT = self.rcd.T_con.to('K') - T_sat.to('K')
return dT
|
TomLXXVI/HVAC
|
hvac/refrigerant_piping/sizing.py
|
sizing.py
|
py
| 10,022 |
python
|
en
|
code
| 8 |
github-code
|
6
|
35417197808
|
import csv
from dataclasses import dataclass, field
from itertools import count
from ..configs import Configs
from ..utils import add_bytes, stringify
from .actions import MONSTER_ACTIONS, Action
from .autoabilities import AUTOABILITIES
from .characters import CHARACTERS, Character
from .constants import (
Element,
ElementalAffinity,
EquipmentSlots,
EquipmentType,
GameVersion,
Rarity,
Stat,
Status,
)
from .file_functions import get_resource_path
from .items import ITEMS, ItemDrop
from .text_characters import TEXT_CHARACTERS
@dataclass
class Monster:
name: str
stats: dict[Stat, int]
elemental_affinities: dict[Element, ElementalAffinity]
status_resistances: dict[Status, int]
poison_tick_damage: int
zanmato_level: int
armored: bool
undead: bool
auto_statuses: list[Status]
gil: int
ap: dict[str, int]
item_1: dict[str, int | dict[Rarity, ItemDrop | None]]
item_2: dict[str, int | dict[Rarity, ItemDrop | None]]
steal: dict[str | Rarity, int | ItemDrop | None]
bribe: dict[str, int | ItemDrop | None]
equipment: dict[str, int | list | dict[Character, list[int]]]
actions: dict[str, Action]
zones: list[str] = field(default_factory=list)
def __str__(self) -> str:
return self.name
def _get_prize_structs(file_path: str) -> dict[str, list[int]]:
"""Retrieves the prize structs for enemies."""
absolute_file_path = get_resource_path(file_path)
with open(absolute_file_path) as file_object:
file_reader = csv.reader(file_object, delimiter=",")
monsters_data = {}
for line in file_reader:
prize_struct = [int(value, 16) for value in line]
# gets the name of the monster from the prize struct itself
# name is null (0x00) terminated
monster_name = ""
for character_id in prize_struct[408:430]:
if character_id == 0:
break
monster_name += TEXT_CHARACTERS[character_id]
monster_name = stringify(monster_name)
# if the name is already in the dictionary
# appends it with an underscore and a number
# from 2 to 8
if monster_name in monsters_data:
for i in count(2):
new_name = f"{monster_name}_{i}"
if new_name not in monsters_data:
monsters_data[new_name] = prize_struct
break
else:
monsters_data[monster_name] = prize_struct
return monsters_data
def _patch_prize_structs_for_hd(
prize_structs: dict[str, list[int]],
) -> dict[str, list[int]]:
"""Apply changes made in the HD version to the prize structs."""
def patch_abilities(
monster_name: str,
abilities: tuple[int, int, int, int, int, int, int],
equipment_type: EquipmentType = EquipmentType.WEAPON,
) -> None:
"""Modifies ability values 1-7 of every character's weapon
or armor ability array.
"""
# base address for abilities in the prize struct
base_address = 178
type_offset = 0 if equipment_type == EquipmentType.WEAPON else 1
# place the abilities values at the correct offsets
for owner_index in range(7):
offset = (type_offset + (owner_index * 2)) * 16
for slot in range(7):
slot_offset = (slot + 1) * 2
address = base_address + offset + slot_offset
prize_structs[monster_name][address] = abilities[slot]
# in the HD version equipment droprates were modified
# from 8/255 to 12/255 for these enemies
monster_names = (
"condor",
"dingo",
"water_flan",
"condor_2",
"dingo_2",
"water_flan_2",
"dinonix",
"killer_bee",
"yellow_element",
"worker",
"vouivre_2",
"raldo_2",
"floating_eye",
"ipiria",
"mi'ihen_fang",
"raldo",
"vouivre",
"white_element",
"funguar",
"gandarewa",
"lamashtu",
"raptor",
"red_element",
"thunder_flan",
"bite_bug",
"bunyip",
"garm",
"simurgh",
"snow_flan",
"bunyip_2",
"aerouge",
"buer",
"gold_element",
"kusariqqu",
"melusine",
"blue_element",
"iguion",
"murussu",
"wasp",
"evil_eye",
"ice_flan",
"mafdet",
"snow_wolf",
"guado_guardian_2",
"alcyone",
"mech_guard",
"mushussu",
"sand_wolf",
"bomb_2",
"evil_eye_2",
"guado_guardian_3",
"warrior_monk",
"warrior_monk_2",
"aqua_flan",
"bat_eye",
"cave_iguion",
"sahagin_2",
"swamp_mafdet",
"sahagin_3",
"flame_flan",
"mech_scouter",
"mech_scouter_2",
"nebiros",
"shred",
"skoll",
"flame_flan",
"nebiros",
"shred",
"skoll",
"dark_element",
"imp",
"nidhogg",
"yowie",
)
for monster_name in monster_names:
prize_structs[monster_name][139] = 12
# all the enemies that have ability arrays modified in the HD version
# besaid
patch_abilities("dingo", (38, 42, 34, 30, 124, 124, 124))
patch_abilities("condor", (0, 0, 0, 0, 126, 126, 126))
patch_abilities("water_flan", (42, 42, 42, 42, 125, 125, 125))
patch_abilities("dingo_2", (38, 42, 34, 30, 124, 124, 124))
patch_abilities("condor_2", (0, 0, 0, 0, 126, 126, 126))
patch_abilities("water_flan_2", (42, 42, 42, 42, 125, 125, 125))
# kilika
patch_abilities("dinonix", (38, 42, 38, 30, 126, 126, 126))
patch_abilities("killer_bee", (38, 42, 34, 30, 126, 126, 126))
patch_abilities("yellow_element", (38, 38, 38, 38, 125, 125, 125))
# luca
patch_abilities("vouivre_2", (38, 42, 34, 30, 124, 124, 124))
# mi'ihen
patch_abilities("raldo_2", (38, 42, 34, 30, 124, 124, 124))
patch_abilities("bomb", (30, 30, 30, 30, 30, 30, 125))
patch_abilities("dual_horn", (67, 30, 30, 30, 30, 127, 127))
patch_abilities("floating_eye", (38, 42, 34, 30, 99, 126, 126))
patch_abilities("ipiria", (38, 42, 38, 30, 126, 126, 126))
patch_abilities("mi'ihen_fang", (38, 42, 34, 30, 124, 124, 124))
patch_abilities("raldo", (38, 42, 34, 30, 124, 124, 124))
patch_abilities("vouivre", (38, 42, 34, 30, 124, 124, 124))
patch_abilities("white_element", (34, 34, 34, 34, 125, 125, 125))
# mushroom rock road
patch_abilities("gandarewa", (38, 38, 38, 38, 125, 125, 125))
patch_abilities("lamashtu", (38, 42, 34, 30, 124, 124, 124))
patch_abilities("raptor", (38, 42, 38, 30, 126, 126, 126))
patch_abilities("red_element", (30, 30, 30, 30, 125, 125, 125))
patch_abilities("thunder_flan", (38, 38, 38, 38, 125, 125, 125))
# djose highroad
patch_abilities("bite_bug", (38, 42, 34, 30, 126, 126, 126))
patch_abilities("bunyip", (38, 42, 34, 30, 124, 124, 124))
patch_abilities("garm", (38, 42, 34, 30, 124, 124, 124))
patch_abilities("simurgh", (0, 0, 0, 0, 126, 126, 126))
patch_abilities("snow_flan", (34, 34, 34, 34, 125, 125, 125))
# moonflow
patch_abilities("bunyip_2", (38, 42, 34, 30, 124, 124, 124))
# thunder plains
patch_abilities("aerouge", (38, 38, 38, 38, 125, 125, 125))
patch_abilities("buer", (38, 42, 34, 30, 99, 126, 126))
patch_abilities("gold_element", (38, 38, 38, 38, 125, 125, 125))
patch_abilities("kusariqqu", (38, 42, 34, 30, 124, 124, 124))
patch_abilities("melusine", (38, 42, 38, 30, 126, 126, 126))
# macalania woods
patch_abilities("blue_element", (42, 42, 42, 42, 125, 125, 125))
patch_abilities("chimera", (104, 104, 103, 103, 103, 103, 125))
patch_abilities("iguion", (38, 42, 38, 30, 126, 126, 126))
patch_abilities("murussu", (38, 42, 34, 30, 124, 124, 124))
patch_abilities("wasp", (38, 42, 34, 30, 126, 126, 126))
# lake macalania
patch_abilities("evil_eye", (38, 42, 34, 30, 99, 126, 126))
patch_abilities("ice_flan", (34, 34, 34, 34, 125, 125, 125))
patch_abilities("mafdet", (38, 42, 34, 30, 124, 124, 124))
patch_abilities("snow_wolf", (38, 42, 34, 30, 124, 124, 124))
# bikanel
patch_abilities("alcyone", (0, 0, 0, 0, 126, 126, 126))
patch_abilities("mushussu", (38, 42, 34, 30, 124, 124, 124))
patch_abilities("sand_wolf", (38, 42, 34, 30, 124, 124, 124))
# home
patch_abilities("bomb_2", (30, 30, 30, 30, 30, 30, 125))
patch_abilities("chimera_2", (104, 104, 103, 103, 103, 103, 125))
patch_abilities("dual_horn_2", (67, 67, 67, 30, 30, 127, 127))
patch_abilities("evil_eye_2", (38, 42, 34, 30, 99, 126, 126))
# via purifico
patch_abilities("aqua_flan", (42, 42, 42, 42, 125, 125, 125))
patch_abilities("bat_eye", (38, 42, 34, 30, 99, 126, 126))
patch_abilities("cave_iguion", (38, 42, 38, 30, 126, 126, 126))
patch_abilities("swamp_mafdet", (38, 42, 34, 30, 124, 124, 124))
# calm lands
patch_abilities("chimera_brain", (104, 104, 104, 104, 103, 103, 125))
patch_abilities("flame_flan", (30, 30, 30, 30, 125, 125, 125))
patch_abilities("nebiros", (38, 42, 34, 30, 126, 126, 126))
patch_abilities("shred", (38, 42, 34, 30, 124, 124, 124))
patch_abilities("skoll", (38, 42, 34, 30, 124, 124, 124))
patch_abilities("defender_x", (100, 99, 99, 99, 99, 99, 124))
# cavern of the stolen fayth
patch_abilities("dark_element", (42, 30, 30, 34, 125, 125, 125))
patch_abilities("defender", (99, 99, 99, 99, 98, 98, 124))
patch_abilities("ghost", (104, 104, 104, 103, 103, 103, 125))
patch_abilities("imp", (38, 38, 38, 38, 125, 125, 125))
patch_abilities("nidhogg", (38, 42, 34, 30, 124, 124, 124))
patch_abilities("valaha", (67, 67, 67, 30, 30, 127, 127))
patch_abilities("yowie", (38, 42, 38, 30, 126, 126, 126))
return prize_structs
def get_raw_data_string(prize_struct: list[str]) -> str:
string = ""
for index, byte in enumerate(prize_struct):
# every 16 bytes make a new line
if index % 16 == 0:
string += "\n"
string += " ".join([f"[{hex(index + i)[2:]:>3}]" for i in range(16)])
string += "\n"
# print the bytes' value
# string += f' {hex(byte)[2:]:>3} '
string += f" {byte:>3} "
# string += f' {byte:08b} '
return string
def _get_monster_data(monster_id: str, prize_struct: list[int]) -> Monster:
"""Get a Monster from his prize struct."""
def get_elements() -> dict[str, str]:
elements = {
Element.FIRE: 0b00001,
Element.ICE: 0b00010,
Element.THUNDER: 0b00100,
Element.WATER: 0b01000,
Element.HOLY: 0b10000,
}
affinities = {}
for element, value in elements.items():
if prize_struct[43] & value:
affinities[element] = ElementalAffinity.ABSORBS
elif prize_struct[44] & value:
affinities[element] = ElementalAffinity.IMMUNE
elif prize_struct[45] & value:
affinities[element] = ElementalAffinity.RESISTS
elif prize_struct[46] & value:
affinities[element] = ElementalAffinity.WEAK
else:
affinities[element] = ElementalAffinity.NEUTRAL
return affinities
def get_abilities(address: int) -> dict[str, list[str | None]]:
abilities = {}
equipment_types = (EquipmentType.WEAPON, 0), (EquipmentType.ARMOR, 16)
for equipment_type, offset in equipment_types:
abilities[equipment_type] = []
for i in range(address + offset, address + 16 + offset, 2):
if prize_struct[i + 1] == 128:
ability_name = AUTOABILITIES[prize_struct[i]]
else:
ability_name = None
abilities[equipment_type].append(ability_name)
return abilities
monster_name = ""
for character_id in prize_struct[408:430]:
if character_id == 0:
break
monster_name += TEXT_CHARACTERS[character_id]
for i in range(16):
if monster_id.endswith(f"_{i}"):
monster_name += f"#{i}"
break
stats = {
Stat.HP: add_bytes(*prize_struct[20:24]),
Stat.MP: add_bytes(*prize_struct[24:28]),
"overkill_threshold": add_bytes(*prize_struct[28:32]),
Stat.STRENGTH: prize_struct[32],
Stat.DEFENSE: prize_struct[33],
Stat.MAGIC: prize_struct[34],
Stat.MAGIC_DEFENSE: prize_struct[35],
Stat.AGILITY: prize_struct[36],
Stat.LUCK: prize_struct[37],
Stat.EVASION: prize_struct[38],
Stat.ACCURACY: prize_struct[39],
}
gil = add_bytes(*prize_struct[128:130])
ap = {
"normal": add_bytes(*prize_struct[130:132]),
"overkill": add_bytes(*prize_struct[132:134]),
}
item_1 = {
"drop_chance": prize_struct[136],
"normal": {Rarity.COMMON: None, Rarity.RARE: None},
"overkill": {Rarity.COMMON: None, Rarity.RARE: None},
}
if prize_struct[141] == 32:
item_1["normal"][Rarity.COMMON] = ItemDrop(
ITEMS[prize_struct[140]], prize_struct[148], False
)
if prize_struct[143] == 32:
item_1["normal"][Rarity.RARE] = ItemDrop(
ITEMS[prize_struct[142]], prize_struct[149], True
)
if prize_struct[153] == 32:
item_1["overkill"][Rarity.COMMON] = ItemDrop(
ITEMS[prize_struct[152]], prize_struct[160], False
)
if prize_struct[155] == 32:
item_1["overkill"][Rarity.RARE] = ItemDrop(
ITEMS[prize_struct[154]], prize_struct[161], True
)
item_2 = {
"drop_chance": prize_struct[137],
"normal": {Rarity.COMMON: None, Rarity.RARE: None},
"overkill": {Rarity.COMMON: None, Rarity.RARE: None},
}
if prize_struct[145] == 32:
item_2["normal"][Rarity.COMMON] = ItemDrop(
ITEMS[prize_struct[144]], prize_struct[150], False
)
if prize_struct[147] == 32:
item_2["normal"][Rarity.RARE] = ItemDrop(
ITEMS[prize_struct[146]], prize_struct[151], True
)
if prize_struct[157] == 32:
item_2["overkill"][Rarity.COMMON] = ItemDrop(
ITEMS[prize_struct[156]], prize_struct[162], False
)
if prize_struct[159] == 32:
item_2["overkill"][Rarity.RARE] = ItemDrop(
ITEMS[prize_struct[158]], prize_struct[163], True
)
steal = {
"base_chance": prize_struct[138],
Rarity.COMMON: None,
Rarity.RARE: None,
}
if prize_struct[165] == 32:
steal[Rarity.COMMON] = ItemDrop(
ITEMS[prize_struct[164]], prize_struct[168], False
)
if prize_struct[167] == 32:
steal[Rarity.RARE] = ItemDrop(ITEMS[prize_struct[166]], prize_struct[169], True)
bribe = {
"cost": float("nan"),
"item": None,
}
if prize_struct[171] == 32:
bribe["item"] = ItemDrop(ITEMS[prize_struct[170]], prize_struct[172], False)
elemental_affinities = get_elements()
status_resistances = {
Status.DEATH: prize_struct[47],
Status.ZOMBIE: prize_struct[48],
Status.PETRIFY: prize_struct[49],
Status.POISON: prize_struct[50],
Status.POWER_BREAK: prize_struct[51],
Status.MAGIC_BREAK: prize_struct[52],
Status.ARMOR_BREAK: prize_struct[53],
Status.MENTAL_BREAK: prize_struct[54],
Status.CONFUSE: prize_struct[55],
Status.BERSERK: prize_struct[56],
Status.PROVOKE: prize_struct[57],
Status.THREATEN: prize_struct[58],
Status.SLEEP: prize_struct[59],
Status.SILENCE: prize_struct[60],
Status.DARK: prize_struct[61],
Status.PROTECT: prize_struct[62],
Status.SHELL: prize_struct[63],
Status.REFLECT: prize_struct[64],
Status.NULBLAZE: prize_struct[65],
Status.NULFROST: prize_struct[66],
Status.NULSHOCK: prize_struct[67],
Status.NULTIDE: prize_struct[68],
Status.REGEN: prize_struct[69],
Status.HASTE: prize_struct[70],
Status.SLOW: prize_struct[71],
}
poison_tick_damage = stats[Stat.HP] * prize_struct[42] // 100
undead = prize_struct[72] == 2
auto_statuses = []
if prize_struct[74] & 0b00100000:
auto_statuses.append(Status.REFLECT)
if prize_struct[75] & 0b00000011 and prize_struct[74] & 0b11000000:
auto_statuses.append(Status.NULALL)
if prize_struct[75] & 0b00000100:
auto_statuses.append(Status.REGEN)
equipment = {
"drop_chance": prize_struct[139],
"bonus_critical_chance": prize_struct[175],
"base_weapon_damage": prize_struct[176],
"slots_modifier": prize_struct[173],
"slots_range": [],
"max_ability_rolls_modifier": prize_struct[177],
"max_ability_rolls_range": [],
"added_to_inventory": bool(prize_struct[174]),
}
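    # Editorial note: the shift-and-mask expressions below appear to emulate
    # C-style truncating (round-toward-zero) division of a signed value by 4
    # (equipment slots) and by 8 (ability rolls), mirroring the original
    # game's integer arithmetic.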
for i in range(8):
slots_mod = equipment["slots_modifier"] + i - 4
slots = (slots_mod + ((slots_mod >> 31) & 3)) >> 2
if slots < EquipmentSlots.MIN:
slots = EquipmentSlots.MIN.value
elif slots > EquipmentSlots.MAX:
slots = EquipmentSlots.MAX.value
equipment["slots_range"].append(slots)
ab_mod = equipment["max_ability_rolls_modifier"] + i - 4
ab_rolls = (ab_mod + ((ab_mod >> 31) & 7)) >> 3
equipment["max_ability_rolls_range"].append(ab_rolls)
equipment["ability_arrays"] = {}
for c, i in zip(CHARACTERS.values(), range(178, 371, 32)):
equipment["ability_arrays"][c.name] = get_abilities(i)
armored = bool(prize_struct[40] & 0b00000001)
zanmato_level = prize_struct[402]
actions = MONSTER_ACTIONS[monster_id]
if not actions:
actions.update(MONSTER_ACTIONS["generic_actions"])
monster = Monster(
name=monster_name,
stats=stats,
elemental_affinities=elemental_affinities,
status_resistances=status_resistances,
poison_tick_damage=poison_tick_damage,
zanmato_level=zanmato_level,
armored=armored,
undead=undead,
auto_statuses=auto_statuses,
gil=gil,
ap=ap,
item_1=item_1,
item_2=item_2,
steal=steal,
bribe=bribe,
equipment=equipment,
actions=actions,
)
return monster
PRIZE_STRUCTS = _get_prize_structs("tracker\\data\\ffx_mon_data.csv")
if Configs.game_version is GameVersion.HD:
PRIZE_STRUCTS = _patch_prize_structs_for_hd(PRIZE_STRUCTS)
MONSTERS = {k: _get_monster_data(k, v) for k, v in PRIZE_STRUCTS.items()}
|
coderwilson/FFX_TAS_Python
|
tracker/ffx_rng_tracker/data/monsters.py
|
monsters.py
|
py
| 18,996 |
python
|
en
|
code
| 14 |
github-code
|
6
|
764666346
|
# Django initialization
import os, django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings")
django.setup()
# views.py
from datetime import date
from django.db.models import Count, Q
from app.models import Staff
today = date(2023, 2, 14)  # fixed date for testing
qs = (
    Staff.objects
    .values("pk", "name")  # group-by keys
.annotate(
delivery_num=Count(
"delivery",
filter=Q(
delivery__date=today,
delivery__receiver__isnull=False,
),
),
unknown_num=Count(
"delivery",
filter=Q(
delivery__date=today,
delivery__receiver__isnull=True,
),
),
)
).values("pk", "name", "delivery_num", "unknown_num")
## Expand the queryset into a list of dicts; no data repacking is needed
values = list(qs)
# index.html
## today can be passed separately instead of storing it on every row
print(today)
for staff in values:
print(staff)
# ----------------------------
# SQL確認
print("### valuesに変更したSQL")
def printsql(query):
from sqlparse import format as sfmt
print(sfmt(str(query), reindent_aligned=True))
printsql(qs.query)
# ----------------------------
print("### SQLを観察してORMを組みなおした改善版")
from django.db.models import FilteredRelation, F
# ORMクエリの実装
qs = (
Staff.objects
.values("pk", "name") # group byのキー
.annotate(
dlist=FilteredRelation("delivery", condition=Q(delivery__date=today)),
delivery_num=Count("dlist__receiver"),
unknown_num=Count(
"dlist",
filter=Q(dlist__receiver__isnull=True),
),
).values("pk", "name", "delivery_num", "unknown_num")
)
printsql(qs.query)
|
shimizukawa/pycon-apac-2023-django-orm-dojo
|
src/try/try2-after.py
|
try2-after.py
|
py
| 1,811 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39443369301
|
def main():
dim = int(input("Enter odd number of rows/columns: "))
while dim % 2 != 1:
dim = int(input("Enter odd number of rows/columns: "))
# create rows
grid = []
for i in range(dim):
row = []
for j in range(dim):
row.append(0)
grid.append(row)
count = dim ** 2
grid[dim // 2][dim // 2] = 1
for i in range(dim // 2 + 1):
# starting index
x = i
y = dim - 1 - i
for j in range(dim - 1 - i * 2):
grid[x][y - j] = count
count -= 1
# x = i
y = i
for j in range(dim - 1 - i * 2):
grid[x + j][y] = count
count -= 1
x = dim - 1 - i
# y = i
for j in range(dim - 1 - i * 2):
grid[x][y + j] = count
count -= 1
        # x = dim - 1 - i
y = dim - 1 - i
for j in range(dim - 1 - i * 2):
grid[x - j][y] = count
count -= 1
# print the result
for row in grid:
for i in row:
print('%02d' % i, end=" ")
print()
main()
|
ZeerakA/CS303E
|
challenge_spiral.py
|
challenge_spiral.py
|
py
| 930 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9484548824
|
import pygame
import sys
#define bird class
class Bird(object):
def __init__(self):
self.birdRect = pygame.Rect(65,50,50,50)
self.birdStatus = [pygame.image.load("flappybirdassets/assets/1.png"),
pygame.image.load("flappybirdassets/assets/2.png"),
pygame.image.load("flappybirdassets/assets/dead.png")]
self.status = 0
self.birdX = 120
self.birdY = 350
self.jump = False
self.jumpSpeed = 10
self.gravity = 5
self.dead = False
def birdUpdate(self):
#movement
if self.jump:
self.jumpSpeed -= 1
self.birdY -= self.jumpSpeed
else:
self.gravity += 0.2
self.birdY += self.gravity
self.birdRect[1] = self.birdY
def createMap():
screen.blit(background,(0,0))
    #display pipes
    screen.blit(Pipeline.pineUp,(Pipeline.wallx, -300))
    screen.blit(Pipeline.pineDown,(Pipeline.wallx, 500))  # bottom pipe uses the bottom image
Pipeline.PipelineUpdate()
#display bird
if Bird.dead :
Bird.status = 2
elif Bird.jump :
Bird.status = 1
screen.blit(Bird.birdStatus[Bird.status], (Bird.birdX,Bird.birdY))
Bird.birdUpdate()
screen.blit(font.render('Score:'+ str(score),1,(255,255,255)),(100,50))
pygame.display.update()
#define pipeline class
class Pipeline(object):
def __init__(self):
self.wallx = 400
self.pineUp = pygame.image.load("flappybirdassets/assets/top.png")
self.pineDown = pygame.image.load("flappybirdassets/assets/bottom.png")
def PipelineUpdate(self):
#movement
self.wallx -= 5
if self.wallx < -80:
global score
score += 1
self.wallx = 400
def checkDead():
upRect = pygame.Rect(Pipeline.wallx,-300,Pipeline.pineUp.get_width(),Pipeline.pineUp.get_height())
downRect = pygame.Rect(Pipeline.wallx,500,Pipeline.pineDown.get_width(),Pipeline.pineDown.get_height())
if upRect.colliderect(Bird.birdRect) or downRect.colliderect(Bird.birdRect):
Bird.dead = True
if not Bird.birdRect[1] < height:
Bird.dead = True
return True
else:
return False
def getResult():
final_text1 = "GAME OVER"
final_text2 = "Your final score is :" + str(score)
    ft1_font = pygame.font.SysFont("Arial",70)
    ft1_surf = ft1_font.render(final_text1,1,(242,3,36))
    ft2_font = pygame.font.SysFont("Arial",50)
    ft2_surf = ft2_font.render(final_text2,1,(253,177,6))
screen.blit(ft1_surf,[screen.get_width()/2-ft1_surf.get_width()/2,100])
screen.blit(ft2_surf,[screen.get_width()/2-ft2_surf.get_width()/2,200])
pygame.display.update()
if __name__ == '__main__':
pygame.init()
font = pygame.font.SysFont(None,50)
size = width, height = 400,650
    screen = pygame.display.set_mode(size) # setting window size
clock = pygame.time.Clock()# setting delay time
color = (255,255,255)
Bird = Bird()
Pipeline = Pipeline()
score = 0
while True:
clock.tick(60)
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
if (event.type == pygame.MOUSEBUTTONDOWN or event.type == pygame.KEYDOWN) and not Bird.dead :
Bird.jump = True
Bird.gravity = 5
Bird.jumpSpeed = 10
# screen.fill(color)
background = pygame.image.load("flappybirdassets/assets/background.png")
if checkDead():
getResult()
else:
createMap()
pygame.quit()
|
hxg10636/flappygame
|
flappybird.py
|
flappybird.py
|
py
| 3,693 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16637468602
|
# Write Special Functions command, configures the sign (time, memory, speaker, tones, ...)
class WriteSpecialFunctions:
code = b"E"
def __init__(self):
self.label = b""
self.data = b""
# Memory configs
self.memory_configs = []
# Checksum?
self.checksum = False
    # Each special function has its own method here
    def set_time_of_day(self, hours, minutes):
        self.label = b"\x20"
        # Assumed encoding: four ASCII digits "HHMM" in 24-hour format
        self.data = f"{hours:02d}{minutes:02d}".encode()
def set_speaker(self, enabled):
self.label = b"\x21"
self.data = b"00" if enabled else b"FF"
def clear_memory(self):
self.label = b"\x24"
self.data = b""
def add_memory_config(self, label, type, ir, size, conf):
self.label = b"\x24"
# Label of file to configure
config = label.encode()
# Type of file to configure
types = { "text": b"\x41", "string": b"\x42", "dots": b"\x43" }
        config += types.get(type, b"\x41")
# IR Keyboard protection status
key = { "unlocked": b"\x55", "locked": b"\x4C" }
        config += key.get(ir, b"\x4C")
# Size (in bytes or in image size. WARN: image size is inverted, it's y*x natively)
config += f"{size[1]:02X}{size[0]:02X}".encode() if isinstance(size, tuple) else f"{size:04X}".encode()
# Type-specific config
if type == "text":
config += f"{conf['start']}{conf['stop']}".encode()
elif type == "string":
config += b"0000"
elif type == "dots":
colors = { "monochrome": b"1000", "3color": b"2000", "8color": b"8000" }
            config += colors.get(conf, b"8000")
else:
print("ERROR: unknown type")
self.memory_configs.append(config)
def set_day_of_week(self, day):
self.label = b"\x26"
days = {
"sunday": b"\x31",
"monday": b"\x32",
"tuesday": b"\x33",
"wednesday": b"\x34",
"thurdsay": b"\x35",
"friday": b"\x36",
"saturday": b"\x37",
}
self.data = days.get(day, b"\x32")
def set_time_format(self, format):
self.label = b"\x27"
self.data = "M" if format == "military" else "S"
def generate_tone(self, type, freq=0, duration=5, repeat=0):
self.label = b"\x28"
self.data = type
if type == b"\x32":
self.data += f"{freq:02X}{duration:1X}{repeat:1X}".encode()
def set_run_time_table(self, label, start, stop):
self.label = b"\x29"
self.data = label + start + stop
def display_text_at_xy(self, enabled, x, y, text):
self.label = b"\x2B"
status = "\x2B" if enabled else "\x2D"
file = "\x2B" # Apparently mandatory?
self.data = f"{status}{file}{x:02}{y:02}{text}".encode()
def soft_reset(self):
self.label = b"\x2C"
def set_run_sequence(self, sequence):
self.label = b"\x2E"
self.data = sequence
def set_dimming_reg(self, dim, brightness):
self.label = b"\x2F"
# Get index level closest to brightness
level = min([100, 86, 72, 58, 44], key=lambda x:abs(x-brightness))
index = [100, 86, 72, 58, 44].index(level)
self.data = f"{dim:02X}{index:02}".encode()
def set_dimming_time(self, start, stop):
self.label = b"\x2F"
self.data = f"{start:02X}{stop:02X}".encode()
def to_bytes(self):
# Do not return anything if there's no command
if not self.label:
return
        # This one is simple: label (function) and its data (parameters),
        # followed by any queued memory configurations
        bytes = self.label + self.data
for config in self.memory_configs:
bytes += config
return bytes
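# --- Hypothetical usage sketch (not part of the original module) ---
# Build a "speaker off" special-function command and serialize it; the
# payload is simply the function label followed by its parameters.
if __name__ == "__main__":
    wsf = WriteSpecialFunctions()
    wsf.set_speaker(enabled=False)   # label b"\x21", data b"FF"
    print(wsf.to_bytes())            # b"\x21" + b"FF"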
|
prototux/python-alphasign
|
alphasign/command/write_special_functions.py
|
write_special_functions.py
|
py
| 3,829 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12464441939
|
import argparse
import os
import shutil
import socket
import time
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data
import torchvision.utils as vutils
import torchvision.transforms as transforms
from tensorboardX import SummaryWriter
from torch.autograd import Variable
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader
from data import MyImageFolder
from model import UnetGenerator
from model import RevealNet
from text_data import *
from utils import *
import nltk
DATA_DIR = '/media/changmin/mini_hard/ImageNet/'
TEXT_DATA_DIR = "/home/changmin/research/steganography/data/"
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', default="train",
help='train | val | test')
parser.add_argument('--workers', type=int, default=8,
help='number of data loading workers')
parser.add_argument('--batchsize', type=int, default=4,
help='input batch size')
parser.add_argument('--imagesize', type=int, default=256,
help='the number of frames')
parser.add_argument('--epochs', type=int, default=100,
help='number of epochs to train for')
parser.add_argument('--lr', type=float, default=0.001,
help='learning rate, default=0.001')
parser.add_argument('--decay_round', type=int, default=10,
help='learning rate decay 0.5 each decay_round')
parser.add_argument('--beta1', type=float, default=0.5,
help='beta1 for adam. default=0.5')
parser.add_argument('--cuda', type=bool, default=True,
help='enables cuda')
parser.add_argument('--ngpu', type=int, default=1,
help='number of GPUs to use')
parser.add_argument('--Hnet', default='',
help="path to Hidingnet (to continue training)")
parser.add_argument('--Rnet', default='',
help="path to Revealnet (to continue training)")
parser.add_argument('--embedding', default='',
help="path to embedding (to continue training)")
parser.add_argument('--trainpics', default='./training/',
help='folder to output training images')
parser.add_argument('--validationpics', default='./training/',
help='folder to output validation images')
parser.add_argument('--testpics', default='./training/',
help='folder to output test images')
parser.add_argument('--outckpts', default='./training/',
help='folder to output checkpoints')
parser.add_argument('--traintexts', default='./training/',
help='folder to output training texts')
parser.add_argument('--outlogs', default='./training/',
help='folder to output images')
parser.add_argument('--outcodes', default='./training/',
help='folder to save the experiment codes')
parser.add_argument('--beta', type=float, default=0.01,
help='hyper parameter of beta')
parser.add_argument('--remark', default='', help='comment')
parser.add_argument('--test', default='', help='test mode, you need give the test pics dirs in this param')
parser.add_argument('--hostname', default=socket.gethostname(), help='the host name of the running server')
parser.add_argument('--debug', type=bool, default=False, help='debug mode do not create folders')
parser.add_argument('--logfrequency', type=int, default=10, help='the frequency of print the log on the console')
parser.add_argument('--resultpicfrequency', type=int, default=100, help='the frequency of save the resultpic')
parser.add_argument('--savefrequency', type=int, default=1000, help='the frequency of save the checkpoint')
def main():
global writer, smallestLoss, optimizerH, optimizerR, schedulerH, schedulerR
opt = parser.parse_args()
if torch.cuda.is_available() and not opt.cuda:
print("WARNING: You have a CUDA device, "
"so you should porbably run with --cuda")
cudnn.benchmark = True
create_dir_to_save_result(opt)
logpath = opt.outlogs + '%s_%d_log.txt' % (opt.dataset, opt.batchsize)
print_log(opt, str(opt), logpath)
save_current_codes(opt.outcodes)
if opt.test == '':
voc, _ = loadPrepareData(None, "all", os.path.join(TEXT_DATA_DIR, "dialogues_text.txt"), 768)
# tensorboardX writer
writer = SummaryWriter(comment='**' + opt.remark)
# Get the dataset
#traindir = os.path.join(DATA_DIR, 'train')
texttraindir = os.path.join(TEXT_DATA_DIR, "train/dialogues_train.txt")
valdir = os.path.join(DATA_DIR, 'val')
textvaldir = os.path.join(TEXT_DATA_DIR, "validation/dialogues_validation.txt")
"""
train_dataset = MyImageFolder(
traindir, # Preprocessing the data
transforms.Compose([
transforms.Resize([opt.imagesize, opt.imagesize]), # Randomly cut and resize the data to a given size
transforms.ToTensor(),
# Convert a numpy.ndarray with a value range of [0,255] or a shape of (H,W,C) to
# a torch.FloatTensor with a shape of [C,H,W] and a value of [0, 1.0] torch.FloatTensor
]),
True)
"""
_, text_train_dataset = loadPrepareData(None, "train", texttraindir, 768)
val_dataset = MyImageFolder(
valdir, # Preprocessing the data
transforms.Compose([ # Combine several transforms together
transforms.Resize([opt.imagesize, opt.imagesize]), # Randomly cut and resize the data to a given size
transforms.ToTensor(),
            # Convert a numpy.ndarray with a value range of [0, 255] or a shape of (H,W,C) to
# a torch.FloatTensor with a shape of [C,H,W] and a value of [0, 1.0] torch.FloatTensor
]))
_, text_val_dataset = loadPrepareData(None, "val", textvaldir, 768)
#assert train_dataset
assert val_dataset
assert text_train_dataset
assert text_val_dataset
else:
testdir = opt.test
texttestdir = os.path.join(TEXT_DATA_DIR, "test/dialogues_test.txt")
test_dataset = MyImageFolder(
testdir, # Preprocessing the data
        transforms.Compose([ # Combine several transforms together
transforms.Resize([opt.imagesize, opt.imagesize]),
transforms.ToTensor(),
]))
_, text_test_dataset = loadPrepareData(None, "test", texttestdir, 768)
assert test_dataset
assert text_test_dataset
# Create word embedding layer
embedding = nn.Embedding(voc.num_words, 256)
embedding.cuda()
embedding.weight.data.uniform_(-1, 1)
if opt.embedding != '':
embedding.load_state_dict(torch.load(opt.embedding))
if opt.ngpu > 1:
embedding = torch.nn.DataParallel(embedding).cuda()
# Create Hiding network objects
Hnet = UnetGenerator(input_nc=6, output_nc=3, num_downs=7, output_function=nn.Sigmoid)
Hnet.cuda()
Hnet.apply(weights_init)
# Determine whether to continue the previous training
if opt.Hnet != "":
Hnet.load_state_dict(torch.load(opt.Hnet))
if opt.ngpu > 1:
Hnet = torch.nn.DataParallel(Hnet).cuda()
print_network(opt, Hnet, logpath)
# Create Reveal network objects
Rnet = RevealNet(output_function=nn.Sigmoid)
Rnet.cuda()
Rnet.apply(weights_init)
if opt.Rnet != '':
Rnet.load_state_dict(torch.load(opt.Rnet))
if opt.ngpu > 1:
Rnet = torch.nn.DataParallel(Rnet).cuda()
print_network(opt, Rnet, logpath)
# LogSoftmax
logsoftmax = nn.LogSoftmax(dim=-1).cuda()
# Mean Square Error loss
criterion = nn.MSELoss().cuda()
# training mode
if opt.test == '':
# setup optimizer
optimizerH = optim.Adam(Hnet.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
schedulerH = ReduceLROnPlateau(optimizerH, mode='min', factor=0.2, patience=5, verbose=True)
optimizerR = optim.Adam(Rnet.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
schedulerR = ReduceLROnPlateau(optimizerR, mode='min', factor=0.2, patience=8, verbose=True)
#train_loader = DataLoader(train_dataset, batch_size=opt.batchsize,
# shuffle=True, num_workers=int(opt.workers))
val_loader = DataLoader(val_dataset, batch_size=opt.batchsize,
shuffle=True, num_workers=int(opt.workers))
smallestLoss = 10000
print_log(opt, "-------------------Starts Training----------------------", logpath)
for epoch in range(opt.epochs):
# train
train(opt, val_loader, epoch, voc, embedding, text_train_dataset, Hnet=Hnet, Rnet=Rnet,
criterion=criterion, logsoftmax=logsoftmax, logpath=logpath)
# validation
            val_hloss, val_rloss, val_sumloss = validation(opt, val_loader, epoch, voc, embedding, text_val_dataset,
                Hnet=Hnet, Rnet=Rnet, criterion=criterion, logsoftmax=logsoftmax, logpath=logpath)
# adjust learning rate
schedulerH.step(val_sumloss)
schedulerR.step(val_rloss)
# save the best model parameters
if val_sumloss < globals()["smallestLoss"]:
globals()["smallestLoss"] = val_sumloss
# do check pointing
torch.save(Hnet.state_dict(),
                           '%s/netH_epoch_%d,sumloss=%.6f,Hloss=%.6f.pth' % (
opt.outckpts, epoch, val_sumloss, val_hloss))
torch.save(Rnet.state_dict(),
'%s/netR_epoch_%d,sumloss=%.6f,Rloss=%.6f.pth' % (
opt.outckpts, epoch, val_sumloss, val_rloss))
writer.close()
# test mode
else:
test_loader = DataLoader(test_dataset, batch_size=opt.batchsize,
shuffle=False, num_workers=int(opt.workers))
test(opt, test_loader, 0, Hnet=Hnet, Rnet=Rnet, criterion=criterion, logpath=logpath)
print("-------------------Test is completed-------------------")
def train(opt, train_loader, epoch, voc, embedding, text_train_dataset, Hnet, Rnet, criterion, logsoftmax, logpath):
batch_time = AverageMeter()
data_time = AverageMeter()
Hlosses = AverageMeter() # record the loss of each epoch Hnet
Rlosses = AverageMeter() # record the loss of each epoch Rnet
SumLosses = AverageMeter() # record the each epoch Hloss + β*Rloss
# switch to train mode
Hnet.train()
Rnet.train()
start_time = time.time()
for i, data in enumerate(train_loader, 0):
data_time.update(time.time() - start_time)
Hnet.zero_grad()
Rnet.zero_grad()
all_pics = data # all pics contains coverImg and secretImg, no label needed
this_batch_size = int(all_pics.size()[0])
#--------------------------------------------------------------------------------------------------------------------------------
# The first half of the picture is used as coverImg, and the second half of the picture is used as secretImg
#cover_img = all_pics[0:this_batch_size, :, :, :] # batch_size, 3, ,256, 256
cover_img = all_pics
#--------------------------------------------------------------------------------------------------------------------------------
# should change secret_img -> secret_text and secret_text has the same size with cover_img
#secret_img = all_pics[this_batch_size:this_batch_size * 2, :, :, :]
text_batches = batch2TrainData(voc, [random.choice(text_train_dataset) for _ in range(this_batch_size)])
secret_text, text_lengths, target_text, mask, max_target_len = text_batches
org_text = secret_text
secret_text = secret_text.cuda()
secret_text = embedding(secret_text)
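        # 768 tokens * 256-dim embeddings = 196,608 values = 3 * 256 * 256, so each text fits exactly into one image-sized tensor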
secret_text = secret_text.view(this_batch_size, 3, 256, 256)
#--------------------------------------------------------------------------------------------------------------------------------
# Concat the pictures together to get six-channel pictures as input to the Hnet
concat_img_text = torch.cat([cover_img.cuda(), secret_text], dim=1)
# Data into gpu
if opt.cuda:
cover_img = cover_img.cuda()
#secret_img = secret_img.cuda()
#concat_img = concat_img.cuda()
secret_text = secret_text.cuda()
concat_img_text = concat_img_text.cuda()
#concat_imgv = Variable(concat_img) # concat_img as input to the Hnet
concat_img_textv = Variable(concat_img_text)
cover_imgv = Variable(cover_img) # cover_img as label of Hnet
#container_img = Hnet(concat_imgv) # Get container_img with secret_img
container_img = Hnet(concat_img_textv)
errH = criterion(container_img, cover_imgv) # Hnet reconstruction error
Hlosses.update(errH, this_batch_size) # record H_loss value
rev_secret_img = Rnet(container_img) # container_img is used as input to the Rnet to get rev_secret_img
#import pdb
#pdb.set_trace()
#secret_imgv = Variable(secret_img) # secret_img as the label of the Rnet
secret_textv = Variable(secret_text)
#errR = criterion(rev_secret_img, secret_imgv) # Rnet reconstruction error
#errR = criterion(rev_secret_img, secret_textv)
#-----------------------------------------------------------------------------------------------------------------------------
#import pdb
#pdb.set_trace()
rec_text = rev_secret_img.view(this_batch_size, 768, 256)
rec_text_norm = normalizing(rec_text, 2)
W_norm = normalizing(embedding.weight, 1)
text_org = org_text.view(-1)
prob_logits = torch.tensordot(torch.squeeze(rec_text_norm), W_norm, dims=[[2], [1]])
prob = logsoftmax(prob_logits * 100)
rec_sent = torch.squeeze(torch.argmax(prob, dim=2))
prob = prob.view(-1, voc.num_words)
idx = torch.arange(this_batch_size * 768)
all_idx = torch.t(torch.stack([idx, text_org]))
all_prob = gather_nd(prob, all_idx)
gen_temp = rec_sent.view(-1)
gen_idx = torch.t(torch.stack([idx, gen_temp.cpu()]))
gen_prob = gather_nd(prob, gen_idx)
errR = -torch.mean(all_prob)
#-----------------------------------------------------------------------------------------------------------------------------
Rlosses.update(errR, this_batch_size) # record R_loss value
betaerrR_secret = opt.beta * errR
err_sum = errH + betaerrR_secret
SumLosses.update(err_sum, this_batch_size)
# Calculate the gradient
err_sum.backward()
# Optimize the parameters of both networks
optimizerH.step()
optimizerR.step()
# Update the time of a batch
batch_time.update(time.time() - start_time)
start_time = time.time()
# log information
log = '[%d/%d][%d/%d]\tLoss_H: %.4f Loss_R: %.4f Loss_sum: %.4f \tdatatime: %.4f \tbatchtime: %.4f' % (
epoch, opt.epochs, i, len(train_loader),
Hlosses.val, Rlosses.val, SumLosses.val, data_time.val, batch_time.val)
# print log information
if i % opt.logfrequency == 0:
print_log(opt, log, logpath)
else:
print_log(opt, log, logpath, console=False)
# Related operations such as storing records
# Generate a picture in 100 steps
if epoch % 1 == 0 and i % opt.resultpicfrequency == 0:
APD = save_result_pic(opt, this_batch_size, cover_img, container_img.data, epoch, i, opt.trainpics)
save_text_path = opt.traintexts + '/ResultTexts_epoch%03d_batch%04d.txt' % (epoch, i)
#import pdb
#pdb.set_trace()
avg_bleu = 0
with open(save_text_path, 'a') as text_file:
for b in range(this_batch_size):
ori = [voc.index2word[x] for x in org_text[b].tolist() if x != 0]
recon = [voc.index2word[x] for x in rec_sent[b].tolist() if x != 0]
original_text = "{}_Original :".format(b) + " ".join([voc.index2word[x] for x in org_text[b].tolist() if x != 0])
recons_text = "{}_Reconstructed:".format(b) + " ".join([voc.index2word[x] for x in rec_sent[b].tolist() if x != 0])
text_file.write(original_text + "\n")
text_file.write(recons_text + "\n")
bleu_score = nltk.translate.bleu_score.sentence_bleu([ori], recon)
text_file.write(str(bleu_score) + "\n")
avg_bleu += bleu_score
apd_text = "APD: {}".format(APD) + "\n"
text_file.write(apd_text)
avg_bleu = avg_bleu / float(this_batch_size)
print("Original :" + " ".join([voc.index2word[x] for x in org_text[0].tolist() if x != 0]))
print()
print("Reconstructed:" + " ".join([voc.index2word[x] for x in rec_sent[0].tolist() if x != 0]))
print("Bleu score :{}".format(avg_bleu))
if i % opt.savefrequency == 0 and i != 0:
torch.save({
'epoch': epoch,
'iteration': i,
'Hnet': Hnet.state_dict(),
'Rnet': Rnet.state_dict(),
'optimizerH': optimizerH.state_dict(),
'optimizerR': optimizerR.state_dict(),
'sum_loss': err_sum,
'H_loss': errH,
'R_loss': errR,
'voc_dict': voc.__dict__,
'embedding': embedding.state_dict()
}, opt.outckpts + '/{}_{}_{}.tar'.format(epoch, i, 'checkpoint'))
# Time taken to output an epoch
"""
epoch_log = "one epoch time is %.4f==================================================" % (
batch_time.sum) + "\n"
epoch_log = epoch_log + "epoch learning rate: optimizerH_lr = %.8f optimizerR_lr = %.8f" % (
Hlosses.avg, Rlosses.avg, SumLosses.avg)
print_log(opt, epoch_log, logpath)
"""
if not opt.debug:
# record learning rate
writer.add_scalar("lr/H_lr", optimizerH.param_groups[0]['lr'], epoch)
writer.add_scalar("lr/R_lr", optimizerR.param_groups[0]['lr'], epoch)
writer.add_scalar("lr/beta", opt.beta, epoch)
# Every epoch records an average loss on tensorboard display
writer.add_scalar("train/R_loss", Rlosses.avg, epoch)
writer.add_scalar("train/H_loss", Hlosses.avg, epoch)
writer.add_scalar("train/sum_loss", SumLosses.avg, epoch)
def validation(opt, val_loader, epoch, voc, embedding, text_val_dataset, Hnet, Rnet, criterion, logsoftmax, logpath):
print("--------------------------------------------------validation begin--------------------------------------------------")
start_time = time.time()
Hnet.eval()
Rnet.eval()
Hlosses = AverageMeter() # record the loss of each epoch Hnet
Rlosses = AverageMeter() # record the loss of each epoch Rnet
count = 0
for i, data in enumerate(val_loader, 0):
if count >= 100:
break
Hnet.zero_grad()
Rnet.zero_grad()
with torch.no_grad():
all_pics = data # allpics contains coverImg and secretImg, no label needed
this_batch_size = int(all_pics.size()[0]) # Processing the last batch of each epoch may be insufficient for opt.batchsize
# The first half of the picture is used as coverImg, and the second half of the picture is used as secretImg
cover_img = all_pics # batchsize, 3, 256, 256
#secret_img = all_pics[this_batch_size:this_batch_size * 2, :, :, :]
text_batches = batch2TrainData(voc, [random.choice(text_val_dataset) for _ in range(this_batch_size)])
secret_text, text_lengths, target_text, mask, max_target_len = text_batches
org_text = secret_text
secret_text = secret_text.cuda()
secret_text = embedding(secret_text)
secret_text = secret_text.view(this_batch_size, 3, 256, 256)
# Concat the pictures together to get six-channel pictures as input to the Hnet
#concat_img = torch.cat([cover_img, secret_img], dim=1)
concat_img_text = torch.cat([cover_img.cuda(), secret_text], dim=1)
# Data into gpu
if opt.cuda:
cover_img = cover_img.cuda()
#secret_img = secret_img.cuda()
#concat_img = concat_img.cuda()
concat_img_text = concat_img_text.cuda()
#concat_imgv = Variable(concat_img) # concat_img as input to the Hnet
concat_img_textv = Variable(concat_img_text)
cover_imgv = Variable(cover_img) # cover_img as label of Hnet
container_img = Hnet(concat_img_textv) # Get container_img with secret_img
errH = criterion(container_img, cover_imgv) # Hnet reconstruction error
Hlosses.update(errH, this_batch_size) # record H_loss value
rev_secret_img = Rnet(container_img) # container_img is used as input to the Rnet to get rev_secret_img
secret_textv = Variable(secret_text)
rec_text = rev_secret_img.view(this_batch_size, 768, 256)
rec_text_norm = normalizing(rec_text, 2)
W_norm = normalizing(embedding.weight, 1)
text_org = org_text.view(-1)
prob_logits = torch.tensordot(torch.squeeze(rec_text_norm), W_norm, dims=[[2], [1]])
prob = logsoftmax(prob_logits * 100)
prob = prob.view(-1, voc.num_words)
idx = torch.arange(this_batch_size * 768)
all_idx = torch.t(torch.stack([idx, text_org]))
all_prob = gather_nd(prob, all_idx)
errR = -torch.mean(all_prob) # Rnet reconstruction error
Rlosses.update(errR, this_batch_size) # record R_loss value
"""
if i % 50 == 0:
save_result_pic(opt, this_batch_size, cover_img, container_img.data, secret_img, rev_secret_img.data, epoch, i,
opt.validationpics)
"""
count += 1
val_hloss = Hlosses.avg
val_rloss = Rlosses.avg
val_sumloss = val_hloss + opt.beta * val_rloss
val_time = time.time() - start_time
val_log = "validation[%d] val_Hloss = %.6f\t val_Rloss = %.6f\t val_Sumloss = %.6f\t validation time=%.2f" % (
epoch, val_hloss, val_rloss, val_sumloss, val_time)
print_log(opt, val_log, logpath)
if not opt.debug:
writer.add_scalar('validation/H_loss_avg', Hlosses.avg, epoch)
writer.add_scalar('validation/R_loss_avg', Rlosses.avg, epoch)
writer.add_scalar('validation/sum_loss_avg', val_sumloss, epoch)
print("--------------------------------------------------validation end--------------------------------------------------")
return val_hloss, val_rloss, val_sumloss
def test(opt, test_loader, epoch, Hnet, Rnet, criterion, logpath):
print("--------------------------------------------------test begin--------------------------------------------------")
start_time = time.time()
Hnet.eval()
Rnet.eval()
Hlosses = AverageMeter() # to record the Hloss in one epoch
Rlosses = AverageMeter() # to record the Rloss in one epoch
for i, data in enumerate(test_loader, 0):
Hnet.zero_grad()
Rnet.zero_grad()
with torch.no_grad():
all_pics = data # all_pics contain cover_img and secret_img, label is not needed
this_batch_size = int(all_pics.size()[0] / 2) # in order to handle the final batch which may not have opt.size
# half of the front is as cover_img, half of the end is as secret_img
cover_img = all_pics[0:this_batch_size, :, :, :] # batchsize,3,256,256
secret_img = all_pics[this_batch_size:this_batch_size * 2, :, :, :]
# concat cover and original secret get the concat_img with channels
concat_img = torch.cat([cover_img, secret_img], dim=1)
# data into gpu
if opt.cuda:
cover_img = cover_img.cuda()
secret_img = secret_img.cuda()
concat_img = concat_img.cuda()
concat_imgv = Variable(concat_img) # concat_img is the input of Hnet
cover_imgv = Variable(cover_img) # Hnet reconstruction error
container_img = Hnet(concat_imgv) # concat_img as the input of Hnet and get the container_img
errH = criterion(container_img, cover_imgv) # Hnet reconstruction error
Hlosses.update(errH, this_batch_size) # record the H loss value
rev_secret_img = Rnet(container_img) # container_img is the input of the Rnet and get the output "rev_secret_img"
secret_imgv = Variable(secret_img) # secret_imgv is the label of Rnet
errR = criterion(rev_secret_img, secret_imgv) # Rnet reconstructed error
Rlosses.update(errR, this_batch_size) # record the R_loss value
save_result_pic(opt, this_batch_size, cover_img, container_img.data, secret_img, rev_secret_img.data, epoch, i,
opt.testpics)
val_hloss = Hlosses.avg
val_rloss = Rlosses.avg
val_sumloss = val_hloss + opt.beta * val_rloss
val_time = time.time() - start_time
val_log = "validation[%d] val_Hloss = %.6f\t val_Rloss = %.6f\t val_Sumloss = %.6f\t validation time=%.2f" % (
epoch, val_hloss, val_rloss, val_sumloss, val_time)
print_log(opt, val_log, logpath)
print("--------------------------------------------------test end--------------------------------------------------")
return val_hloss, val_rloss, val_sumloss
if __name__ == '__main__':
main()
|
changminL/stegano
|
main.py
|
main.py
|
py
| 26,162 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14539725898
|
# class Solution:  # cheating solution (standard-library shortcut)
# def permute(self, nums):
# import itertools
# return list(itertools.permutations(nums))
class Solution:
def permute(self, nums):
res = []
        if len(nums) == 1:  # base case
            return [nums]
        if len(nums) == 2:  # base case
return [nums, nums[::-1]]
for i in range(len(nums)):
num = nums[i]
newnums = nums[:i] + nums[i+1:]
            for item in self.permute(newnums):  # recursive call
res.append([num] + item)
return res
if __name__ == '__main__':
s = Solution()
ans = s.permute([1,2,3,5,6,8])
print(ans)
|
Rainphix/LeetCode
|
046_permutations.py
|
046_permutations.py
|
py
| 682 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34472580214
|
import numpy as np
import matplotlib.pyplot as plt
def optimum_cf (gamma, P_exit_pa, mean_Pc_pa, ):
cf = lambda p2_p3, p3_p1: np.sqrt((2 * (gamma ** 2) / (gamma - 1)) * ((2 / (gamma + 1)) ** ((gamma + 1) / (gamma - 1))) * (1 - (p2_p3 * p3_p1) ** ((gamma - 1) / gamma))) + (p2_p3 * p3_p1 - p3_p1) \
/ ((((gamma + 1) / 2) ** (1 / (gamma - 1))) * ((p2_p3 * p3_p1) ** (1 / gamma)) * np.sqrt(((gamma + 1) / (gamma - 1)) * (1 - ((p2_p3 * p3_p1) ** ((gamma - 1) / gamma)))))
# pe_pa = np.linspace(0.1, 3, 1000000)
#
# plt.plot(pe_pa, [cf(i, P_exit_pa / mean_Pc_pa) for i in pe_pa], 1, cf(1, P_exit_pa / mean_Pc_pa), '*')
# plt.legend(['Thrust coefficient', 'Maximum Thrust Coefficient'])
# plt.xlabel('Pe/Pa')
# plt.ylabel(r'$C_{f}$')
# plt.title('Thrust coefficient VS Exit to ambient pressure ratio')
# plt.show()
#
# max_cf = cf(1,P_exit_pa / mean_Pc_pa)
    # Optimum expansion: exit pressure equals ambient pressure (pe/pa = 1)
    max_cf = cf(1, P_exit_pa / mean_Pc_pa)
    return max_cf
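# Hypothetical usage sketch (illustrative values, not from the original project):
# evaluates the thrust-coefficient expression at pe/pa = 1, i.e. ideal expansion.
if __name__ == "__main__":
    cf_opt = optimum_cf(gamma=1.4, P_exit_pa=101325.0, mean_Pc_pa=2.0e6)
    print("Optimum thrust coefficient:", round(cf_opt, 4))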
|
rescolarandres/Coding_venture_projects
|
Rocket Nozzle Optimization in Python/optimum_cf.py
|
optimum_cf.py
|
py
| 972 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70101586109
|
from SlackClient import SlackClient
class SlackFiles(SlackClient):
def __init__(self):
self.file = None
self.id = None
self.count = None
self.cursor = None
self.limit = None
self.page = None
self.channel = None
self.show_files_hidden_by_limit = None
self.ts_from = None
self.ts_to = None
self.types = None
self.user = None
self.content = None
self.filename = None
self.filetype = None
self.initial_comment = None
self.thread_ts = None
self.title = None
self.channels = []
self.external_id = None
self.external_url = None
self.filetype = None
self.indexable_file_contents = None
self.preview_image = None
self.highlight = None
self.sort = None
self.sort_dir = None
self.team_id = None
def generate_queries(self):
body = {}
if self.highlight != None:
body['highlight'] = self.highlight
if self.sort != None:
body['sort'] = self.sort
if self.sort_dir != None:
body['sort_dir'] = self.sort_dir
if self.team_id != None:
body['team_id'] = self.team_id
if self.external_id != None:
body['external_id'] = self.external_id
if self.external_url != None:
body['external_url'] = self.external_url
if self.title != None:
body['title'] = self.title
if self.indexable_file_contents != None:
body['indexable_file_contents'] = self.indexable_file_contents
if self.preview_image != None:
body['preview_image'] = self.preview_image
if self.filename != None:
body['filename'] = self.filename
if self.filetype != None:
body['filetype'] = self.filetype
if self.initial_comment != None:
body['initial_comment'] = self.initial_comment
        if self.thread_ts != None:
body['thread_ts'] = self.thread_ts
if self.title != None:
body['title'] = self.title
if len(self.channels) > 0:
body['channels'] = ','.join(self.channels)
if self.content != None:
body['content'] = self.content
if self.channel != None:
body['channel'] = self.channel
if self.show_files_hidden_by_limit != None:
body['show_files_hidden_by_limit'] = self.show_files_hidden_by_limit
if self.ts_from != None:
body['ts_from'] = self.ts_from
if self.ts_to != None:
body['ts_to'] = self.ts_to
if self.types != None:
body['types'] = self.types
if self.user != None:
body['user'] = self.user
if self.file != None:
body['file'] = self.file
if self.id != None:
body['id'] = self.id
if self.count != None:
body['count'] = self.count
if self.cursor != None:
body['cursor'] = self.cursor
if self.limit != None:
body['limit'] = self.limit
if self.page != None:
body['page'] = self.page
return body
def clear_queries(self):
self.file = None
self.id = None
self.count = None
self.cursor = None
self.limit = None
self.page = None
self.channel = None
self.show_files_hidden_by_limit = None
self.ts_from = None
self.ts_to = None
self.types = None
self.user = None
self.channels = []
self.content = None
self.file = None
self.filename = None
self.filetype = None
self.initial_comment = None
self.thread_ts = None
self.title = None
self.external_id = None
self.external_url = None
self.indexable_file_contents = None
self.preview_image = None
self.highlight = None
self.sort = None
self.sort_dir = None
self.team_id = None
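# Hypothetical usage sketch (not part of the original module): only attributes
# that have been set end up in the request body built by generate_queries().
if __name__ == "__main__":
    files_api = SlackFiles()
    files_api.channel = "C0123456789"    # illustrative channel id
    files_api.count = 20
    print(files_api.generate_queries())  # {'channel': 'C0123456789', 'count': 20}
    files_api.clear_queries()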
|
cthacker-udel/Python-Slack-API
|
SlackFiles.py
|
SlackFiles.py
|
py
| 4,086 |
python
|
en
|
code
| 1 |
github-code
|
6
|
25068498925
|
from uuid import uuid4
from typing import Tuple, List
from asendia_us_lib.shipping_request import ShippingRequest, Item
from asendia_us_lib.shipping_response import PackageLabel
from purplship.core.units import CustomsInfo, Packages, Options, Weight
from purplship.core.utils import Serializable, DP
from purplship.core.models import (
Documents,
ShipmentRequest,
ShipmentDetails,
Message,
Customs,
)
from purplship.providers.asendia_us.units import (
Service,
Option,
LabelType,
ProcessingLocation,
)
from purplship.providers.asendia_us.error import parse_error_response
from purplship.providers.asendia_us.utils import Settings
def parse_shipment_response(
responses: Tuple[str, dict], settings: Settings
) -> Tuple[ShipmentDetails, List[Message]]:
_, response = responses
errors = parse_error_response(response, settings)
details = (
_extract_details(responses, settings)
if response.get("packageLabel") is not None
else None
)
return details, errors
def _extract_details(response: Tuple[str, dict], settings: Settings) -> ShipmentDetails:
label, details = response
shipment = DP.to_object(PackageLabel, details)
return ShipmentDetails(
carrier_name=settings.carrier_name,
carrier_id=settings.carrier_id,
tracking_number=shipment.trackingNumber,
shipment_identifier=shipment.packageId,
docs=Documents(label=label),
)
def shipment_request(
payload: ShipmentRequest, settings: Settings
) -> Serializable[ShippingRequest]:
package = Packages(payload.parcels).single
options = Options(payload.options, Option)
product_code = Service.map(payload.service).value_or_key
unique_id = getattr(payload, "id", uuid4().hex)
customs = CustomsInfo(payload.customs or Customs(commodities=[]))
request = ShippingRequest(
accountNumber=settings.account_number,
subAccountNumber=options.asendia_sub_account_number,
processingLocation=ProcessingLocation.map(
options.asendia_processing_location or "SFO"
).name,
includeRate=True,
labelType=LabelType.map(payload.label_type or "PDF").name_or_key,
orderNumber=unique_id,
dispatchNumber=unique_id,
packageID=unique_id,
recipientTaxID=payload.recipient.state_tax_id,
returnFirstName=payload.shipper.person_name,
returnLastName=payload.shipper.person_name,
returnCompanyName=payload.shipper.company_name,
returnAddressLine1=payload.shipper.address_line1,
returnAddressLine2=payload.shipper.address_line2,
returnAddressLine3=None,
returnProvince=payload.shipper.state_code,
returnPostalCode=payload.shipper.postal_code,
returnCountryCode=payload.shipper.country_code,
returnPhone=payload.shipper.phone_number,
returnEmail=payload.shipper.email,
recipientFirstName=payload.recipient.person_name,
recipientLastName=payload.recipient.person_name,
recipientBusinessName=payload.recipient.company_name,
recipientAddressLine1=payload.recipient.address_line1,
recipientAddressLine2=payload.recipient.address_line2,
recipientAddressLine3=None,
recipientCity=payload.recipient.city,
recipientProvince=payload.recipient.state_code,
recipientPostalCode=payload.recipient.postal_code,
recipientPhone=payload.recipient.phone_number,
recipientEmail=payload.recipient.email,
totalPackageWeight=package.weight.value,
weightUnit=package.weight_unit.value.lower(),
dimLength=package.length.value,
dimWidth=package.width.value,
dimHeight=package.height.value,
dimUnit=package.dimension_unit.value,
totalPackageValue=options.declared_value,
currencyType=options.currency,
productCode=product_code,
customerReferenceNumber1=payload.reference,
customerReferenceNumber2=None,
customerReferenceNumber3=None,
contentType=("D" if package.parcel.is_document else "M"),
packageContentDescription=package.parcel.description,
vatNumber=None,
sellerName=payload.shipper.person_name,
sellerAddressLine1=payload.shipper.address_line1,
sellerAddressLine2=payload.shipper.address_line2,
sellerAddressLine3=None,
sellerProvince=payload.shipper.state_code,
sellerPostalCode=payload.shipper.postal_code,
sellerPhone=payload.shipper.phone_number,
sellerEmail=payload.shipper.email,
items=[
Item(
sku=item.sku,
itemDescription=item.description,
unitPrice=item.value_amount,
quantity=item.quantity,
unitWeight=Weight(item.weight, package.weight_unit).value,
countryOfOrigin=item.origin_country,
htsNumber=None,
)
for item in customs.commodities
],
)
return Serializable(request)
|
danh91/purplship
|
sdk/extensions/asendia_us/purplship/providers/asendia_us/shipment/create.py
|
create.py
|
py
| 5,068 |
python
|
en
|
code
| null |
github-code
|
6
|
10414559833
|
import collections
from typing import Any, List
import torch
from executorch.exir.dialects.edge.arg.model import BaseArg
from executorch.exir.dialects.edge.arg.type import ArgType
def extract_return_dtype(
returns: Any, sample_returns: List[BaseArg]
) -> List[torch.dtype]:
"""Extract the dtype from a return value."""
if not isinstance(returns, collections.abc.Sequence):
returns = [returns]
result = []
for ret, sample in zip(returns, sample_returns):
if sample.type == ArgType.TensorList or sample.type == ArgType.TensorOptList:
# Assuming all tensors in tensor list has the same dtype, and we only add 1 dtype to result.
assert (
ret is not None
), f"Expecting non-None return value for {sample} but got None"
result.append(ret.dtype)
break
elif sample.type == ArgType.Tensor or sample.type == ArgType.TensorOpt:
assert (
ret is not None
), f"Expecting non-None return value for {sample} but got None"
result.append(ret.dtype)
return result
|
pytorch/executorch
|
exir/dialects/edge/dtype/utils.py
|
utils.py
|
py
| 1,125 |
python
|
en
|
code
| 479 |
github-code
|
6
|
19325547874
|
from pandas import read_csv
from sklearn.metrics import mean_absolute_percentage_error
from math import sqrt
from matplotlib import pyplot as plt
from pandas import concat
import numpy as np
import scipy.stats as stats
import pandas as pd
def persistence_one_step_ln(train_log, teste_log,
show_results=False, plot_result=False):
# Prepare data
teste_log = teste_log.values
train_log = [x for x in train_log]
# Walk-forward validation
predictions = list()
for i in range(len(teste_log)):
# Predict
yhat = train_log[-1]
# Store forecast in list of predictions
predictions.append(yhat)
# Add actual observation to train for the next loop
obs = teste_log[i]
train_log.append(obs)
if show_results:
print('>Predicted=%.3f, Expected=%.3f' % (yhat, obs))
# Report performance
mape = mean_absolute_percentage_error(np.exp(teste_log), np.exp(predictions))
print('MAPE: %.3f' % mape)
# Plot predicted vs expected values
if plot_result:
plt.plot(np.exp(teste_log))
plt.plot(np.exp(predictions), color='red')
plt.show()
# Load data
train_log = pd.read_csv('../timeserie_log_train.csv',
header=0, index_col=0, parse_dates=True, squeeze=True)
teste_log = pd.read_csv('../timeserie_log_test.csv',
header=0, index_col=0, parse_dates=True, squeeze=True)
persistence_one_step_ln(train_log, teste_log, plot_result=True)
|
gsilva49/timeseries
|
H/python_code/persistence_one_step_ln.py
|
persistence_one_step_ln.py
|
py
| 1,509 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73859053626
|
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.graph_objects as go
import pandas as pd
import plotly.express as px
# Read data from a csv
z_data = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/api_docs/mt_bruno_elevation.csv')
fig = go.Figure(data=[go.Surface(z=z_data.values)])
fig.update_traces(contours_z=dict(show=True, usecolormap=True,
highlightcolor="limegreen", project_z=True))
fig.update_layout(title='Mt Bruno Elevation', autosize=False,
scene_camera_eye=dict(x=1.87, y=0.88, z=-0.64),
width=500, height=500,
margin=dict(l=65, r=50, b=65, t=90))
cont = go.Figure(go.Surface(
contours = {
"x": {"show": True, "start": 1.5, "end": 2, "size": 0.04, "color":"white"},
"z": {"show": True, "start": 0.5, "end": 0.8, "size": 0.05}
},
x = [1,2,3,4,5],
y = [1,2,3,4,5],
z = [
[0, 1, 0, 1, 0],
[1, 0, 1, 0, 1],
[0, 1, 0, 1, 0],
[1, 0, 1, 0, 1],
[0, 1, 0, 1, 0]
]))
cont.update_layout(
scene = {
"xaxis": {"nticks": 20},
"zaxis": {"nticks": 4},
'camera_eye': {"x": 0, "y": -1, "z": 0.5},
"aspectratio": {"x": 1, "y": 1, "z": 0.2}
})
df = px.data.iris()
sepal = px.scatter_3d(df, x='sepal_length', y='sepal_width', z='petal_width',color='species')
app = dash.Dash(__name__)
app.layout = html.Div(
children = [
html.Div([
dcc.Graph(id="3d-surface-plot", figure=fig),
dcc.Graph(id="contour-surface-plot", figure=cont)
],style={'columnCount': 2}),
html.Div([
dcc.Graph(id="3d-scatter-plot", figure=sepal)
], style={'columnCount': 1})
])
|
juakonap/dash-3d
|
app/app.py
|
app.py
|
py
| 1,941 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6604263556
|
#!/usr/bin/env python3
'''conda create -n pytorch-env python=3.9 shap pandas optuna=2.10.1 xgboost scikit-learn sklearn-pandas rdkit pytorch torchvision torchaudio pytorch-cuda=11.6 cairosvg dgllife dgl=0.9.1 dgl-cuda11.6 ipython -c pytorch -c nvidia -c dglteam'''
import pandas as pd
import numpy as np
import datetime,time,joblib
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.preprocessing import OrdinalEncoder, OneHotEncoder
from sklearn_pandas import DataFrameMapper
############### Set required parameters and load data here ###############
'''basic parameters'''
filename_pkl = 'HistGradientBoosting_Optuna_best' # load target model from the *.pkl file
split_dataset = False # whether to split the dataset into training and test sets
model_name = "HistGradientBoostingClassifier"
# Supported models are as follows:
# (1) AdaBoostRegressor / AdaBoostClassifier
# (2) XGBRegressor / XGBClassifier
# (3) GradientBoostingRegressor / GradientBoostingClassifier
# (4) HistGradientBoostingRegressor / HistGradientBoostingClassifier
# (5) RandomForestRegressor / RandomForestClassifier
# (6) SVR / SVC
# (7) MLPRegressor / MLPClassifier
# (8) ElasticNet / LogisticRegression
'''load the dataset'''
selected_features = ['MolWt','NumRotatableBonds','AromaticProportion']
df = pd.read_csv('../../MolLogP_dataset.csv')
data_X = df[selected_features]
data_y = df['MolLogP<2']
# print(data_y)
# exit()
############### Some user-defined functions ###############
def total_running_time(end_time, start_time):
tot_seconds = round(end_time - start_time,2)
days = tot_seconds // 86400
hours = (tot_seconds % 86400) // 3600
minutes = (tot_seconds % 86400 % 3600)// 60
seconds = tot_seconds % 60
print(">> Elapsed time: {0:2d} day(s) {1:2d} hour(s) {2:2d} minute(s) {3:5.2f} second(s) <<".format(int(days),int(hours),int(minutes),seconds))
def load_model(model_name, filename_pkl):
ML_regression_list = ["XGBRegressor", "AdaBoostRegressor", "GradientBoostingRegressor",
"HistGradientBoostingRegressor", "MLPRegressor",
"RandomForestRegressor", "SVR", "ElasticNet"]
ML_classification_list = ["XGBClassifier", "AdaBoostClassifier", "GradientBoostingClassifier",
"HistGradientBoostingClassifier", "MLPClassifier",
"RandomForestClassifier", "SVC", "LogisticRegression"
]
if model_name in ML_regression_list:
ML_type = "Regression"
elif model_name in ML_classification_list:
ML_type = "Classification"
if model_name == "XGBRegressor":
from xgboost import XGBRegressor
elif model_name == "XGBClassifier":
from xgboost import XGBClassifier
elif model_name == "AdaBoostRegressor":
from sklearn.ensemble import AdaBoostRegressor
elif model_name == "AdaBoostClassifier":
from sklearn.ensemble import AdaBoostClassifier
elif model_name == "GradientBoostingRegressor":
from sklearn.ensemble import GradientBoostingRegressor
elif model_name == "GradientBoostingClassifier":
from sklearn.ensemble import GradientBoostingClassifier
elif model_name == "HistGradientBoostingRegressor":
from sklearn.ensemble import HistGradientBoostingRegressor
elif model_name == "HistGradientBoostingClassifier":
from sklearn.ensemble import HistGradientBoostingClassifier
elif model_name == "MLPRegressor":
from sklearn.neural_network import MLPRegressor
elif model_name == "MLPClassifier":
from sklearn.neural_network import MLPClassifier
elif model_name == "RandomForestRegressor":
from sklearn.ensemble import RandomForestRegressor
elif model_name == "RandomForestClassifier":
from sklearn.ensemble import RandomForestClassifier
elif model_name == "SVR":
from sklearn.svm import SVR
elif model_name == "SVC":
from sklearn.svm import SVC
elif model_name == "ElasticNet":
from sklearn.linear_model import ElasticNet
elif model_name == "LogisticRegression":
from sklearn.linear_model import LogisticRegression
else:
print('** Please rechoose a model **\n-> Supported models are as follows:')
print(' (1) AdaBoostRegressor / AdaBoostClassifier\n (2) XGBRegressor / XGBClassifier')
print(' (3) GradientBoostingRegressor / GradientBoostingClassifier\n (4) HistGradientBoostingRegressor / HistGradientBoostingClassifier')
print(' (5) RandomForestRegressor / RandomForestClassifier\n (6) SVR / SVC')
print(' (7) MLPRegressor / MLPClassifier\n (8) ElasticNet / LogisticRegression')
exit(1)
model = joblib.load(filename_pkl + ".pkl")
print('---------- Results based on the current loaded model ----------')
print('> Current parameters:\n {}\n'.format(model.get_params()))
return model, ML_type
def show_metrics(model, ML_type, y_test_pred, y_test_pred_proba, y_test, X_test):
print(" >>>> Metrics based on the best model <<<<\n")
if ML_type == "Classification":
from sklearn.metrics import accuracy_score, classification_report, roc_auc_score, average_precision_score
accuracy_test = accuracy_score(y_test, y_test_pred)
print('> Accuracy on the test set: {:.2%}'.format(accuracy_test))
print('> Score on the test set: {:.2%}'.format(model.score(X_test, y_test)))
print('> Classification report on the test set:')
print(classification_report(y_test, y_test_pred))
roc_auc_test, average_precision_test = [], []
for i in range(len(set(y_test))):
roc_auc_test.append(roc_auc_score(y_test, y_test_pred_proba[:,i], multi_class='ovr'))
average_precision_test.append(average_precision_score(y_test, y_test_pred_proba[:,i]))
pd.set_option('display.float_format','{:12.6f}'.format)
pd.set_option('display.colheader_justify', 'center')
test_reports = pd.DataFrame(np.vstack((roc_auc_test, average_precision_test)).T, columns=['ROC-AUC','AP(PR-AUC)'])
print('> Area under the receiver operating characteristic curve (ROC-AUC) and\n average precision (AP) which summarizes a precision-recall curve as the weighted mean\n of precisions achieved at each threshold on the test set:\n {}\n'.format(test_reports))
elif ML_type == "Regression":
from sklearn.metrics import mean_squared_error, mean_absolute_error
mse_test = mean_squared_error(y_test, y_test_pred)
mae_test = mean_absolute_error(y_test, y_test_pred)
print('> Mean squared error (MSE) on the test set: {:.6f}'.format(mse_test))
print('> Mean absolute error (MAE) on the test set: {:.6f}'.format(mae_test))
print('> R-squared (R^2) value on the test set: {:.6f}\n'.format(model.score(X_test, y_test)))
############### The ML training script starts from here ###############
start_time = time.time()
start_date = datetime.datetime.now()
print('*** Scikit-learn evaluation ({0}) started at {1} ***\n'.format(model_name, start_date.strftime("%Y-%m-%d %H:%M:%S")))
'''split training/test sets'''
if split_dataset:
    print('The dataset is split into training and test sets, so the target model will be evaluated on the test set...\n')
X_train, X_test, y_train, y_test = train_test_split(data_X, data_y, test_size=0.2, random_state=0)
else:
print('The whole dataset will be used to evaluate the target model...\n')
X_test, y_test = data_X, data_y
target_model, ML_type = load_model(model_name, filename_pkl)
y_test_pred = target_model.predict(X_test)
y_test_pred_proba = target_model.predict_proba(X_test) if ML_type == "Classification" else None
show_metrics(target_model, ML_type, y_test_pred, y_test_pred_proba, y_test, X_test)
end_time = time.time()
end_date = datetime.datetime.now()
print('*** Scikit-learn evaluation ({0}) terminated at {1} ***\n'.format(model_name, end_date.strftime("%Y-%m-%d %H:%M:%S")))
total_running_time(end_time, start_time)
|
JianyongYuan/sklearn-scripts
|
Scikit-learn/Predictions/sklearn_evaluation.py
|
sklearn_evaluation.py
|
py
| 8,232 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70403733947
|
import mysql.connector
import const
file='192.168.2.txt'
src='192.168.5.89'
query="INSERT INTO packet(srcip,dstip,dstport,service) values(%s,%s,%s,%s)"
conn = mysql.connector.connect(
host='localhost',
port='3306',
user='root',
password=const.password,
database='ics'
)
cur = conn.cursor()
f = open(file)
lines = f.readlines()
for line in lines:
#print(line)
if 'Nmap scan report for' in line:
ss=line.split(" ")
dst=ss[len(ss)-1].strip()
print(dst)
elif 'open' in line:
ss=line.split("/")
port = ss[0]
sss=ss[len(ss)-1]
ssss=sss.split(" ")
service=ssss[len(ssss)-1].strip()
print(port+","+service)
cur.execute(query, (src,dst,port,service))
conn.commit()
cur.close()
conn.close()
f.close()
|
gamzattirev/icsrisk
|
tools/python/nmap.py
|
nmap.py
|
py
| 821 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31179240116
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
demand = [990,1980,3961,2971,1980]
d=0 # d% shortage allowance
Y_b = [1.3086,1.3671,1.4183,1.4538,1.5122] # Fabric yield (consumption rate) per garment of size β
U = 0.85
l_max= 20
e= .07 # Fabric end allowance
f= 2.90 # Fabric cost
if len(demand)!=len(Y_b):
    raise ValueError('number of sizes and number of fabric consumption values do not match')
# In[2]:
#Input variables (Marker)
M_d = 10 # Average marker design time (minute)
z = 0.65 # Printing speed per minute
v = 0.30 #Standard cost per minute in marker making floor (labor, machine & electricity)
# In[3]:
#Input variables (Cutting Time)
T_G = 30 # General Preparation Time
x= .20 # Average spreading speed in minutes after taking account for the idle strokes.
T_M= 2 # Time for Placement of the marker
t_c= 4.5 # SMV of cutting time per garment pattern
T_S= 5 # preparation time for sticker placement
t_b = 2.837 # Standard minute value (SMV) of the time it takes to bundle.
b = 15 # pieces of garments in one bundle
w = 0.20 # standard cost per minute in cutting floor (labor, machine & electricity)
P_min, P_max= 10,350
# In[4]:
import numpy as np
import math
import pandas as pd
from copy import deepcopy
rng = np.random.default_rng()
import random
import time
import matplotlib.pylab as plt
import plotly.express as px
# In[5]:
def Update_Res(R,GG,PP):
for s in range(len(GG)): #Updating Residual within the while loop
R=R-np.dot(GG[s],PP[s])
return R
# In[6]:
def Length(g_i_j):
l_i = e+ np.dot(g_i_j,Y_b)/U
return l_i
# In[7]:
def Shortage_allowance(Q,d=0.01):
temp=np.dot((1-d),Q)
return [round(i) for i in temp]
Q_b= Shortage_allowance(demand,d)
# Q_b
# In[8]:
from Heuristics import H1,H3,H5
# In[9]:
# Sol_1 = H5(Q=Q_b,Y=Y_b,Pm=[P_min,P_max],U=U,e=e,l_max=l_max)
# Sol_1
# ## Objective Function
# In[10]:
def ObjectiveFunction (chromosome):
temp_Chromosome=deepcopy(chromosome)
G_a_b = temp_Chromosome['G']
P_a = temp_Chromosome['P']
Alpha = len(P_a) # number of Sections
''' Fabric Cost '''
# Total fabric length = L # Total Fabric Cost = C_F
l_a=[Length(G_a_b[alpha]) for alpha in range(Alpha) ] #Length function
L= np.dot(l_a,P_a) #Multiply then Sum
C_F = L*f
#print('Total Fabric Cost = C_F: ',C_F)
''' Marker Cost '''
#Marker Making Cost = C_M
M_p_a = [(la-e)/z for la in l_a] # devide each element of a 'l_a' by 'z'
#M_p_a = Marker Printing time (minute) of section alpha
'''
    r = {1 ; if the marker is being used for the first time
        {0 ; if the marker has been used before
'''
r=[]
for i in range(Alpha):
temp=0
j=i-1
while j>=0:
if G_a_b[i]== G_a_b[j]:
temp+=1
break
j-=1
if temp==0:
r.append(1)
else:
r.append(0)
C_M = 0
for 伪 in range(Alpha):
if l_a[伪]>e: # this makes sure that section has at least one garments
C_M += (M_d*r[伪] + M_p_a[伪])*v
# 'if la>e' makes sure that the section contain at least one garments,
# not all G_a_b values are zero
''' Cutting Cost '''
# Cutting Time of one section = T_T # Total Cutting Cost = C_C
#T_T =T_G + T_F +T_M+ T_c+T_S +T_B
T_C=[] #Cutting time for every section
for alpha in range(Alpha):
T_C.append(sum(G_a_b[alpha])*t_c)
T_F=[] # Fab spreading time for each section
for 伪 in range(Alpha):
T_F.append(l_a[伪]*P_a[伪]/x)
    T_B=[] #Bundling time for each section
for 伪 in range(Alpha):
T_B.append(math.ceil(P_a[伪]/b)*sum(G_a_b[伪])*t_b)
T_T_T = 0 #Total cutting time
for 伪 in range(Alpha):
if l_a[伪]>e: # this makes sure that section has at least one garments
T_T_T+=T_G+T_F[伪]+T_M+T_C[伪]+T_S+ T_B[伪]
C_C = T_T_T*w #Total cutting cost
''' Total Cost '''
# Total Cost = C_T = C_F + C_M + C_C
return C_F+C_M+C_C
# In[11]:
# ObjectiveFunction(Sol_1)
# ## Fitness Score
# In[12]:
def Fitness(chromosome):
t_chromosome=deepcopy(chromosome)
G_a_b= t_chromosome['G']
P_a = t_chromosome['P']
Beta= len(demand)
score= ObjectiveFunction(t_chromosome)
#print('score:',score)
fitness_score=score
''' Penalty for shortage production '''
R= Update_Res(R=demand,GG=G_a_b,PP=P_a)
for beta in range(Beta):
if R[beta]>0:
s_penalty= R[beta]/sum(demand)
fitness_score +=score*s_penalty
''' Penalty for excess production '''
r=np.dot(1.02,demand) # additional 2% allowance
R= Update_Res(R=r,GG=G_a_b,PP=P_a)
#print(R)
for beta in range(Beta):
if R[beta]<0:
            e_penalty= (-R[beta]/sum(demand))*2 # 2 times the s_penalty
fitness_score +=score*e_penalty
''' double check if the solution is valid '''
res= Update_Res(R=Q_b,GG=G_a_b,PP=P_a)
    if max(res)>0:
        '''solution is invalid'''
        fitness_score +=10000 #this will eventually make the solution extinct.
return fitness_score
# Fitness(Sol_1)
# ## Function Initial Population Generation
# In[13]:
def GeneratePopulation(pop_size):
P_of_S=[]
for p in range(pop_size):
rng = np.random.default_rng()
h=rng.integers(0,3)
#print('h:',h)
if h==0:
sol=H1(Q=Q_b,Y=Y_b,Pm=[P_min,P_max],U=U,e=e,l_max=l_max)
elif h==1:
sol=H3(Q=Q_b,Y=Y_b,Pm=[P_min,P_max],U=U,e=e,l_max=l_max)
else:
sol=H5(Q=Q_b,Y=Y_b,Pm=[P_min,P_max],U=U,e=e,l_max=l_max)
P_of_S.append(sol)
return P_of_S
# Pool_of_Sol= GeneratePopulation(100)
# print(Pool_of_Sol)
# In[14]:
def S_with_F(p_o_s):
p_o_s_with_f= deepcopy(p_o_s)
for i in range(len(p_o_s)):
if 'F' not in p_o_s[i]:
p_o_s_with_f[i]['F']=Fitness(p_o_s[i])
return p_o_s_with_f
# ## PSO
# ### Cleaning section with zeros
# In[15]:
def CleanZeros (Sol):
Solution=deepcopy(Sol)
j=0
while j < len(Solution['G']):
if max(Solution['G'][j])==0:
Solution['G'].pop(j)
Solution['P'].pop(j)
continue
j+=1
    #This is to make sure P and G stay the same length
if len(Solution['G'])!=len(Solution['P']):
raise ValueError('P and G lengths are not same')
return Solution
# In[16]:
# CleanZeros(Sol_1)
# ## Velocity Update (Jarboui et al. 2008)
# Let's assume the 1st solution is X, the 2nd is Pbest, and the 3rd is Gbest
# #### Now we have to calculate Y
# ##### Initial Velocity generator
# In[17]:
def initial_velocity(Range, Sol): #Range is a list
a,b= Range
m=len(Sol['G'])
    #generate a random uniform array in [a,b] with one entry per section of the solution
v=(b-a) * np.random.random_sample(m) +a #http://bit.ly/3To2OWe
v=v.tolist()
return {'V':v}
# In[18]:
def Get_Y(X,GBest,PBest): #(Jarboui et al., 2008, p. 302)
y=[]
lens=[len(i) for i in [X['G'],GBest['G'],PBest['G']]]
min_len=min(lens)
for i in range(min_len):
if X['G'][i]==GBest['G'][i] and X['G'][i]==PBest['G'][i]:
y.append(random.choice([-1,1]))
elif X['G'][i]==GBest['G'][i]:
y.append(1)
elif X['G'][i]==PBest['G'][i]:
y.append(-1)
else:
y.append(0)
return {'Y':y}
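# Added illustration: Y encodes, per section, whether the particle currently copies Gbest (+1),
# Pbest (-1), both (random ±1) or neither (0), following Jarboui et al. (2008). The tiny
# hand-made solutions below exist only to show the encoding; they are not feasible cut plans.
_X_demo     = {'G': [[1, 0], [0, 1]], 'P': [10, 20]}
_Gbest_demo = {'G': [[1, 0], [1, 1]], 'P': [10, 25]}
_Pbest_demo = {'G': [[2, 0], [0, 1]], 'P': [12, 20]}
_Y_demo = Get_Y(_X_demo, _Gbest_demo, _Pbest_demo)   # -> {'Y': [1, -1]}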
# ### Now we have to calculate Velocity
# In[19]:
def New_V(YY,VV,c1=1,c2=1,w=.75): #Parameter setting: (Jarboui et al., 2008, p. 306)
Y=deepcopy(YY)
V=deepcopy(VV)
lens=[len(i) for i in [Y['Y'],V['V']]]
min_len=min(lens)
for i in range(min_len):
y=Y['Y'][i]
v=V['V'][i]
V['V'][i]= w*v+ np.random.rand()*c1*(-1-y)+np.random.rand()*c2*(1-y)
return V
# ### Now we need to calculate λ
# In[20]:
def Get_λ(YY,VV):
    Y=deepcopy(YY)
    V=deepcopy(VV)
    lens=[len(i) for i in [Y['Y'],V['V']]]
    min_len=min(lens)
    λ=[]
    for i in range(min_len):
        λ.append(Y['Y'][i]+V['V'][i])
    return {'λ':λ}
# λ=Get_λ(Y,V)
# λ
# ### Update X with Eq-10 (Jarboui et al., 2008, p. 303)
# In[21]:
def Perturbation(xg,xp,R,p_rate):
if np.random.rand()<p_rate:
p1,p2=sorted([xp,min(P_max,max(P_min,max(R)))])
xp= rng.integers(p1,p2+1)
if xp<P_min:
xp=P_min
    for j in range(len(xg)): #small perturbation (like mutation)
if np.random.rand()<p_rate:
xg[j]=0
temp= min(math.ceil(R[j]/xp),math.floor((l_max-Length(xg))/(Y_b[j]/U)))
temp= max(0,temp)
#xg[j]=max(0,temp)
xg[j]=rng.integers(0,temp+1)
return xg,xp
def Update_X(XX,GBest,PBest,λλ, φ=0.5, p_rate=.05):
    X=deepcopy(XX)
    λ=deepcopy(λλ)
    lens=[len(i) for i in [X['G'],GBest['G'],PBest['G'],λ['λ']]]
    min_len=min(lens)
    XG=[]
    XP=[]
    R= Update_Res(R=Q_b,GG=XG,PP=XP)
    for i in range(min_len):
        if λ['λ'][i] > φ:
            #print('Gbest')
            #xg,xp=Perturbation(xg=GBest['G'][i],xp=GBest['P'][i],R=R,p_rate=p_rate)
            xg=GBest['G'][i]
            xp=GBest['P'][i]
        elif λ['λ'][i] < -φ:
#print('Pbest')
#xg,xp=Perturbation(xg=GBest['G'][i],xp=GBest['P'][i],R=R,p_rate=p_rate)
xg=PBest['G'][i]
xp=PBest['P'][i]
else:
#print('X')
xg,xp= Perturbation(xg=X['G'][i],xp=X['P'][i],R=R,p_rate=p_rate) #Perturbation function
XG.append(xg)
XP.append(xp)
R= Update_Res(R=Q_b,GG=XG,PP=XP)
if max(R)<=0:
#print('break')
return {'G':XG,'P':XP}
for i in range(min_len, len(X['G'])):
xg,xp= Perturbation(xg=X['G'][i],xp=X['P'][i],R=R,p_rate=p_rate) #Perturbation function
XG.append(xg)
XP.append(xp)
R=Update_Res(R=Q_b,GG=XG,PP=XP)
if max(R)<=0:
#print('break')
return {'G':XG,'P':XP}
if max(R)>0:
#print(R)
#Use H1 or H3 algorithm to pack all sizes
randint =rng.integers(2)
if randint==0:
#print('H1')
h=H1(Q=R,Y=Y_b,Pm=[P_min,P_max],U=U,e=e,l_max=l_max)
else:
#print('H3')
h=H3(Q=R,Y=Y_b,Pm=[P_min,P_max],U=U,e=e,l_max=l_max)
g,p = h.values()
#print(g,p)
XG=XG+g
XP=XP+p
return {'G':XG,'P':XP}
# newX= Update_X(X,Gbest,Pbest,newY)
# newX
# In[22]:
y=[1,2,3,4]
c=[1,2]
print(y[:len(c)])
# In[23]:
def Update_dimension(XX,VV, in_vel_range=[-0.5,0.5]):
mm= len(XX['G'])
m= len(VV['V'])
if mm <= m:
        return {'V':VV['V'][:mm]}   # truncate the velocity to the (smaller) new number of sections
else:
a,b= in_vel_range
v=(b-a) * np.random.random_sample(mm-m) +a #http://bit.ly/3To2OWe
v=v.tolist()
V=VV['V']+v
return {'V':V}
# In[24]:
def Get_Gbest(p_o_s):
gbest=p_o_s[0]
for i in range(len(p_o_s)):
if Fitness(p_o_s[i])<Fitness(gbest):
gbest= p_o_s[i]
return gbest
# Gbest=Get_Gbest(Pool_of_Sol)
# Gbest
# In[25]:
# newX= Update_X(X,Gbest,Pbest,newY)
# newX
# In[26]:
# Fitness(newX)
# In[27]:
#Pool_of_Sol
# # Main PSO
# In[36]:
get_ipython().run_line_magic('matplotlib', 'inline')
def PSO(swarmsize,iteration,φ=.7,c1=2,c2=2,w=1, in_vel_range=[-0.6,0.6],p_rate=.2):
P_of_S= GeneratePopulation(swarmsize)
P_of_Pbest=P_of_S
P_of_Velocity= [initial_velocity(in_vel_range,P_of_S[i]) for i in range(len(P_of_S))]
Gbest= P_of_S[rng.integers(0,swarmsize)]
o= Gbest
bests=[Fitness(Gbest)]
for i in range(iteration):
for j in range(len(P_of_S)):
X=P_of_S[j]
Pbest=P_of_Pbest[j]
V= P_of_Velocity[j]
Y= Get_Y(X=X,GBest=Gbest,PBest=Pbest)
newV= New_V(YY=Y,VV=V,c1=c1,c2=c2,w=w)
            λ= Get_λ(YY=Y,VV=newV)
            newX= Update_X(XX=X,GBest=Gbest,PBest=Pbest,λλ=λ,φ=φ, p_rate=p_rate)
P_of_S[j]=newX
newV= Update_dimension(XX=newX,VV= newV, in_vel_range=in_vel_range)
P_of_Velocity[j]= newV
f=Fitness(newX)
if f < Fitness(Pbest):
P_of_Pbest[j]= newX
if f < Fitness(Gbest):
Gbest=newX
#print(Gbest, Fitness(Gbest))
bests.append(Fitness(Gbest))
xx=[i for i in range(len(bests))]
fig=px.line(x=xx,
y=bests,
                title=f'swarmsize={swarmsize},iteration= {iteration},φ={φ},c1= {c1},c2={c2},w={w}, Gbest={bests[-1]}',
labels=dict(x='iteration',y='fitness'))
fig.show()
#plt.plot(xx,bests)
    #plt.title(f'swarmsize={swarmsize},iteration= {iteration},φ={φ},c1= {c1},c2={c2},w={w}, Gbest={bests[-1]}')
return CleanZeros(Gbest)
PSO(swarmsize=50,iteration=250)
# In[33]:
# ObjectiveFunction(o)   # leftover notebook cell: 'o' is local to PSO and not defined at module level
# In[34]:
# ObjectiveFunction(g)   # leftover notebook cell: 'g' is local to Update_X and not defined at module level
# In[37]:
Dataset={
'demands':[[872,1743,3486,2614,1743],
[12,67,131,187,191,138,79,27],
[990,1980,3961,2971,1980],
[193,501,1018,1249,998,564,250,128]],
'consumption':[[0.6119,0.6315,0.6499,0.6721,0.6921],
[0.7198,0.7352,0.7614,0.7878,0.8146,0.8423,0.8579,0.8985],
[1.3086,1.3671,1.4183,1.4538,1.5122],
[1.3350,1.3998,1.4356,1.4826,1.5440,1.5878,1.6313,1.6908]],
'price':[1.51,2.43,1.95,2.9]
}
df = pd.DataFrame(columns=['φ','c1','c2','w','p_rate','solution','fitness'])
for i in range(len(Dataset['demands'])):
demand=Dataset['demands'][i]
Q_b= Shortage_allowance(demand,d)
Y_b=Dataset['consumption'][i]
f=Dataset['price'][i]
    PSO(swarmsize=100,iteration=120,c1=1,c2=2,φ=.4,w=.75,p_rate=.2)
# In[ ]:
# In[29]:
from itertools import product
# In[30]:
φ=[.4,.5,.6,.7]
c1=[1,1.5,2]
c2=[1,1.5,2]
ww=[.6,.75,1,1.25]
p_rate=[.05,.1,.2,.3]
iteration=product(φ,c1,c2,ww,p_rate)
#print(list(iteration))
# In[31]:
df = pd.DataFrame(columns=['φ','c1','c2','w','p_rate','solution','fitness'])
for φ,c1,c2,ww,p_rate in product(φ,c1,c2,ww,p_rate):
    best=PSO(swarmsize=100,iteration=120,c1=c1,c2=c2,φ=φ,w=ww,p_rate=p_rate)
    fitness=Fitness(best)
    df = df.append({'φ':φ,'c1':c1,'c2': c2,'w': ww,'p_rate': p_rate,'solution':best,'fitness':fitness}, ignore_index=True)
df.to_csv('PSO_GridSearch_from_Notebook8.csv')
# In[32]:
print(df[['φ','c1','c2','w','p_rate','fitness']])
# import plotly
# import plotly.graph_objs as go
#
#
# #Read cars data from csv
#
#
# #Set marker properties
# markersize = df['c2']
# markercolor = df['w']
# markershape = df['c1'].replace(1,"square").replace(1.5,"circle").replace(2,'diamond')
#
#
# #Make Plotly figure
# fig1 = go.Scatter3d( x=df['α'],
# y=df['p_rate'],
# z=df['fitness'],
# marker=dict(#size=markersize,
# #color=markercolor,
# #symbol=markershape,
# opacity=0.9,
# reversescale=True,
# colorscale='dense'),
# line=dict (width=0.02),
# mode='markers')
#
# #Make Plot.ly Layout
# mylayout = go.Layout(scene=dict(xaxis=dict( title='α'),
# yaxis=dict( title='p_rate'),
# zaxis=dict(title='fitness')),)
#
# #Plot and save html
# plotly.offline.plot({"data": [fig1],
# "layout": mylayout},
# auto_open=True,
# filename=("6DPlot.html"))
#
# In[33]:
import plotly.express as px #https://plotly.com/python/3d-scatter-plots/
fig = px.scatter_3d(df, x='φ', y='p_rate', z='fitness',
color='c1', symbol='c2', size='w')
fig.show()
# In[34]:
df['c1+c2']=df['c1'].map(str)+','+df['c2'].map(str)
df
# In[35]:
fig = px.scatter_3d(df, x='φ', y='p_rate', z='fitness',
color='c1+c2', symbol='w')
fig.show()
# In[36]:
fig = px.parallel_coordinates(df, color="fitness",
                              dimensions=['c1','φ', 'c2','p_rate','w','fitness','c1+c2'],
#color_continuous_scale=px.colors.diverging.Tealrose,
#color_continuous_midpoint=0
)
fig.show()
# In[37]:
df.sort_values('fitness').head(10)
# In[38]:
type(df['c1'][1])
# In[39]:
Dataset={
'demands':[[872,1743,3486,2614,1743],
[12,67,131,187,191,138,79,27],
[990,1980,3961,2971,1980],
[193,501,1018,1249,998,564,250,128]],
'consumption':[[0.6119,0.6315,0.6499,0.6721,0.6921],
[0.7198,0.7352,0.7614,0.7878,0.8146,0.8423,0.8579,0.8985],
[1.3086,1.3671,1.4183,1.4538,1.5122],
[1.3350,1.3998,1.4356,1.4826,1.5440,1.5878,1.6313,1.6908]],
'price':[1.51,2.43,1.95,2.9]
}
# In[40]:
for i in range(len(Dataset['demands'])):
demand=Dataset['demands'][i]
Q_b= Shortage_allowance(demand,d)
Y_b=Dataset['consumption'][i]
f=Dataset['price'][i]
    best=PSO(swarmsize=100,iteration=120,c1=1,c2=2,φ=.4,w=.75,p_rate=.2)
    fitness=Fitness(best)
    df = df.append({'φ':φ,'c1':c1,'c2': c2,'w': ww,'p_rate': p_rate,'solution':best,'fitness':fitness}, ignore_index=True)
df.to_csv('PSO_GridSearch_from_Notebook7.csv')
# In[ ]:
# In[ ]:
|
sharif8410/COP_Doc
|
PSO Clean notebook-Heuristic import.py
|
PSO Clean notebook-Heuristic import.py
|
py
| 18,651 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18537103489
|
# Prob_link: https://www.codingninjas.com/studio/problems/construct-binary-tree-from-inorder-and-postorder-traversal_8230837?challengeSlug=striver-sde-challenge&leftPanelTab=0
class TreeNode:
def __init__(self, data):
self.data = data
self.left = None
self.right = None
def construct(start, end, postorder, pIndex, d):
if start > end:
return None, pIndex
root = TreeNode(postorder[pIndex])
pIndex = pIndex - 1
index = d[root.data]
root.right, pIndex = construct(index + 1, end, postorder, pIndex, d)
root.left, pIndex = construct(start, index - 1, postorder, pIndex, d)
return root, pIndex
def getTreeFromPostorderAndInorder(postorder, inorder):
n = len(inorder)
d = {}
for i, e in enumerate(inorder):
d[e] = i
pIndex = n - 1
return construct(0, n - 1, postorder, pIndex, d)[0]
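# Added usage sketch (not part of the original solution file): rebuild a small tree from a
# hand-made pair of traversals and inspect the result.
if __name__ == "__main__":
    inorder = [9, 3, 15, 20, 7]
    postorder = [9, 15, 7, 20, 3]
    root = getTreeFromPostorderAndInorder(postorder, inorder)
    print(root.data, root.left.data, root.right.data)   # 3 9 20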
|
Red-Pillow/Strivers-SDE-Sheet-Challenge
|
P131_Construct_Binary_Tree_from_Inorder_and_Postorder_Traversal.py
|
P131_Construct_Binary_Tree_from_Inorder_and_Postorder_Traversal.py
|
py
| 906 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4243718619
|
from fastapi import APIRouter, HTTPException, status, Query
from datetime import timedelta
from datetime import datetime
from github import Github
import random
router = APIRouter()
github = Github()
@router.get("/repo/health")
def repo_health_check():
return {"status": "OK"}
# return each contributor with their number of commits in the last week
@router.get("/repo/contributors")
def return_individual_contributions(repo_name:str):
try:
repository = github.get_repo(repo_name)
except:
raise HTTPException(status_code=404, detail="Repository not found")
contributor_list = repository.get_stats_contributors()
contributors_info = {}
counter = 0
for contributor in reversed(contributor_list):
if counter == 6:
break
weekly_contribution = contributor.weeks
contributors_info[contributor.author.name] = weekly_contribution[-1].c
counter = counter + 1
return contributors_info
# return total number of commits so far in the week (commits since the most recent monday)
@router.get("/repo/totalweeklycommits")
def return_weekly_commits(repo_name : str):
try:
repository = github.get_repo(repo_name)
except:
raise HTTPException(status_code=404, detail="Repository not found")
    today = datetime.now()
most_recent_monday = today - timedelta(days=today.weekday())
commits=repository.get_commits(since=most_recent_monday)
return {
"Commits in the last week":commits.totalCount,
"Commits since": str(most_recent_monday.date())
}
# return total number of commits between the given start and end dates
@router.get("/repo/commits")
def return_commits_in_range(repo_name : str, start : str = Query(None), end : str = Query(None)):
    # the url that the user should be passing in is something like ?start=2022-01-01&end=2022-01-31
    # parses the dates passed in to datetime objects. This is the format that the github api uses
    start_date = datetime.strptime(start, "%Y-%m-%d")
    end_date = datetime.strptime(end, "%Y-%m-%d")
repository = github.get_repo(repo_name)
commits=repository.get_commits(since=start_date, until=end_date)
return {"commits":commits.totalCount}
# return percentage increase for commits in the past week
@router.get("/repo/commitsincrease")
def calculate_commits_increase(repo_name:str):
#find commits this week
    today = datetime.now()
most_recent_monday = today - timedelta(days=today.weekday())
last_weeks_monday = most_recent_monday - timedelta(days=7)
try:
repository = github.get_repo(repo_name)
except:
raise HTTPException(status_code=404, detail="Repository not found")
this_week = repository.get_commits(since=most_recent_monday)
commits_this_week = this_week.totalCount
#find commits last week
last_week = repository.get_commits(since=last_weeks_monday, until=most_recent_monday)
commits_last_week = last_week.totalCount
#find percentage increase
percentage = ""
    if commits_last_week == 0:
        # no baseline from last week, so report this week's count directly
        percentage = str(commits_this_week) + "%"
else:
difference = commits_this_week - commits_last_week
difference = difference / commits_last_week
difference = round(difference*100, 1)
percentage = str(difference) + "%"
return{
"Increase in commits in the past week": percentage,
"Last week's date":str(last_weeks_monday.date())
}
# @router.get("/issues")
# def get_repo_issues(repo_name : str):
# repository = github.get_repo(repo_name)
# issue_list = {}
# for issue in repository.get_issues():
# currentIssue = {
# "Assignee": issue.assignee.assignee,
# "Id": issue.id,
# "Commit Id": issue.commit_id,
# "Event": issue.event,
# "Date created": issue.created_at,
# }
# issue_list[issue.assignee.assignee] = currentIssue
# return issue_list
@router.get("/dummy/repo/commits")
def dummy_repo_commits(start: str, end: str):
# generate a random number of commits based on the start and end dates
# this is just a dummy method for testing purposes
    # get the number of days between the start and end dates
start_date = datetime.strptime(start, "%Y-%m-%d")
end_date = datetime.strptime(end, "%Y-%m-%d")
delta = end_date - start_date
print(delta)
# generate a random number of commits based on the number of days
num_commits = random.randint(0, delta.days)
authors = ['John', 'Jane', 'Bob', 'Alice', 'Joe', 'Mary', 'Tom', 'Sally', 'Sue', 'Sam']
last_num_commits = random.randint(0, delta.days)
    # get the percentage change in commits
percentage = 0
    if last_num_commits == 0:
        # no baseline, so report the raw count as the percentage change
        percentage = num_commits
else:
difference = num_commits - last_num_commits
difference = difference / last_num_commits
difference = round(difference*100, 1)
percentage = difference
print(percentage)
return {
"num_commits": num_commits,
"percent_change": percentage,
"authors": authors,
"start_date": start,
"end_date": end,
}
@router.get("/dummy/repo/bugs")
def dummy_repo_bugs(start: str, end: str):
start_date = datetime.strptime(start, "%Y-%m-%d")
end_date = datetime.strptime(end, "%Y-%m-%d")
delta = end_date - start_date
monthly_bug_rate = []
random.seed(0)
x = 0
for i in range(delta.days):
# values have to be positive
dx = random.gauss(0, 1)
if dx < 0:
dx = 0
x += dx
monthly_bug_rate.append(int(x))
print(monthly_bug_rate)
return {
"num_bugs": len(monthly_bug_rate),
"num_bugs_prev": 10,
"percent_change": 20,
"start_date": start,
"end_date": end,
"values": monthly_bug_rate,
}
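# Added wiring sketch (not part of the original router module): how this router would
# typically be mounted on an application object. The FastAPI() app created here and the
# uvicorn invocation are assumptions for illustration, not the project's real entry point.
if __name__ == "__main__":
    from fastapi import FastAPI
    import uvicorn  # assumed to be installed alongside FastAPI
    app = FastAPI()
    app.include_router(router)
    uvicorn.run(app, host="127.0.0.1", port=8000)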
|
sweng-project-tcd/dashboard-back
|
router/repo/repo.py
|
repo.py
|
py
| 6,032 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7869741779
|
# Submitting this program in Python is comparatively slow and exceeds the time limit (TLE).
# Submitting it with PyPy3 gets it accepted (AC).
# Compute nCr mod 3 with Lucas' theorem
def ncr(n, r):
if n < 3 and r < 3:
A = [
[ 1, 0, 0 ],
[ 1, 1, 0 ],
[ 1, 2, 1 ]
]
return A[n][r]
return ncr(n // 3, r // 3) * ncr(n % 3, r % 3) % 3
# Input
N = int(input())
C = input()
# Compute the answer
answer = 0
for i in range(N):
code = "BWR".find(C[i])
answer += code * ncr(N - 1, i)
answer %= 3
# Multiply the answer by (-1)^(N-1)
if N % 2 == 0:
answer = (3 - answer) % 3
# Print the answer (the answer-th character of "BWR")
print("BWR"[answer])
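# Added illustration (not part of the submission; never called, so the program's output is
# unchanged): a small local self-check of the Lucas-theorem routine against a direct
# binomial coefficient.
def _check_ncr_mod3(limit=20):
    from math import comb  # Python 3.8+
    for n in range(limit):
        for r in range(n + 1):
            assert ncr(n, r) == comb(n, r) % 3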
|
E869120/math-algorithm-book
|
editorial/chap6-26_30/prob6-28.py
|
prob6-28.py
|
py
| 766 |
python
|
ja
|
code
| 897 |
github-code
|
6
|
30448991195
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import numpy as np
# In[27]:
class MultiHeadSelfAttention(layers.Layer):
def __init__(self, embed_dim, num_heads=8):
super(MultiHeadSelfAttention, self).__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
if embed_dim % num_heads != 0:
raise ValueError(
f"embedding dimension = {embed_dim} should be divisible by number of heads = {num_heads}"
)
self.projection_dim = embed_dim // num_heads
self.query_dense = layers.Dense(embed_dim)
self.key_dense = layers.Dense(embed_dim)
self.value_dense = layers.Dense(embed_dim)
self.combine_heads = layers.Dense(embed_dim)
def attention(self, query, key, value):
score = tf.matmul(query, key, transpose_b=True)
dim_key = tf.cast(tf.shape(key)[-1], tf.float32)
scaled_score = score / tf.math.sqrt(dim_key)
weights = tf.nn.softmax(scaled_score, axis=-1)
output = tf.matmul(weights, value)
return output, weights
def separate_heads(self, x, batch_size):
x = tf.reshape(x, (batch_size, -1, self.num_heads, self.projection_dim))
return tf.transpose(x, perm=[0, 2, 1, 3])
def call(self, inputs):
# x.shape = [batch_size, seq_len, embedding_dim]
batch_size = tf.shape(inputs)[0]
query = self.query_dense(inputs) # (batch_size, seq_len, embed_dim)
key = self.key_dense(inputs) # (batch_size, seq_len, embed_dim)
value = self.value_dense(inputs) # (batch_size, seq_len, embed_dim)
query = self.separate_heads(
query, batch_size
) # (batch_size, num_heads, seq_len, projection_dim)
key = self.separate_heads(
key, batch_size
) # (batch_size, num_heads, seq_len, projection_dim)
value = self.separate_heads(
value, batch_size
) # (batch_size, num_heads, seq_len, projection_dim)
attention, weights = self.attention(query, key, value)
attention = tf.transpose(
attention, perm=[0, 2, 1, 3]
) # (batch_size, seq_len, num_heads, projection_dim)
concat_attention = tf.reshape(
attention, (batch_size, -1, self.embed_dim)
) # (batch_size, seq_len, embed_dim)
output = self.combine_heads(
concat_attention
) # (batch_size, seq_len, embed_dim)
return output
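# Added shape check (not in the original script): run one random batch through the layer
# defined above and confirm that multi-head attention preserves the (batch, seq, embed)
# shape. The sizes are arbitrary.
_mhsa_demo = MultiHeadSelfAttention(embed_dim=32, num_heads=4)
_demo_out = _mhsa_demo(tf.random.uniform((2, 10, 32)))
assert _demo_out.shape == (2, 10, 32)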
# In[28]:
class TransformerBlock(layers.Layer):
def __init__(self, embed_dim, num_heads, ff_dim, rate=0.1):
super(TransformerBlock, self).__init__()
self.att = MultiHeadSelfAttention(embed_dim, num_heads)
self.ffn = keras.Sequential(
[layers.Dense(ff_dim, activation="relu"), layers.Dense(embed_dim),]
)
self.layernorm1 = layers.LayerNormalization(epsilon=1e-6)
self.layernorm2 = layers.LayerNormalization(epsilon=1e-6)
self.dropout1 = layers.Dropout(rate)
self.dropout2 = layers.Dropout(rate)
def call(self, inputs, training):
attn_output = self.att(inputs)
attn_output = self.dropout1(attn_output, training=training)
out1 = self.layernorm1(inputs + attn_output)
ffn_output = self.ffn(out1)
ffn_output = self.dropout2(ffn_output, training=training)
return self.layernorm2(out1 + ffn_output)
# In[29]:
class TokenAndPositionEmbedding(layers.Layer):
def __init__(self, maxlen, vocab_size, embed_dim):
super(TokenAndPositionEmbedding, self).__init__()
self.token_emb = layers.Embedding(input_dim=vocab_size, output_dim=embed_dim)
self.pos_emb = layers.Embedding(input_dim=maxlen, output_dim=embed_dim)
def call(self, x):
maxlen = tf.shape(x)[-1]
positions = tf.range(start=0, limit=maxlen, delta=1)
positions = self.pos_emb(positions)
x = self.token_emb(x)
return x + positions
# In[49]:
vocab_size = 20000  # Only consider the top 20k tokens (needed below by TokenAndPositionEmbedding)
maxlen = 411  # Maximum sequence length used when padding the inputs
x_train = np.load("../main/output/x_train.npy", allow_pickle = True)
print(len(x_train), "Training sequences")
# print(len(x_val), "Validation sequences")
x_train = keras.preprocessing.sequence.pad_sequences(x_train, maxlen=maxlen)
# x_val = keras.preprocessing.sequence.pad_sequences(x_val, maxlen=maxlen)
# In[51]:
y_train = np.load("../main/output/score_bin.npy", allow_pickle = True)
# In[61]:
gZoneIdx = np.load("../main/output/25_Jul_2020/gZoneIdx.npy", allow_pickle = True)
print("Done! GreenZone has", len(gZoneIdx), "registers!")
# In[67]:
removeIdx = []
# Finding removeIdx in O(n) using two pointers :D
l = 0
n = 0
while(l < len(gZoneIdx)):
if(gZoneIdx[l] != n):
removeIdx.append(n)
else: l += 1
n+=1
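# Added illustration: on a small, sorted toy list the two-pointer sweep above is equivalent
# to taking the complement of gZoneIdx within range(gZoneIdx[-1] + 1). The list below is
# made up and independent of the real gZoneIdx.
_demo_keep = [0, 2, 3, 6]
_demo_remove = [n for n in range(_demo_keep[-1] + 1) if n not in _demo_keep]
assert _demo_remove == [1, 4, 5]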
# In[69]:
print("Removing useless entries...")
y_train = np.delete(y_train, removeIdx, axis = 0)
# In[73]:
y_train = y_train[:,0]
# In[74]:
len(y_train) == len(x_train)
# In[76]:
embed_dim = 32 # Embedding size for each token
num_heads = 2 # Number of attention heads
ff_dim = 32 # Hidden layer size in feed forward network inside transformer
inputs = layers.Input(shape=(maxlen,))
embedding_layer = TokenAndPositionEmbedding(maxlen, vocab_size, embed_dim)
x = embedding_layer(inputs)
transformer_block = TransformerBlock(embed_dim, num_heads, ff_dim)
x = transformer_block(x)
x = layers.GlobalAveragePooling1D()(x)
x = layers.Dropout(0.1)(x)
x = layers.Dense(20, activation="relu")(x)
x = layers.Dropout(0.1)(x)
outputs = layers.Dense(2, activation="softmax")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
# In[ ]:
from tensorflow.python.client import device_lib
print(device_lib.list_local_devices())
# In[79]:
model.compile("adam", "sparse_categorical_crossentropy", metrics=["accuracy"])
history = model.fit(
x_train[:100000], y_train[:100000], batch_size=32, epochs=2
)
|
pdrsa/ECG_Reports_Classification
|
bert/BERT_classification.py
|
BERT_classification.py
|
py
| 6,057 |
python
|
en
|
code
| 1 |
github-code
|
6
|
4737844765
|
from pathlib import Path # Pathlib - Working with file paths
p = Path('.') # Creates a path object in found OS (Windows Path)
test = [x for x in p.iterdir() if x.is_dir()]
print(p.resolve()) # Show file dir in your OS format (D:\Backup\Work\DevOps\Programming\Scripts\Python\fundamentals\Built In Modules\pathlib)
new_p = p / 'Test dir' # Navigating into Test dir folder
new_p.mkdir(exist_ok=True) # Create folder at location p (exist_ok avoids an error on re-runs)
for file_name in new_p.iterdir():
if file_name.match('*.txt') or file_name.match('*.py'): # Check for specific file types when iterating through files in path
print(file_name)
new_p /= 'test.txt'
print(new_p)
with new_p.open() as f:
print(f.readline())
print(f.readline())
|
danlhennessy/Learn
|
Python/fundamentals/Built_In_Modules/pathlib/main.py
|
main.py
|
py
| 716 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42107956910
|
import pickle
import numpy
import matplotlib.pyplot as plt  # needed by plot() below
PATH = '../../data/'
def get_all_instances_of_symbol(symbol):
f = open(PATH + symbol.upper(), 'rb')
return pickle.load(f)
def plot(loss_history, train_acc_history, val_acc_history):
plt.subplot(2, 1, 1)
plt.plot(train_acc_history)
plt.plot(val_acc_history)
plt.title('accuracy vs time')
plt.legend(['train', 'val'], loc=4)
plt.xlabel('epoch')
plt.ylabel('classification accuracy')
plt.subplot(2, 1, 2)
plt.plot(loss_history)
plt.title('loss vs time')
plt.xlabel('iteration')
plt.ylabel('loss')
plt.show()
|
nishithbsk/ImaginedSpeech
|
scripts/ConvNet/util.py
|
util.py
|
py
| 550 |
python
|
en
|
code
| 7 |
github-code
|
6
|
7792745843
|
#!/usr/bin/env python3
"""Resolve docker container's name into IPv4 address"""
import docker
from ipaddress import ip_address, IPv4Address, IPv6Address
from threading import Thread
from twisted.internet import reactor, defer
from twisted.names import client, dns, server
LISTEN_ADDRESS = "127.0.0.1"
DNS_PORT = 53
class DockerResolver(client.Resolver):
"""Resolve container name into IP address."""
def __init__(self, dockerClient, servers=None):
super().__init__(resolv=None, servers=servers)
self.dockerClient = dockerClient
self.runningContainers = {}
for c in dockerClient.containers.list():
containerName = c.attrs["Name"][1:]
containerNetworks = c.attrs["NetworkSettings"]["Networks"]
for k, v in containerNetworks.items():
containerIPv4 = v["IPAddress"]
containerIPv6 = v["GlobalIPv6Address"]
if not containerIPv6:
containerIPv6 = None
shouldAddContainer = False
if (("Health" in c.attrs["State"] and
c.attrs["State"]["Health"]["Status"] == "healthy") or
"Health" not in c.attrs["State"]):
shouldAddContainer = True
if shouldAddContainer:
self.addContainer(containerName,
containerIPv4,
containerIPv6)
def addContainer(self, containerName, containerIPv4, containerIPv6=None):
if containerName not in self.runningContainers:
self.runningContainers[containerName] = []
self.runningContainers[containerName].append(containerIPv4)
if containerIPv6:
self.runningContainers[containerName].append(containerIPv6)
def removeContainer(self, containerName):
self.runningContainers.pop(containerName, None)
def __lookup(self, query, timeout=None, type="A"):
allowedTypes = ("A", "AAAA")
if type not in allowedTypes:
raise ValueError
domain = query.decode()
if domain in self.runningContainers:
answers = []
authority = []
additional = []
for address in self.runningContainers[domain]:
if ((type == "A"
and not isinstance(ip_address(address), IPv4Address))
or (type == "AAAA"
and not isinstance(ip_address(address), IPv6Address))):
continue
record = getattr(dns, "Record_%s" % type)
p = record(address=address.encode())
dnsType = getattr(dns, "%s" % type)
answer = dns.RRHeader(name=query, payload=p, type=dnsType)
answers.append(answer)
return defer.succeed((answers, authority, additional))
else:
return None
def lookupAddress(self, query, timeout=None):
response = self.__lookup(query, timeout, "A")
if response:
return response
else:
return super().lookupAddress(query, timeout)
def lookupIPV6Address(self, query, timeout=None):
response = self.__lookup(query, timeout, "AAAA")
if response:
return response
else:
return super().lookupIPV6Address(query, timeout)
def __findContainerByPTRQuery(self, PTRQuery):
query = PTRQuery.decode().rstrip(".")
if query.endswith(".in-addr.arpa"):
            # str.rstrip strips a character set rather than a suffix, so slice the suffix off instead
            ip_list = query[:-len(".in-addr.arpa")].split(".")
i = 0
ipQuery = ""
while i < len(ip_list):
i += 1
ipQuery += ip_list[-i]
if i != len(ip_list):
ipQuery += "."
ipQuery = ip_address(ipQuery)
elif query.endswith(".ip6.arpa"):
            ip_list = query[:-len(".ip6.arpa")][::-1].split(".")
i = 0
ipQuery = ""
while i < len(ip_list):
ipQuery += ip_list[i]
i += 1
if i % 4 == 0 and i != len(ip_list):
ipQuery += ":"
ipQuery = ip_address(ipQuery)
else:
return None
for containerName, IPs in self.runningContainers.items():
for ip in IPs:
if ipQuery == ip_address(ip):
return containerName
return None
def lookupPointer(self, query, timeout=None):
answers = []
authority = []
additional = []
containerName = self.__findContainerByPTRQuery(query)
if containerName is None:
return super().lookupPointer(query, timeout)
p = dns.Record_PTR(name=containerName)
answer = dns.RRHeader(name=query, payload=p, type=dns.PTR)
answers.append(answer)
return defer.succeed((answers, authority, additional))
class EventsListener(Thread):
"""Listen on start and die events."""
def __init__(self, resolver):
super().__init__()
self.resolver = resolver
self.eventListener = None
def run(self):
self.eventListener = self.resolver.dockerClient.events(
filters={"event": ["connect",
"disconnect",
"health_status"]},
decode=True)
for e in self.eventListener:
callback_prefix = e["Action"]
if "health_status:" in e["Action"]:
callback_prefix = e["Action"][:(e["Action"].index(':'))]
callback = getattr(self, callback_prefix + "Callback")
callback(e)
def join(self, timeout=None):
self.eventListener.close()
super().join(timeout)
def __add_container(self, container):
containerName = container["Name"].lstrip('/')
containerNetworks = container["NetworkSettings"]["Networks"]
for k, v in containerNetworks.items():
containerIPv4 = v["IPAddress"]
containerIPv6 = v["GlobalIPv6Address"]
shouldAddContainer = True
# ContainerNetworks contains all the networks. So if we connect a
# second (or third) network after container started, we fire
# connect event several times. This means we should ensure that
# containerName appears once in resolver.runningContainers list.
if containerName in self.resolver.runningContainers:
thisContainer = self.resolver.runningContainers[containerName]
if containerIPv4 in thisContainer:
shouldAddContainer = False
if not containerIPv6:
containerIPv6 = None
if shouldAddContainer:
self.resolver.addContainer(containerName,
containerIPv4,
containerIPv6)
def connectCallback(self, event):
containerID = event["Actor"]["Attributes"]["container"]
api = self.resolver.dockerClient.api
container = api.inspect_container(containerID)
if ("Health" not in container["State"] or
container["State"]["Health"]["Status"] == "healthy"):
self.__add_container(container)
def disconnectCallback(self, event):
containerID = event["Actor"]["Attributes"]["container"]
api = self.resolver.dockerClient.api
try:
container = api.inspect_container(containerID)
containerName = container["Name"].lstrip('/')
self.resolver.removeContainer(containerName)
except docker.errors.NotFound:
pass
def health_statusCallback(self, event):
api = self.resolver.dockerClient.api
container = api.inspect_container(event["id"])
if ("Health" in container["State"] and
container["State"]["Health"]["Status"] == "healthy"):
self.__add_container(container)
class DockerDNS():
"""Start and stop DockerDNS Service"""
def __init__(self, port=None, listenAddress=None, forwarders=None):
if not isinstance(forwarders, list):
raise TypeError
self.port = port
self.listenAddress = listenAddress
self.forwarders = forwarders
self.eventsListener = None
self.udp_listener = None
self.tcp_listener = None
if self.port is None:
self.port = DNS_PORT
if self.listenAddress is None:
self.listenAddress = LISTEN_ADDRESS
def start(self):
"""Configure and execute the DNS server."""
dockerClient = docker.from_env()
resolver = DockerResolver(dockerClient=dockerClient,
servers=self.forwarders)
self.eventsListener = EventsListener(resolver)
self.eventsListener.start()
factory = server.DNSServerFactory(clients=[resolver])
protocol = dns.DNSDatagramProtocol(controller=factory)
self.udp_listener = reactor.listenUDP(port=self.port,
protocol=protocol,
interface=self.listenAddress)
self.tcp_listener = reactor.listenTCP(port=self.port,
factory=factory,
interface=self.listenAddress)
reactor.run()
def clean(self):
"""Clean all the resources"""
self.stop()
self.eventsListener.join()
def stop(self):
"""Stop the reactor if running"""
if reactor.running:
self.udp_listener.stopListening()
self.tcp_listener.stopListening()
reactor.stop()
|
dangoncalves/docker-dns
|
dockerDNS/dockerDNS.py
|
dockerDNS.py
|
py
| 9,886 |
python
|
en
|
code
| 3 |
github-code
|
6
|
811493816
|
# Unique Paths - https://leetcode.com/problems/unique-paths/
'''A robot is located at the top-left corner of a m x n grid (marked 'Start' in the diagram below).
The robot can only move either down or right at any point in time. The robot is trying to reach the
bottom-right corner of the grid (marked 'Finish' in the diagram below).
How many possible unique paths are there?'''
# Input: m = 3, n = 2
# Output: 3
from typing import List  # needed for the List[...] annotations used below


class Solution:
def uniquePaths(self, m: int, n: int) -> int:
grid = [[0 for _ in range(n)] for _ in range(m)]
for i in range(m):
for j in range(n):
if i == 0 and j == 0:
grid[i][j] = 1
elif i == 0 and j != 0:
grid[i][j] += grid[i][j - 1]
elif i != 0 and j == 0:
grid[i][j] += grid[i - 1][j]
else:
grid[i][j] = grid[i - 1][j] + grid[i][j - 1]
return grid[m - 1][n - 1]
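# Added check: the 3 x 2 grid from the prompt above has exactly 3 unique paths.
if __name__ == "__main__":
    assert Solution().uniquePaths(3, 2) == 3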
# Unique Paths II - https://leetcode.com/problems/unique-paths-ii/
'''A robot is located at the top-left corner of a m x n grid (marked 'Start' in the diagram below).
The robot can only move either down or right at any point in time. The robot is trying to reach the
bottom-right corner of the grid (marked 'Finish' in the diagram below).
Now consider if some obstacles are added to the grids. How many unique paths would there be?'''
# Input:
# [
# [0,0,0],
# [0,1,0],
# [0,0,0]
# ]
# Output: 2
class Solution:
def uniquePathsWithObstacles(self, obstacleGrid: List[List[int]]) -> int:
rows = len(obstacleGrid)
cols = len(obstacleGrid[0])
for i in range(rows - 1, -1, -1):
for j in range(cols - 1, -1, -1):
if obstacleGrid[i][j] == 1:
obstacleGrid[i][j] = 0
elif i != rows - 1 and j == cols - 1:
obstacleGrid[i][j] += obstacleGrid[i + 1][j]
elif i == rows - 1 and j != cols - 1:
obstacleGrid[i][j] += obstacleGrid[i][j + 1]
elif i == rows - 1 and j == cols - 1:
obstacleGrid[i][j] = 1
else:
obstacleGrid[i][j] = obstacleGrid[i + 1][j] + obstacleGrid[i][j + 1]
return obstacleGrid[0][0]
# Unique Paths III - https://leetcode.com/problems/unique-paths-iii/
'''On a 2-dimensional grid, there are 4 types of squares:
1 represents the starting square. There is exactly one starting square.
2 represents the ending square. There is exactly one ending square.
0 represents empty squares we can walk over.
-1 represents obstacles that we cannot walk over.
Return the number of 4-directional walks from the starting square to the ending square,
that walk over every non-obstacle square exactly once.'''
# Input: [[1,0,0,0],[0,0,0,0],[0,0,2,-1]]
# Output: 2
class Solution:
def uniquePathsIII(self, grid: List[List[int]]) -> int:
rows = len(grid)
cols = len(grid[0])
self.paths = 0
(x, y) = (0, 0)
end = (0, 0)
emptySquares = 1
for i in range(rows):
for j in range(cols):
if grid[i][j] == 1:
(x, y) = (i, j)
elif grid[i][j] == 2:
end = (i, j)
elif grid[i][j] == 0:
emptySquares += 1
def dfs(i, j, empty):
if not (0 <= i < rows and 0 <= j < cols and grid[i][j] >= 0):
return
if (i, j) == end:
if empty == 0:
self.paths += 1
return
grid[i][j] = -2
dfs(i - 1, j, empty - 1)
dfs(i + 1, j, empty - 1)
dfs(i, j - 1, empty - 1)
dfs(i, j + 1, empty - 1)
grid[i][j] = 0
dfs(x, y, emptySquares)
return self.paths
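# Added check: the example grid from the prompt has exactly 2 walks that cover every
# non-obstacle square once.
if __name__ == "__main__":
    assert Solution().uniquePathsIII([[1, 0, 0, 0], [0, 0, 0, 0], [0, 0, 2, -1]]) == 2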
|
Saima-Chaity/Leetcode
|
Matrix/uniquePaths.py
|
uniquePaths.py
|
py
| 3,502 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15999128955
|
# -*- coding: utf-8 -*-
"""
Version without request signature
"""
import re
import json
from scrapy import Spider
from scrapy.http import Request
from douyin_app.docs.conf import HEADER
class DouyinIdolVideoSpider(Spider):
name = "idol_douyin_video"
idol_url = ''
video_list_url = 'https://api.amemv.com/aweme/v1/aweme/post/?user_id={}&max_cursor={}&count=20&device_id=39681429254&ac=wifi&channel=xiaomi&aid=1128&app_name=aweme'
max_cursor = 0
uid = None
def __init__(self, url):
super(DouyinIdolVideoSpider, self).__init__()
self.idol_url = url
def start_requests(self):
try:
self.uid = re.findall(r'user/(\d+)', self.idol_url)[0]
            self.logger.info('Parsed idol info {} (•‾̑⌣‾̑•)✧˖°'.format(self.uid))
yield self.start_get_video_list(self.uid)
except Exception:
            self.logger.error('Could not parse any video info ,,Ծ‸Ծ,,')
def start_get_video_list(self, uid):
url = self.video_list_url.format(uid, self.max_cursor)
header = HEADER
return Request(url=url, headers=header, callback=self.get_video_list)
def start_get_video(self, url, desc):
url = url
header = HEADER
return Request(url=url, headers=header, callback=self.get_video, meta={'desc': desc})
def get_video_list(self, response):
content = response.body.decode('utf-8')
content = json.loads(content)
video_list = content.get('aweme_list')
if video_list:
for video in video_list:
download_url = video.get('video').get('play_addr_lowbr').get('url_list')[0]
desc = video.get('desc')
                self.logger.info('Parsed download link {} (•‾̑⌣‾̑•)✧˖°'.format(download_url))
yield self.start_get_video(download_url, desc)
if content.get('has_more'):
self.max_cursor = content.get('max_cursor')
yield self.start_get_video_list(self.uid)
def get_video(self, response):
desc = response.meta.get('desc')
content = response.body
with open('./douyin_app/videos/{}.mp4'.format(desc), 'wb') as f:
f.write(content)
        self.logger.info('Download finished ๑乛◡乛๑')
|
iamxuwenjin/videos_download
|
douyin_app/spiders/douyin_idol_video_download.py
|
douyin_idol_video_download.py
|
py
| 2,271 |
python
|
en
|
code
| 6 |
github-code
|
6
|
17493585244
|
#this function gets the dynamic trajectory of a Car
#inputs are time, position, height, time in, time out, velocity in, velocity out, and Car struct
#outputs are time, Velocity, and position
import math
def trajectory(t,X,h,t_in,t_out,V_in,V_out,Car):
V_top = Car["top_speed"]
t_top = Car['t2top_speed']
    # input validation: raise (rather than return) the errors so bad inputs actually stop execution
    if(t_in < 0):
        raise ValueError("t_in must be greater than zero")
    if(t_out < t_in):
        raise ValueError("t_out must be greater than t_in")
    if(t < t_in):
        raise ValueError("t must be greater than t_in")
    if(h <= 0):
        raise ValueError("h must be greater than zero")
    if (t+h > t_out):
        raise ValueError("t_out must be greater than t+h")
    if(X<0):
        raise ValueError("X must be positive or zero")
    if(V_in < 0):
        raise ValueError("V_in must be positive or zero")
    if(V_out < 0):
        raise ValueError("V_out must be positive or zero")
    if(Car['top_speed'] < V_in):
        raise ValueError("Top speed must be greater than V_in")
    if(Car['top_speed'] < V_out):
        raise ValueError("Top speed must be greater than V_out")
    if(V_out > V_in):
        if( ((V_out-V_in)/(t_out-t_in)) > (V_top/t_top)):
            raise ValueError("trajectory is greater than max acceleration")
    if(V_out < V_in):
        if( ((5280*(V_out-V_in)/3600)/(t_out-t_in)) < (-1.4*32.174)):
            raise ValueError("trajectory deceleration is greater than max deceleration")
V_out = 5280*V_out/3600
V_in = 5280 *V_in/3600
V_t = V_in + ((V_out - V_in) / 2) * (1 - math.cos(math.pi * ((t - t_in) / (t_out - t_in))))
V_th2 = V_in + ((V_out - V_in) / 2) * (1 - math.cos(math.pi * (((t + h/2) - t_in) / (t_out - t_in))))
V_th = V_in + ((V_out - V_in) / 2) * (1-math.cos(math.pi * (((t+h) - t_in) / (t_out - t_in))))
X = X + ((h / 6) * (V_t + 4 * V_th2 + V_th))
V = V_t
t = t+h
return t,X,V
# %disp('Location of front axle at end of step (ft)')
# %disp(X)
# %disp('Velocity of the vehicle at end of step (ft/s)')
# %disp(V)
# %disp('Time at end of integration step (s)')
# %disp(t)
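# Added usage sketch (not part of the original file): the Car dict below is a made-up
# example carrying the two keys the function reads, top_speed (mph) and t2top_speed (s).
if __name__ == "__main__":
    car = {"top_speed": 155.0, "t2top_speed": 10.0}
    t_end, X_end, V_end = trajectory(t=1.0, X=0.0, h=0.1, t_in=0.0, t_out=5.0,
                                     V_in=30.0, V_out=40.0, Car=car)
    print(t_end, X_end, V_end)   # time (s), front-axle position (ft), velocity (ft/s)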
|
brandontran14/CarSimulation
|
trajectory.py
|
trajectory.py
|
py
| 2,155 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43586733257
|
# read the contents of the file
# f = open("read.txt","r")
# print(f.read())
# return the first 5 characters of the file:
f = open("read.txt","r")
# print(f.read(5))
# # read the first line
# print(f.readline())
# print(f.readline())
# for x in f:
#     print(x)
# print(f.readline())
f.close()
# reopen in append mode before writing: a file opened with "r" cannot be written to
f = open("read.txt", "a")
f.write("Now the file has more content")
f.close()
#open and read the file after the appending:
f = open("read.txt", "r")
print(f.read())
|
Lengan0101/Python
|
tep.py
|
tep.py
|
py
| 450 |
python
|
vi
|
code
| 0 |
github-code
|
6
|
11415064866
|
"""
Original multirc format:
{
data: [
{
id: str,
paragraph: {
text: {
},
questions: [
{
question: str,
sentences_used: [ int, ],
idx: int,
multisent: bool // don't know what this is
answers: [
{
text: str,
isAnswer: bool,
scores: {} //empty
},
...
]
},
...
]
}
},
...
]
}
"""
import json
import argparse
from pathlib import Path
from typing import Dict, List, Mapping, Generator, Optional, Union
from copy import deepcopy
import itertools
import re
import logging
from .reader import DatasetReader
from .types import (Sample, SingleQuestionSample,
SingleQuestionSingleOptionSample, NLIWithOptionsSample,
PureNLISample)
from dataclasses import dataclass, asdict
logger = logging.getLogger(__name__)
@dataclass
class OriginalMultircSample(Sample):
paragraph: Dict
html_tags = re.compile(r'<[^>]+>')
setence_tags = re.compile(r'Sent\s+\d+:')
html_plus_sentence_tags = re.compile(r"<[^>]+>|Sent\s+\d+:")
class MultircReader(DatasetReader):
def __init__(self,
input_type: str = 'OriginalMultircSample',
output_type: str = 'SingleQuestionSingleOptionSample'):
self.input_type = input_type
self.output_type = output_type
def _read_data(self, path: Path) -> Dict:
with open(path) as f:
samples = json.load(f)
return samples
def read(self, path: Path,
return_dict: bool = False) -> List[Union[Sample, Dict]]:
if self.input_type == 'OriginalMultircSample':
def reader_func(p: Path) -> List[Sample]:
samples = ((self._read_data(p))['data'])
# remove html
for s in samples:
s['paragraph']['text'] = html_plus_sentence_tags.sub(
'', s['paragraph']['text'])
return [OriginalMultircSample(**x) for x in samples]
else:
raise ValueError(f"input_type {self.input_type} not supported")
if self.output_type == 'SingleQuestionSingleOptionSample':
def sample_converter(
x: OriginalMultircSample) -> OriginalMultircSample:
return x # do nothing
def aggregate_converter(
x: List[OriginalMultircSample]
) -> List[SingleQuestionSingleOptionSample]:
all_res = []
for s in x:
para = s.paragraph['text']
for q in s.paragraph['questions']:
for ans_i, a in enumerate(q['answers']):
all_res.append(
SingleQuestionSingleOptionSample(
id=s.id + f"_{q['idx']}" + f"_{ans_i}",
article=para,
question=q['question'],
option=a['text'],
label=int(a['isAnswer'])))
return all_res
else:
            raise ValueError(f"output_type {self.output_type} not supported")
input_samples = [sample_converter(s) for s in reader_func(path)]
output_samples = aggregate_converter(input_samples)
if return_dict:
return [s.__dict__ for s in output_samples]
else:
return output_samples
|
nli-for-qa/conversion
|
qa2nli/qa_readers/multirc.py
|
multirc.py
|
py
| 3,998 |
python
|
en
|
code
| 1 |
github-code
|
6
|
32410264814
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('app', '0016_auto_20150828_0735'),
]
operations = [
migrations.AlterField(
model_name='object',
name='background_transparency',
field=models.IntegerField(default=100, null=True, blank=True, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(100)]),
preserve_default=True,
),
]
|
jacyn/burst
|
webapp/app/migrations/0017_auto_20150828_0747.py
|
0017_auto_20150828_0747.py
|
py
| 609 |
python
|
en
|
code
| 0 |
github-code
|
6
|
69805130109
|
# "Generation phase"
import csv
from builtins import any as b_any
from ExtensoToInteiro import ExtensoToInteiro
def ordena_dets_num_adverq(traducao):
"""
    Orders determiners, numerals and quantity adverbs according to LGP.
    :param traducao: sentence
:return:
"""
indice = 0
while indice < len(traducao):
valor = traducao[indice]
classe = valor[2]
if (classe.startswith("DP") or classe.startswith("Z") or classe.startswith("RGQ")) :
if indice!=len(traducao)-1 and traducao[indice+1][2].startswith("N"):
temp = valor
traducao[indice] = traducao[indice + 1]
traducao[indice + 1] = temp
indice+=1
indice+=1
def orden_neg_int(i, counter, exprFaciais):
"""
    Orders negation and interrogation elements and adds the facial expressions.
    :param i.traducao: sentence
:return:
"""
count = 0
indice = 0
while indice < len(i.traducao):
valor = i.traducao[indice]
classe = valor[2]
if indice < len(i.traducao) - count:
if classe.startswith("RN"): # adds "Não" at the end
# exprFaciais[str(indice+counter) + "-" + str(indice+counter+1)] = "negativa"
# valor = ("{"+valor[0] +"}(negativa)", "{"+valor[0] +"}(negativa)", valor[2])
i.traducao.append(valor)
del i.traducao[indice]
count += 1
indice = 0
if (classe.startswith("PT") or classe.startswith("RGI")) and "INT" in i.tipo[0]:
exprFaciais[str(indice+counter) + "-" + str(indice+counter+1)] = "interrogativa"
# valor = ("{"+valor[0] +"}(interrogativa)", "{"+valor[0] +"}(interrogativa)", valor[2])
i.traducao.append(valor)
del i.traducao[indice]
count += 1
indice += 1
def tempo_verbal(i):
"""
    Orders the time adverbs if they exist in the sentence. Otherwise adds gestures that mark the verb tenses.
    :param traducao: sentence
:return:
"""
pronomes = {"1S": "eu", "2S": "tu", "3S": "ele", "1P": "nós", "2P": "vós", "3P": "eles"}
indice_tempo = list(filter(lambda x: x[1][2] == "RGTP" or x[1][2] == "RGTF", enumerate(i.traducao)))
if indice_tempo:
temp = i.traducao[indice_tempo[0][0]]
del i.traducao[indice_tempo[0][0]]
i.traducao.insert(0, temp)
    # Adds the personal pronoun if it or the subject is missing and the pronoun is not first person singular
for indice, valor in enumerate(i.traducao):
classe = valor[2]
if classe.startswith("V"):
# if indice==0 or indice > 0 and not (i.traducao[indice-1][2].startswith("PP") or i.traducao[indice-1][2].startswith("NC")): # or traducao[indice-1][2].startswith("NC")
if not i.classes_suj:
pronome = classe[4] + classe[5]
if pronome != "00":
temp = (pronomes[pronome], pronomes[pronome], "PP")
i.traducao.insert(indice, temp)
break
# else:
# for indice, valor in enumerate(traducao):
# classe = valor[2]
# if classe.startswith("V"):
# if classe[3] == "S" or classe[3] == "I" or classe[3] == "M":
# temp = ("PASSADO", "PASSADO", "RGTP")
# traducao.insert(0, temp)
# break
# if classe[3] == "F":
# traducao.insert(0, ("FUTURO", "FUTURO", "RGTF"))
# break
# if classe[3] == "P":
# break
def nomes_proprios(traducao, palavras_compostas):
"""
    Identifies proper nouns using the DT() notation.
    :param traducao: sentence
    :param palavras_compostas: compound words
:return:
"""
nomes_proprios_lista = list(filter(lambda x: x[1][2].startswith("NP"), enumerate(traducao)))
if nomes_proprios_lista:
indices_nomes = list(list(zip(*nomes_proprios_lista))[0])
for n in indices_nomes:
valor = traducao[n][0]
nome = ""
if valor in palavras_compostas.values():
glosa_nome_proprio = ""
for palavras, v in palavras_compostas.items():
for p in palavras.split("_"):
nome = ""
for l in p:
nome += l.upper() + "-"
glosa_nome_proprio += "DT(" + nome[:-1] + ")" + " "
glosa_nome_proprio = glosa_nome_proprio[:-1]
else:
for l in valor:
letra = l.upper()
nome += letra + "-"
glosa_nome_proprio = "DT(" + nome[:-1] + ")"
traducao[n] = (glosa_nome_proprio, glosa_nome_proprio, traducao[n][2])
def abre_feminino_excepcoes():
"""
    Handles the feminine words that are exceptions to the rule.
:return:
"""
excepcoes = {}
with open('Feminino_excepcoes.csv', newline='') as csvfile:
spamreader = csv.reader(csvfile, delimiter=',')
for row in spamreader:
excepcoes[row[0]] = row[1]
return excepcoes
def feminino(traducao, excepcoes):
"""
    Handles feminine marking. Also handles the diminutive and the augmentative.
    :param traducao: sentence
    :param excepcoes: dictionary with the exception words and their translations.
:return:
"""
indice = 0
while indice < len(traducao):
valor = traducao[indice]
palavra = valor[0]
classe = valor[2]
lema = valor[1]
if classe.startswith("NC"):
if palavra not in excepcoes:
if palavra.lower()!=lema.lower():
if classe.endswith("D"):
if classe.startswith("NCFP") and palavra[:-5].lower() != lema[:-1].lower():
glosa = "MULHER"
traducao.insert(indice, (glosa, lema, "A"))
traducao[indice + 1] = (lema, lema, classe)
traducao.insert(indice + 2, ("PEQUENO", "PEQUENO", classe))
indice += 2
elif classe.startswith("NCFS") and palavra[:-4].lower() != lema[:-1].lower():
glosa = "MULHER"
traducao.insert(indice,(glosa, lema, "A"))
traducao[indice + 1] = (lema, lema, classe)
traducao.insert(indice + 2, ("PEQUENO", "PEQUENO", classe))
indice += 2
elif classe.startswith("NCFS") and palavra[:-4].lower() == lema[:-1].lower():
traducao[indice] = (lema, lema, classe)
traducao.insert(indice + 1, ("PEQUENO", "PEQUENO", classe))
indice += 1
elif classe.startswith("NCFP") and palavra[:-5].lower() == lema[:-1].lower():
traducao[indice] = (lema, lema, classe)
traducao.insert(indice + 1, ("PEQUENO", "PEQUENO", classe))
indice += 1
else:
if classe.endswith("D"):
diminutivo = "PEQUENO"
traducao.insert(indice + 1, (diminutivo, diminutivo, classe))
traducao[indice] = (
traducao[indice][1], traducao[indice][1], traducao[indice][2])
indice += 1
elif classe.endswith("A"):
traducao.insert(indice + 1, ("GRANDE", "GRANDE", classe))
traducao[indice] = (traducao[indice][1], traducao[indice][1], traducao[indice][2])
indice += 1
elif classe.endswith("A"):
if classe.startswith("NCFP") and palavra[:-5].lower() != lema[:-1].lower():
glosa = "MULHER"
traducao.insert(indice, (glosa, lema, "A"))
traducao[indice + 1] = (lema, lema, classe)
traducao.insert(indice + 2, ("GRANDE", "GRANDE", classe))
indice += 2
elif classe.startswith("NCFS") and palavra[:-4].lower() != lema[:-1].lower():
glosa = "MULHER"
traducao.insert(indice, (glosa, lema, "A"))
traducao[indice + 1] = (lema, lema, classe)
traducao.insert(indice + 2, ("GRANDE", "GRANDE", classe))
indice += 2
elif classe.startswith("NCFS") and palavra[:-4].lower() == lema[:-1].lower():
traducao[indice] = (lema, lema, classe)
traducao.insert(indice + 1, ("GRANDE", "GRANDE", classe))
indice += 1
elif classe.startswith("NCFP") and palavra[:-5].lower() == lema[:-1].lower():
traducao[indice] = (lema, lema, classe)
traducao.insert(indice + 1, ("GRANDE", "GRANDE", classe))
indice += 1
else:
if classe.startswith("NCFP") and palavra[:-1].lower() != lema.lower():
glosa = "MULHER"
traducao.insert(indice, (glosa, lema, "A"))
traducao[indice + 1] = (lema, lema, classe)
indice += 1
elif classe.startswith("NCFS") and palavra.lower() != lema.lower():
glosa = "MULHER"
traducao.insert(indice, (glosa, lema, "A"))
traducao[indice + 1] = (lema, lema, classe)
indice += 1
else:
if palavra.lower() != lema.lower():
if classe.endswith("D"):
diminutivo = "PEQUENO"
if "mulher" in excepcoes[palavra].split():
traducao.insert(indice, ("mulher", lema, "A"))
traducao[indice + 1] = (excepcoes[palavra].split()[1], lema, classe)
traducao.insert(indice + 2, ("PEQUENO", "PEQUENO", classe))
indice += 2
else:
traducao[indice] = (excepcoes[palavra], lema, classe)
traducao.insert(indice + 1, ("PEQUENO", "PEQUENO", classe))
indice += 1
elif classe.endswith("A"):
if "mulher" in excepcoes[palavra].split():
traducao.insert(indice, ("mulher", lema, "A"))
traducao[indice + 1] = (excepcoes[palavra].split()[1], lema, classe)
traducao.insert(indice + 2, ("GRANDE", "GRANDE", classe))
indice += 2
else:
traducao[indice] = (excepcoes[palavra], lema, classe)
traducao.insert(indice + 1, ("GRANDE", "GRANDE", classe))
indice += 1
else:
traducao[indice] = (excepcoes[palavra], lema, classe)
indice += 1
def remove_prep(traducao):
indice = 0
count = 0
while indice < len(traducao)-count:
classe = traducao[indice][2]
lema = traducao[indice][1]
palavra = traducao[indice][0]
if classe.startswith("SP"):
del traducao[indice]
count += 1
indice +=1
def remove_ser_estar(traducao):
indice = 0
count = 0
while indice < len(traducao)-count:
classe = traducao[indice][2]
lema = traducao[indice][1]
palavra = traducao[indice][0]
if lema.lower() =="ser" or lema.lower() == "estar":
del traducao[indice]
count +=1
elif classe == "CC" and palavra.lower() == "e":
del traducao[indice]
count += 1
indice+=1
def cliticos(traducao):
"""
    Handles clitic pronouns.
    :param traducao: sentence
:return:
"""
for indice, valor in enumerate(traducao):
classe = valor[2]
palavra = valor[0]
if classe.startswith("PP"):
if palavra.lower() == "te" or palavra.lower() == "ti":
traducao[indice] = ("TU", "TU", classe)
elif palavra.lower() == "me" or palavra.lower() == "mim":
traducao[indice] = ("EU", "EU", classe)
elif palavra.lower() == "nos":
traducao[indice] = ("NÓS", "NÓS", classe)
elif palavra.lower() == "se":
traducao[indice] = ("", "", classe)
def expressao_interrogativa(frase, traducao, tags, indice, exprFaciais):
"""
    Adds the facial expressions in global (yes/no) interrogatives.
    :param frase: sentence
    :param traducao_glosas: sentence with some manual rules applied
    :param tags: part-of-speech tags of the words
:return:
"""
if frase[-1]=="?" and not b_any("PT" in x for x in tags) and not b_any("RGI" in x for x in tags):
exprFaciais[str(indice) + "-" + str(indice+len(traducao))] = "interrogativa"
# traducao_glosas = "{" + traducao_glosas + "}(q)"
# return traducao_glosas
def converte_glosas(i, counter, exprFaciais):
"""
    Converts the words into glosses.
    :param traducao: sentence
:return:
"""
for indice, valor in enumerate(i.traducao):
classe = valor[2]
lema = valor[1]
palavra = valor[0]
if not classe.startswith("A") and not classe.startswith("NC"):
i.traducao[indice] = lema.upper()
else:
i.traducao[indice] = palavra.upper()
        # converts _ to - e.g. fim_de_semana to fim-de-semana
if "_" in i.traducao[indice]:
i.traducao[indice] = i.traducao[indice].replace("_", "-")
if classe.startswith("Z"):
try:
int(palavra)
except ValueError:
                # Converts numbers written out in words into digits
i.traducao[indice] = str(ExtensoToInteiro(palavra))
        # Adds the negative expression on the verb
        # if classe.startswith("VMI") and "NEG" in i.tipo[0]:
        # exprFaciais[str(indice+counter) + "-" + str(indice+counter+1)] = "negativa"
        # Adds the negative expression on the manual sign NÃO
if classe.startswith("RN") and "NEG" in i.tipo[0]:
exprFaciais[str(indice+counter) + "-" + str(indice+counter+1)] = "negativa"
if "" in i.traducao:
i.traducao.remove('')
def geracao(i, indice, exprFaciais):
"""
    Main function that applies the previous manual rules according to LGP grammar.
    :param i: Sentence in Portuguese (object).
    :return: Sentence in LGP
"""
classes = list(list(zip(*i.traducao))[2])
    # remove prepositions
    remove_prep(i.traducao)
    # reorder determiners, numerals and quantity adverbs
    ordena_dets_num_adverq(i.traducao)
    # Verbs
    tempo_verbal(i)
    # proper nouns
    # nomes_proprios(i.traducao, i.palavras_compostas)
    # feminine
    excepcoes = abre_feminino_excepcoes()
    feminino(i.traducao, excepcoes)
    # remove "ser" and "estar"
    remove_ser_estar(i.traducao)
    # transform clitics
    cliticos(i.traducao)
    # negation adverb to the end of the sentence and partial interrogatives (pronouns and adverbs)
    orden_neg_int(i, indice, exprFaciais)
    # convert to glosses && add the negative expression
    converte_glosas(i, indice, exprFaciais)
    # join the glosses of the translation
    traducao_glosas = " ".join(i.traducao)
    traducao_glosas = traducao_glosas.split(" ")
    # add the facial expression for the interrogative
expressao_interrogativa(i.frase, traducao_glosas, classes, indice, exprFaciais)
return traducao_glosas, exprFaciais
|
ineslacerda/PE2LGP-Translator
|
PE2LGP/Modulo_construcao_regras/geracao_fase.py
|
geracao_fase.py
|
py
| 13,124 |
python
|
pt
|
code
| 2 |
github-code
|
6
|
6827592670
|
def even(n):
if n>10:
i=0
sum=0
re=0
while i<n:
re=n%10
sum=sum+re
n=n//10
return even(sum)
else:
if n%2==0:
print(n,"even")
else:
print(n,"odd")
even(n=int(input("enter the number")))
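# Worked example (added for clarity): for 9875 the digit sum is 9+8+7+5 = 29, whose digit
# sum is 2+9 = 11, then 1+1 = 2, so the recursion finally prints "2 even".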
|
Kaguinewme/function
|
write a program in C to find the sum of digits of a number using recursion.py
|
write a program in C to find the sum of digits of a number using recursion.py
|
py
| 308 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4463507689
|
from django.shortcuts import render
from django.views.generic import View
from .models import *
# Create your views here.
class ProjectsList(View):
def get(self,request):
try:
us = User.objects.get(username=request.user)
except:
us = None
projects = Project.objects.filter(user=us)
return render(request,'taskmain/projects_list.html',{'projects':projects,'us':us})
class ProjectDetail(View):
def get(self,request,slug):
project = Project.objects.get(slug=slug)
return render(request,'taskmain/project_detail.html',{'project':project})
|
virasium/TM
|
taskmain/views.py
|
views.py
|
py
| 618 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40749106105
|
from ReverseSequence import reverse
currentdict=[]
with open('1rosalind.txt') as f:
for line in f:
currentdict.append(line.strip())
'''create setS containing all non duplicate items of the strings
and the reverse complements of the strings
'''
seconddict = []
for i in currentdict:
seconddict.append(reverse(i))
setS = [i for i in currentdict]
for a in seconddict:
setS.append(a)
setS = list(set(setS))
#print(setS)
'''
construct the graph
'''
for i in setS:
    # each k-mer contributes the edge (prefix, suffix)
    print(f"({i[:-1]}, {i[1:]})")
|
HanielDorton/Project_Rosalind
|
ConstructionaDeBruijnGraph/ConstructionaDeBruijnGraph.py
|
ConstructionaDeBruijnGraph.py
|
py
| 589 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29961826770
|
from selenium import webdriver
import json
import traceback
import urllib.request
def parse_page(driver):
script_clue = "q(\"talkPage.init\","
try:
for script in driver.find_elements_by_tag_name("script"):
content = script.get_attribute("innerHTML")
            if content.startswith(script_clue):
                # slice off the exact prefix: str.lstrip would strip a set of
                # characters rather than the literal clue string
                content = content[len(script_clue):].rstrip(")")
json_content = json.loads(content)
title = json_content["__INITIAL_DATA__"]["name"]
download_url = json_content["__INITIAL_DATA__"]["talks"][0]["downloads"]["subtitledDownloads"]["en"]["high"]
return title, download_url
except Exception:
print(traceback.format_exc())
print("Unable to parse page. Stopping.")
exit(-1)
def main(url):
print("Processing URL %s..." % url)
options = webdriver.FirefoxOptions()
options.add_argument("--headless")
driver = webdriver.Firefox(firefox_options=options)
driver.get(url)
title, download_url = parse_page(driver)
print("TED talk: {}\nDownload URL: {}".format(title, download_url))
file_title = title + ".mp4"
print("Downloading file {}...".format(file_title))
try:
urllib.request.urlretrieve(download_url, file_title)
print("Download completed.")
except Exception:
print(traceback.format_exc())
print("Unable to download video. Stopping.")
exit(-1)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
description="A simple tool to download TED talks via CLI.")
parser.add_argument("-t", "--talk", type=str, required=True,
help="Link to TED talk")
args = parser.parse_args()
main(args.talk)
|
ShadowTemplate/ted-downloader
|
ted.py
|
ted.py
|
py
| 1,781 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32111244416
|
import os
import cv2
import sys
import math
import time
import numpy as np
import matplotlib.pyplot as plt
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5 import uic
# Read an image and display it with PyQt (returns a QPixmap)
def cvtPixmap(frame, img_size):
frame = cv2.resize(frame, img_size)
height, width, channel = frame.shape
bytesPerLine = 3 * width
qImg = QImage(frame.data,
width,
height,
bytesPerLine,
QImage.Format_RGB888).rgbSwapped()
qpixmap = QPixmap.fromImage(qImg)
return qpixmap
# Fill in the specular reflections around the pupil
def fill_reflected_light(ori_img, min_thr, iteration=2, add_inter_idx=1):
if len(ori_img.shape) == 3:
ori_img = cv2.cvtColor(ori_img, cv2.COLOR_BGR2GRAY)
ret, img_thresh = cv2.threshold(ori_img, min_thr, 255, cv2.THRESH_BINARY)
kernel = np.ones((3, 3), np.uint8)
    img_thresh = cv2.dilate(img_thresh, kernel, iterations=iteration)  # dilation: expand the bright regions, then fill small holes
    draw_img = ori_img.copy()  # copy of the original image
    reflection_points = np.where(img_thresh == 255)  # indices of pixels whose value is 255
for y, x in zip(reflection_points[0], reflection_points[1]):
        # l_x is the pixel to the left of x, r_x the pixel to the right
        # l_x must stay >= 0 and r_x must stay below the image width
l_x, r_x = x - 1, x + 1
l_x = l_x if l_x >= 0 else 0
r_x = r_x if r_x < img_thresh.shape[1] else img_thresh.shape[1] - 1
        # while l_x / r_x are inside the image and still on reflection pixels, move one step at a time
while l_x >= 0 and img_thresh[y][l_x] == 255:
l_x -= 1
while r_x < (img_thresh.shape[1] - 1) and img_thresh[y][r_x] == 255:
r_x += 1
        # fill the reflection with values a little further out, not the immediately adjacent pixels
        # dilation above already thickened the mask, so add_inter_idx has little practical effect
l_x -= add_inter_idx
r_x += add_inter_idx
l_x = l_x if l_x >= 0 else 0
r_x = r_x if r_x < img_thresh.shape[1] else img_thresh.shape[1] - 1
l_val = int(ori_img[y][l_x])
r_val = int(ori_img[y][r_x])
        draw_img[y][x] = int((l_val + r_val) / 2)  # fill the reflection pixel with the average
return draw_img
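# Example (illustrative values, not part of the original module) of how
# fill_reflected_light is typically called on a grayscale eye frame:
#   eye = cv2.imread("eye_frame.png", cv2.IMREAD_GRAYSCALE)
#   filled = fill_reflected_light(eye, min_thr=200, iteration=2)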
# Pupil detection
def getPupil(img, thresh, area_val, symmetry_val, fill_cond_val):
    '''
    :param img: input eye image
    :param thresh: (low, high) values used for binary thresholding
    :param area_val: minimum contour area
    :param symmetry_val: aspect-ratio tolerance (divided by 10 below)
    :param fill_cond_val: fill-ratio tolerance (divided by 10 below)
    :return: ((pupil centre x, y), radius, (bounding rectangle x, y, w, h))
    All three *_condition flags must hold before a contour is appended to res:
    area_condition : area enclosed by the rectangular contour must be at least area_val
    symmetry_condition : true when 1 - aspect ratio (width/height of the contour's bounding rectangle) is below symmetry_val -> must be close to a square
    fill_condition : compares (contour area / area of the inscribed circle computed below) to check how well the circle is filled
    '''
res = []
if len(img.shape) == 3:
gray = cv2.cvtColor(~img, cv2.COLOR_BGR2GRAY)
else:
gray = img
ret, thresh_gray = cv2.threshold(gray, thresh[0], thresh[1], cv2.THRESH_BINARY)
contours, hierarchy = cv2.findContours(thresh_gray, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
draw_img = img.copy()
for i in range(len(contours)):
        # # visualise each contour
# cv2.drawContours(draw_img, [contours[i]], 0, (0, 0, 255), 2)
# cv2.putText(draw_img, str(i), tuple(contours[i][0][0]), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 255, 0), 1)
# print(i, hierarchy[0][i])
# cv2.imshow('contour detection', draw_img)
# cv2.waitKey(0)
# for contour in contours:
area = cv2.contourArea(contours[i])
rect = cv2.boundingRect(contours[i])
        x, y, width, height = rect  # coordinates, width and height of the rectangular bounding box
        radius = 0.25 * (width + height)  # radius of the circle inscribed in the rectangular contour
area_condition = (area_val <= area)
symmetry_condition = (abs(1 - float(width) / float(height)) <= symmetry_val/10)
fill_condition = (abs(1 - (area / (math.pi * math.pow(radius, 2.0)))) <= fill_cond_val/10)
        # all three conditions must hold for the region to be accepted as the pupil
if area_condition and symmetry_condition and fill_condition:
            res.append(((int(x + radius), int(y + radius)), int(1 * radius), rect))  # pupil centre x, y, radius, rect (bounding rectangle)
return res, thresh_gray
# Measure the pupil diameter
def get_pupil_size(roi, binary_eye, pupil_info, add_radius):
    info = pupil_info[0]  # (pupil centre x, y), radius, (bounding rectangle x, y, w, h)
# rect_roi = info[2]
rect_roi = pupil_info[0][2]
box_x, box_y, width, height = rect_roi
box_x = box_x - add_radius if box_x - add_radius >= 0 else 0
box_y = box_y - add_radius if box_y - add_radius >= 0 else 0
width = width + (2 * add_radius) if width + (2 * add_radius) <= roi.shape[1] else roi.shape[1]
height = height + (2 * add_radius) if height + (2 * add_radius) <= roi.shape[0] else roi.shape[0]
img_eye_only = binary_eye[box_y:box_y + height, box_x:box_x + width].copy()
img_eye_only = np.where(img_eye_only == 255, 1, img_eye_only)
    cv2.rectangle(roi, (box_x, box_y), ((box_x + width), (box_y + height)), (0, 255, 255), 2)  # yellow box around the pupil
    # find the row with the largest number of pupil pixels (widest part of the pupil)
max_idx, max_val = 0, 0
for col_idx in range(img_eye_only.shape[0]):
col_val = sum(img_eye_only[col_idx])
if max_val < col_val:
max_idx = col_idx
max_val = col_val
    # find the left and right columns where the pupil region starts
l_row, r_row = 0, img_eye_only.shape[1]
for row_idx in range(img_eye_only.shape[1] - 1):
row_val = sum(img_eye_only[:, row_idx])
if row_val != 0:
l_row = row_idx
for row_idx in range(img_eye_only.shape[1] - 1, 0, -1):
row_val = sum(img_eye_only[:, row_idx])
if row_val != 0:
r_row = row_idx
    # draw the widest pupil row found above as a horizontal line
    cv2.line(roi,
             (box_x + l_row, box_y + max_idx),
             (box_x + r_row, box_y + max_idx),
             (0, 0, 255), 2)  # draw the pupil diameter
return roi, max_val
def frames_to_timecode(total_frames, frame_rate=30, drop=False):
"""
Method that converts frames to SMPTE timecode.
:param total_frames: Number of frames
:param frame_rate: frames per second
:param drop: true if time code should drop frames, false if not
:returns: SMPTE timecode as string, e.g. '01:02:12:32' or '01:02:12;32'
"""
if drop and frame_rate not in [29.97, 59.94]:
raise NotImplementedError("Time code calculation logic only supports drop frame "
"calculations for 29.97 and 59.94 fps.")
# for a good discussion around time codes and sample code, see
# http://andrewduncan.net/timecodes/
# round fps to the nearest integer
# note that for frame rates such as 29.97 or 59.94,
# we treat them as 30 and 60 when converting to time code
# then, in some cases we 'compensate' by adding 'drop frames',
# e.g. jump in the time code at certain points to make sure that
# the time code calculations are roughly right.
#
# for a good explanation, see
# https://documentation.apple.com/en/finalcutpro/usermanual/index.html#chapter=D%26section=6
fps_int = int(round(frame_rate))
if drop:
# drop-frame-mode
# add two 'fake' frames every minute but not every 10 minutes
#
# example at the one minute mark:
#
# frame: 1795 non-drop: 00:00:59:25 drop: 00:00:59;25
# frame: 1796 non-drop: 00:00:59:26 drop: 00:00:59;26
# frame: 1797 non-drop: 00:00:59:27 drop: 00:00:59;27
# frame: 1798 non-drop: 00:00:59:28 drop: 00:00:59;28
# frame: 1799 non-drop: 00:00:59:29 drop: 00:00:59;29
# frame: 1800 non-drop: 00:01:00:00 drop: 00:01:00;02
# frame: 1801 non-drop: 00:01:00:01 drop: 00:01:00;03
# frame: 1802 non-drop: 00:01:00:02 drop: 00:01:00;04
# frame: 1803 non-drop: 00:01:00:03 drop: 00:01:00;05
# frame: 1804 non-drop: 00:01:00:04 drop: 00:01:00;06
# frame: 1805 non-drop: 00:01:00:05 drop: 00:01:00;07
#
# example at the ten minute mark:
        #
# frame: 17977 non-drop: 00:09:59:07 drop: 00:09:59;25
# frame: 17978 non-drop: 00:09:59:08 drop: 00:09:59;26
# frame: 17979 non-drop: 00:09:59:09 drop: 00:09:59;27
# frame: 17980 non-drop: 00:09:59:10 drop: 00:09:59;28
# frame: 17981 non-drop: 00:09:59:11 drop: 00:09:59;29
# frame: 17982 non-drop: 00:09:59:12 drop: 00:10:00;00
# frame: 17983 non-drop: 00:09:59:13 drop: 00:10:00;01
# frame: 17984 non-drop: 00:09:59:14 drop: 00:10:00;02
# frame: 17985 non-drop: 00:09:59:15 drop: 00:10:00;03
# frame: 17986 non-drop: 00:09:59:16 drop: 00:10:00;04
# frame: 17987 non-drop: 00:09:59:17 drop: 00:10:00;05
# calculate number of drop frames for a 29.97 std NTSC
# workflow. Here there are 30*60 = 1800 frames in one
# minute
        # 2 is subtracted because two frame numbers are dropped every minute
FRAMES_IN_ONE_MINUTE = 1800 - 2
FRAMES_IN_TEN_MINUTES = (FRAMES_IN_ONE_MINUTE * 10) - 2
        # integer division: chunk counts must be whole numbers of frames
        ten_minute_chunks = total_frames // FRAMES_IN_TEN_MINUTES  # how many whole ten-minute chunks
        one_minute_chunks = total_frames % FRAMES_IN_TEN_MINUTES   # frames left over within the current ten-minute chunk
        ten_minute_part = 18 * ten_minute_chunks
        one_minute_part = 2 * ((one_minute_chunks - 2) // FRAMES_IN_ONE_MINUTE)
if one_minute_part < 0:
one_minute_part = 0
# add extra frames
total_frames += ten_minute_part + one_minute_part
# for 60 fps drop frame calculations, we add twice the number of frames
if fps_int == 60:
total_frames = total_frames * 2
# time codes are on the form 12:12:12;12
smpte_token = ";"
else:
# time codes are on the form 12:12:12:12
smpte_token = ":"
# now split our frames into time code
hours = int(total_frames / (3600 * fps_int))
minutes = int(total_frames / (60 * fps_int) % 60)
seconds = int(total_frames / fps_int % 60)
frames = int(total_frames % fps_int)
return "%02d:%02d:%02d%s%02d" % (hours, minutes, seconds, smpte_token, frames)
|
HanNayeoniee/visual-fatigue-analysis
|
pupil_detection/utils.py
|
utils.py
|
py
| 11,187 |
python
|
en
|
code
| 1 |
github-code
|
6
|
35860329695
|
import os
import subprocess
import tkinter as tk
from tkinter import filedialog, messagebox,ttk
from tkinter import *
def run_command(command):
result = subprocess.run(command, shell=True, capture_output=True, text=True)
return result.stdout.strip()
def browse_directory():
directory_path = filedialog.askdirectory()
if directory_path:
entry_directory_path.delete(0, tk.END)
entry_directory_path.insert(0, directory_path)
def search_files():
file_type = entry_file_type.get().strip()
directory_to_scan = entry_directory_path.get().strip()
if not file_type or not directory_to_scan:
messagebox.showinfo("Error", "Please enter both file type and directory path.")
return
if directory_to_scan == "/":
directory_to_scan = "/ -maxdepth 3"
command = f"sudo find {directory_to_scan} -type f \\( -name *.{file_type} \\) -print0 | xargs -0 du -sh | sort -h "
else:
command = f"find {directory_to_scan} -type f \\( -name *.{file_type} \\) -print0 | xargs -0 du -sh | sort -h "
files_found = run_command(command=command)
text_result.config(state=tk.NORMAL)
text_result.delete("1.0", tk.END)
text_result.insert(tk.END, files_found)
text_result.config(state=tk.DISABLED)
def delete_all_files():
file_type = entry_file_type.get().strip()
directory_to_scan = entry_directory_path.get().strip()
if not file_type or not directory_to_scan:
messagebox.showinfo("Error", "Please enter both file type and directory path.")
return
if directory_to_scan == "/":
directory_to_scan = "/ -maxdepth 3"
command = f"sudo find {directory_to_scan} -type f \\( -name *.{file_type} \\) -print0 | xargs -0 rm -rf"
else:
command = f"find {directory_to_scan} -type f \\( -name *.{file_type} \\) -print0 | xargs -0 rm -rf"
run_command(command=command)
messagebox.showinfo("Deletion Complete", "All files have been deleted.")
def delete_specific_file():
file_path = entry_specific_file.get().strip()
if not file_path:
messagebox.showinfo("Error", "Please enter the path of the file to delete.")
return
try:
os.remove(file_path)
messagebox.showinfo("Deletion Complete", "The file has been deleted.")
except Exception as e:
messagebox.showinfo("Error", f"Failed to delete the file: {e}")
window = tk.Tk()
window.title("File Search and Delete")
window.geometry("600x500")
custom_font = ("Helvetica", 12)
file_types = ["Images", "Videos", "Documents", "Code", "Compressed", "Audio"]
# Dropdown menu for file types
file_type_var = tk.StringVar()
file_type_var.set(file_types[0]) # Set the default selection
file_type_dropdown = ttk.Combobox(window, textvariable=file_type_var, values=file_types, font=custom_font, state="readonly")
file_type_dropdown.grid(row=0, column=2, padx=5, pady=5, sticky="w")
label_file_type = tk.Label(window, text="Enter the file type you want to search for:", font=custom_font)
entry_file_type = tk.Entry(window, width=20, font=custom_font)
label_directory_path = tk.Label(
window,
text="Enter the directory path to scan\n(leave blank for home directory)\n(for root directory enter / (maxdepth will be 3))\n(for current directory just enter .):",
font=custom_font,
)
entry_directory_path = tk.Entry(window, width=40, font=custom_font)
button_browse = tk.Button(window, text="Browse", command=browse_directory, font=custom_font, bg="#f0f0f0", padx=5)
button_search = tk.Button(window, text="Search Files", command=search_files, font=custom_font, bg="#3c8dbc",
fg="white", padx=5)
text_result = tk.Text(window, wrap=tk.WORD, height=8, width=60, font=custom_font, state=tk.DISABLED)
label_specific_file = tk.Label(window, text="Enter the path of the file to delete:", font=custom_font)
entry_specific_file = tk.Entry(window, width=40, font=custom_font)
button_delete_specific_file = tk.Button(window, text="Delete Specific File", command=delete_specific_file,
font=custom_font, bg="#dc3545", fg="white", padx=5)
button_delete_all_files = tk.Button(window, text="Delete All Files", command=delete_all_files, font=custom_font,
bg="#dc3545", fg="white", padx=5)
label_file_type.grid(row=0, column=0, padx=5, pady=5, sticky="w")
entry_file_type.grid(row=0, column=1, padx=5, pady=5, sticky="w")
label_directory_path.grid(row=1, column=0, padx=5, pady=5, sticky="w", columnspan=2)
entry_directory_path.grid(row=2, column=0, padx=5, pady=5, sticky="w", columnspan=2)
button_browse.grid(row=2, column=1, columnspan=3, padx=5, pady=10)
text_result.grid(row=3, column=0, columnspan=3, padx=5, pady=5, sticky="w")
# text_result.grid(row=4, column=0, columnspan=3, padx=5, pady=5)
button_search.grid(row=4, column=0, columnspan=3, padx=5, pady=5, sticky="w")
# button_search.grid(row=3, column=0, columnspan=3, padx=5, pady=10)
label_specific_file.grid(row=5, column=0, padx=5, pady=5, sticky="w")
entry_specific_file.grid(row=6, column=0, padx=5, pady=5, sticky="w")
button_delete_specific_file.grid(row=6, column=1, columnspan=3, padx=5, pady=10)
button_delete_all_files.grid(row=7, column=0, columnspan=3, padx=5, pady=5, sticky="w")
# Run the tkinter main loop
window.mainloop()
|
adityaagg7/Tally-CodeBrewers-INT_WIN
|
OBSOLETE/tkt_scan_updated.py
|
tkt_scan_updated.py
|
py
| 5,333 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20538433969
|
# https://practice.geeksforgeeks.org/problems/nearly-sorted-1587115620/1
"""
Time complexity:- O(N logk)
Space Complexity:- O(K)
"""
import heapq
class Solution:
def nearlySorted(self, arr, k):
res = [] # Result array to store the nearly sorted elements
minHeap = [] # Min heap to maintain the k-sized sliding window
for num in arr:
heapq.heappush(minHeap, num) # Push the current element into the min-heap
if len(minHeap) > k:
# Pop the minimum element if the window size exceeds k
res.append(heapq.heappop(minHeap))
# Process any remaining elements in the min-heap
while minHeap:
res.append(heapq.heappop(minHeap))
return res
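# Quick sanity check (my own example, not part of the original solution):
# every element below is at most k = 3 positions away from its sorted spot.
if __name__ == "__main__":
    print(Solution().nearlySorted([6, 5, 3, 2, 8, 10, 9], k=3))
    # prints [2, 3, 5, 6, 8, 9, 10]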
|
Amit258012/100daysofcode
|
Day58/nearly_sorted.py
|
nearly_sorted.py
|
py
| 760 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2773267706
|
# Given a 2D board and a word, find if the word exists in the grid.
# The word can be constructed from letters of sequentially adjacent cell, where "adjacent" cells are those horizontally or vertically neighboring. The same letter cell may not be used more than once.
# Example:
# board =
# [
# ['A','B','C','E'],
# ['S','F','C','S'],
# ['A','D','E','E']
# ]
# Given word = "ABCCED", return true.
# Given word = "SEE", return true.
# Given word = "ABCB", return false.
class Solution:
def exist(self, board, word: str) -> bool:
        ### brute-force DFS search from every cell...
if len(word)==0:
return True
m = len(board)
if m==0:
return False
n = len(board[0])
if n==0:
return False
        for i in range(m):
            for j in range(n):
                if self.search(board, word, 0, i, j):
                    return True
        return False

    def search(self, board, word, idx, i, j):
        # DFS with backtracking: board[i][j] must match word[idx]; the cell is
        # temporarily marked '#' so it cannot be reused on the current path.
        if idx == len(word):
            return True
        m, n = len(board), len(board[0])
        if i < 0 or j < 0 or i >= m or j >= n or board[i][j] != word[idx]:
            return False
        c = board[i][j]
        board[i][j] = '#'
        res = self.search(board, word, idx + 1, i - 1, j) or self.search(board, word, idx + 1, i, j - 1) or \
              self.search(board, word, idx + 1, i + 1, j) or self.search(board, word, idx + 1, i, j + 1)
        board[i][j] = c
        return res
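# Quick check against the examples given in the problem statement above:
if __name__ == "__main__":
    board = [['A', 'B', 'C', 'E'],
             ['S', 'F', 'C', 'S'],
             ['A', 'D', 'E', 'E']]
    s = Solution()
    print(s.exist([row[:] for row in board], "ABCCED"))  # True
    print(s.exist([row[:] for row in board], "SEE"))     # True
    print(s.exist([row[:] for row in board], "ABCB"))    # False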
|
queryor/algorithms
|
leetcode/79. Word Search.py
|
79. Word Search.py
|
py
| 1,405 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35729296994
|
import klepto
import shelve
import pickle
import numpy as np
from scipy.sparse import *
from pyspark.mllib.recommendation import ALS
from pyspark.sql import SparkSession
############### Load Data ##################
rating_matrix_csc = load_npz('netflix/sparse_matrix_100%.npz').tocsc()
rating_matrix_val_csc = load_npz('netflix/sparse_matrix_validation_75%.npz').tocsc()
print("file load DONE")
############################################
''' Save to file 'tree.pkl' '''
start = 0
end = int(rating_matrix_csc.shape[1] * 0.75)
from pyspark.mllib.recommendation import ALS
from pyspark.sql import SparkSession
from pyspark import SparkConf
from pyspark import SparkContext
class MatrixFactorization:
def __init__(self, maxIter=15, regParam=0.01, rank=10):
self.maxIter = maxIter
self.regParam = regParam
self.rank = rank
conf = SparkConf().setAppName("appName").setMaster("local[*]")
conf.set("spark.driver.memory","8g")
conf.set("spark.executor.memory","8g")
self.spark = SparkContext(conf=conf)
print("New SparkSession started...")
def change_parameter(self, regParam):
self.regParam = regParam
def matrix_factorization(self, train_lst):
ratings = self.spark.parallelize(train_lst)
print('create dataframe!')
model = ALS.train(ratings, self.rank, seed=10, \
iterations=self.maxIter, \
lambda_=self.regParam)
print("MF DONE")
userFeatures = sorted(model.userFeatures().collect(), key=lambda d: d[0], reverse=False)
productFeatures = sorted(model.productFeatures().collect(), key=lambda d: d[0], reverse=False)
userProfile = {each[0]: each[1].tolist() for each in userFeatures}
itemProfile = {each[0]: each[1].tolist() for each in productFeatures}
return userProfile, itemProfile
def end(self):
self.spark.stop()
print("SparkSession stopped.")
from scipy.sparse import find
val_num = rating_matrix_val_csc.getnnz(axis=None)
########################################## For Validation #############################################
def calculate_avg_rating_for_pesudo_user(pseudo_user_lst, sMatrix):
ret_array = np.zeros(sMatrix.shape[0])
ret_array = np.array(sMatrix[:, pseudo_user_lst].sum(axis=1))[:,0]/(sMatrix[:, pseudo_user_lst].getnnz(axis=1)+1e-9)
return ret_array
def pred_RMSE_for_validate_user(user_node_ind, user_profile, item_profile, val_user_list, val_item_list, sMatrix):
print("RMSE calculation on valset qstarted.")
RMSE = 0
i = 0
for userid, itemid in zip(val_user_list, val_item_list):
if i % 50000 == 0:
print("%.2f%%" % (100 * i / val_num))
i += 1
RMSE += (sMatrix[itemid, userid] - np.dot(user_profile[user_node_ind[userid]], item_profile[itemid]))**2
return (RMSE / len(val_user_list))**0.5
def generate_prediction_model(lr_bound, tree, rI, sMatrix, plambda_candidates, validation_set):
''' lr_bound: dict {
level 0: [[left_bound, right_bound]], users' bound for one level, each ele in dictionary represents one node
level 1: [[left_bound, right_bound], [left_bound, right_bound], [left_bound, right_bound]], 3
level 2: ..., 9
} (bound means index)
plambda_candidates: {
level 0: [clambda1, clambda2, clambda3, ...]
level 1: [clambda1, clambda2, clambda3, ...]
level 2: [clambda1, clambda2, clambda3, ...]
}
prediction_model: dict {
level 0: { 'best_lambda': x, 'user_profile': ..., 'item_profile': ...}
level 1: { 'best_lambda': x, 'user_profile': ..., 'item_profile': ...}
level 2: { 'best_lambda': x, 'user_profile': ..., 'item_profile': ...}
}
'''
# MF = MatrixFactorization()
# print("MF session started.")
prediction_model = {}
val_item_list = find(validation_set)[0]
val_user_list = find(validation_set)[1]
user_node_ind = np.zeros(sMatrix.shape[1]) #### notice that index is not id
for level in lr_bound:
# level = "10"
print("level:", level)
prediction_model.setdefault(level, {})
train_lst = []
rmse_for_level = []
for pseudo_user_bound, userid in zip(lr_bound[level], range(len(lr_bound[level]))):
# print(str(userid) + "/" + str(pow(3,int(level))))
if pseudo_user_bound[0] > pseudo_user_bound[1]:
continue
pseudo_user_lst = tree[pseudo_user_bound[0]:(pseudo_user_bound[1] + 1)]
pseudo_user_for_item = calculate_avg_rating_for_pesudo_user(pseudo_user_lst, sMatrix)
train_lst += [(userid, itemid, float(pseudo_user_for_item[itemid])) \
for itemid in range(pseudo_user_for_item.shape[0]) if pseudo_user_for_item[itemid]]
#### find node index for each validation user ####
user_node_ind[pseudo_user_lst] = userid
print("Rating Number of level " + level + ": " + str(len(train_lst)))
#### Train MF and Do validation ####
min_RMSE = -1
for plambda in plambda_candidates[level]:
MF = MatrixFactorization(regParam=plambda)
user_profile, item_profile = MF.matrix_factorization(train_lst)
MF.end() #### close MF spark session
del MF
RMSE = pred_RMSE_for_validate_user(user_node_ind, user_profile, item_profile, val_user_list, val_item_list, validation_set)
rmse_for_level.append(RMSE)
            if min_RMSE == -1 or RMSE < min_RMSE:  # '==' rather than 'is' for an int comparison
min_RMSE = RMSE
min_user_profile, min_item_profile, min_lambda = user_profile, item_profile, plambda
print("rmse_for_level: ", rmse_for_level)
prediction_model[level]['upro'], prediction_model[level]['ipro'], prediction_model[level]['plambda'] \
= min_user_profile, min_item_profile, min_lambda
d = shelve.open("./prediction_model/"+level, protocol=pickle.HIGHEST_PROTOCOL)
d["content"] = prediction_model[level]
d.close()
print("level " + level + " training DONE")
return prediction_model
import klepto
import numpy as np
Tree = klepto.archives.dir_archive('treeFile', {}, serialized=True)
Tree.load()
plambda_candidates = {"0":[0.0005, 0.0006, 0.0007, 0.0008, 0.0009, 0.001, 0.002, 0.003, 0.004, 0.005],
"1":[0.0005, 0.0006, 0.0007, 0.0008, 0.0009, 0.001, 0.002, 0.003, 0.004, 0.005],
"2":[0.0005, 0.0006, 0.0007, 0.0008, 0.0009, 0.001, 0.002, 0.003, 0.004, 0.005],
"3":[0.0005, 0.0006, 0.0007, 0.0008, 0.0009, 0.001, 0.002, 0.003, 0.004, 0.005],
"4":[0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.007, 0.008, 0.009, 0.010],
"5":[0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.007, 0.008, 0.009, 0.010],
"6":[0.006, 0.007, 0.008, 0.009, 0.01, 0.011, 0.012, 0.013, 0.014, 0.015],
"7":[0.006, 0.007, 0.008, 0.009, 0.01, 0.011, 0.012, 0.013, 0.014, 0.015],
"8":[0.007, 0.008, 0.009, 0.010, 0.014, 0.018, 0.02, 0.022, 0.024, 0.026],
"9":[0.007, 0.008, 0.009, 0.010, 0.014, 0.018, 0.02, 0.022, 0.024, 0.026],
"10":[0.007, 0.008, 0.009, 0.010, 0.014, 0.018, 0.02, 0.022, 0.024, 0.026]}
# for level in Tree["lr_bound"]:
# plambda_candidates[level] = list(np.arange(0.001, 0.05, 0.005))
prediction_model = generate_prediction_model \
(Tree['lr_bound'], \
Tree['tree'], \
Tree['rI'], \
rating_matrix_csc[:, start:end],
plambda_candidates,
rating_matrix_val_csc)
import pickle
import shelve
d = shelve.open("prediction_model", protocol=pickle.HIGHEST_PROTOCOL)
d["content"] = prediction_model
d.close()
print("Write DONE!")
|
clamli/Decision-tree-model-for-active-learning
|
Netflix-based/MF param train.py
|
MF param train.py
|
py
| 8,140 |
python
|
en
|
code
| 7 |
github-code
|
6
|
14540936446
|
"""Escea Fireplace UDP messaging module
Implements simple UDP messages to Fireplace and receiving responses
"""
import asyncio
import logging
from asyncio import Lock
from asyncio.base_events import BaseEventLoop
from async_timeout import timeout
from typing import Any, Dict
# Pescea imports:
from .message import Message, CommandID, expected_response
from .udp_endpoints import open_local_endpoint, open_remote_endpoint
_LOG = logging.getLogger(__name__)
# Port used for discovery and integration
# (same port is used for replies)
CONTROLLER_PORT = 3300
# Time to wait for results from server
REQUEST_TIMEOUT = 5.0
Responses = Dict[str, Message]
class Datagram:
"""Send UDP Datagrams to fireplace and receive responses"""
def __init__(
self, event_loop: BaseEventLoop, device_ip: str, sending_lock: Lock
) -> None:
"""Create a simple datagram client interface.
Args:
event_loop: loop to use for coroutines
            device_ip: Device network address. Usually specified as an IP
                address (can be a broadcast address in the case of fireplace search)
sending_lock: Provided to attempt to make thread safe
Raises:
ConnectionRefusedError: If no Escea fireplace is discovered, or no
device discovered at the given IP address, or the UID does not match
"""
self._ip = device_ip
self._event_loop = event_loop
self.sending_lock = sending_lock
@property
def ip(self) -> str:
"""Target IP address"""
return self._ip
def set_ip(self, ip_addr: str) -> None:
"""Change the Target IP address"""
self._ip = ip_addr
async def send_command(self, command: CommandID, data: Any = None) -> Responses:
"""Send command via UDP
Returns received response(s) and IP addresses they come from
Args:
- command: Fireplace command (refer Message)
- data: ignored except for setting desired temperature
Raises ConnectionError if unable to send command
"""
message = Message(command=command, set_temp=data)
responses = dict() # type: Responses
broadcast = command == CommandID.SEARCH_FOR_FIRES
local = None
remote = None
# set up receiver before we send anything
async with self.sending_lock:
try:
local = await open_local_endpoint(
port=CONTROLLER_PORT,
loop=self._event_loop,
reuse_port=True,
)
remote = await open_remote_endpoint(
host=self._ip,
port=CONTROLLER_PORT,
loop=self._event_loop,
allow_broadcast=broadcast,
)
remote.send(message.bytearray_)
remote.close()
async with timeout(REQUEST_TIMEOUT):
while True:
data, (addr, _) = await local.receive()
response = Message(incoming=data)
if response.is_command:
if not broadcast:
_LOG.error(
"Unexpected command id: %s", response.command_id
)
else: # response
if response.response_id != expected_response(command):
_LOG.debug(
"Message response id: %s does not match command id: %s",
response.response_id,
command,
)
else:
responses[addr] = response
if not broadcast:
break
local.close()
except (asyncio.TimeoutError, ValueError):
pass
finally:
if remote is not None and not remote.closed:
remote.close()
if local is not None and not local.closed:
local.close()
if len(responses) == 0:
_LOG.debug(
"Unable to send UDP message - Local endpoint closed:%s, Remote endpoint closed:%s",
"None" if local is None else local.closed,
"None" if remote is None else remote.closed,
)
raise ConnectionError("Unable to send/receive UDP message")
return responses
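# A minimal usage sketch (my own illustration, not part of the original module):
# broadcasting a search for fireplaces from inside a running asyncio loop.
#
#   async def discover(loop):
#       dgram = Datagram(loop, "255.255.255.255", Lock())
#       responses = await dgram.send_command(CommandID.SEARCH_FOR_FIRES)
#       return list(responses.keys())  # IP addresses that answered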
|
lazdavila/pescea
|
pescea/datagram.py
|
datagram.py
|
py
| 4,657 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14200766996
|
import json
import os
import traceback
from discord import AllowedMentions, Embed, Forbidden
from discord.ext import commands
class Core(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.db = self.bot.db
async def push_link_json(self, guild) -> None:
data = {}
for invite in (await guild.invites()):
data[f'{invite.code}'] = f'{invite.uses}'
        with open(f'./data/{guild.id}.json', 'w') as file:
            json.dump(data, file, indent=4)
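        # The file written above maps invite codes to use counts (as strings), e.g.
        # {"abc123": "4", "xyz789": "0"} (illustrative values).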
@commands.Cog.listener()
async def on_invite_create(self, invite):
await self.push_link_json(invite.guild)
@commands.Cog.listener()
async def on_invite_remove(self, invite):
await self.push_link_json(invite.guild)
@commands.Cog.listener()
async def on_guild_join(self, guild):
await self.push_link_json(guild)
@commands.Cog.listener()
async def on_member_join(self, member):
guild_data = self.db.list_invite_link(member.guild.id)
if not guild_data:
return
data = {}
for invite in (await member.guild.invites()):
data[f'{invite.code}'] = f'{invite.uses}'
if os.path.exists(f'./data/{member.guild.id}.json'):
with open(f'./data/{member.guild.id}.json', 'r', encoding='UTF-8') as config:
g_data = json.load(config)
else:
return
code = list(dict(data.items() - g_data.items()).items())[0]
link_role = self.db.fetch_invite_role(member.guild.id, code[0])
if not link_role:
return
role = member.guild.get_role(link_role[0])
if role:
try:
await member.add_roles(role)
except Forbidden:
return
await self.push_link_json(member.guild)
async def setup(bot):
await bot.add_cog(Core(bot))
|
yutarou12/ChIn-RoleBot
|
cogs/Core.py
|
Core.py
|
py
| 1,875 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1924258051
|
"""Regridding operator."""
# Standard library
import dataclasses as dc
import typing
# Third-party
import numpy as np
import xarray as xr
from rasterio import transform, warp
from rasterio.crs import CRS
Resampling: typing.TypeAlias = warp.Resampling
# For more information: check https://epsg.io/<id>
CRS_ALIASES = {
"geolatlon": "epsg:4326", # WGS84
"swiss": "epsg:21781", # Swiss CH1903 / LV03
"swiss03": "epsg:21781", # Swiss CH1903 / LV03
"swiss95": "epsg:2056", # Swiss CH1903+ / LV95
"boaga-west": "epsg:3003", # Monte Mario / Italy zone 1
"boaga-east": "epsg:3004", # Monte Mario / Italy zone 2
}
def _get_crs(geo):
if geo["gridType"] != "rotated_ll":
raise NotImplementedError("Unsupported grid type")
lon = geo["longitudeOfSouthernPoleInDegrees"]
lat = -1 * geo["latitudeOfSouthernPoleInDegrees"]
return CRS.from_string(
f"+proj=ob_tran +o_proj=longlat +o_lat_p={lat} +lon_0={lon} +datum=WGS84"
)
def _normalise(angle: float) -> float:
return np.fmod(angle + 180, 360) - 180
@dc.dataclass
class RegularGrid:
"""Class defining a regular grid.
Attributes
----------
crs : CRS
Coordinate reference system.
nx : int
Number of grid points in the x direction.
ny : int
Number of grid points in the y direction.
xmin : float
Coordinate of the first grid point in the x direction.
xmax : float
Coordinate of the last grid point in the x direction.
ymin : float
Coordinate of the first grid point in the y direction.
ymax : float
Coordinate of the last grid point in the y direction.
"""
crs: CRS
nx: int
ny: int
xmin: float
xmax: float
ymin: float
ymax: float
@classmethod
def from_field(cls, field: xr.DataArray):
"""Extract grid parameters from grib metadata.
Parameters
----------
field : xarray.DataArray
field containing the relevant metadata.
"""
geo = field.geography
obj = cls(
crs=_get_crs(geo),
nx=geo["Ni"],
ny=geo["Nj"],
xmin=_normalise(geo["longitudeOfFirstGridPointInDegrees"]),
xmax=_normalise(geo["longitudeOfLastGridPointInDegrees"]),
ymin=geo["latitudeOfFirstGridPointInDegrees"],
ymax=geo["latitudeOfLastGridPointInDegrees"],
)
if abs(obj.dx - geo["iDirectionIncrementInDegrees"]) > 1e-5:
raise ValueError("Inconsistent grid parameters")
if abs(obj.dy - geo["jDirectionIncrementInDegrees"]) > 1e-5:
raise ValueError("Inconsistent grid parameters")
return obj
@classmethod
def parse_regrid_operator(cls, op: str):
"""Parse fieldextra out_regrid_target string.
Parameters
----------
op : str
fieldextra out_regrid_target definition
            i.e. crs,xmin,ymin,xmax,ymax,dx,dy.
"""
crs_str, *grid_params = op.split(",")
crs = CRS.from_string(CRS_ALIASES[crs_str])
xmin, ymin, xmax, ymax, dx, dy = map(float, grid_params)
if abs(dx) < 1e-10 or abs(dy) < 1e-10:
raise ValueError("Inconsistent regrid parameters")
nx = (xmax - xmin) / dx + 1
ny = (ymax - ymin) / dy + 1
if nx != int(nx) or ny != int(ny):
raise ValueError("Inconsistent regrid parameters")
return cls(crs, int(nx), int(ny), xmin, xmax, ymin, ymax)
@property
def dx(self) -> float:
return (self.xmax - self.xmin) / (self.nx - 1)
@property
def dy(self) -> float:
return (self.ymax - self.ymin) / (self.ny - 1)
@property
def transform(self) -> transform.Affine:
return transform.from_origin(
west=self.xmin - self.dx / 2,
north=self.ymax + self.dy / 2,
xsize=self.dx,
ysize=self.dy,
)
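# Example (illustrative numbers, not taken from the original module): parsing a
# fieldextra-style target string into a RegularGrid in Swiss LV03 coordinates.
#
#   grid = RegularGrid.parse_regrid_operator("swiss,480000,60000,840000,300000,1000,1000")
#   # grid.nx == 361, grid.ny == 241, grid.dx == grid.dy == 1000.0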
def regrid(field: xr.DataArray, dst: RegularGrid, resampling: Resampling):
"""Regrid a field.
Parameters
----------
field : xarray.DataArray
Input field defined on a regular grid in rotated latlon coordinates.
dst : RegularGrid
Destination grid onto which to project the field.
resampling : Resampling
Resampling method, alias of rasterio.warp.Resampling.
Raises
------
ValueError
If the input field is not defined on a regular grid in rotated latlon or
if the input field geography metadata does not have consistent grid parameters.
Returns
-------
xarray.DataArray
Field regridded in the destination grid.
"""
src = RegularGrid.from_field(field)
def reproject_layer(field):
output = np.zeros((dst.ny, dst.nx))
warp.reproject(
source=field[::-1],
destination=output,
src_crs=src.crs,
src_transform=src.transform,
dst_crs=dst.crs,
dst_transform=dst.transform,
resampling=resampling,
)
return output[::-1]
# output dims renamed to workaround limitation that overlapping dims in the input
# must not change in size
return xr.apply_ufunc(
reproject_layer,
field,
input_core_dims=[["y", "x"]],
output_core_dims=[["y1", "x1"]],
vectorize=True,
).rename({"x1": "x", "y1": "y"})
|
MeteoSwiss-APN/icon_data_processing_incubator
|
src/idpi/operators/regrid.py
|
regrid.py
|
py
| 5,416 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30168367656
|
# %%
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import ks_2samp
import seaborn as sns
import pandas as pd
import random
from collections import defaultdict
from scipy.stats import ks_2samp, wasserstein_distance
from doubt import Boot
from nobias import ExplanationShiftDetector
random.seed(0)
# Scikit Learn
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import roc_auc_score, mean_squared_error
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.neural_network import MLPRegressor
from sklearn.linear_model import LogisticRegression, Lasso, LinearRegression
from sklearn.ensemble import (
GradientBoostingRegressor,
GradientBoostingClassifier,
)
plt.style.use("seaborn-whitegrid")
from matplotlib import rcParams
rcParams["axes.labelsize"] = 14
rcParams["xtick.labelsize"] = 12
rcParams["ytick.labelsize"] = 12
rcParams["figure.figsize"] = 16, 8
rcParams.update({"font.size": 22})
from xgboost import XGBRegressor, XGBClassifier
import shap
from alibi_detect.cd import ChiSquareDrift, TabularDrift, ClassifierDrift
from tqdm import tqdm
import lime.lime_tabular
import os
import sys
def blockPrint():
sys.stdout = open(os.devnull, "w")
blockPrint()
# %%
# %%
def create_explanation(X, model):
exp = X.copy()[:0]
for i, _ in tqdm(enumerate(X.iterrows())):
ex = explainer.explain_instance(X.iloc[i], model.predict)
exx = pd.DataFrame(ex.local_exp[0], columns=["feature", "weight"]).sort_values(
"feature"
)
exx.feature = X.columns
exx = exx.T
# Make header first row
new_header = exx.iloc[0] # grab the first row for the header
exx = exx[1:] # take the data less the header row
exx.columns = new_header
exx.reset_index(inplace=True)
exp = pd.concat([exp, exx])
return exp
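# create_explanation returns one row per explained instance, with one column per
# input feature holding the LIME weight for that feature (plus a leftover "index"
# column, which train_esd drops again below).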
def train_esd(X, X_ood, model, detector):
aux = create_explanation(X, model)
aux["y"] = 0
aux_ood = create_explanation(X_ood, model)
aux_ood["y"] = 1
df = aux.append(aux_ood).drop(columns=["index"])
X_tr, X_te, y_tr, y_te = train_test_split(
df.drop("y", axis=1), df["y"], test_size=0.5, random_state=42
)
detector.fit(X_tr, y_tr)
# return auc
return roc_auc_score(y_te, detector.predict_proba(X_te)[:, 1])
# %%
res = []
for i in np.linspace(0, 1, 11):
rho = i
## Sensitivity experiment
sigma = 1
mean = [0, 0]
cov = [[sigma, 0], [0, sigma]]
samples = 5_000
x1, x2 = np.random.multivariate_normal(mean, cov, samples).T
x3 = np.random.normal(0, sigma, samples)
# Different values
mean = [0, 0]
cov = [[sigma, rho], [rho, sigma]]
x11, x22 = np.random.multivariate_normal(mean, cov, samples).T
x33 = np.random.normal(0, sigma, samples)
# Create Data
df = pd.DataFrame(data=[x1, x2, x3]).T
df.columns = ["Var%d" % (i + 1) for i in range(df.shape[1])]
# df["target"] = np.where(df["Var1"] * df["Var2"] > 0, 1, 0)
df["target"] = (
df["Var1"] * df["Var2"] + df["Var3"] + np.random.normal(0, 0.1, samples)
)
df["target"] = np.where(df["target"] > df["target"].mean(), 1, 0)
X_ood = pd.DataFrame(data=[x11, x22, x33]).T
X_ood.columns = ["Var%d" % (i + 1) for i in range(X_ood.shape[1])]
## Split Data
X_tr, X_te, y_tr, y_te = train_test_split(df.drop(columns="target"), df["target"])
## Fit our ML model
model = GradientBoostingClassifier()
model_r = GradientBoostingRegressor()
# model = LinearRegression()
model.fit(X_tr, y_tr)
model_r.fit(X_tr, y_tr)
# Input KS Test
input_ks = 1
# Classifier Drift
classifierDrift = 1
# Output test
output_ks = 1
wass = 1
# Uncertainty
unc = 1
# Explanation Shift
ESD = ExplanationShiftDetector(
model=XGBClassifier(),
gmodel=Pipeline(
[
("scaler", StandardScaler()),
("lr", LogisticRegression(penalty="l1", solver="liblinear")),
]
),
)
ESD.fit(X_tr, y_tr, X_ood)
esd = ESD.get_auc_val()
# Lime
explainer = lime.lime_tabular.LimeTabularExplainer(
X_tr.values,
feature_names=X_tr.columns,
class_names=["y"],
discretize_continuous=True,
verbose=True,
mode="regression",
)
auc_lime = train_esd(
X_te, X_ood, XGBClassifier().fit(X_tr, y_tr), LogisticRegression()
)
res.append([rho, input_ks, classifierDrift, output_ks, wass, unc, esd, auc_lime])
# %%
results = pd.DataFrame(
res,
columns=[
"rho",
"input_ks",
"classifierDrift",
"output_ks",
"wass",
"unc",
"esd",
"lime",
],
)
# %%
plt.figure()
plt.plot(results["rho"], results["esd"], label="Explanation Shift - SHAP")
ci = 1.96 * np.std(results["esd"]) / np.sqrt(len(results["rho"]))
plt.fill_between(
results["rho"], (results["esd"] - ci), (results["esd"] + ci), alpha=0.1
)
plt.plot(results["rho"], results["lime"], label="Explanation Shift - Lime")
ci = 1.96 * np.std(results["lime"]) / np.sqrt(len(results["rho"]))
plt.fill_between(
results["rho"], (results["lime"] - ci), (results["lime"] + ci), alpha=0.1
)
plt.legend()
plt.xlabel("Correlation coefficient")
plt.ylabel("AUC Explanation Shift Detector")
plt.title("Sensitivity to Multicovariate Shift")
plt.tight_layout()
plt.savefig("images/SOTAsensitivityLime.pdf", bbox_inches="tight")
plt.show()
# %%
|
cmougan/ExplanationShift
|
syntheticLime.py
|
syntheticLime.py
|
py
| 5,594 |
python
|
en
|
code
| 1 |
github-code
|
6
|
29792375641
|
#!/usr/bin/env python3
import sys
import numpy as np
from scipy.spatial.transform import Rotation as R
from scipy.interpolate import interp1d
import rospy
import moveit_commander
import actionlib
from franka_gripper.msg import MoveGoal, MoveAction
from geometry_msgs.msg import Point, Pose, PoseStamped
from control_msgs.msg import FollowJointTrajectoryAction, FollowJointTrajectoryGoal
from trajectory_msgs.msg import JointTrajectoryPoint
from sensor_msgs.msg import JointState
from moveit_commander.conversions import pose_to_list
from std_msgs.msg import Bool
CONTROLLER_TOPIC = "/position_joint_trajectory_controller/follow_joint_trajectory"
DESIRED_JOINT_STATE_TOPIC = "/joint_states_desired"
VELOCITY_MULTIPLIER = 0.2
MAX_COMMAND_POINT_DIFF = 0.05
START_JOINT_VALUES = [0, -0.785, 0, -2.356, 0, 1.571, 0.785]
# Size of goal box
GOAL_SIZE = (0.05, 0.06, 0.055)
# Offset to account for undetected object depth as the camera detects
# a point on the front surface of the goal box
(GOAL_OFFSET_X, GOAL_OFFSET_Y, GOAL_OFFSET_Z) = (-0.03, 0, 0)
DEFAULT_PLANNING_TIME = 0.5
class DemoInterface(object):
"""Demo Interface"""
def __init__(self, node_initialized=False):
if not node_initialized:
rospy.init_node('demo_interface', anonymous=True)
self.set_parameters()
self.setup_moveit()
self.set_ee_approach_dict()
self.prev_goal_point = None
# self.return_first_solution_pub = rospy.Publisher('/return_first_solution', Bool,
# queue_size=1, latch=True)
# self.return_first_solution_pub.publish(Bool(False))
rospy.set_param("return_first_solution", False)
if self.simulation:
rospy.logwarn("Running demo in simulation")
else:
rospy.logwarn("Running demo on hardware")
self.create_hardware_controller_clients()
def set_parameters(self):
self.group_name = rospy.get_param('/group_name', "panda_arm")
self.planner_id = rospy.get_param('/planner_id', "RRTstarkConfigDefault")
self.simulation = rospy.get_param('/simulation', False)
self.planning_time = rospy.get_param('/planning_time', DEFAULT_PLANNING_TIME)
self.end_effector_link = rospy.get_param('/end_effector_link', "panda_hand")
self.goal_object_topic = rospy.get_param('/goal_object_topic', '/goal_object_position')
self.start_joint_values = START_JOINT_VALUES
self.goal_size = GOAL_SIZE
self.goal_offset = Point(GOAL_OFFSET_X, GOAL_OFFSET_Y, GOAL_OFFSET_Z)
def setup_moveit(self):
moveit_commander.roscpp_initialize(sys.argv)
self.scene = moveit_commander.PlanningSceneInterface()
self.move_group = moveit_commander.MoveGroupCommander(self.group_name)
self.set_planner_id(self.planner_id)
self.set_planning_time(self.planning_time)
self.move_group.set_end_effector_link(self.end_effector_link)
def set_planner_id(self, planner_id):
self.move_group.set_planner_id(planner_id)
def set_planning_time(self, planning_time):
self.move_group.set_planning_time(planning_time)
@property
def get_planning_time(self):
return self.move_group.get_planning_time()
def set_ee_approach_dict(self):
rpy_rot_y90 = R.from_euler('y', 90, degrees=True)
self.ee_approach_dict = {
"top": R.from_euler('y', 0, degrees=True) * R.from_euler('x', 180, degrees=True),
"front": rpy_rot_y90,
"back": rpy_rot_y90 * R.from_euler('x', 180, degrees=True),
"left": rpy_rot_y90 * R.from_euler('x', 90, degrees=True),
"right": rpy_rot_y90 * R.from_euler('x', -90, degrees=True)
}
def create_hardware_controller_clients(self):
self.trajectory_client = actionlib.SimpleActionClient(CONTROLLER_TOPIC,
FollowJointTrajectoryAction)
while not self.trajectory_client.wait_for_server(rospy.Duration(2.0)):
rospy.loginfo(f"Waiting for the {CONTROLLER_TOPIC} action server")
self.gripper_client = actionlib.SimpleActionClient('/franka_gripper/move',
MoveAction)
self.gripper_client.wait_for_server()
self.close_goal = MoveGoal(width=0.054, speed=0.08)
self.open_goal = MoveGoal(width=0.08, speed=0.08)
def open_gripper(self, wait=True):
self.gripper_client.send_goal(self.open_goal)
self.gripper_client.wait_for_result(rospy.Duration.from_sec(5.0))
def close_gripper(self, wait=True):
self.gripper_client.send_goal(self.close_goal)
self.gripper_client.wait_for_result(rospy.Duration.from_sec(5.0))
def plan_to_start(self):
return self.plan_to_joint_goal(self.start_joint_values)
def plan_to_joint_goal(self, joint_values, return_first_solution=False):
"""Plan to joint goal.
Returns:
Result tuple (bool, RobotTrajectory, float, MoveItErrorCode):
(success, path, planning time, error code)
"""
# self.return_first_solution_pub.publish(Bool(return_first_solution))
rospy.set_param("return_first_solution", return_first_solution)
self.move_group.set_joint_value_target(joint_values)
return self.move_group.plan()
def plan_to_point(self, point, approach="top"):
"""Plan to point goal with specified end-effector approach.
Returns:
Result tuple (bool, RobotTrajectory, float, MoveItErrorCode):
(success, path, planning time, error code)
Raises:
KeyError: If invalid approach arg provided.
"""
# Adding goal object to scene so we plan around it
self.publish_goal_object(point)
pose_goal = self.create_grasp_pose_msg(point, approach=approach)
self.move_group.set_pose_target(pose_goal)
return self.move_group.plan()
def create_grasp_pose_msg(self, point, approach="top"):
"""Create pose msg based on object point and desired approach.
Args:
point (Point): Point in space of the object
approach (str): Descriptor of the desired approach
orientation of the end effector
Returns:
Pose: End effector pose to grasp object at given position and
desired approach
"""
pose = Pose()
if approach == "interpolated":
theta = self.get_angle(float(point.z))
rpy_rot = (R.from_euler('y', theta, degrees=True)
* R.from_euler('x', 180, degrees=True))
else:
rpy_rot = self.ee_approach_dict[approach]
# Move x point back since we want to focus on the center of the
# box, but we are given the position of the center of the front
# side (The box in use has a depth of about 6 cm)
pose.position = point
quat = rpy_rot.as_quat()
pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w = (
quat[0], quat[1], quat[2], quat[3])
return pose
def get_angle(self, height):
heights = np.linspace(0, 1, num=20, endpoint=True)
angles = [-(45 + 90*h) for h in heights]
f = interp1d(heights, angles)
return float(f(height))
def go_to_start(self, wait=True):
self.go_to_joint_goal(self.start_joint_values, wait=wait)
def go_to_joint_goal(self, joint_values, wait=True):
# Stop the robot before planning & executing new path
self.smooth_stop()
self.move_group.go(joint_values, wait)
def go_to_point(self, point, approach="top", wait=True):
# Stop the robot before planning & executing new path
self.smooth_stop()
# Adding goal object to scene so we plan around it
self.publish_goal_object(point)
pose_goal = self.create_grasp_pose_msg(point, approach=approach)
self.move_group.set_pose_target(pose_goal)
self.move_group.go(wait)
def publish_goal_object(self, point):
self.publish_object("goal", point, size=self.goal_size)
def publish_object_xyz(self, name, x, y, z, size, primitive='box', remove=False):
point = Point(x, y, z)
self.publish_object(name, point, size, primitive=primitive, remove=remove)
def publish_object(self, name, point, size, primitive='box', remove=False):
if remove:
self.remove_object(name)
object_pose = PoseStamped()
object_pose.header.frame_id = self.move_group.get_planning_frame()
object_pose.pose.position = point
if primitive == 'box':
self.scene.add_box(name, object_pose, size=size)
else:
self.scene.add_sphere(name, object_pose, radius=size)
def remove_object(self, name):
self.scene.remove_world_object(name)
def listen_for_goal(self):
rospy.Subscriber(self.goal_object_topic, Point, callback=self.filter_detection_noise,
queue_size=1)
# rospy.spin()
def filter_detection_noise(self, goal_point):
"""
TODO 1: if the planning fails, and the goal point doesn't move above the threshold, then
the robot will not attempt to replan. Should incorporate a method of evaluating whether or
not planning was successful, and if it wasn't then replan to the previous goal point.
TODO 2: Also looks like the demo interface can get behind the camera data stream
occasianally, where the camera is able to recognize the position of the box and publishes
it, but the rviz planning scene shows an old position of the box. Still need to diagnose
why this happens.
"""
rospy.loginfo_throttle(3.0, "filtering detection noise")
if self.prev_goal_point:
diff = self.euclidean_distance(self.prev_goal_point, goal_point)
if diff > MAX_COMMAND_POINT_DIFF:
rospy.loginfo("Goal point movement detected, attempting new plan")
goal_point = self.offset_point(goal_point, self.goal_offset)
self.go_to_point(goal_point, wait=False)
else:
rospy.loginfo("First goal point received, attempting to plan")
goal_point = self.offset_point(goal_point, self.goal_offset)
self.go_to_point(goal_point, wait=False)
self.prev_goal_point = goal_point
def euclidean_distance(self, point1, point2):
arr1 = np.array((point1.x, point1.y, point1.z))
arr2 = np.array((point2.x, point2.y, point2.z))
return np.linalg.norm(arr2 - arr1)
def offset_point(self, point, offset):
point_offset = Point()
point_offset.x = point.x + offset.x
point_offset.y = point.y + offset.y
point_offset.z = point.z + offset.z
return point_offset
def smooth_stop(self):
if self.simulation:
rospy.logwarn("Stopping execution (SIMULATION)")
self.move_group.stop()
else:
# might be a good idea to check if we're already stopped
# before taking the effort to create a stopping msg
# if self.trajectory_client.simple_state == 2:
# return
rospy.logwarn("Stopping execution (HARDWARE)")
stop_goal = self.get_stop_goal()
self.trajectory_client.send_goal_and_wait(stop_goal)
def get_stop_goal(self):
goal = FollowJointTrajectoryGoal()
trajectory_point = JointTrajectoryPoint()
desired_joint_state = rospy.wait_for_message(DESIRED_JOINT_STATE_TOPIC, JointState)
positions = desired_joint_state.position
velocities = desired_joint_state.velocity
trajectory_point.time_from_start = rospy.Duration(0.5)
# Fill msg vectors
for i in range(7):
# Add joint names
goal.trajectory.joint_names.append(f"panda_joint{i+1}")
# Add positions
trajectory_point.positions.append(positions[i] + (velocities[i] * VELOCITY_MULTIPLIER))
# Add velocities (ALL 0)
trajectory_point.velocities.append(0.0)
goal.trajectory.points.append(trajectory_point)
return goal
def all_close(self, goal, actual, tolerance):
"""
Convenience method for testing if a list of values are within a
tolerance of their counterparts in another list.
Args:
goal (list): A list of goal floats, a Pose or a PoseStamped
actual (list): A list of floats, a Pose or a PoseStamped
tolerance (float): Allowed difference between goal and actual
values
Returns:
Bool: Successful if true
"""
if type(goal) is list:
for index in range(len(goal)):
if abs(actual[index] - float(goal[index])) > tolerance:
return False
elif type(goal) is PoseStamped:
return self.all_close(goal.pose, actual.pose, tolerance)
elif type(goal) is Pose:
return self.all_close(pose_to_list(goal), pose_to_list(actual),
tolerance)
return True
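# A minimal usage sketch (hypothetical target point, not part of the original script):
#
#   if __name__ == "__main__":
#       demo = DemoInterface()
#       demo.go_to_start()
#       demo.go_to_point(Point(0.5, 0.0, 0.4), approach="top")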
|
dwya222/end_effector_control
|
scripts/demo_interface.py
|
demo_interface.py
|
py
| 13,316 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21375968156
|
from django.db import models
# Create your models here.
class Movie(models.Model):
id = models.BigAutoField(
primary_key=True
)
name = models.CharField(
max_length=250,
verbose_name="Moive name"
)
desc = models.TextField(
verbose_name="Description"
)
year = models.PositiveBigIntegerField(
verbose_name="release year"
)
image = models.ImageField(
upload_to='gallery',
verbose_name="Images"
)
director = models.CharField(
max_length=100
)
|
adhilshaw/moviedemoapp
|
movieapp/models.py
|
models.py
|
py
| 558 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71874843708
|
# Import necessary modules and libraries
from dotenv import load_dotenv
import os
import base64
from requests import post, get
import json
# Load environment variables from .env file
load_dotenv()
# Import CLIENT_ID and CLIENT_SECRET from environment variables
client_id = os.getenv("CLIENT_ID")
client_secret = os.getenv("CLIENT_SECRET")
# Function to get Spotify API token using client credentials
def get_token():
# Combine client_id and client_secret and encode in base64
auth_string = client_id + ":" + client_secret
auth_bytes = auth_string.encode("utf-8")
auth_base64 = str(base64.b64encode(auth_bytes), "utf-8")
# API endpoint for token retrieval
url = "https://accounts.spotify.com/api/token"
# Headers for token request
headers = {
"Authorization": "Basic " + auth_base64,
"Content-Type": "application/x-www-form-urlencoded"
}
# Data for token request
data = {"grant_type": "client_credentials"}
# Send a POST request to retrieve the token
result = post(url, headers=headers, data=data)
# Parse the JSON response to extract the access token
json_result = json.loads(result.content)
token = json_result["access_token"]
return token
# Function to generate the authorization header with the provided token
def get_auth_header(token):
return {"Authorization": "Bearer " + token}
# Function to search for an artist by name
def search_for_artist(token, artist_name):
url = "https://api.spotify.com/v1/search"
headers = get_auth_header(token)
query = f"?q={artist_name}&type=artist&limit=1"
# Build the query URL and send a GET request to search for the artist
query_url = url + query
result = get(query_url, headers=headers)
# Parse the JSON response to extract the artist information
json_result = json.loads(result.content)["artists"]["items"]
if len(json_result) == 0:
print("No artist found...")
return None
else:
return json_result[0]
# Function to get top tracks of an artist in a specific country
def get_songs_by_artist(token, artist_id, country):
url =f"https://api.spotify.com/v1/artists/{artist_id}/top-tracks?country={country}"
headers = get_auth_header(token)
# Send a GET request to retrieve the artist's top tracks in the specified country
result = get(url, headers=headers)
# Parse the JSON response to extract the list of tracks
json_result = json.loads(result.content)["tracks"]
return json_result
def get_artist_name():
artist_name = input("Enter an artist name: ")
return artist_name
# Get the API token
token = get_token()
artist_name = get_artist_name()
# Search for an artist by name and get their ID
result = search_for_artist(token, artist_name)
if result is None:
    exit(1)  # nothing to do if no artist was found
artist_id = result["id"]
# Get top tracks of the artist in different countries
songsTR = get_songs_by_artist(token, artist_id, "TR")
songsUS = get_songs_by_artist(token, artist_id, "US")
#clears the text file
open("text.txt", "w").close()
#Opens a text file
file = open("text.txt", "a")
#Writes top ten songs from us into the text file
for idx, song in enumerate(songsUS):
file.write(f"{idx + 1}. {song['name']}\n")
#Closes the text file
file.close()
# Print the top tracks for each country
for idx, song in enumerate(songsTR):
print(f"{idx + 1}. {song['name']}")
print("\n")
for idx, song in enumerate(songsUS):
print(f"{idx + 1}. {song['name']}")
|
linnathoncode/SpotifyApiApp
|
main.py
|
main.py
|
py
| 3,588 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13300580084
|
# -*- coding: utf-8 -*-
"""
Helper functions for classification and quantization
Created on Mon Dec 5 14:50:27 2016
@author: brady
"""
import os
import numpy as np
from sklearn.tree import tree, _tree
def quantize(data, precision):
"""
Turns floating point into fixed point data
:param data: vector to quantize, assumes np-array
:param precision: number of fixed points bits to used
:returns: vector of length[data], with precision bits
"""
data = np.array(data)
data = data*1e5
xmax = np.amax(np.abs(data))
#if xmax <= 0:
# xmax = 0.000001 # helps with stability
xq = xmax * np.minimum(
np.round(data*(2**(precision-1))/xmax) / (2**(precision-1)),
1-1/(2**(precision-1))
)
return xq/1e5
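# Worked example (numbers chosen for illustration, precision = 4 bits): values are
# snapped to multiples of xmax / 2**(precision-1) and the positive end is clipped
# to xmax * (1 - 1 / 2**(precision-1)).
#   quantize([0.1, -0.25, 0.7], 4)  ->  array([ 0.0875, -0.2625,  0.6125])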
def tree_to_code(tree, feature_names, precision):
tree_ = tree.tree_
feature_name = [
feature_names[i] if i != _tree.TREE_UNDEFINED else "undefined!"
for i in tree_.feature
]
valid_thresh = [
t if t > 0 else np.min(np.abs(tree_.threshold))
for t in tree_.threshold
]
quant_thresh = quantize(valid_thresh, precision)
def recurse(node, depth, quant_tree_str):
indent = " " * depth
if tree_.feature[node] != _tree.TREE_UNDEFINED:
name = feature_name[node]
threshold = quant_thresh[node]
quant_tree_str += "{}if {} <= {}:\n".format(indent, name, threshold)
quant_tree_str += recurse(tree_.children_left[node], depth + 1, '')
quant_tree_str += "{}else: # if {} > {}\n".format(indent, name, threshold)
quant_tree_str += recurse(tree_.children_right[node], depth + 1, '')
return quant_tree_str
else:
quant_tree_str += "{}return {}\n".format(indent, np.argmax(tree_.value[node]))
return quant_tree_str
quant_tree_str = "def tree_{}b(features):\n".format(precision)
quant_tree_str = recurse(0, 1, quant_tree_str)
return quant_tree_str
def gen_quant_trees_str(tree, precisions):
func_list_str = ''
for p in precisions:
names = ['features['+str(x)+']' for x in range(20)]
func_list_str += tree_to_code(tree, names, p)
func_list_str += "##################################################\n"
return func_list_str
def make_quant_trees_module(filename, tree, precisions):
trees_str = gen_quant_trees_str(tree, precisions)
with open(filename, 'w') as f:
f.write(trees_str)
def get_tree_results(tree, Xtest):
"""
Runs data through a quantized DecisionTreeClassifier
:param tree: DTC function handle
:param Xtest: data to test
:returns: predicted results
"""
results = [tree(X) for X in Xtest]
return np.array([results], ndmin=1).T
if __name__ == '__main__':
DIR = r'C:\Users\brady\GitHub\MinVAD\feature_extract'
tr_data = np.load(os.path.join(DIR, 'train_130k.npy'))
tr_class = np.load(os.path.join(DIR, 'train_130k_class.npy'))
myData = np.hstack((tr_data, tr_class))
np.random.shuffle(myData)
cutoff = int(np.floor(0.8 * len(tr_class)))
clf = tree.DecisionTreeClassifier(max_depth = 5)
clf = clf.fit(myData[:cutoff, :19], myData[:cutoff, 20])
test_str = gen_quant_trees_str(clf, np.arange(16, 15, -1))
print(test_str)
|
bradysalz/MinVAD
|
classifier/training_helpers.py
|
training_helpers.py
|
py
| 3,449 |
python
|
en
|
code
| 0 |
github-code
|
6
|
826135486
|
# -*- coding: utf-8 -*-
"""
Created on Sat May 7 17:36:23 2022
@author: ThinkPad
"""
from __future__ import print_function
import argparse
import os
import numpy as np
import random
import torch
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data
from PartialScan import PartialScans,unpickle
from model import feature_transform_regularizer
from pointnetCls import PointNetCls
import torch.nn.functional as F
from tqdm import tqdm
parser = argparse.ArgumentParser()
parser.add_argument(
'--batchSize', type=int, default=3, help='input batch size')
parser.add_argument(
'--num_points', type=int, default=2500, help='input batch size')
parser.add_argument(
'--workers', type=int, help='number of data loading workers', default=4)
parser.add_argument(
'--nepoch', type=int, default=250, help='number of epochs to train for')
parser.add_argument('--outf', type=str, default='cls', help='output folder')
parser.add_argument('--model', type=str, default='', help='model path')
parser.add_argument('--checkpoint', type=str, default='/gpfs/data/ssrinath/ychen485/TextCondRobotFetch/pointnet/cls/cls_model_5.pth', help="checkpoint dir")
parser.add_argument('--feature_transform', action='store_true', help="use feature transform")
opt = parser.parse_args()
print(opt)
blue = lambda x: '\033[94m' + x + '\033[0m'
opt.manualSeed = random.randint(1, 10000) # fix seed
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
latent_code = "/gpfs/data/ssrinath/ychen485/TextCondRobotFetch/pointnet/03001627/ocnet_shapefeature_pc/embed_feats_train.pickle"
latent_code_test = "/gpfs/data/ssrinath/ychen485/TextCondRobotFetch/pointnet/03001627/ocnet_shapefeature_pc/embed_feats_test.pickle"
shape_folder = "/gpfs/data/ssrinath/ychen485/partialPointCloud/03001627"
latent_dim = 512
dataset = PartialScans(latentcode_dir = latent_code, shapes_dir = shape_folder)
test_dataset = PartialScans(latentcode_dir = latent_code_test, shapes_dir = shape_folder)
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=opt.batchSize,
shuffle=True,
num_workers=int(opt.workers))
testdataloader = torch.utils.data.DataLoader(
test_dataset,
batch_size=opt.batchSize,
shuffle=True,
num_workers=int(opt.workers))
latent_dict = unpickle(latent_code)
keylist = list(latent_dict.keys())
latent_dict_test = unpickle(latent_code_test)
keylist_test = list(latent_dict_test.keys())
print("train set lenth: "+ str(len(dataset)) +", test set length: "+ str(len(test_dataset)))
try:
os.makedirs(opt.outf)
except OSError:
pass
classifier = PointNetCls(k=2, feature_transform=opt.feature_transform)
if opt.checkpoint != " ":
checkpoint = torch.load(opt.checkpoint)
classifier.load_state_dict(checkpoint)
optimizer = optim.Adam(classifier.parameters(), lr=0.001, betas=(0.9, 0.999))
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.5)
classifier.cuda()
num_batch = len(dataset) / opt.batchSize
total_correct = 0
for epoch in range(opt.nepoch):
for i, data in enumerate(dataloader, 0):
points_o, label = data
points = points_o[:, 0:1024, :].to(torch.float32)
points = points.transpose(2, 1)
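# For each mini-batch, a single sample (index t_idx, chosen at random) is
# treated as the positive match for the conditioning shape embedding; every
# other sample in the batch gets label 0. The latent vector z passed to the
# classifier is the embedding of that positive shape.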
target_np = np.zeros((len(label),))
t_idx = random.randint(0,len(label)-1)
target_np[t_idx] = 1
target = torch.from_numpy(target_np).to(torch.int64)
latents = np.zeros((1, latent_dim))
latents[0] = latent_dict[label[t_idx]]
# for j in range(opt.batchSize):
# if target[j] == 1:
# latents[j] = latent_dict[label[j]]
# else:
# idx = random.randint(0,len(keylist))
# name = keylist[idx]
# while(name == label[j]):
# idx = random.randint(0,len(keylist))
# name = keylist[idx]
# latents[j] = latent_dict[name]
z = torch.from_numpy(latents).to(torch.float32)
points, target, z = points.cuda(), target.cuda(), z.cuda()
optimizer.zero_grad()
classifier = classifier.train()
pred, trans, trans_feat = classifier(points, z)
# print(pred.shape)
pred = pred[0]
loss = F.nll_loss(pred, target)
if opt.feature_transform:
loss += feature_transform_regularizer(trans_feat) * 0.001
loss.backward()
optimizer.step()
pred_choice = pred.data.max(1)[1]
correct = pred_choice.eq(target.data).cpu().sum()
total_correct = total_correct + correct.item()
if i%100 == 0:
print('[%d: %d/%d] train loss: %f accuracy: %f' % (epoch, i, num_batch, loss.item(), total_correct / (100* opt.batchSize)))
total_correct = 0
scheduler.step()
test_correct = 0
for j, data in enumerate(testdataloader, 0):
points_o, label = data
points = points_o[:, 0:1024, :].to(torch.float32)
points = points.transpose(2, 1)
target_np = np.zeros((len(label),))
t_idx = random.randint(0,len(label)-1)
target_np[t_idx] = 1
target = torch.from_numpy(target_np).to(torch.int64)
latents = np.zeros((1, latent_dim))
latents[0] = latent_dict_test[label[t_idx]]
z = torch.from_numpy(latents).to(torch.float32)
points, target, z = points.cuda(), target.cuda(), z.cuda()
# optimizer.zero_grad()
classifier = classifier.eval()  # evaluation mode for the test pass
pred, trans, trans_feat = classifier(points, z)
# print(pred.shape)
pred = pred[0]
pred_choice = pred.data.max(1)[1]
correct = pred_choice.eq(target.data).cpu().sum()
test_correct = test_correct + correct.item()
print('[%d: %d/%d] %s loss: %f accuracy: %f' % (epoch, i, num_batch,
blue('test'), loss.item(), test_correct/float(len(test_dataset))))
torch.save(classifier.state_dict(), '%s/cls_model_%d.pth' %
(opt.outf, epoch))
|
FreddieRao/TextCondRobotFetch
|
pointnet/train.py
|
train.py
|
py
| 6,147 |
python
|
en
|
code
| 2 |
github-code
|
6
|
21077671370
|
import os
import tarfile
#from api_mnode import about
from log_setup import Logging, MLog
from program_data import PDApi
"""
NetApp / SolidFire
CPE
mnode support utility
"""
# set up logging
logmsg = Logging.logmsg()
class UpdateMS():
def __init__(self, repo):
current_version = repo.about["mnode_bundle_version"]
logmsg.info(f'Current mnode version: {current_version}')
def sideload(repo, updatefile):
""" Copy the bundle into place and extract
"""
bundle_dir = "/sf/etc/mnode/bundle/"
copy_cmd = f'cp {updatefile} {bundle_dir}'
new_bundle = (bundle_dir + os.path.basename(updatefile))
services_tar = f'{bundle_dir}services_deploy_bundle.tar.gz'
if not os.path.isdir(bundle_dir):
os.makedirs(bundle_dir)
logmsg.info(f'Copying {updatefile} to /sf/etc/mnode/bundle/')
if os.path.isfile(updatefile):
os.popen(copy_cmd).read()
logmsg.info(f'Extracting {new_bundle}')
try:
bundle = tarfile.open(new_bundle)
bundle.extractall(path="/sf/etc/mnode/bundle/")
bundle.close()
except EOFError as error:
MLog.log_exception(error)
try:
bundle = tarfile.open(services_tar)
bundle.extractall(path="/sf/etc/mnode/bundle/")
bundle.close()
except OSError as error:
MLog.log_exception(error)
else:
logmsg.info(f'{updatefile} File not found. Try specifying full path')
exit(1)
def deploy(repo):
""" deploy the package
"""
url = f'{repo.base_url}/mnode/1/services/deploy'
logmsg.info("Deploying new MS packages and services. Please wait....")
json_return = PDApi.send_put_return_json_nopayload(repo, url)
if json_return:
logmsg.debug(f'{json_return["message"]}')
else:
logmsg.info("Monitor progress with docker ps.")
|
solidfire/mnode-support-util
|
update_ms.py
|
update_ms.py
|
py
| 2,042 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34736461093
|
# -*- coding: utf-8 -*-
import scrapy
import re
import json
from video_scrapy.items import *
import hashlib
from video_scrapy.settings import my_defined_urls
class YoutubeDlSpider(scrapy.Spider):
name = 'video'
youtube_dl_not_you_get = False
handle_httpstatus_list = [404]
def __init__(self, my_url=None, my_playlist=False,*args, **kwargs):
super(YoutubeDlSpider, self).__init__(*args, **kwargs)
if my_url is None:
self.start_urls = []
else:
self.start_urls = ["%s"%my_url]
if isinstance(my_playlist, str):
if my_playlist=="True":
my_playlist=True
else:
my_playlist=False
self.get_playlist=my_playlist
def start_requests(self):
try:
self.state
try:
if "iqiyi_id" in self.state:
self.iqiyi_id = self.state["iqiyi_id"]
else:
self.iqiyi_id
except:
self.iqiyi_id = {}
self.state["iqiyi_id"] = self.iqiyi_id
except:
self.iqiyi_id = {}
if len(self.iqiyi_id)==0:
if self.youtube_dl_not_you_get:
parameter = ['-g', "--rm-cache-dir"]
for i in self.start_urls:
if "bilibili" in i:
yield scrapy.Request(url=i, callback=self.bili_parse)
else:
parameter.append(i)
if len(parameter) == 2:
pass
else:
# from video_scrapy.youtube_dl.YoutubeDL import my_defined_urls
from video_scrapy.youtube_dl import main
print("waiting for youtube_dl get urls")
main(parameter)
print("get youtube_dl urls")
for i in my_defined_urls:
my_url_dict = my_defined_urls[i]
for j in my_url_dict:
name = str(j).rsplit(".", 1)[0]
filetype = str(j).rsplit(".", 1)[-1]
yield scrapy.Request(url=my_url_dict[j], callback=self.savefile, meta={"name": name, "filetype": filetype, "fileid": None, "id": None, "end": None})
else:
iqiyi_url = []
for i in self.start_urls:
if "bilibili" in i:
yield scrapy.Request(url=i, callback=self.bili_parse)
self.start_urls.remove(i)
elif "iqiyi.com" in i:
iqiyi_url.append(i)
self.start_urls.remove(i)
if len(iqiyi_url) == 0:
pass
else:
for i in self.iqiyi_url_process(iqiyi_url):
yield i
if len(self.start_urls) == 0:
pass
else:
from video_scrapy.you_get.common import main
# from video_scrapy.you_get import common
print("waiting for you_get get urls")
main(my_own_urls=self.start_urls,
my_own_playlist=self.get_playlist)
print("get you_get urls finish")
if "error" in my_defined_urls:
print(
"can't get urls for some videos,please look at error.txt for more information!")
error = my_defined_urls.pop("error")
import datetime
nowTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
with open("error.txt", "a") as f:
f.write('\n')
f.write(str(nowTime))
f.write('\n')
f.write("\n".join(error))
for i in my_defined_urls:
my_url_dict = my_defined_urls[i]
name = i
filetype = my_url_dict.pop("filetype")
end_id = len(my_url_dict)
if end_id == 1:
url = my_url_dict.popitem()[1]
filetype = re.search(r"\.(\w+?)\?", url).group(1)
if filetype == "m3u8":
yield scrapy.Request(url=url, callback=self.parse_m3u8, meta={"name": name})
else:
for j in my_url_dict:
if int(j) == int(end_id):
end = True
else:
end = False
yield scrapy.Request(url=my_url_dict[j], callback=self.savefile, meta={"name": name, "filetype": filetype, "fileid": j, "id": None, "end": end})
else:
pass
def check_iqiyi_has_error(self, name):
try:
self.state
try:
if "iqiyi_id" in self.state:
self.iqiyi_id = self.state["iqiyi_id"]
else:
self.iqiyi_id
except:
self.iqiyi_id = {}
self.state["iqiyi_id"] = self.iqiyi_id
except:
self.iqiyi_id = {}
print(self.iqiyi_id)
self.iqiyi_id[name].setdefault("get_num", 0)
if "send_num" in self.iqiyi_id[name]:
if int(self.iqiyi_id[name]["send_num"]) == int(self.iqiyi_id[name]["get_num"]):
if len(self.iqiyi_id[name].setdefault("error", [])) == 0:
pass
else:
self.iqiyi_id[name]["get_num"] = 0
self.iqiyi_id[name]["error_num"] = len(
self.iqiyi_id[name]["error"])
self.iqiyi_id[name].pop("send_num")
return True
return False
def iqiyi_url_process(self, my_iqiyi_url):
try:
self.state
try:
if "iqiyi_id" in self.state:
self.iqiyi_id = self.state["iqiyi_id"]
else:
self.iqiyi_id
except:
self.iqiyi_id = {}
self.state["iqiyi_id"] = self.iqiyi_id
except:
self.iqiyi_id = {}
print("waiting for you_get get iqiyi_urls")
from video_scrapy.you_get.common import main
for iqiyi_url in my_iqiyi_url:
iqiyi_url = [iqiyi_url]
main(my_own_urls=iqiyi_url,
my_own_playlist=self.get_playlist)
print("get iqiyi_urls finish")
if "error" in my_defined_urls:
print(
"can't get urls for some videos,please look at error.txt for more information!")
error = my_defined_urls.pop("error")
import datetime
nowTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
with open("error.txt", "a") as f:
f.write('\n')
f.write(str(nowTime))
f.write('\n')
f.write("\n".join(error))
my_temp = list(my_defined_urls.keys())
for i in my_temp:
my_url_dict = my_defined_urls[i]
name = str(i)
self.iqiyi_id.setdefault(name, {}).setdefault("url", iqiyi_url)
filetype = my_url_dict.pop("filetype")
end_id = len(my_url_dict)
if end_id == 1:
url = my_url_dict.popitem()[1]
filetype = re.search(r"\.(\w+?)\?", url).group(1)
if filetype == "m3u8":
yield scrapy.Request(url=url, callback=self.parse_m3u8, meta={"name": name, "iqiyi": None})
else:
for j in my_url_dict:
if int(j) == int(end_id):
end = True
else:
end = False
yield scrapy.Request(url=my_url_dict[j], callback=self.savefile, meta={"name": name, "filetype": filetype, "fileid": j, "id": None, "end": end})
my_defined_urls.pop(i)
def parse_m3u8(self, response):
try:
self.state
try:
if "iqiyi_id" in self.state:
self.iqiyi_id = self.state["iqiyi_id"]
else:
self.iqiyi_id
except:
self.iqiyi_id = {}
self.state["iqiyi_id"] = self.iqiyi_id
except:
self.iqiyi_id = {}
url = response.url
name = response.meta['name']
if isinstance(response.body, bytes):
page = response.body.decode('ascii')
else:
page = str(response.body)
file_line = page.split("\n")
if file_line[0] != "#EXTM3U":
raise BaseException("非M3U8的链接")
else:
unknow = True  # flag used to check whether a download URL was found
i = 1
for index, line in enumerate(file_line):
if "EXTINF" in line:
unknow = False
if file_line[index + 1][0:4] == "http":
pd_url = file_line[index + 1]
else:
if file_line[index + 1][0] != '/':
pd_url = url.rsplit(
"/", 1)[0] + "/" + file_line[index + 1]
else:
pd_url = url.rsplit(
"/", 1)[0] + file_line[index + 1]
if "iqiyi" in response.meta:
if len(self.iqiyi_id.setdefault(name, {}).setdefault("error", [])) == 0:
if self.iqiyi_id.setdefault(name, {}).setdefault("error_num", 0) == 0:
yield scrapy.Request(pd_url, callback=self.savefile,
meta={'fileid': int(i), 'name': name, 'end': False, "id": None, "filetype": "ts", "iqiyi": None})
else:
pass
else:
if int(i) in self.iqiyi_id.setdefault(name, {}).setdefault("error", []):
yield scrapy.Request(pd_url, callback=self.savefile,
meta={'fileid': int(i), 'name': name, 'end': False, "id": None, "filetype": "ts", "iqiyi": None})
else:
pass
else:
yield scrapy.Request(pd_url, callback=self.savefile,
meta={'fileid': int(i), 'name': name, 'end': False, "id": None, "filetype": "ts"})
i = i + 1
if "ENDLIST" in line:
if "iqiyi" in response.meta:
if self.iqiyi_id.setdefault(name, {}).setdefault("error_num", 0) != 0:
self.iqiyi_id[name]["send_num"] = self.iqiyi_id[
name]["error_num"]
else:
self.iqiyi_id[name]["send_num"] = i - 1
if self.check_iqiyi_has_error(name):
for k in self.iqiyi_url_process(self.iqiyi_id[name]["url"]):
yield k
item = FileItem()
item["id"] = None
item['fileid'] = i
item['name'] = name
item['end'] = True
item['content'] = b''
item['filetype'] = 'ts'
yield item
if unknow:
raise BaseException("未找到对应的下载链接")
else:
print("下载请求完成 m3u8 %s" % name)
def parse(self, response):
pass
def bili_parse(self, response):
try:
self.state
try:
if "iqiyi_id" in self.state:
self.iqiyi_id = self.state["iqiyi_id"]
else:
self.iqiyi_id
except:
self.iqiyi_id = {}
self.state["iqiyi_id"] = self.iqiyi_id
except:
self.iqiyi_id = {}
if isinstance(response.body, bytes):
file = str(response.body.decode("utf8"))
else:
file = str(response.body)
temp = re.search(r"__INITIAL_STATE__=(\{.*\});\(fun", file, re.S)
temp = str(temp.group(1))
temp = json.loads(temp)
url = "https://www.kanbilibili.com/api/video/%d/download?cid=%d&quality=64&page=%d"
if "videoData" in temp:
videodata = temp['videoData']
pagelist = videodata['pages']
aid = videodata["aid"]
for item in pagelist:
page = item['page']
cid = item['cid']
name = item['part']
new_url = url % (int(aid), int(cid), int(page))
yield scrapy.Request(url=new_url, callback=self.bili_get_json, meta={"name": name, "id": page, "Referer": response.url})
else:
title = temp["mediaInfo"]["title"]
pagelist = temp["epList"]
name = str(title) + "%03d"
for item in pagelist:
aid = item["aid"]
cid = str(item["cid"])
page = item["index"]
access_id = int(item["episode_status"])
if access_id == 2:
if len(item["index_title"]) == 0:
new_name = name % (int(page))
else:
new_name = title + "_" + item["index_title"]
if "bangumi" in response.url:
secretkey = "9b288147e5474dd2aa67085f716c560d"
temp = "cid=%s&module=bangumi&otype=json&player=1&qn=112&quality=4" % (
str(cid))
sign_this = hashlib.md5(
bytes(temp + secretkey, 'utf-8')).hexdigest()
new_url = "https://bangumi.bilibili.com/player/web_api/playurl?" + \
temp + '&sign=' + sign_this
else:
new_url = url % (int(aid), int(cid), int(page))
yield scrapy.Request(url=new_url, callback=self.bili_get_json, meta={"name": new_name, "id": page, "Referer": response.url})
else:
pass
def bili_get_json(self, response):
try:
self.state
try:
if "iqiyi_id" in self.state:
self.iqiyi_id = self.state["iqiyi_id"]
else:
self.iqiyi_id
except:
self.iqiyi_id = {}
self.state["iqiyi_id"] = self.iqiyi_id
except:
self.iqiyi_id = {}
if isinstance(response.body, bytes):
temp_dict = json.loads(response.body.decode("utf8"))
else:
temp_dict = json.loads(str(response.body))
if "err" in temp_dict:
if temp_dict['err'] is None:
my_url_list = temp_dict["data"]["durl"]
filetype = temp_dict["data"]["format"][0:3]
end_id = len(my_url_list)
for i in my_url_list:
fileid = i["order"]
link_url = i["url"]
if int(fileid) == int(end_id):
end = True
else:
end = False
yield scrapy.Request(url=link_url, callback=self.savefile, headers={"Origin": "https://www.bilibili.com", "Referer": response.meta["Referer"]},
meta={"name": response.meta["name"], "id": response.meta["id"], "filetype": filetype, "fileid": fileid, "end": end})
else:
my_url_list = temp_dict["durl"]
filetype = temp_dict["format"][0:3]
end_id = len(my_url_list)
for i in my_url_list:
fileid = i["order"]
link_url = i["url"]
if int(fileid) == int(end_id):
end = True
else:
end = False
yield scrapy.Request(url=link_url, callback=self.savefile, headers={"Origin": "https://www.bilibili.com", "Referer": response.meta["Referer"]},
meta={"name": response.meta["name"], "id": response.meta["id"], "filetype": filetype, "fileid": fileid, "end": end})
def savefile(self, response):
try:
self.state
try:
if "iqiyi_id" in self.state:
self.iqiyi_id = self.state["iqiyi_id"]
else:
self.iqiyi_id
except:
self.iqiyi_id = {}
self.state["iqiyi_id"] = self.iqiyi_id
except:
self.iqiyi_id = {}
item = FileItem()
if response.meta['fileid'] is None and response.meta['end'] is None:
print("get %s" % (response.meta['name']))
item['fileid'] = None
item['end'] = None
else:
print("get %s__%d" %
(response.meta['name'], int(response.meta['fileid'])))
item['fileid'] = int(response.meta['fileid'])
item['end'] = response.meta['end']
if response.meta['id'] is None:
item['id'] = None
else:
item['id'] = int(response.meta['id'])
item['name'] = str(response.meta['name']).encode(
).translate(None, b'\\/:*?"<>|').decode()
item['filetype'] = response.meta['filetype']
if "iqiyi" in response.meta:
self.iqiyi_id[response.meta["name"]]["get_num"] = self.iqiyi_id[
response.meta["name"]].setdefault("get_num", 0) + 1
if int(response.status) == 404:
if int(response.meta['fileid']) in self.iqiyi_id[response.meta["name"]].setdefault("error", []):
pass
else:
self.iqiyi_id[response.meta["name"]].setdefault(
"error", []).append(int(response.meta["fileid"]))
else:
if int(response.meta["fileid"]) in self.iqiyi_id[response.meta["name"]].setdefault("error", []):
self.iqiyi_id[response.meta["name"]][
"error"].remove(int(response.meta["fileid"]))
item['content'] = response.body
yield item
if self.check_iqiyi_has_error(response.meta["name"]):
for i in self.iqiyi_url_process(self.iqiyi_id[response.meta["name"]]["url"]):
yield i
else:
item['content'] = response.body
yield item
|
yllgl/my_video_scrapy
|
video_scrapy/spiders/video_spider.py
|
video_spider.py
|
py
| 19,315 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41119941323
|
import numpy as np
import funcs
from utils import fs, unison_shuffled_copies
# from matplotlib import pyplot as plt
# import random
# from utils import *
# import scipy.io as sio
# from copy import deepcopy
LR = 2
LR_DECAY = .9999
MIN_LR = 0.000000001
DEBUG = False
class Layer:
def __init__(self, in_dim, out_dim):
np.random.seed(42)
self.weights = np.random.normal(size=(in_dim, out_dim), loc=0.0, scale=1.0 )
self.bias= np.random.normal(size=(out_dim, 1), loc=0.0, scale=1.0 )
self.Y = np.zeros((out_dim, 1)) #todo: may need to switch dims order
self.X = None
self.df_dtheta = None
def dy_dw_t_v(self, v, act_tag):
# the layer stores its bias in self.bias; return the accumulated product
W, b, x = self.weights, self.bias, self.X
output = act_tag((W @ x) + b)
output = output * v
output = output * x.T
return output
class NN:
def __init__(self, layer_dims, act=funcs.tanh, act_tag=funcs.tanh_tag, lr=LR):
self.act = act
self.d_act = act_tag
self.lr = lr
self.num_layers = len(layer_dims) - 1
self.layers = [None] * self.num_layers
for i in range(0, self.num_layers):
self.layers[i] = Layer(layer_dims[i], layer_dims[i+1])
def predict(self, X):
my_print("\n\n********** Predict() **********")
my_print(f"input: {X}")
output = X
#propogate data forward through the layers
for i in range(0, self.num_layers):
self.layers[i].X = output
my_print(f"\nlayer: {i}")
my_print(f"\tW: {self.layers[i].weights}")
output = (output @ self.layers[i].weights)
my_print(f"\toutput: {output}")
#output += self.layers[i].bias.T
f = funcs.softmax if (i == self.num_layers - 1) else self.act
f_str = "softmax" if (i == self.num_layers - 1) else "self.act"
output = f(output)
my_print(f"\t\t{f_str}(output): {output}")
self.layers[i].Y = output
return output
def learn(self, expected, data):
my_print("\n\n********** Learn() **********")
pred = self.layers[-1].Y
my_print(f"input: {data}\n")
my_print(f"pred: {pred}\n")
my_print(f"real: {expected}\n")
err = (pred - expected).T
for i in range(2, self.num_layers + 2):
my_print(f"\ni: {i}")
my_print(f"\terr: {err}")
d_f = funcs.grad_softmax_old if (i == 2) else self.d_act
d_f_str = "grad_softmax" if (i == 2) else "self.d_act"
my_print(f"\td_f: {d_f_str}")
input = self.layers[self.num_layers - i].Y if (i < self.num_layers + 1) else data
output = self.layers[self.num_layers - i + 1].Y
my_print(f"\tinput: {input}\n")
my_print(f"\toutput: {output}\n")
a = input.T
b = (err.T * d_f(output))
my_print(f"\ta: {a.shape}, b: {b.shape}")
dW = -self.lr * (err @ input)#(a @ b)
dB = -(self.lr) * np.mean(b, axis=0, keepdims=True)
my_print(f"\tdW:\n{dW}")
err = self.layers[self.num_layers - i + 1].weights @ err
my_print(f"\tW before update:\n{self.layers[self.num_layers - i + 1].weights}")
self.layers[self.num_layers - i + 1].weights += dW.T
my_print(f"\tW after update:\n{self.layers[self.num_layers - i + 1].weights}")
#self.layers[self.num_layers - i + 1].bias += dB.T
self.lr = max(LR_DECAY * self.lr, MIN_LR)
def train(self, inputs, labels, mini_batch_size, batch_size, num_epochs=1):
batch_err = 0
num_correct, total = 0, 0
p_stats = np.zeros((labels.shape[1]))
r_stats = np.zeros((labels.shape[1]))
epoch_acc = 0
for epoch in range(num_epochs):
inputs, labels = unison_shuffled_copies(inputs, labels)
print(f"---------- Epoch #{epoch} ----------")
for i in range(0, len(inputs), mini_batch_size):
input = inputs[i:i+mini_batch_size]
expected = labels[i:i+mini_batch_size]
pred = self.predict(input)
batch_err += get_error(pred, expected)
num_correct_i, total_i = accuracy(pred, expected)
num_correct += num_correct_i
total += total_i
epoch_acc += num_correct_i
p_stats_i, r_stats_i = stats(pred, expected)
p_stats += p_stats_i
r_stats += r_stats_i
self.learn(expected, input)
if (i + mini_batch_size) % batch_size == 0:
print(f"{int(i/batch_size)}\tlr: {fs(self.lr)}\terr: {fs(batch_err/batch_size)}\tacc: {num_correct}/{total}")#\tps: {p_stats}\trs: {r_stats}")
batch_err = 0
num_correct, total = 0, 0
p_stats = np.zeros((labels.shape[1]))
r_stats = np.zeros((labels.shape[1]))
print(f"epoch acc: {epoch_acc} / {len(inputs)}\t({int((epoch_acc * 100)/len(inputs))}%)")
epoch_acc = 0
self.lr = LR
def accuracy(pred, real):
num_correct = 0
total = len(pred)
for i in range(total):
if (np.argmax(pred[i]) == np.argmax(real[i])):
num_correct += 1
return num_correct, total
def print_pred_real_acc(pred, real):
print("*************")
print("pred:")
print(pred)
print("\nreal:")
print(real)
print(f"acc: {accuracy(pred, real)}")
print("*************")
def stats(preds, reals):
p_stats = [0] * preds.shape[1]
r_stats = [0] * reals.shape[1]
for i in range(preds.shape[0]):
pred = np.argmax(preds[i])
p_stats[pred] += 1
real = np.argmax(reals[i])
r_stats[real] += 1
return np.array(p_stats), np.array(r_stats)
def get_error(pred, real):
output = pred - real
output = output * output
output = np.sum(np.sum(output))
# print(f"pred: {pred}\nreal: {real}\nerr: {output}")
return output
def foo(v):
v1, v2, v3 = v[0]
if (v1 + v2 > v3):
return np.array([[0, 1]])
else:
return np.array([[1, 0]])
def my_print(s):
if DEBUG:
print(s)
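# Hedged demo (added; not in the original file): train the toy network on the
# rule implemented by foo() above. The layer sizes, sample count and batch
# sizes below are illustrative assumptions, not values from the assignment.
def _demo_toy_training():
    rng = np.random.RandomState(0)
    X = rng.rand(1000, 3)
    Y = np.vstack([foo(x.reshape(1, 3)) for x in X])
    net = NN([3, 8, 2])
    net.train(X, Y, mini_batch_size=10, batch_size=100, num_epochs=1)
    return net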
|
eladfeld/deepLearningFirstAssingnment
|
code/NN2.py
|
NN2.py
|
py
| 6,346 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25961872136
|
# -*- coding: utf-8 -*-
"""
This Python file is been made by the project group Mattek5 C4-202
This is a test of how much packetloss the prediction of a sound file
can have and still be intelligible
"""
from __future__ import division
import os
import sys
lib_path = '\\Scripts\\libs'
data_path = '\\Lydfiler\\Sound'
export_path = '\\Lydfiler\\Predict'
cwd = os.getcwd()[:-8]
sys.path.insert(0, cwd + lib_path)
import scipy.io.wavfile as wav
import sounddevice as sd
import numpy as np
import matplotlib.pyplot as plt
import LP_speech as lps
import scipy.signal as sig
""" Import data """
filename = 'Laura_en_saet'
fs, data= wav.read(cwd + data_path + "/Saetning/" + filename + ".wav")
data = np.array(data,dtype = "float64")
""" Function for packetloss """
def packetlooser(parameters, P):
    count = 0
    for packet in range(len(parameters)-3):
        if np.random.random() <= P:
            count += 1
            parameters[packet] = \
                {"coef":None,"gain":None,"voi":4,"pitch":None,"first_imp":None}
    print("Number of packet losses: %d" %count)
    print("Packet losses percent : %.1f %%" %((100*count)/(len(parameters)-3)))
    return parameters
""" Predict signal with packetloss """
N = 160
p = 12
P_packetloss = .9 # The probability of packet loss
parameters = lps.LP_parameters(data, N, p, .5)
parameters_lossy = packetlooser(parameters, P_packetloss)
predict = lps.LP_predict(parameters_lossy)
""" Plot of data and predicted data """
plt.subplot(211)
plt.plot(data)
plt.subplot(212)
plt.plot(predict)
plt.show()
""" Save and play the predict packetloss file """
#wav.write(cwd + export_path + "/Packetloss/packetloss_90_" + filename + ".wav",\
# fs,np.int16(predict))
#sd.play(np.int16(data),fs)
#sd.play(np.int16(predict),fs)
|
AalauraaA/P5
|
Supplerende_materiale/Scripts/packetloss.py
|
packetloss.py
|
py
| 1,773 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8466448273
|
#!usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import math
import alphabet
from sequence_utils import get_reverse_complement
from get_kmers import get_kmers_from_sequence
import fasta_parser
def iter_kmers(alphabet, k):
"""Generator function that yields every kmer (substring of length k) over an
alphabet, which should be given as a Python set."""
alphabets = [alphabet for i in range(k)]
for kmer in itertools.product(*alphabets):
yield ''.join(kmer)
def iter_palindromes(k, allowed_middle_characters, alphabet):
"""Generator function that yields every DNA reverse-complement palindrome
of length k, including odd palindromes with center characters determined by
allowed_middle_characters.
allowed_middle_characters = ['G', 'T']
"""
for kmer in iter_kmers(alphabet, k // 2):
comp = get_reverse_complement(kmer)
if k % 2 != 0:
for character in allowed_middle_characters:
yield kmer + character + comp
else:
yield kmer + comp
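# Hedged example (added, not part of the original module): the even-length
# DNA reverse-complement palindromes of length 4 include 'AATT', 'ACGT' and
# 'GATC'. The 'ACGT' alphabet below is an assumption for illustration; the
# script itself uses alphabet.iupac_dna.
def _example_even_palindromes():
    dna = 'ACGT'
    return list(iter_palindromes(4, allowed_middle_characters=[], alphabet=dna))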
def iter_palindrome_range(k1, k2, allowed_middle_characters, alphabet):
"""Generator function that yields all DNA reverse
complement palindromes from length k1 to k2, including
palindromes of length k1 and k2."""
for k in range(k1, k2 + 1):
for palindrome in iter_palindromes(k, allowed_middle_characters, alphabet):
yield palindrome
def gen_kmers(kmin, kmax, alphabet):
    """
    Generates all possible k-mers with lengths in range(kmin, kmax + 1).
    :param kmin: int, minimum kmer length
    :param kmax: int, maximum kmer length
    :param alphabet: str, accepted sequence alphabet for DNA, RNA, amino acids
    :return: list of str, all possible kmers
    """
    return [''.join(mer)
            for n in range(kmin, kmax + 1)
            for mer in itertools.product(alphabet, repeat=n)]
def gen_rev_palindromes(kmin, kmax, alphabet):
"""
generate list of palindromes of length n,
when kmin<=n<=kmax identical to their reverse complements
:param kmin: int, min length of tested palindrome
:param kmax:int, max length of tested palindrome
:param alphabet: str, possible bases inserted in the middle of an odd palindrome
:return: list, palindromes seqs identical to their reverse complements
"""
dromes = []
for n in range(kmin, kmax + 1):
for left_mer in gen_kmers(n // 2, n // 2, alphabet):
if n % 2 == 0: # even palindrome
dromes.append(left_mer + get_reverse_complement(left_mer))
else: # odd palindrome
for midmer in alphabet:
dromes.append(left_mer + midmer + get_reverse_complement(left_mer))
return dromes
def compute_stats(kmer_list, counts, N, max_e):
"""
compute_stats computes the e-values for the supplied data.
Pre-conditions:
'kmer_list' - a list of kmers (for which stats will be produced)
'counts' - any dictionary-type with k-mers as keys (min_k - 2 <= k <= max_k,
where min_k and max_k are the bounds on the k-mer lengths in 'kmer_list')
and counts as values.
'N' - the total length of the sequence(s) read to produce 'counts'.
'max_e' - the upper bound on e-values reported.
Post-conditions:
Returns a list of lists ('results') where results[i] is of the form
[k-mer, observed count, expected count, z-score, e-value]
"""
# results is the list of list described in the docstring.
results = []
# number of tests, used to convert p-value to e-value.
n = len(kmer_list)
for kmer in kmer_list:
k = len(kmer)
observed = counts[kmer]
expected = counts[kmer[:-1]] * counts[kmer[1:]] / counts[kmer[1:-1]]
sigma = math.sqrt(expected * (1 - expected / (N - k + 1)))
Z_score = (observed - expected) / sigma
E_value_under = n * math.erfc(-Z_score / math.sqrt(2)) / 2 # E-value for under-rep
E_value_over = n * math.erfc(Z_score / math.sqrt(2)) / 2 # E-value for over-rep
if (E_value_under <= max_e):
results.append([kmer, observed, expected, Z_score, E_value_under])
elif (E_value_over <= max_e):
results.append([kmer, observed, expected, Z_score, E_value_over])
return results
def get_palindromes(alphabet, min_k, max_k):
"""Generates all DNA palindromes over the range from min_k to max_k.
Inputs:
min_k - minimum palindrome length (int)
max_k - maximum palindrome length (int)
Output:
yields all possible DNA palindromes (str) of length min_k to max_k.
Some definitions:
A palindrome is defined as a sequence which is equal to its reverse-complement.
Note: for odd length palindromes, the middle base does not need to be the same
in the reverse-complement.
Ex.: AAT is a legal palindrome even though its reverse-complement is ATT
"""
for k in range(min_k, (max_k + 1)):
for mer in itertools.product(alphabet, repeat=int(k / 2)):
kmer = ''.join(mer)
# even pal
if k % 2 == 0:
pal = kmer + get_reverse_complement(kmer)
yield pal
else:
for base in alphabet: # odd pal
pal = kmer + base + get_reverse_complement(kmer)
yield pal
if __name__ == '__main__':
alphabet = alphabet.iupac_dna
filename = "Data/test/Acidithiobacillus/chromosomes/NC_011206.1_Acidithiobacillus_ferrooxidans_ATCC_53993_complete_genome.fna.gz"
for name, seq in fasta_parser.parse_fasta(filename):
pal_list = list(get_palindromes(alphabet, 4, 6))
print(len(pal_list))
print(pal_list)
|
schlogl2017/PauloSSchlogl
|
get_palindromes.py
|
get_palindromes.py
|
py
| 5,625 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14762866711
|
from tkinter import *
import tkinter.font
from gpiozero import LED
import RPi.GPIO
RPi.GPIO.setmode(RPi.GPIO.BCM)
blue = LED(2)
green = LED(3)
red = LED(4)
win = Tk()
win.title("LED GUI Toggler")
myFont = tkinter.font.Font(family = 'Helvetica', size = 12, weight = "bold")
def ledToggleBlue():
if blue.is_lit:
blue.off()
blueButton["text"] = "Turn Blue LED on"
else:
blue.on()
blueButton["text"] = "Turn Blue LED off"
def ledToggleRed():
if red.is_lit:
red.off()
redButton["text"] = "Turn Red LED on"
else:
red.on()
redButton["text"] = "Turn Red LED off"
def ledToggleGreen():
if green.is_lit:
green.off()
greenButton["text"] = "Turn Green LED on"
else:
green.on()
greenButton["text"] = "Turn Green LED off"
def close():
RPi.GPIO.cleanup()
win.destroy()
blueButton = Button(win, text = 'Turn Blue LED on', font = myFont, command = ledToggleBlue, bg = 'blue', height = 1, width = 24)
blueButton.grid(row=0,column=1)
redButton = Button(win, text = 'Turn Red LED on', font = myFont, command = ledToggleRed, bg = 'red', height = 1, width = 24)
redButton.grid(row=1,column=1)
greenButton = Button(win, text = 'Turn Green LED on', font = myFont, command = ledToggleGreen, bg = 'green', height = 1, width = 24)
greenButton.grid(row=2,column=1)
exitButton = Button(win, text = 'Exit', font = myFont, command = close, bg = 'red2', height = 1, width = 6)
exitButton.grid(row=3,column=1)
win.mainloop()
|
chris-yl31/SIT210-Task5.2C-RPiGUI
|
GUI.py
|
GUI.py
|
py
| 1,525 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16543286247
|
from nuitka.containers.OrderedDicts import OrderedDict
from nuitka.Errors import NuitkaOptimizationError
from nuitka.PythonVersions import python_version
from nuitka.utils.InstanceCounters import (
counted_del,
counted_init,
isCountingInstances,
)
from nuitka.Variables import LocalsDictVariable, LocalVariable
from .shapes.BuiltinTypeShapes import tshape_dict
from .shapes.StandardShapes import tshape_unknown
locals_dict_handles = {}
def getLocalsDictType(kind):
if kind == "python2_function_exec":
locals_scope = LocalsDictExecHandle
elif kind == "python_function":
locals_scope = LocalsDictFunctionHandle
elif kind == "python3_class":
locals_scope = LocalsMappingHandle
elif kind == "python2_class":
locals_scope = LocalsDictHandle
elif kind == "module_dict":
locals_scope = GlobalsDictHandle
else:
assert False, kind
return locals_scope
def getLocalsDictHandle(locals_name, kind, owner):
# Duplicates are bad and cannot be tolerated.
if locals_name in locals_dict_handles:
raise NuitkaOptimizationError(
"duplicate locals name",
locals_name,
kind,
owner.getFullName(),
owner.getCompileTimeFilename(),
locals_dict_handles[locals_name].owner.getFullName(),
locals_dict_handles[locals_name].owner.getCompileTimeFilename(),
)
locals_dict_handles[locals_name] = getLocalsDictType(kind)(
locals_name=locals_name, owner=owner
)
return locals_dict_handles[locals_name]
class LocalsDictHandleBase(object):
# TODO: Might remove some of these later, pylint: disable=too-many-instance-attributes
__slots__ = (
"locals_name",
# TODO: Specialize what the kinds really use what.
"variables",
"local_variables",
"providing",
"mark_for_propagation",
"prevented_propagation",
"propagation",
"owner",
"complete",
)
@counted_init
def __init__(self, locals_name, owner):
self.locals_name = locals_name
self.owner = owner
# For locals dict variables in this scope.
self.variables = {}
# For local variables in this scope.
self.local_variables = {}
self.providing = OrderedDict()
# Can this be eliminated through replacement of temporary variables, or has
# e.g. the use of locals prevented this, which it should in classes.
self.mark_for_propagation = False
self.propagation = None
self.complete = False
if isCountingInstances():
__del__ = counted_del()
def __repr__(self):
return "<%s of %s>" % (self.__class__.__name__, self.locals_name)
def getName(self):
return self.locals_name
def makeClone(self, new_owner):
count = 1
# Make it unique.
while 1:
locals_name = self.locals_name + "_inline_%d" % count
if locals_name not in locals_dict_handles:
break
count += 1
result = self.__class__(locals_name=locals_name, owner=new_owner)
variable_translation = {}
# Clone variables as well.
for variable_name, variable in self.variables.items():
new_variable = variable.makeClone(new_owner=new_owner)
variable_translation[variable] = new_variable
result.variables[variable_name] = new_variable
for variable_name, variable in self.local_variables.items():
new_variable = variable.makeClone(new_owner=new_owner)
variable_translation[variable] = new_variable
result.local_variables[variable_name] = new_variable
result.providing = OrderedDict()
for variable_name, variable in self.providing.items():
if variable in variable_translation:
new_variable = variable_translation[variable]
else:
new_variable = variable.makeClone(new_owner=new_owner)
variable_translation[variable] = new_variable
result.providing[variable_name] = new_variable
return result, variable_translation
@staticmethod
def getTypeShape():
return tshape_dict
@staticmethod
def hasShapeDictionaryExact():
return True
def getCodeName(self):
return self.locals_name
@staticmethod
def isModuleScope():
return False
@staticmethod
def isClassScope():
return False
@staticmethod
def isFunctionScope():
return False
@staticmethod
def isUnoptimizedFunctionScope():
return False
def getProvidedVariables(self):
return self.providing.values()
def registerProvidedVariable(self, variable):
variable_name = variable.getName()
self.providing[variable_name] = variable
def unregisterProvidedVariable(self, variable):
"""Remove provided variable, e.g. because it became unused."""
variable_name = variable.getName()
if variable_name in self.providing:
del self.providing[variable_name]
registerClosureVariable = registerProvidedVariable
unregisterClosureVariable = unregisterProvidedVariable
def hasProvidedVariable(self, variable_name):
"""Test if a variable is provided."""
return variable_name in self.providing
def getProvidedVariable(self, variable_name):
"""Test if a variable is provided."""
return self.providing[variable_name]
def getLocalsRelevantVariables(self):
"""The variables relevant to locals."""
return self.providing.values()
def getLocalsDictVariable(self, variable_name):
if variable_name not in self.variables:
result = LocalsDictVariable(owner=self, variable_name=variable_name)
self.variables[variable_name] = result
return self.variables[variable_name]
# TODO: Have variable ownership moved to the locals scope, so owner becomes not needed here.
def getLocalVariable(self, owner, variable_name):
if variable_name not in self.local_variables:
result = LocalVariable(owner=owner, variable_name=variable_name)
self.local_variables[variable_name] = result
return self.local_variables[variable_name]
@staticmethod
def preventLocalsDictPropagation():
pass
@staticmethod
def isPreventedPropagation():
return False
def markForLocalsDictPropagation(self):
self.mark_for_propagation = True
def isMarkedForPropagation(self):
return self.mark_for_propagation
def allocateTempReplacementVariable(self, trace_collection, variable_name):
if self.propagation is None:
self.propagation = OrderedDict()
if variable_name not in self.propagation:
provider = trace_collection.getOwner()
self.propagation[variable_name] = provider.allocateTempVariable(
temp_scope=None, name=self.getCodeName() + "_key_" + variable_name
)
return self.propagation[variable_name]
def getPropagationVariables(self):
if self.propagation is None:
return ()
return self.propagation
def finalize(self):
# Make it unusable when it's become empty, not used.
self.owner.locals_scope = None
del self.owner
del self.propagation
del self.mark_for_propagation
for variable in self.variables.values():
variable.finalize()
for variable in self.local_variables.values():
variable.finalize()
del self.variables
del self.providing
def markAsComplete(self, trace_collection):
self.complete = True
self._considerUnusedUserLocalVariables(trace_collection)
self._considerPropagation(trace_collection)
# TODO: Limited to Python2 classes for now, more overloads need to be added, this
# ought to be abstract and have variants with TODOs for each of them.
@staticmethod
def _considerPropagation(trace_collection):
"""For overload by scope type. Check if this can be replaced."""
def onPropagationComplete(self):
self.variables = {}
self.mark_for_propagation = False
def _considerUnusedUserLocalVariables(self, trace_collection):
"""Check scope for unused variables."""
provided = self.getProvidedVariables()
removals = []
for variable in provided:
if (
variable.isLocalVariable()
and not variable.isParameterVariable()
and variable.getOwner() is self.owner
):
empty = trace_collection.hasEmptyTraces(variable)
if empty:
removals.append(variable)
for variable in removals:
self.unregisterProvidedVariable(variable)
trace_collection.signalChange(
"var_usage",
self.owner.getSourceReference(),
message="Remove unused local variable '%s'." % variable.getName(),
)
class LocalsDictHandle(LocalsDictHandleBase):
"""Locals dict for a Python class with mere dict."""
__slots__ = ()
@staticmethod
def isClassScope():
return True
@staticmethod
def getMappingValueShape(variable):
# We don't yet track dictionaries, let alone mapping values.
# pylint: disable=unused-argument
return tshape_unknown
def _considerPropagation(self, trace_collection):
if not self.variables:
return
for variable in self.variables.values():
for variable_trace in variable.traces:
if variable_trace.inhibitsClassScopeForwardPropagation():
return
trace_collection.signalChange(
"var_usage",
self.owner.getSourceReference(),
message="Forward propagate locals dictionary.",
)
self.markForLocalsDictPropagation()
class LocalsMappingHandle(LocalsDictHandle):
"""Locals dict of a Python3 class with a mapping."""
__slots__ = ("type_shape",)
# TODO: Removable condition once Python 3.3 support is dropped.
if python_version >= 0x340:
__slots__ += ("prevented_propagation",)
def __init__(self, locals_name, owner):
LocalsDictHandle.__init__(self, locals_name=locals_name, owner=owner)
self.type_shape = tshape_unknown
if python_version >= 0x340:
self.prevented_propagation = False
def getTypeShape(self):
# TODO: Make mapping available for this.
return self.type_shape
def setTypeShape(self, type_shape):
self.type_shape = type_shape
def hasShapeDictionaryExact(self):
return self.type_shape is tshape_dict
if python_version >= 0x340:
def markAsComplete(self, trace_collection):
# For this run, it cannot be done yet.
if self.prevented_propagation:
# False alarm, this is available.
self.prevented_propagation = False
return
self.complete = True
def preventLocalsDictPropagation(self):
self.prevented_propagation = True
def isPreventedPropagation(self):
return self.prevented_propagation
def _considerPropagation(self, trace_collection):
if not self.variables:
return
if self.type_shape is not tshape_dict:
return
for variable in self.variables.values():
for variable_trace in variable.traces:
if variable_trace.inhibitsClassScopeForwardPropagation():
return
trace_collection.signalChange(
"var_usage",
self.owner.getSourceReference(),
message="Forward propagate locals dictionary.",
)
self.markForLocalsDictPropagation()
@staticmethod
def isClassScope():
return True
class LocalsDictExecHandle(LocalsDictHandleBase):
"""Locals dict of a Python2 function with an exec."""
__slots__ = ("closure_variables",)
def __init__(self, locals_name, owner):
LocalsDictHandleBase.__init__(self, locals_name=locals_name, owner=owner)
self.closure_variables = None
@staticmethod
def isFunctionScope():
return True
@staticmethod
def isUnoptimizedFunctionScope():
return True
def getLocalsRelevantVariables(self):
if self.closure_variables is None:
return self.providing.values()
else:
return [
variable
for variable in self.providing.values()
if variable not in self.closure_variables
]
# TODO: What about the ".0" variety, we used to exclude it.
def registerClosureVariable(self, variable):
self.registerProvidedVariable(variable)
if self.closure_variables is None:
self.closure_variables = set()
self.closure_variables.add(variable)
def unregisterClosureVariable(self, variable):
self.unregisterProvidedVariable(variable)
variable_name = variable.getName()
if variable_name in self.providing:
del self.providing[variable_name]
class LocalsDictFunctionHandle(LocalsDictHandleBase):
"""Locals dict of a Python3 function or Python2 function without an exec."""
__slots__ = ()
@staticmethod
def isFunctionScope():
return True
class GlobalsDictHandle(LocalsDictHandleBase):
__slots__ = ("escaped",)
def __init__(self, locals_name, owner):
LocalsDictHandleBase.__init__(self, locals_name=locals_name, owner=owner)
self.escaped = False
@staticmethod
def isModuleScope():
return True
def markAsEscaped(self):
self.escaped = True
def isEscaped(self):
return self.escaped
|
Nuitka/Nuitka
|
nuitka/nodes/LocalsScopes.py
|
LocalsScopes.py
|
py
| 14,085 |
python
|
en
|
code
| 10,019 |
github-code
|
6
|
72532058109
|
import json
import arrow
import requests
from monitor_release.models import RunningSidecar
from monitor_release.settings import Settings
def get_bearer_token(settings: Settings):
headers = {"accept": "application/json", "Content-Type": "application/json"}
payload = json.dumps(
{
"Username": settings.portainer_username,
"Password": settings.portainer_password,
}
)
response = requests.post(
f"{settings.portainer_url}/portainer/api/auth",
headers=headers,
data=payload,
)
bearer_token = response.json()["jwt"]
return bearer_token
def get_services(settings: Settings, bearer_token):
services_url = f"{settings.portainer_url}/portainer/api/endpoints/{settings.portainer_endpoint_version}/docker/services"
response = requests.get(
services_url,
headers={
"Authorization": "Bearer " + bearer_token,
"Content-Type": "application/json",
},
)
services = response.json()
return services
def get_tasks(settings: Settings, bearer_token):
tasks_url = f"{settings.portainer_url}/portainer/api/endpoints/{settings.portainer_endpoint_version}/docker/tasks"
response = requests.get(
tasks_url,
headers={
"Authorization": "Bearer " + bearer_token,
"Content-Type": "application/json",
},
)
tasks = response.json()
return tasks
def get_containers(settings: Settings, bearer_token):
bearer_token = get_bearer_token(settings)
containers_url = f"{settings.portainer_url}/portainer/api/endpoints/{settings.portainer_endpoint_version}/docker/containers/json?all=true"
response = requests.get(
containers_url,
headers={
"Authorization": "Bearer " + bearer_token,
"Content-Type": "application/json",
},
)
containers = response.json()
return containers
def check_simcore_running_sidecars(settings: Settings, services):
running_sidecars: list[RunningSidecar] = []
for service in services:
if (
service["Spec"]["Name"].startswith("dy-sidecar")
and service["Spec"]["Labels"]["io.simcore.runtime.swarm-stack-name"]
== settings.swarm_stack_name
):
running_sidecars.append(
RunningSidecar(
name=service["Spec"]["Name"],
created_at=arrow.get(service["CreatedAt"]).datetime,
user_id=service["Spec"]["Labels"]["io.simcore.runtime.user-id"],
project_id=service["Spec"]["Labels"][
"io.simcore.runtime.project-id"
],
service_key=service["Spec"]["Labels"][
"io.simcore.runtime.service-key"
],
service_version=service["Spec"]["Labels"][
"io.simcore.runtime.service-version"
],
)
)
return running_sidecars
def _generate_containers_map(containers):
container_map = {}
for container in containers:
git_sha = (
container.get("Labels").get("org.opencontainers.image.revision")
if container.get("Labels").get(
"org.opencontainers.image.revision"
) # container.get("Labels").get("org.label-schema.vcs-ref")
else container.get("Labels").get("org.label-schema.vcs-ref")
)
container_map[container["Id"]] = {"git_sha": git_sha}
return container_map
def check_simcore_deployed_services(settings: Settings, services, tasks, containers):
container_map = _generate_containers_map(containers)
service_task_map = {}
for service in services:
if service["Spec"]["Name"].startswith(settings.starts_with):
service_task_map[service["ID"]] = {
"service_name": service["Spec"]["Name"],
"tasks": [],
}
for task in tasks:
if task["ServiceID"] in service_task_map:
if task["Status"].get("ContainerStatus") is None:
continue
container_id = task["Status"]["ContainerStatus"]["ContainerID"]
service_task_map[task["ServiceID"]]["tasks"].append(
{
"created_at": arrow.get(task["CreatedAt"]).datetime,
"status": task["Status"]["State"],
"timestamp": arrow.get(task["Status"]["Timestamp"]).datetime,
"git_sha": container_map.get(container_id, {}).get("git_sha"),
}
)
return service_task_map
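# Hedged usage sketch (added; not part of the original module): how these
# helpers are typically chained. The Settings instance is assumed to be
# configured elsewhere (e.g. from environment values).
def _example_report(settings: Settings):
    token = get_bearer_token(settings)
    services = get_services(settings, token)
    tasks = get_tasks(settings, token)
    containers = get_containers(settings, token)
    sidecars = check_simcore_running_sidecars(settings, services)
    deployed = check_simcore_deployed_services(settings, services, tasks, containers)
    return sidecars, deployed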
|
ITISFoundation/osparc-simcore
|
scripts/release/monitor/monitor_release/portainer_utils.py
|
portainer_utils.py
|
py
| 4,679 |
python
|
en
|
code
| 35 |
github-code
|
6
|
26929642562
|
import plotly.graph_objects as go
import plotly.io as pio
from PIL import Image
# to render in jupyterlab
#pio.renderers.default = "plotly_mimetype"
# Create figure
fig = go.Figure()
pyLogo = Image.open(r'C:\Users\l.trouw\Documents\Pycharm\Lean_simulation\VSMvisualizationMatrasses.png')
# Add trace
fig.add_trace(
go.Scatter(x=[0, 0.5, 1, 2, 2.2], y=[1.23, 2.5, 0.42, 3, 1])
)
fig.add_layout_image(
dict(
source=pyLogo,
xref="x",
yref="y",
x=0,
y=3,
sizex=2,
sizey=2,
sizing="stretch",
opacity=0.5,
layer="above")
)
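# Note (added): with xref="x"/yref="y" the image's top-left corner is anchored
# at data coordinates (0, 3) and stretched over a sizex=2 by sizey=2 region in
# the same data units; layer="above" draws it on top of the trace at 50% opacity.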
fig.show()
|
luuktrouw/Districon_lean_AdvancedAnalytics
|
Testfile.py
|
Testfile.py
|
py
| 665 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14837403984
|
from django.urls import path
from . import views
urlpatterns = [
path('post/<int:comment_pk>/comment_edit/', views.comment_edit, name='comment_edit'),
path('post/new/', views.post_new, name='post_new'),
path('post/list', views.post_list, name='post_list'),
path('post/<int:post_pk>/', views.post_detail, name='post_detail'),
path('post/<int:post_pk>/edit/', views.post_edit, name='post_edit'),
path('delete/<int:post_pk>', views.delete_post, name='delete_post'),
path('delete/comment/<int:comment_pk>', views.delete_comment, name='delete_comment'),
path('post/<int:post_pk>/comment_new', views.comment_new, name='comment_new'),
path('post/<int:post_pk>/post_like_or_dislike/', views.post_like_or_dislike, name='post_like_or_dislike'),
path('post/draft/list/', views.draft_list, name='draft_list'),
path('post/publish/<int:post_pk>/', views.publish, name='publish'),
path('post/tag_list/<int:tag_pk>/', views.tag_list, name='tag_list'),
path('post/category_posts/<int:category_pk>/', views.category_posts, name='category_posts'),
path('post/recommendations/', views.recommendations, name='recommendations'),
path('post/add_to_favorite/<int:post_pk>/', views.add_to_favorite, name='add_to_favorite'),
path('post/favorites/', views.favorites, name='favorites'),
]
|
meeeeeeeh/djangoblog
|
post/urls.py
|
urls.py
|
py
| 1,384 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28585638952
|
from sklearn.model_selection import train_test_split
import pandas as pd
import tensorflow as tf
import tensorflow_hub as hub
from datetime import datetime
import tensorflow_datasets as tfds
import bert
from bert import run_classifier
from bert import optimization
from bert import tokenization
from bert import run_classifier_with_tfhub
#https://github.com/google-research/bert.git
import sys
from tensorflow import keras
import os
import re
from transformers import *
import numpy as np
from tensorflow.python.lib.io import file_io
import pickle
import gc
import threading
import logging
import argparse
"""
Usage
>> python -u runBert.py @args.txt
python -u runBert.py @params_model1.txt
------Example args.txt file -----
--tpuAddress node-3
--tpuZone us-central1-f
--outputDir test
--seqLen 15
--modelHub https://tfhub.dev/google/bert_uncased_L-12_H-768_A-12/1
--batchSize 64
--epochs 40
--dropout .9
"""
####################################################
############ Setting output directory ##############
####################################################
def getDir(bucket, output_dir):
return 'gs://{}/{}'.format(bucket, output_dir)
def setUp_output_dir():
DO_DELETE = True
USE_BUCKET =True
if USE_BUCKET:
OUTPUT_DIR = getDir(BUCKET, OUTPUT_DIR)
if DO_DELETE:
try:
tf.gfile.DeleteRecursively(OUTPUT_DIR)
except:
# doesn't matter if the directory didn't exist
pass
tf.gfile.MakeDirs(OUTPUT_DIR)
print('***** Model output directory: {} *****'.format(OUTPUT_DIR))
#################################################
############# Load Data set #####################
#################################################
def loadPdData(gsPath):
return pd.read_csv(gsPath, sep = "\t")
def saveToGcloud(path,data,isPandas = False ):
'''Saves to gcloud so we dont have to do this long ass step every time'''
if isPandas:
data.to_csv(path, index=False, sep="\t")
else:
with file_io.FileIO(path, mode='wb') as f:
pickle.dump(data,f)
def readFromGcloud(path, isPandas = False):
if isPandas:
return pd.read_csv(path,sep="\t" )
else:
with file_io.FileIO(path, mode='rb') as f:
return pickle.load(f)
def worker_downloadTestData(name):
"""
Worker so we can download test data asynch
"""
logging.info("Thread %s: starting for loading test data", name)
global test_features
test_features = readFromGcloud(TEST_TFRecord_PATH)
logging.info("Thread %s: finishing for loading test data", name)
#######################################################
############# Creating a model #######################
#######################################################
def create_model(is_training, input_ids, input_mask, segment_ids, labels,
num_labels, bert_hub_module_handle, dropout):
"""Creates a classification model."""
tags = set()
if is_training:
tags.add("train")
bert_module = hub.Module(bert_hub_module_handle, tags=tags, trainable=True)
bert_inputs = dict(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids)
bert_outputs = bert_module(
inputs=bert_inputs,
signature="tokens",
as_dict=True)
output_layer = bert_outputs["pooled_output"]
hidden_size = output_layer.shape[-1].value
output_weights = tf.get_variable(
"output_weights", [num_labels, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"output_bias", [num_labels], initializer=tf.zeros_initializer())
with tf.variable_scope("loss"):
if is_training:
# I.e., 0.1 dropout
output_layer = tf.nn.dropout(output_layer, keep_prob=dropout)
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
probabilities = tf.nn.softmax(logits, axis=-1)
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
return (loss, per_example_loss, logits, probabilities)
def model_fn_builder(num_labels, learning_rate, num_train_steps,
num_warmup_steps, use_tpu, bert_hub_module_handle, dropout):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params):
"""The `model_fn` for TPUEstimator."""
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
label_ids = features["label_ids"]
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
(total_loss, per_example_loss, logits, probabilities) = create_model(
is_training, input_ids, input_mask, segment_ids, label_ids, num_labels,
bert_hub_module_handle, dropout)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op)
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(per_example_loss, label_ids, logits):
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
accuracy = tf.metrics.accuracy(label_ids, predictions)
loss = tf.metrics.mean(per_example_loss)
true_pos = tf.metrics.true_positives(
label_ids,
predictions)
true_neg = tf.metrics.true_negatives(
label_ids,
predictions)
false_pos = tf.metrics.false_positives(
label_ids,
predictions)
false_neg = tf.metrics.false_negatives(
label_ids,
predictions)
return {
"true_positives": true_pos,
"true_negatives": true_neg,
"false_positives": false_pos,
"false_negatives": false_neg,
"eval_accuracy": accuracy,
"eval_loss": loss,
}
eval_metrics = (metric_fn, [per_example_loss, label_ids, logits])
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
eval_metrics=eval_metrics)
elif mode == tf.estimator.ModeKeys.PREDICT:
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode, predictions={"probabilities": probabilities})
else:
raise ValueError(
"Only TRAIN, EVAL and PREDICT modes are supported: %s" % (mode))
return output_spec
return model_fn
####################################################
###### Functions to train + evaluate model #########
####################################################
def get_run_config(output_dir):
"""
Used for run configuration when TPU used
"""
return tf.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
model_dir=output_dir,
save_checkpoints_steps=SAVE_CHECKPOINTS_STEPS,
tpu_config=tf.contrib.tpu.TPUConfig(
iterations_per_loop=ITERATIONS_PER_LOOP,
num_shards=NUM_TPU_CORES,
per_host_input_for_training=tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2))
def getEstimator(mode_fn):
"""
Returns the estimator used to train/eval model
"""
return tf.estimator.tpu.TPUEstimator(
use_tpu=True,
model_fn=mode_fn,
config=get_run_config(OUTPUT_DIR),
train_batch_size=BATCH_SIZE,
eval_batch_size=EVAL_BATCH_SIZE,
predict_batch_size=PREDICT_BATCH_SIZE,
eval_on_tpu = True
)
def model_train(estimator):
"""
    Trains the model; note this setup is currently only geared toward running on a TPU
"""
#Set drop_remainder =True to fix a TPU error
#https://stackoverflow.com/questions/58029896/bert-fine-tuning-with-estimators-on-tpus-on-colab-typeerror-unsupported-operand
print('***** Started training at {} *****'.format(datetime.now()))
print(' Num examples = {}'.format(len(train_features)))
print(' Batch size = {}'.format(BATCH_SIZE))
tf.logging.info(" Num steps = %d", num_train_steps)
current_time = datetime.now()
train_input_fn = run_classifier.input_fn_builder(
features=train_features,
seq_length=MAX_SEQ_LENGTH,
is_training=True,
drop_remainder=True)
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
print("Finished: Training took time ", datetime.now() - current_time)
#train_features
def model_evaluate(estimator, data):
"""
Evaluates the model
"""
print('***** Started evaluation at {} *****'.format(datetime.now()))
print(' Num examples = {}'.format(len(data)))
print(' Batch size = {}'.format(EVAL_BATCH_SIZE))
# Eval will be slightly WRONG on the TPU because it will truncate
# the last batch.
eval_steps = int(len(data) / EVAL_BATCH_SIZE)
eval_input_fn = run_classifier.input_fn_builder(
features=data,
seq_length=MAX_SEQ_LENGTH,
is_training=False,
drop_remainder=True)
result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)
print('***** Finished evaluation at {} *****'.format(datetime.now()))
output_eval_file = os.path.join(OUTPUT_DIR, "eval","eval_results.txt")
tf.gfile.MakeDirs(os.path.join(OUTPUT_DIR, "eval"))
with tf.gfile.GFile(output_eval_file, "w") as writer:
print("***** Eval results *****")
for key in sorted(result.keys()):
print(' {} = {}'.format(key, str(result[key])))
writer.write("%s = %s\n" % (key, str(result[key])))
####################################################
################# Utility Functions ##############
####################################################
def saveModelParams(params, _dir):
"""
Save model params to gCloud
"""
model_params_file = os.path.join(_dir,"modelParams","model_parameters.txt")
tf.gfile.MakeDirs(os.path.join(_dir, "modelParams"))
with tf.gfile.GFile(model_params_file, "w") as writer:
print("***** Model Parameters *****")
for key in sorted(params.keys()):
print(' {} = {}'.format(key, str(params[key])))
writer.write("%s = %s\n" % (key, str(params[key])))
print("Model paramters at: {}".format(os.path.join(_dir, "modelParams")))
def convert_arg_line_to_args(arg_line):
"""
From: https://stackoverflow.com/questions/25084993/why-isnt-fromfile-prefix-chars-in-python-argparse-working
"""
for arg in arg_line.split():
if not arg.strip():
continue
yield arg
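# Usage sketch (hypothetical file name and values): with fromfile_prefix_chars='@' and the
# generator above, arguments can live in a plain-text file and be passed as
# `python runBert.py @args.txt`, where args.txt might contain lines such as:
#   -tpuAddress grpc://10.240.1.2:8470
#   -outputDir run1
#   -seqLen 128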
if __name__ == "__main__":
my_parser = argparse.ArgumentParser(
fromfile_prefix_chars='@',
prog="runBert",
description='Run bert on patent data!!')
my_parser.convert_arg_line_to_args = convert_arg_line_to_args
my_parser.add_argument(
'-tpuAddress',
action='store',
type=str,
required=True,
help="The address of TPU node"
)
my_parser.add_argument(
'-tpuZone',
action='store',
type=str,
required=False,
nargs='?',
default="us-central1-f",
help="The zone that the TPU is in: default us-central1-f"
)
my_parser.add_argument(
'-outputDir',
action='store',
type=str,
required=True,
help="The output dir of results: will be stored in gs bucket `patents-research` under folder bertResults{outputDir}"
)
my_parser.add_argument(
'-seqLen',
action='store',
type=int,
required=True,
help="The sequence length for the language model"
)
my_parser.add_argument(
'-modelHub',
action='store',
type=str,
required=False,
nargs='?',
default="https://tfhub.dev/google/bert_uncased_L-12_H-768_A-12/1",
help="The Bert model Hub"
)
my_parser.add_argument(
'-batchSize',
action='store',
type=int,
required=False,
default=64,
nargs='?',
help="The training batch size"
)
my_parser.add_argument(
'-epochs',
action='store',
type=float,
required=False,
default=40.0,
nargs='?',
help="The number of epochs"
)
my_parser.add_argument(
'-dropout',
action='store',
type=float,
required=False,
default=0.7,
nargs='?',
help="Percent of data to keep"
)
args = my_parser.parse_args()
##### SET TPU CONSTANTS AND CONNECT TO IT #######
TPU_ADDRESS = args.tpuAddress
TPU_ZONE = args.tpuZone
    USE_TPU = True
ITERATIONS_PER_LOOP = 1000
tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=TPU_ADDRESS, zone=TPU_ZONE)
tf.tpu.experimental.initialize_tpu_system(tpu_cluster_resolver)
tf.distribute.experimental.TPUStrategy(tpu_cluster_resolver)
#NUM_TPU_CORES = len(tf.config.experimental.list_logical_devices('TPU'))
NUM_TPU_CORES = 8
if NUM_TPU_CORES==0:
sys.exit("Problem with tpu make sure region is correct or tpu is runnign")
###################################
####### CONSTANTS ##################
####################################
DATA_PATH = "gs://patents-research/patent_research/data_frwdcorrect.tsv"
OUTPUT_DIR = "bertResults_{}".format(args.outputDir)# where the model will be saved
BUCKET = "patents-research"
DATA_COLUMN = 'text'
LABEL_COLUMN = 'label'
label_list = [0, 1, 2]
MAX_SEQ_LENGTH = args.seqLen
TRAIN_TFRecord_PATH= "gs://patents-research/patent_research/{}_{}.pickle".format("train_features",MAX_SEQ_LENGTH)
TEST_TFRecord_PATH= "gs://patents-research/patent_research/{}_{}.pickle".format("test_features",MAX_SEQ_LENGTH)
BERT_MODEL_HUB = args.modelHub
#Set output directory
setUp_output_dir()
# Force TF Hub writes to the GS bucket we provide.
os.environ['TFHUB_CACHE_DIR'] = os.path.join(OUTPUT_DIR,"tfhub_cache")
tf.gfile.MakeDirs(os.path.join(OUTPUT_DIR,"tfhub_cache"))
# Model Parameters
# These hyperparameters are copied from this colab notebook (https://colab.sandbox.google.com/github/tensorflow/tpu/blob/master/tools/colab/bert_finetuning_with_cloud_tpus.ipynb)
BATCH_SIZE = args.batchSize
EVAL_BATCH_SIZE = NUM_TPU_CORES
PREDICT_BATCH_SIZE = NUM_TPU_CORES
LEARNING_RATE = 2e-5
NUM_TRAIN_EPOCHS = args.epochs
DROPOUT_KEEP_PROB = args.dropout
    # Warmup is a period of time where the learning rate
# is small and gradually increases--usually helps training.
WARMUP_PROPORTION = 0.1
# Model configs
SAVE_CHECKPOINTS_STEPS = 1000
SAVE_SUMMARY_STEPS = 100
# Compute # train and warmup steps from batch size
num_train_steps = int(len(train_features) / BATCH_SIZE * NUM_TRAIN_EPOCHS)
num_warmup_steps = int(num_train_steps * WARMUP_PROPORTION)
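    # Worked example (illustrative numbers only): with 100,000 training examples,
    # BATCH_SIZE=64 and NUM_TRAIN_EPOCHS=40 this gives
    # num_train_steps = int(100000 / 64 * 40) = 62500 and num_warmup_steps = int(62500 * 0.1) = 6250.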
params={
"TPU_ADDRESS":TPU_ADDRESS,
"TPU_ZONE":TPU_ZONE,
"TPU_ITERATIONS_PER_LOOP":ITERATIONS_PER_LOOP,
"NUM_TPU_CORES":NUM_TPU_CORES,
"TFHUB_CACHE_DIR":os.path.join(OUTPUT_DIR,"tfhub_cache"),
"DATA_PATH":DATA_PATH,
"OUTPUT_DIR":OUTPUT_DIR,
"MAX_SEQ_LENGTH":MAX_SEQ_LENGTH,
"TRAIN_TFRecord_PATH":TRAIN_TFRecord_PATH,
"TEST_TFRecord_PATH":TEST_TFRecord_PATH,
"BERT_MODEL_HUB":BERT_MODEL_HUB,
"BATCH_SIZE":BATCH_SIZE,
"EVAL_BATCH_SIZE":EVAL_BATCH_SIZE,
"PREDICT_BATCH_SIZE":PREDICT_BATCH_SIZE,
"LEARNING_RATE":LEARNING_RATE,
"NUM_TRAIN_EPOCHS":NUM_TRAIN_EPOCHS,
"DROPOUT_KEEP_PROB":DROPOUT_KEEP_PROB,
"WARMUP_PROPORTION":WARMUP_PROPORTION,
"SAVE_CHECKPOINTS_STEPS":SAVE_CHECKPOINTS_STEPS,
"SAVE_SUMMARY_STEPS":SAVE_SUMMARY_STEPS,
"num_train_steps":num_train_steps,
"num_warmup_steps":num_warmup_steps
}
saveModelParams(params,OUTPUT_DIR)
#####################################################
########### RUNNING SET UP FUNCTIONS ################
#####################################################
# Download train data
print("Reading {} from gCloud".format(TRAIN_TFRecord_PATH))
#train_features = readFromGcloud(TRAIN_TFRecord_PATH)
print("Finished {} from gCloud!".format(TRAIN_TFRecord_PATH))
# Download test data async - test data will be saved at test_features
format = "%(asctime)s: %(message)s"
logging.basicConfig(format=format, level=logging.INFO,datefmt="%H:%M:%S")
getTestData_thread = threading.Thread(target=worker_downloadTestData, args=(1,))
#getTestData_thread.start() #async download of test data
#####################################################
########## Train + Eval Model #######################
#####################################################
mode_fn = model_fn_builder(
num_labels=len(label_list),
learning_rate=LEARNING_RATE,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
dropout = DROPOUT_KEEP_PROB,
use_tpu = USE_TPU,
bert_hub_module_handle = BERT_MODEL_HUB
)
#estimator = getEstimator(mode_fn)
#model_train(estimator)
#gc.collect()
#del train_features # Remove train_features might cause mem limit
#gc.collect() # Remove train_features might cause mem limit
#getTestData_thread.join()
#gc.collect()
#model_evaluate(estimator, train_features)
|
jdanene/patent-language-modeling
|
src/analysis/code/runBert.py
|
runBert.py
|
py
| 18,927 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24199790207
|
class Solution:
def nextGreaterElements(self, nums):
"""
Args:
nums: list[int]
Return:
list[int]
"""
res = [-1 for _ in range(len(nums))]
stack = []
for _ in range(2):
for i in range(len(nums)):
while stack and nums[i] > nums[stack[-1]]:
j = stack.pop()
if res[j] == -1:
res[j] = nums[i]
stack.append(i)
return res
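# Quick check (illustrative, not part of the original submission): for the circular
# array [1, 2, 1] the expected answer is [2, -1, 2].
if __name__ == "__main__":
    print(Solution().nextGreaterElements([1, 2, 1]))  # -> [2, -1, 2]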
|
AiZhanghan/Leetcode
|
code/503. 下一个更大元素 II.py
|
503. 下一个更大元素 II.py
|
py
| 523 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13240819097
|
#
# A framework for messaging between programs
# and visualising the signaling
#
import zmq
import time
import random
import json
import sys
import protobuf_examples.example
ctx = zmq.Context()
class Stub:
def __init__(self, name):
self.name = name
self.socket = ctx.socket(zmq.PUSH)
timeout_milliseconds = 200
self.socket.setsockopt(zmq.LINGER, timeout_milliseconds)
self.socket.connect("tcp://localhost:5556")
    def log_signaling(self, source, target, payload=None):
        # Coerce the payload to text so the join below also works for None or bytes payloads
        if payload is None:
            payload = ''
        elif isinstance(payload, bytes):
            payload = payload.decode('latin-1')
        log_entry = (source, target, payload)
        log_entry_string = "|".join(log_entry)
        self.socket.send_string(log_entry_string, encoding='latin-1')
class SpaceShip(Stub):
def send_to_station(self, target, msg):
self.log_signaling(source=self.name, target=target.name, payload=msg)
# Send signal
print("Sending from %s to %s" % (self.name, target.name))
class SpaceStation(Stub):
def send_to_station(self, target, msg):
self.log_signaling(source=self.name, target=target.name, payload=msg)
# Send signal
print("Sending from %s to %s" % (self.name, target.name))
if __name__ == '__main__':
# Create units
space_station_1 = SpaceStation(name='Earth Station')
space_station_2 = SpaceStation(name='Mars Station')
space_ship_v = SpaceShip(name='Starship Voyager')
# Start signaling
while True:
msg = random.choice(['Hello Space!', 'Hello Earth', 'Where are you guys?', 'We are at the final fronter', 'We are done, stop!', 'No can do', protobuf_examples.example.serialized_object])
source = random.choice([space_station_1, space_station_2, space_ship_v])
target = random.choice([space_station_1, space_station_2, space_ship_v])
source.send_to_station(target, msg)
time.sleep(1)
#space_station_1.send_to_station(space_station_2, 'Hello Space!')
#space_station_2.send_to_station(space_station_1, 'Hello Earth')
#space_station_2.send_to_station(space_ship_v, 'Where are you guys?')
#space_ship_v.send_to_station(space_station_1, )
#space_ship_v.send_to_station(space_station_1, 'We are done, stop!')
#space_station_2.send_to_station(space_ship_v, )
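# Receiver sketch (assumption, not part of this file): the visualiser on the other end of
# the PUSH socket would bind a PULL socket on port 5556 and split each message on '|', e.g.:
#   pull = ctx.socket(zmq.PULL)
#   pull.bind("tcp://*:5556")
#   while True:
#       source, target, payload = pull.recv_string().split("|", 2)
#       print(source, "->", target, ":", payload)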
|
magnuswahlstrand/home-automation
|
stub_world/send_sequence.py
|
send_sequence.py
|
py
| 2,322 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41656015394
|
""" Methods for doing logistic regression."""
import numpy as np
from utils import sigmoid
import math
def logistic_predict(weights, data):
"""
Compute the probabilities predicted by the logistic classifier.
Note: N is the number of examples and
M is the number of features per example.
Inputs:
weights: (M+1) x 1 vector of weights, where the last element
corresponds to the bias (intercepts).
data: N x M data matrix where each row corresponds
to one data point.
Outputs:
        y: N x 1 vector of probabilities. This is the output of the classifier.
"""
N = len(data)
M = len(data[0])
insert_coln = np.ones((N, 1))
data = np.append(data, insert_coln, axis = 1)
z = np.dot(data, np.asarray(weights))
y = 1/(1+np.exp((-1)*z))
    y = np.reshape(y, (-1, 1))
return y
def evaluate(targets, y):
"""
Compute evaluation metrics.
Inputs:
targets : N x 1 vector of targets.
y : N x 1 vector of probabilities.
Outputs:
ce : (scalar) Cross entropy. CE(p, q) = E_p[-log q]. Here we want to compute CE(targets, y)
frac_correct : (scalar) Fraction of inputs classified correctly.
"""
ce = 0
correct = 0
for i in range(len(targets)):
t = targets[i][0]
y_ind = y[i][0]
ce= ce-t*np.log(y_ind)-(1-t)*np.log(1-y_ind)
if(y_ind > 0.5 and t==1) or (y_ind <= 0.5 and t==0):
correct+=1
frac_correct = correct/len(targets)
return ce, frac_correct
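# Quick check (illustrative): a single target of 1 predicted with probability 0.8 contributes
# -log(0.8) ~= 0.223 to ce and counts as correct because 0.8 > 0.5.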
def logistic(weights, data, targets, hyperparameters):
"""
Calculate negative log likelihood and its derivatives with respect to weights.
Also return the predictions.
Note: N is the number of examples and
M is the number of features per example.
Inputs:
weights: (M+1) x 1 vector of weights, where the last element
corresponds to bias (intercepts).
data: N x M data matrix where each row corresponds
to one data point.
targets: N x 1 vector of targets class probabilities.
hyperparameters: The hyperparameters dictionary.
Outputs:
f: The sum of the loss over all data points. This is the objective that we want to minimize.
df: (M+1) x 1 vector of derivative of f w.r.t. weights.
y: N x 1 vector of probabilities.
"""
N = len(data)
M = len(data[0])
y = logistic_predict(weights, data)
insert_coln = np.ones((N, 1))
data = np.append(data, insert_coln, axis = 1)
df=np.zeros((M+1, 1))
for i in range(N):
df+=(y[i]-targets[i][0])*np.reshape(data[i], (-1, 1))
f, frac_correct = evaluate(targets,y)
return f, df, y
def logistic_pen(weights, data, targets, hyperparameters):
"""
Calculate negative log likelihood and its derivatives with respect to weights.
Also return the predictions.
Note: N is the number of examples and
M is the number of features per example.
Inputs:
weights: (M+1) x 1 vector of weights, where the last element
corresponds to bias (intercepts).
data: N x M data matrix where each row corresponds
to one data point.
targets: N x 1 vector of targets class probabilities.
hyperparameters: The hyperparameters dictionary.
Outputs:
f: The sum of the loss over all data points. This is the objective that we want to minimize.
df: (M+1) x 1 vector of derivative of f w.r.t. weights.
"""
f, df, y = logistic(weights, data, targets, hyperparameters)
df = df + weights * hyperparameters["weight_regularization"]
f = f+(hyperparameters["weight_regularization"]*np.linalg.norm(weights)**2)/2
return f, df, y
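# Toy sanity check (illustrative values only, not part of the assignment).
if __name__ == "__main__":
    toy_weights = np.array([[0.5], [-0.25], [0.1]])   # (M+1) x 1, last entry is the bias
    toy_data = np.array([[1.0, 2.0], [0.0, -1.0]])    # N x M = 2 x 2
    toy_targets = np.array([[1], [0]])
    f, df, y = logistic_pen(toy_weights, toy_data, toy_targets,
                            {"weight_regularization": 0.1})
    print("loss:", float(f), "gradient shape:", df.shape)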
|
DaPraxis/Logistic_Regression-Neural_Networks
|
q2_materials/logistic.py
|
logistic.py
|
py
| 3,920 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4699294372
|
import math
from pprint import pprint
import timeit
#######PROCESSING#############
class Library(object):
def __init__(self, library_id, signup_time, books, books_per_day):
self.library_id = library_id
self.signup_time = signup_time
self.books = books
self.book_amount = len(self.books)
self.books_per_day = books_per_day
def __repr__(self):
return 'Library: {} \n Signup Time: {} \n Amount of books: {} \n Books per day: {} \n Books: {} \n Time to ' \
'completion: {}'.format(
self.library_id, self.signup_time, self.book_amount, self.books_per_day, ', '.join(map(str,self.books)),
self.time_to_completion()
)
def time_to_completion(self, start_time=0):
return start_time + self.signup_time + math.ceil(
self.book_amount / self.books_per_day)
def get_input(filename):
with open(filename, 'r') as f:
lines = f.readlines()
total_counts = lines[0]
amount_of_books, amount_of_libraries, days_for_scanning = map(
int, total_counts.split(' '))
book_scores_line = lines[1]
book_scores = {}
for index, value in enumerate(book_scores_line.split(' ')):
book_scores[index] = value
return_value = []
for i in range(amount_of_libraries):
lib_stats = lines[2 + 2 * i]
lib_books = lines[2 + 2 * i + 1]
books_in_library, signup_time, shippings_per_day = map(
int, lib_stats.split(' '))
books = list(map(int, lib_books.split(' ')))
return_value.append(
Library(i, signup_time, books, shippings_per_day))
return return_value, book_scores, days_for_scanning
########SOLVING###########
def solve(map_, out_file_name):
with open("Output/" +out_file_name, 'w+') as out_file:
len_ = str(len(map_))
out_file.write(len_ + "\n")
for key, value in map_.items():
first_line = str(key)
second_line = ""
counter = 0
for ll in value:
for l in ll:
counter += 1
second_line += " " + str(l)
first_line += " " + str(counter)
out_file.write(first_line + "\n")
out_file.write(second_line.strip() + "\n")
def main():
input_files = [
'b_read_on.txt', 'c_incunabula.txt', 'd_tough_choices.txt',
'e_so_many_books.txt', 'f_libraries_of_the_world.txt'
]
out_files = ['b.txt', 'c.txt', 'd.txt', 'e.txt', 'f.txt']
counter = 0
for input_file in input_files:
_, ext = input_file.split('.')
libraries, book_scores, days_for_scanning = get_input("Input/" + input_file)
sorted_lib = sort_libraries(libraries)
for l in sorted_lib:
sort_books(l.books, book_scores)
elapsed_days = 0
scanned = set()
current_lib = sorted_lib.pop()
currently_processing = [] #Queue
library_map = dict()
while elapsed_days < days_for_scanning:
if current_lib.signup_time > 0:
current_lib.signup_time -= 1
else:
currently_processing.append(current_lib)
if len(sorted_lib) > 0:
current_lib = sorted_lib.pop()
process_library_queue(currently_processing, library_map, scanned)
elapsed_days += 1
if elapsed_days >= days_for_scanning:
process_library_queue(currently_processing, library_map,
scanned)
solve(library_map, out_files[counter])
counter += 1
def process_library_queue(currently_processing, library_map, scanned):
    for l in list(currently_processing):  # iterate over a copy so finished libraries can be removed safely below
result, _ = process_books(l, scanned)
if len(result):
if l.library_id in library_map:
library_map[l.library_id].append(result)
else:
library_map[l.library_id] = []
library_map[l.library_id].append(result)
else:
currently_processing.remove(l)
def process_books(l, scanned):
result = []
for i in range(l.books_per_day):
if len(l.books):
selected = l.books.pop()
while len(l.books) and selected in scanned:
selected = l.books.pop()
result.append(selected)
scanned.add(selected)
else:
return result, True
return result, False
def lib_sorter(l):
return (l.book_amount * l.books_per_day) / l.signup_time
def sort_libraries(libraries):
return sorted(libraries,
key= lib_sorter)
def sort_books(list_books, book_scores):
list_books.sort(key=lambda x: (book_scores[x]),reverse=True)
if __name__ == "__main__":
import timeit
start = timeit.default_timer()
main()
stop = timeit.default_timer()
print('Time: ', stop - start)
|
zeyadkhaled/Hashcode-2020
|
solution.py
|
solution.py
|
py
| 4,999 |
python
|
en
|
code
| 2 |
github-code
|
6
|
37562209258
|
import os
from modules.computation.Dataset import Dataset
def _main():
# Define the default values for the options
pathHome = os.path.expanduser('~')
pathWork = os.path.join( pathHome, 'Desktop/ProyectoGDSA')
pathImages = os.path.join(pathWork,'1_images')
pathDatasets = os.path.join( pathWork, '2_datasets' )
# For each data partition (train & test)
for partition in os.listdir(pathImages):
# If it is a directory
dirPartition = os.path.join( pathImages, partition )
if os.path.isdir(dirPartition):
# Define the filename to contain the list of images to process
filenameOut = os.path.join( pathDatasets, partition + '.txt')
dataset = Dataset( filenameOut,
flagSaveInMemory=False,
flagVerbose=True)
dataset.build( dirPartition, '.jpg' )
#def run():
_main()
|
aamcgdsa21/GDSA
|
Descriptor/tools/2_datasets.py
|
2_datasets.py
|
py
| 965 |
python
|
en
|
code
| 1 |
github-code
|
6
|
33425664451
|
# Q) not a triangle
# you have n sticks and pick any 3 and it must not form a triangle
# we know for any 2 sides a, b if we have a third side c
# such that a + b < c then we have 1 possible answer
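# e.g. with sticks [2, 4, 10]: 2 + 4 = 6 < 10, so the triple (2, 4, 10) can never form a triangle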
import bisect
n = int(input())
def bisect_right(li, target):
n = len(li)
low = 0
high = n
while(low < high):
mid = (high+low)//2
if target >= li[mid]:
low = mid + 1
else:
            high = mid
return low
while True:
li = [int(x) for x in input().split()]
li.sort()
ans = 0
for i in range(n-2):
for j in range(i+1, n-1):
ans = ans + len(li) - bisect.bisect_right(li, li[i]+li[j])
print(ans)
n = int(input())
if n == 0:
break
|
harasees-singh/Notes
|
Searching/Binary_Not_A_Triangle.py
|
Binary_Not_A_Triangle.py
|
py
| 757 |
python
|
en
|
code
| 1 |
github-code
|
6
|
16009044244
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Makes an organized git repo of a book folder
"""
from __future__ import print_function
import codecs
import os
from os.path import abspath, dirname
import jinja2
import sh
from .parameters import GITHUB_ORG
class NewFilesHandler():
""" NewFilesHandler - templates and copies additional files to book repos
"""
README_FILENAME = 'README.rst'
def __init__(self, book):
self.book = book
package_loader = jinja2.PackageLoader('gitenberg', 'templates')
self.env = jinja2.Environment(loader=package_loader)
def add_new_files(self):
self.template_readme()
self.travis_files()
self.copy_files()
def template_readme(self):
template = self.env.get_template('README.rst.j2')
readme_text = template.render(
authors=self.book.meta.authors_short(),
**self.book.meta.metadata
)
readme_path = "{0}/{1}".format(
self.book.local_path,
self.README_FILENAME
)
with codecs.open(readme_path, 'w', 'utf-8') as readme_file:
readme_file.write(readme_text)
def travis_files(self):
template = self.env.get_template('.travis.yml')
travis_key = self.book.github_repo.travis_key()
travis_text = template.render({
'epub_title': 'book',
'encrypted_key': travis_key,
'repo_name': self.book.meta._repo,
'repo_owner': GITHUB_ORG
})
fpath = os.path.join(self.book.local_path, ".travis.yml")
with open(fpath, 'w') as f:
f.write(travis_text)
if self.book.github_repo.travis_key():
fpath = os.path.join(self.book.local_path, ".travis.deploy.api_key.txt")
with open(fpath, 'w') as f:
f.write(travis_key)
def copy_files(self):
""" Copy the LICENSE and CONTRIBUTING files to each folder repo
Generate covers if needed. Dump the metadata.
"""
files = [u'LICENSE', u'CONTRIBUTING.rst']
this_dir = dirname(abspath(__file__))
for _file in files:
sh.cp(
'{0}/templates/{1}'.format(this_dir, _file),
'{0}/'.format(self.book.local_path)
)
# copy metadata rdf file
sh.cp(
self.book.meta.rdf_path,
'{0}/'.format(self.book.local_path)
)
if 'GITenberg' not in self.book.meta.subjects:
self.book.meta.metadata['subjects'].append('GITenberg')
if not self.book.meta._version:
            self.book.meta.metadata["_version"] = "0.0.1"
self.book.meta.dump_file(os.path.join(self.book.local_path, 'metadata.yaml'))
|
mgotliboym/gitberg
|
gitenberg/make.py
|
make.py
|
py
| 2,764 |
python
|
en
|
code
| null |
github-code
|
6
|
73817284026
|
"""An ext to listen for message events and syncs them to the database."""
import discord
from discord.ext import commands
from sqlalchemy import update
from metricity.bot import Bot
from metricity.config import BotConfig
from metricity.database import async_session
from metricity.exts.event_listeners import _utils
from metricity.models import Message, User
class MessageListeners(commands.Cog):
"""Listen for message events and sync them to the database."""
def __init__(self, bot: Bot) -> None:
self.bot = bot
@commands.Cog.listener()
async def on_message(self, message: discord.Message) -> None:
"""Add a message to the table when one is sent providing the author has accepted."""
if not message.guild:
return
if message.author.bot:
return
if message.guild.id != BotConfig.guild_id:
return
if message.type in (discord.MessageType.thread_created, discord.MessageType.auto_moderation_action):
return
await self.bot.sync_process_complete.wait()
await self.bot.channel_sync_in_progress.wait()
async with async_session() as sess:
if not await sess.get(User, str(message.author.id)):
return
cat_id = message.channel.category.id if message.channel.category else None
if cat_id in BotConfig.ignore_categories:
return
from_thread = isinstance(message.channel, discord.Thread)
await _utils.sync_message(message, sess, from_thread=from_thread)
await sess.commit()
@commands.Cog.listener()
async def on_raw_message_delete(self, message: discord.RawMessageDeleteEvent) -> None:
"""If a message is deleted and we have a record of it set the is_deleted flag."""
async with async_session() as sess:
await sess.execute(update(Message).where(Message.id == str(message.message_id)).values(is_deleted=True))
await sess.commit()
@commands.Cog.listener()
async def on_raw_bulk_message_delete(self, messages: discord.RawBulkMessageDeleteEvent) -> None:
"""If messages are deleted in bulk and we have a record of them set the is_deleted flag."""
async with async_session() as sess:
await sess.execute(update(Message).where(Message.id.in_(messages.message_ids)).values(is_deleted=True))
await sess.commit()
async def setup(bot: Bot) -> None:
"""Load the MessageListeners cog."""
await bot.add_cog(MessageListeners(bot))
|
python-discord/metricity
|
metricity/exts/event_listeners/message_listeners.py
|
message_listeners.py
|
py
| 2,561 |
python
|
en
|
code
| 39 |
github-code
|
6
|
44426526976
|
import json
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, connect_nodes_bi, connect_nodes, sync_blocks, disconnect_nodes_bi
from test_framework.key import CECKey
from test_framework.blocktools import create_block, create_coinbase
from test_framework.script import hash160, CScript, OP_DUP, OP_HASH160, OP_EQUALVERIFY, OP_CHECKSIG, SignatureHashForkId, SIGHASH_ALL , SIGHASH_FORKID
from test_framework.mininode import CTransaction, CTxOut, CTxIn, COutPoint, ToHex
from test_framework.authproxy import JSONRPCException
class User:
def __init__(self, secret_bytes):
self.key = CECKey()
self.key.set_secretbytes(secret_bytes)
self.pubkey = self.key.get_pubkey()
def spend_to_pkh (self, node, spend_tx, n, amount, to_pubkey):
value = int(amount)
scriptPubKey = CScript([OP_DUP, OP_HASH160, hash160(to_pubkey), OP_EQUALVERIFY, OP_CHECKSIG])
tx = CTransaction()
assert (n < len(spend_tx.vout))
tx.vin.append(CTxIn(COutPoint(spend_tx.sha256, n), b"", 0xffffffff))
tx.vout.append(CTxOut(value, scriptPubKey))
tx.calc_sha256()
self.__sign_tx(tx, spend_tx, n)
tx.rehash()
node.sendrawtransaction(ToHex(tx), False, True)
if False: # if we want to get the tx as json formatted output for debugging
tx_json = node.decoderawtransaction(ToHex(tx))
for output in tx_json['vout']:
output['value'] = float(output['value'])
text = json.dumps(tx_json, indent=4)
print("ds transaction:", text)
return tx
def __sign_tx(self, sign_tx, spend_tx, n):
sighash = SignatureHashForkId( spend_tx.vout[n].scriptPubKey, sign_tx, 0, SIGHASH_ALL | SIGHASH_FORKID, spend_tx.vout[n].nValue )
sign_tx.vin[0].scriptSig = CScript([self.key.sign(sighash) + bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID])), self.pubkey ])
class CompetingChainsTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.nodeargs = ["-txindex=1", "-disablesafemode=0", "-debug=1"]
self.extra_args = [self.nodeargs, self.nodeargs]
self.nbDoubleSpends = 3
self.lenChain0 = 8 # more than SAFE_MODE_MAX_VALID_FORK_LENGTH 7
self.lenChain1 = 18 # less than SAFE_MODE_MAX_VALID_FORK_DISTANCE (72)
self.FORK_ROOT_HEIGHT = 200
def setup_network(self):
self.setup_nodes()
def make_coinbase(self, conn_rpc):
tip = conn_rpc.getblock(conn_rpc.getbestblockhash())
coinbase_tx = create_coinbase(tip["height"] + 1)
block = create_block(int(tip["hash"], 16), coinbase_tx, tip["time"] + 1)
block.solve()
conn_rpc.submitblock(ToHex(block))
return coinbase_tx
def send_funds_to_attacker (self, node, attacker, coinbase_tx):
funding_amount = int(coinbase_tx.vout[0].nValue / self.nbDoubleSpends)
funding_tx = CTransaction()
funding_tx.vin.append(CTxIn(COutPoint(coinbase_tx.sha256, 0), b"", 0xffffffff))
scriptPubKey = CScript([OP_DUP, OP_HASH160, hash160(attacker.pubkey), OP_EQUALVERIFY, OP_CHECKSIG])
for i in range(self.nbDoubleSpends):
funding_tx.vout.append(CTxOut(funding_amount, scriptPubKey))
funding_tx.rehash()
funding_txid = node.sendrawtransaction(ToHex(funding_tx), False, True)
assert_equal(node.getrawmempool(), [funding_txid])
return funding_tx
def contains_double_spends (self):
spent_inputs = set([])
seen_transactions = []
ds_counter = 0
for node in self.nodes:
for height in range(node.getblockcount() + 1):
blockhash = node.getblockhash(height)
block = node.getblock(blockhash, 2)
for txraw in block['tx']:
if txraw['txid'] in seen_transactions:
continue
else:
seen_transactions.append(txraw['txid'])
for i in txraw['vin']:
if 'coinbase' in i:
continue
new_element = (i['txid'], i['vout'])
if new_element in spent_inputs:
ds_counter += 1
else:
spent_inputs.add(new_element)
return ds_counter
def run_test(self):
# Test 1:
# 1. fund an attacker for the test on node0
# 2. progress to block height 200
# 3. sync all nodes
# 4. disconnect the two nodes forking at block height 200
# 5. spend attackers fund in node0 and double spend them in node1
# 6. Assert that the two chains actually contain the attackers double-spends
attacker = User(b"horsebattery")
friend0_of_attacker = User(b"fatstack")
friend1_of_attacker = User(b"fatheap")
node0 = self.nodes[0] # victim node
node1 = self.nodes[1] # node under control of attacker
self.log.info("fund attacker. We fund him at height 200 -2")
self.log.info("just for debugging convenience. We plan to fork at height 200")
coinbase_tx = self.make_coinbase(node0)
node0.generate(self.FORK_ROOT_HEIGHT - 2)
assert (node0.getblockcount() == self.FORK_ROOT_HEIGHT - 1)
self.log.info("fund attacker")
funding_tx = self.send_funds_to_attacker (node0, attacker, coinbase_tx)
node0.generate(1)
assert (node0.getblockcount() == self.FORK_ROOT_HEIGHT + 0)
self.log.info("sync nodes. All nodes have the same chain and funding transactions after syncing")
connect_nodes_bi(self.nodes, 0, 1)
sync_blocks(self.nodes)
disconnect_nodes_bi(self.nodes, 0, 1)
# fork from here
assert (node0.getblockcount() == node1.getblockcount())
self.log.info("spends attackers funds in node0")
for i in range(self.nbDoubleSpends):
attacker.spend_to_pkh(node0, funding_tx, i, funding_tx.vout[i].nValue, friend0_of_attacker.pubkey)
node0.generate(1)
assert (node0.getblockcount() == self.FORK_ROOT_HEIGHT + 1)
self.log.info("double spend attacker funds in node1")
for i in range(self.nbDoubleSpends):
attacker.spend_to_pkh(node1, funding_tx, i, funding_tx.vout[i].nValue, friend1_of_attacker.pubkey)
node1.generate(1)
first_bad_block = node1.getbestblockhash()
assert (node1.getblockcount() == self.FORK_ROOT_HEIGHT + 1)
self.log.info("check that funds have been double spent to different addresses")
assert(self.contains_double_spends () == self.nbDoubleSpends)
# Test 2.
# 1. Progress the two competing chains in node0 and node1 to different lengths (configurable).
# node1 shall hold the longer chain and is the one controlled by the attacker.
# The two nodes are not connected to each other directly or indirectly and at this point
        # contain the double-spends we have prepared.
# 2. connect the nodes and sync them to force a reorg
        # 3. Assert that all double-spends disappeared - which nonetheless means the attack succeeded.
assert(self.lenChain0 <= self.lenChain1)
self.log.info("Mine lenChain0 blocks on node0")
node0.generate(self.lenChain0 - 1)
assert(node0.getblockcount() == self.FORK_ROOT_HEIGHT + self.lenChain0)
self.log.info("Mine competing lenChain1 blocks on node1")
node1.generate(self.lenChain1 - 1)
assert(node1.getblockcount() == self.FORK_ROOT_HEIGHT + self.lenChain1)
self.log.info("Connect nodes to force a reorg")
connect_nodes(self.nodes, 1, 0)
sync_blocks(self.nodes[0:2])
if self.lenChain1 > self.lenChain0:
assert(node0.getblockcount() == self.FORK_ROOT_HEIGHT + self.lenChain1)
else:
assert(node1.getblockcount() == self.FORK_ROOT_HEIGHT + self.lenChain0)
self.log.info("check that both nodes have the same chains")
lastblock0 = node0.getbestblockhash()
lastblock1 = node1.getbestblockhash()
assert(lastblock0 == lastblock1)
self.log.info("check that double-spends have been removed")
assert (self.contains_double_spends () == 0)
# Test 3: Assert that safemode has been reached
try:
node0.rpc.getbalance()
assert False, "Should not come to here, should raise exception in line above."
except JSONRPCException as e:
assert e.error["message"] == "Safe mode: Warning: The network does not appear to fully agree! Some miners appear to be experiencing issues. A large valid fork has been detected."
# Test 4: Assert that safemode is exited if the offending chain is invalidated
node0.invalidateblock(first_bad_block)
node0.ignoresafemodeforblock(first_bad_block)
balance = node0.rpc.getbalance()
        assert (balance is not None)
if __name__ == '__main__':
CompetingChainsTest().main()
|
bitcoin-sv/bitcoin-sv
|
test/functional/bsv-block-ds-attack.py
|
bsv-block-ds-attack.py
|
py
| 9,229 |
python
|
en
|
code
| 597 |
github-code
|
6
|
71261714748
|
# -*- coding: utf-8 -*-
__all__ = ('tianshou_imitation_policy',)
from utils.vec_data import VecData
from torch import nn
import torch
import gym
import numpy as np
from tianshou.data import Batch, to_torch
class tianshou_imitation_policy(nn.Module):
def __init__(self, network, lr, weight_decay, mode='pi'):
assert mode in ['pi', 'q', 'v']
super().__init__()
self._grad_step = 0
        self.observation_space = gym.spaces.Box(0, 1, shape=VecData.state_shape[1:], dtype=bool)
self.mode = mode
self.action_space = gym.spaces.Discrete(VecData.action_shape[1])
self.network = network
self.device = 'cpu'
        weight_decay_list = [param for name, param in network.named_parameters() if name[-4:] != 'bias' and "bn" not in name]
        no_decay_list = [param for name, param in network.named_parameters() if name[-4:] == 'bias' or "bn" in name]
        assert no_decay_list  # materialised as lists so this check is meaningful (a generator is always truthy)
parameters = [{'params': weight_decay_list},
{'params': no_decay_list, 'weight_decay': 0.}]
self.optim = torch.optim.Adam(parameters, lr=lr, weight_decay=weight_decay)
for m in network.modules():
if isinstance(m, nn.Linear):
nn.init.orthogonal_(m.weight)
if m.bias is not None:
nn.init.zeros_(m.bias)
def to(self, device):
self.device = device
return super().to(device)
def load(self, path):
state_dict = torch.load(path, map_location=self.device)
self.load_state_dict(state_dict)
def forward(self, batch, state=None, mask=None):
if mask is None:
return Batch(act=batch.obs.gt_action, state=state)
logits = self.network(batch)
return logits + (logits.min() - logits.max() - 20) * mask
def post_process_fn(self, batch, buffer, indices):
if hasattr(buffer, "update_weight") and hasattr(batch, "weight"):
buffer.update_weight(indices, batch.weight)
def update(self, sample_size, buffer, val=False):
batch, indices = buffer.sample(sample_size)
if type(batch) is dict:
batch = Batch(obs=batch)
obs = to_torch(batch.obs.obs, device=self.device).float().view(-1, 145, 4, 9)
mask = (~to_torch(batch.obs.mask, device=self.device)).float()
gt_action = to_torch(batch.obs.gt_action, device=self.device).long()
rew = to_torch(batch.obs.rew, device=self.device).float()
losses = []
if self.mode == 'pi':
if val:
logits = self(obs, mask=mask)
loss = nn.CrossEntropyLoss()(logits, gt_action)
losses.append(loss.item())
else:
for i in range(1):
self.optim.zero_grad()
logits = self(obs, mask=mask)
loss = nn.CrossEntropyLoss()(logits, gt_action)
loss.backward()
self.optim.step()
losses.append(loss.item())
self.post_process_fn(batch, buffer, indices)
return {("val-loss" if val else "loss"): losses, "eq-ratio": (logits.detach().argmax(dim=-1) == gt_action).float().mean().item()}
elif self.mode == 'q':
norm_rew = (-0.2 * (rew + rew.mean() * 3)).exp()
norm_rew = 0.4 * norm_rew / (1 + norm_rew).pow(2)
if val:
with torch.no_grad():
logits = self(obs, mask=mask).squeeze(1)
logit = torch.gather(logits.log_softmax(dim=-1), 1, gt_action.unsqueeze(1)).squeeze(0)
loss = (-logit.exp() * norm_rew).mean()
losses.append(loss.item())
else:
for i in range(1):
self.optim.zero_grad()
logits = self(obs, mask=mask).squeeze(1)
logit = torch.gather(logits.log_softmax(dim=-1), 1, gt_action.unsqueeze(1)).squeeze(0)
loss = (-logit.exp() * norm_rew).mean()
loss.backward()
self.optim.step()
losses.append(loss.item())
self.post_process_fn(batch, buffer, indices)
return {("val-loss" if val else "loss"): losses, "eq-ratio": (logits.detach().argmax(dim=-1) == gt_action).float().mean().item()}
elif self.mode == 'v':
# rew = rew * 0.1
# rew = rew.sgn()
if val:
with torch.no_grad():
logits = self.network(obs)
category = torch.empty_like(rew).long()
category[:] = 4
category[rew < 50] = 3
category[rew < 32] = 2
category[rew < 0] = 1
category[rew < -8] = 0
correct_ratio = (logits.argmax(dim=-1) == category).float().mean()
win_ratio = (logits.argmax(dim=-1)[category > 2] == category[category > 2]).float().mean()
loss = nn.CrossEntropyLoss()(logits, category)
# loss = (logits.squeeze(1) - rew * 0.1).pow(2).mean()
losses.append(loss.item())
else:
for i in range(1):
if self._grad_step % 5 == 0:
self.optim.zero_grad()
logits = self.network(obs)
category = torch.empty_like(rew).long()
category[:] = 4
category[rew < 50] = 3
category[rew < 32] = 2
category[rew < 0] = 1
category[rew < -8] = 0
correct_ratio = (logits.argmax(dim=-1) == category).float().mean()
win_ratio = (logits.argmax(dim=-1)[category > 2] == category[category > 2]).float().mean()
loss = nn.CrossEntropyLoss()(logits, category)
# loss = (logits.squeeze(1) - rew * 0.1).pow(2).mean()
loss.backward()
losses.append(loss.item())
self.post_process_fn(batch, buffer, indices)
self._grad_step += 1
if self._grad_step % 5 == 0:
self.optim.step()
# return {("val-loss" if val else "loss"): losses}
return {("val-loss" if val else "loss"): losses, "cr": [correct_ratio.item()] * 10, "wr": [win_ratio.item()] * 10}
def map_action(self, action):
return action
|
illusive-chase/ChineseStandardMahjong
|
learning/imitation.py
|
imitation.py
|
py
| 6,685 |
python
|
en
|
code
| 3 |
github-code
|
6
|
913555112
|
from coc import utils
from datetime import datetime
from discord.ext import commands, tasks
class DatabaseBackground(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.update.start()
def cog_unload(self):
self.update.cancel()
@commands.command(name="add_user")
async def add_user(self, ctx, player_tag):
"""Command is used to register a user to the database"""
player_tag = utils.correct_tag(player_tag)
player = await self.bot.coc.get_player(player_tag)
self.bot.dbconn.register_user((player.tag, player.name, player.town_hall, ))
await ctx.send(f"User added: {player.name}")
@tasks.loop(minutes=3.0)
async def update(self):
"""This method updates the database every 3 minutes"""
tags = self.bot.dbconn.get_players()
tag_list = [tag[0] for tag in tags]
async for player in self.bot.coc.get_players(tag_list):
self.bot.dbconn.update_donation((datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
player.tag,
player.get_achievement("Friend in Need").value))
@update.before_loop
async def before_update(self):
"""This method prevents the task from running before the bot is connected"""
await self.bot.wait_until_ready()
def setup(bot):
bot.add_cog(DatabaseBackground(bot))
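# Usage sketch (assumes the bot's command prefix is '!', which is configured elsewhere):
# a member would type `!add_user #PLAYERTAG` in Discord to register that player tag.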
|
wpmjones/coc_sample_bot
|
cogs/database_bg.py
|
database_bg.py
|
py
| 1,440 |
python
|
en
|
code
| 14 |
github-code
|
6
|
33831413289
|
from typing import List
def two_sum(lis: List[int], target: int):
dici = {}
for i, value in enumerate(lis):
objetive = target - value
if objetive in dici:
return [dici[objetive], i]
dici[value] = i
return []
print(two_sum([1, 2, 3, 4, 5, 6], 7))
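# Expected output: [2, 3] -- the first pair summing to 7 is nums[2] + nums[3] = 3 + 4.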
|
R0bertWell/interview_questions
|
reexercises/two_sum_target.py
|
two_sum_target.py
|
py
| 298 |
python
|
en
|
code
| 0 |
github-code
|
6
|
60098854
|
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import mv
class HPPs_dispersion():
def __init__(self,filename):
self.name = filename
def ReadData(self):
data_file_y = os.path.join(fr'./dispersion/y/n{self.name}.txt')
data_y = pd.read_csv(data_file_y, sep='\s+', header=None)
data_file_z = os.path.join(fr'./dispersion/z/n{self.name}.txt')
data_z = pd.read_csv(data_file_z, sep='\s+', header=None)
# print(data[0])
return data_y,data_z
def Figure(self):
data_y,data_z = HPPs_dispersion.ReadData(self)
# print(data_z)
plt.figure(dpi=200)
# print(data_y)
plt.title(f'{self.name}_dispersion')
# plt.style.use('seaborn-whitegrid')
plt.scatter(data_y[0], data_y[1],s=3,color='b',label='x-y direction')
plt.scatter(data_z[0], data_z[1],s=3,color='r',label='z direction')
        # smoothing / interpolation (currently disabled)
# x1_smooth = np.linspace(data_y[0].min(),data_y[0].max())
# y1_smooth = make_interp_spline(data_y[0],data_y[1],x1_smooth)
# plt.plot(x1_smooth,y1_smooth)
#legend
plt.xlabel('q(1/m)')
plt.ylabel('frequency(Hz)')
# Process(target=HPPs_dispersion.Figure(self)).start()
plt.legend()
# plt.show()
# plt.savefig(fr'./PNG/dispersion/{self.name}_dispersion_1e+8.png', dpi=200)
def savefig(self):
plt.savefig(fr'./PNG/dispersion/n{self.name}n.png', dpi=500)
print(fr'{self.name}'+" is saved")
# list = ['BN','BP','AlN','AlP']
list = ['BP','AlN','AlP','BN']
# for item in list:
# a1 =HPPs_dispersion(item)
# a1.ReadData()
# a1.Figure()
# a1.savefig()
# filename = 'BN'
for item in list:
# direction = 'z'
# srcfile_y = f'E:\py_project\Linux_connect\dispersion\bulk\{item}\y\dispersion.txt'
# dstfile_y = f'E:\py_project\Linux_connect\HPPP_plt\dispersion\y/{item}_bulk.txt'
# mv.mycopyfile(srcfile_y,dstfile_y)
# srcfile_z = f'E:\py_project\Linux_connect\dispersion\bulk\{item}\z\dispersion.txt'
# dstfile_z = f'E:\py_project\Linux_connect\HPPP_plt\dispersion\z\{item}_bulk.txt'
# mv.mycopyfile(srcfile_z,dstfile_z)
# plt.figure(dpi=200)
a1 =HPPs_dispersion(item)
a1.ReadData()
a1.Figure()
a1.savefig()
|
foreseefy/HPPP
|
HPPs_dispersion.py
|
HPPs_dispersion.py
|
py
| 2,318 |
python
|
en
|
code
| 0 |
github-code
|
6
|