| seq_id | text | repo_name | sub_path | file_name | file_ext | file_size_in_byte | program_lang | lang | doc_type | stars | dataset | pt |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| stringlengths 7-11 | stringlengths 156-1.7M | stringlengths 7-125 | stringlengths 4-132 | stringlengths 4-77 | stringclasses 6 values | int64 156-1.7M | stringclasses 1 value | stringclasses 38 values | stringclasses 1 value | int64 0-24.2k | stringclasses 1 value | stringclasses 1 value |
33390551810
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import gzip
from collections import defaultdict
import math
import scipy.optimize
import numpy
import string
import random
from sklearn import linear_model
import sklearn
# In[2]:
# This will suppress any warnings, comment out if you'd like to preserve them
import warnings
warnings.filterwarnings("ignore")
# In[3]:
# Check formatting of submissions
def assertFloat(x):
assert type(float(x)) == float
def assertFloatList(items, N):
assert len(items) == N
assert [type(float(x)) for x in items] == [float]*N
# In[4]:
answers = {}
# In[5]:
f = gzip.open("spoilers.json.gz", 'rt') # the file is gzip-compressed; gzip.open (imported above) reads it as text
# In[6]:
dataset = []
for l in f:
d = eval(l)
dataset.append(d)
# In[7]:
f.close()
# In[8]:
# A few utility data structures
reviewsPerUser = defaultdict(list)
reviewsPerItem = defaultdict(list)
for d in dataset:
u,i = d['user_id'],d['book_id']
reviewsPerUser[u].append(d)
reviewsPerItem[i].append(d)
# Sort reviews per user by timestamp
for u in reviewsPerUser:
reviewsPerUser[u].sort(key=lambda x: x['timestamp'])
# Same for reviews per item
for i in reviewsPerItem:
reviewsPerItem[i].sort(key=lambda x: x['timestamp'])
# In[9]:
# E.g. reviews for this user are sorted from earliest to most recent
[d['timestamp'] for d in reviewsPerUser['b0d7e561ca59e313b728dc30a5b1862e']]
# In[10]:
### 1
# In[11]:
def MSE(y, ypred):
return sum([(a-b)**2 for (a,b) in zip(y,ypred)]) / len(y)
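# A quick illustrative check of the MSE helper (added example, not in the original notebook):
# MSE([3, 4], [2.5, 5]) == ((3 - 2.5)**2 + (4 - 5)**2) / 2 == 0.625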
# In[12]:
# (a)
y = []
y_pred = []
for u in reviewsPerUser:
cur = []
reviews = reviewsPerUser[u]
for i in range(0, len(reviews) - 1):
cur.append(reviews[i]['rating'])
if len(cur) == 0:
continue
y_pred.append(sum(cur)/len(cur))
y.append(reviews[-1]['rating'])
answers['Q1a'] = MSE(y, y_pred)
assertFloat(answers['Q1a'])
# In[13]:
# (b)
y = []
y_pred = []
for u in reviewsPerItem:
cur = []
reviews = reviewsPerItem[u]
for i in range(0, len(reviews) - 1):
cur.append(reviews[i]['rating'])
if len(cur) == 0:
continue
y_pred.append(sum(cur)/len(cur))
y.append(reviews[-1]['rating'])
answers['Q1b'] = MSE(y, y_pred)
assertFloat(answers['Q1b'])
# In[14]:
### 2
answers['Q2'] = []
for N in [1,2,3]:
y = []
y_pred = []
for u in reviewsPerUser:
cur = []
reviews = reviewsPerUser[u]
for i in range(0, len(reviews) - 1):
cur.append(reviews[i]['rating'])
if len(cur) == 0:
continue
if len(cur) < N:
cur_new = cur
if len(cur) >= N:
cur_new = cur[-N:]
y_pred.append(sum(cur_new)/len(cur_new))
y.append(reviews[-1]['rating'])
answers['Q2'].append(MSE(y,y_pred))
# In[15]:
assertFloatList(answers['Q2'], 3)
# In[16]:
answers
# In[17]:
### 3a
# In[18]:
def feature3(N, u): # For a user u and a window size of N
cur = []
reviews = reviewsPerUser[u]
for i in range(0, len(reviews) - 1):
cur.append(reviews[i]['rating'])
feat = [1]
for n in range(1, N + 1):
feat.append(cur[-n])
return feat
# In[19]:
answers['Q3a'] = [feature3(2,dataset[0]['user_id']), feature3(3,dataset[0]['user_id'])]
# In[20]:
assert len(answers['Q3a']) == 2
assert len(answers['Q3a'][0]) == 3
assert len(answers['Q3a'][1]) == 4
# In[21]:
### 3b
answers['Q3b'] = []
def feat(N, u):
feat = [1]
data = reviewsPerUser[u]
for d in data[-N-1:-1]:
feat.append(d['rating'])
return feat
for N in [1,2,3]:
X = []
y = []
for u,data in reviewsPerUser.items():
if len(data) <= N:
continue
else:
X.append(feat(N,u))
y.append(data[-1]['rating'])
model = sklearn.linear_model.LinearRegression(fit_intercept=False)
model.fit(X, y)
y_pred = model.predict(X)
mse = MSE(y, y_pred)
answers['Q3b'].append(mse)
assertFloatList(answers['Q3b'], 3)
answers
# In[22]:
### 4a
globalAverage = [d['rating'] for d in dataset]
globalAverage = sum(globalAverage) / len(globalAverage)
def featureMeanValue(N, u): # For a user u and a window size of N
feat = [1]
data = reviewsPerUser[u]
if len(data) < N + 1:
if len(data) < 2:
for j in range(N):
feat.append(globalAverage)
elif len(data) >= 2:
rate = [review['rating'] for review in data[:-1]]
avg = sum(rate)/len(rate)
for i in range(len(data)-1):
feat.append(data[-i-2]['rating'])
for i in range(N-len(data)+1):
feat.append(avg)
else:
for i in range(N):
feat.append(data[-i-2]['rating'])
return feat
def featureMissingValue(N, u):
feat = [1]
data = reviewsPerUser[u]
if len(data) < N + 1:
if len(data) < 2:
for j in range(N):
feat.append(1)
feat.append(0)
elif len(data) >= 2:
for i in range(len(data)-1):
feat.append(0)
feat.append(data[- i - 2]['rating'])
for i in range(N + 1-len(data)):
feat.append(1)
feat.append(0)
else:
for i in range(N):
feat.append(0)
feat.append(data[-i-2]['rating'])
return feat
answers['Q4a'] = [featureMeanValue(10, dataset[0]['user_id']), featureMissingValue(10, dataset[0]['user_id'])]
answers
# In[23]:
answers['Q4b'] = []
for featFunc in [featureMeanValue, featureMissingValue]:
X = []
y = []
for user,rating in reviewsPerUser.items():
if len(rating) < 1:
continue
else:
X.append(featFunc(10,user))
y.append(rating[-1]['rating'])
model = linear_model.LinearRegression()
model.fit(X,y)
y_pred = model.predict(X)
mse = MSE(y, y_pred)
answers['Q4b'].append(mse)
# In[24]:
answers['Q4b']
# In[25]:
### 5
#(a)
def feature5(sentence):
feat = [1]
feat.append(len(sentence))
    feat.append(sentence.count('!')) # number of exclamation marks
feat.append(sum(i.isupper() for i in sentence))
return feat
X = []
y = []
for d in dataset:
for spoiler,sentence in d['review_sentences']:
X.append(feature5(sentence))
y.append(spoiler)
# In[26]:
answers['Q5a'] = X[0]
# In[27]:
###5(b)
mod = sklearn.linear_model.LogisticRegression( class_weight='balanced', C=1)
mod.fit(X,y)
predictions = mod.predict(X)
TP = sum([(p and l) for (p,l) in zip(predictions, y)])
FP = sum([(p and not l) for (p,l) in zip(predictions, y)])
TN = sum([(not p and not l) for (p,l) in zip(predictions, y)])
FN = sum([(not p and l) for (p,l) in zip(predictions, y)])
TPR = TP / (TP + FN)
TNR = TN / (TN + FP)
BER = 1 - 1/2 * (TPR + TNR)
answers['Q5b'] = [TP, TN, FP, FN, BER]
# In[28]:
assert len(answers['Q5a']) == 4
assertFloatList(answers['Q5b'], 5)
# In[29]:
### 6
def feature6(review):
review = review['review_sentences']
feat = [1]
for i in range(0, 5):
feat.append(review[i][0])
feat.append(len(review[5][1]))
    feat.append(review[5][1].count('!')) # number of exclamation marks in the sixth sentence (review[5] is a (spoiler, sentence) pair)
feat.append(sum(i.isupper() for i in review[5][1]))
return feat
# In[30]:
y = []
X = []
for d in dataset:
sentences = d['review_sentences']
if len(sentences) < 6: continue
X.append(feature6(d))
y.append(sentences[5][0])
# In[31]:
answers['Q6a'] = feature6(dataset[0])
answers
# In[32]:
answers['Q6a'] = X[0]
answers
# In[33]:
mod = sklearn.linear_model.LogisticRegression(class_weight='balanced', C = 1)
mod.fit(X,y)
predictions = mod.predict(X)
TP = sum([(p and l) for (p,l) in zip(predictions, y)])
FP = sum([(p and not l) for (p,l) in zip(predictions, y)])
TN = sum([(not p and not l) for (p,l) in zip(predictions, y)])
FN = sum([(not p and l) for (p,l) in zip(predictions, y)])
TPR = TP / (TP + FN)
TNR = TN / (TN + FP)
BER = 1 - 1/2 * (TPR + TNR)
answers['Q6b'] = BER
# In[34]:
assert len(answers['Q6a']) == 9
assertFloat(answers['Q6b'])
answers
# In[35]:
### 7
# In[36]:
# 50/25/25% train/valid/test split
Xtrain, Xvalid, Xtest = X[:len(X)//2], X[len(X)//2:(3*len(X))//4], X[(3*len(X))//4:]
ytrain, yvalid, ytest = y[:len(X)//2], y[len(X)//2:(3*len(X))//4], y[(3*len(X))//4:]
# In[37]:
def pipeline(reg, bers, BER_test):
mod = linear_model.LogisticRegression(class_weight='balanced', C=reg)
# 50/25/25% train/valid/test split
Xtrain, Xvalid, Xtest = X[:len(X)//2], X[len(X)//2:(3*len(X))//4], X[(3*len(X))//4:]
ytrain, yvalid, ytest = y[:len(X)//2], y[len(X)//2:(3*len(X))//4], y[(3*len(X))//4:]
mod.fit(Xtrain,ytrain)
ypredValid = mod.predict(Xvalid)
ypredTest = mod.predict(Xtest)
# validation
TP = sum([(a and b) for (a,b) in zip(yvalid, ypredValid)])
TN = sum([(not a and not b) for (a,b) in zip(yvalid, ypredValid)])
FP = sum([(not a and b) for (a,b) in zip(yvalid, ypredValid)])
FN = sum([(a and not b) for (a,b) in zip(yvalid, ypredValid)])
TPR = TP / (TP + FN)
TNR = TN / (TN + FP)
BER = 1 - 0.5*(TPR + TNR)
print("C = " + str(reg) + "; validation BER = " + str(BER))
    bers.append(BER)  # append in place; list.append returns None, so reassigning would overwrite the list
# test
TP = sum([(a and b) for (a,b) in zip(ytest, ypredTest)])
TN = sum([(not a and not b) for (a,b) in zip(ytest, ypredTest)])
FP = sum([(not a and b) for (a,b) in zip(ytest, ypredTest)])
FN = sum([(a and not b) for (a,b) in zip(ytest, ypredTest)])
TPR = TP / (TP + FN)
TNR = TN / (TN + FP)
BER = 1 - 0.5*(TPR + TNR)
    BER_test.append(BER)  # append in place; list.append returns None
return mod
# In[38]:
bers = []
BER_test = []
for c in [0.01, 0.1, 1, 10, 100]:
pipeline(c, bers, BER_test)
bers
BER_test
# In[39]:
bestC = 0.1
ber = 0.21299572460563176
answers['Q7'] = bers + [bestC] + [ber]
assertFloatList(answers['Q7'], 7)
answers
# In[40]:
### 8
def Jaccard(s1, s2):
numer = len(s1.intersection(s2))
denom = len(s1.union(s2))
if denom == 0:
return 0
return numer / denom
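# Jaccard similarity is len(intersection) / len(union);
# e.g. Jaccard({1, 2, 3}, {2, 3, 4}) = 2 / 4 = 0.5 (illustrative note, not in the original)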
# In[41]:
# 75/25% train/test split
dataTrain = dataset[:15000]
dataTest = dataset[15000:]
# In[42]:
# A few utilities
itemAverages = defaultdict(list)
ratingMean = []
for d in dataTrain:
itemAverages[d['book_id']].append(d['rating'])
ratingMean.append(d['rating'])
for i in itemAverages:
itemAverages[i] = sum(itemAverages[i]) / len(itemAverages[i])
ratingMean = sum(ratingMean) / len(ratingMean)
# In[43]:
reviewsPerUser = defaultdict(list)
usersPerItem = defaultdict(set)
for d in dataTrain:
u,i = d['user_id'], d['book_id']
reviewsPerUser[u].append(d)
usersPerItem[i].add(u)
# In[44]:
# From my HW2 solution, welcome to reuse
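# (Item-based collaborative filtering: the prediction is the item's average rating plus a
#  Jaccard-similarity-weighted average of the user's deviations from the averages of the other
#  items they rated; it falls back to the item average, then to the global mean, when there is
#  no overlap. Descriptive note added for clarity.)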
def predictRating(user,item):
ratings = []
similarities = []
for d in reviewsPerUser[user]:
i2 = d['book_id']
if i2 == item: continue
ratings.append(d['rating'] - itemAverages[i2])
similarities.append(Jaccard(usersPerItem[item],usersPerItem[i2]))
if (sum(similarities) > 0):
weightedRatings = [(x*y) for x,y in zip(ratings,similarities)]
return itemAverages[item] + sum(weightedRatings) / sum(similarities)
else:
# User hasn't rated any similar items
if item in itemAverages:
return itemAverages[item]
else:
return ratingMean
# In[45]:
predictions = [predictRating(d['user_id'], d['book_id']) for d in dataTest]
labels = [d['rating'] for d in dataTest]
# In[46]:
answers["Q8"] = MSE(predictions, labels)
assertFloat(answers["Q8"])
# In[ ]:
# In[56]:
### 9
item = [d['book_id'] for d in dataTrain]
data0, rating0 = [], []
for d in dataTest:
num = item.count(d['book_id'])
if num == 0:
data0.append([d['user_id'], d['book_id']])
rating0.append(d['rating'])
pred0 = [predictRating(u, i) for u, i in data0]
mse0 = MSE(pred0, rating0)
mse0
# In[57]:
data1, rating1 = [],[]
for d in dataTest:
num = item.count(d['book_id'])
if 1 <= num <= 5:
data1.append([d['user_id'], d['book_id']])
rating1.append(d['rating'])
pred1 = [predictRating(u, i) for u, i in data1]
mse1to5= MSE(pred1, rating1)
mse1to5
# In[58]:
data5, rating5 = [], []
for d in dataTest:
num = item.count(d['book_id'])
if num > 5:
data5.append([d['user_id'], d['book_id']])
rating5.append(d['rating'])
pred5 = [predictRating(u, i) for u, i in data5]
mse5 = MSE(pred5, rating5)
mse5
# In[ ]:
# In[50]:
answers["Q9"] = [mse0, mse1to5, mse5]
assertFloatList(answers["Q9"], 3)
answers
# In[51]:
### 10
# In[52]:
userAverages = defaultdict(list)
for d in dataTrain:
userAverages[d['user_id']].append(d['rating'])
for i in userAverages:
userAverages[i] = sum(userAverages[i]) / len(userAverages[i])
def predictRating(user,item):
ratings = []
similarities = []
for d in reviewsPerUser[user]:
i2 = d['book_id']
if i2 == item: continue
ratings.append(d['rating'] - itemAverages[i2])
similarities.append(Jaccard(usersPerItem[item],usersPerItem[i2]))
if (sum(similarities) > 0):
weightedRatings = [(x*y) for x,y in zip(ratings,similarities)]
return itemAverages[item] + sum(weightedRatings) / sum(similarities)
else:
# User hasn't rated any similar items
if item in itemAverages:
return itemAverages[item]
else:
# return RatingMean
if user in userAverages:
return userAverages[user]
else:
return ratingMean
item = [d['book_id'] for d in dataTrain]
data10, rating10 = [], []
for d in dataTest:
num = item.count(d['book_id'])
if num == 0:
data10.append([d['user_id'], d['book_id']])
rating10.append(d['rating'])
pred10 = [predictRating(u, i) for u, i in data10]
mse10 = MSE(pred10, rating10)
mse10
# In[59]:
answers["Q10"] = ("To improve the prediction function for unseen items, we can modify the predictRating function. Since previously the predictRating only use itemAverages for prediction function, we can add the userAverage to specify the condition and make mse smaller, inside of just categorize data into ratingMean. We can see that the mse become smaller for unseen data.", mse10)
assert type(answers["Q10"][0]) == str
assertFloat(answers["Q10"][1])
# In[60]:
answers
# In[55]:
f = open("answers_midterm.txt", 'w')
f.write(str(answers) + '\n')
f.close()
|
vivianchen04/Master-Projects
|
WebMining&RecommenderSystems/midterm.py
|
midterm.py
|
py
| 14,655 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32170276466
|
#
# demo.py
#
import argparse
import os
import numpy as np
import time
from modeling.deeplab import *
from dataloaders import custom_transforms as tr
from PIL import Image
from torchvision import transforms
from dataloaders.utils import *
from torchvision.utils import make_grid, save_image
import torch  # torch is used directly below (set_printoptions, cuda checks); import it explicitly rather than relying on the wildcard import above
torch.set_printoptions(profile="full")
def main():
parser = argparse.ArgumentParser(description="PyTorch DeeplabV3Plus Training")
    parser.add_argument('--in-path', type=str, default=r'D:\PT\archive\Testingset10class\dataset\结果\test_96_label',
help='image to test')
    parser.add_argument('--out-path', type=str, default=r'D:\PT\archive\Testingset10class\dataset\结果\96', help='mask image to save')
parser.add_argument('--backbone', type=str, default='mobilenet',
choices=['resnet', 'xception', 'drn', 'mobilenet'],
                        help='backbone name (default: mobilenet)')
    parser.add_argument('--ckpt', type=str, default=r'D:\PT\超分辨率语义分割\模型保存\10_class\dsrl\128/model_best.pth.tar',
help='saved model')
parser.add_argument('--out-stride', type=int, default=16,
                        help='network output stride (default: 16)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--gpu-ids', type=str, default='0',
help='use which gpu to train, must be a \
comma-separated list of integers only (default=0)')
parser.add_argument('--dataset', type=str, default='rockdataset',
choices=['pascal', 'coco', 'cityscapes', 'rockdataset'],
                        help='dataset name (default: rockdataset)')
parser.add_argument('--crop-size', type=int, default=96,
help='crop image size')
parser.add_argument('--num_classes', type=int, default=11,
                        help='number of classes (default: 11)')
parser.add_argument('--sync-bn', type=bool, default=None,
help='whether to use sync bn (default: auto)')
parser.add_argument('--freeze-bn', type=bool, default=False,
help='whether to freeze bn parameters (default: False)')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
if args.cuda:
try:
args.gpu_ids = [int(s) for s in args.gpu_ids.split(',')]
except ValueError:
raise ValueError('Argument --gpu_ids must be a comma-separated list of integers only')
if args.sync_bn is None:
if args.cuda and len(args.gpu_ids) > 1:
args.sync_bn = True
else:
args.sync_bn = False
composed_transforms = transforms.Compose([
tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
tr.ToTensor()])
for name in os.listdir(args.in_path):
image = Image.open(args.in_path + "/" + name).convert('RGB')
# image = Image.open(args.in_path).convert('RGB')
target = Image.open(args.in_path + "/" + name)
sample = {'image': image, 'label': target}
tensor_in = composed_transforms(sample)['label'].unsqueeze(0)
print(tensor_in.shape)
grid_image = make_grid(decode_seg_map_sequence(tensor_in.detach().cpu().numpy()),
3, normalize=False, range=(0, 255))
save_image(grid_image, args.out_path + "/" + "{}_label.png".format(name[0:-4]))
# save_image(grid_image, args.out_path)
# print("type(grid) is: ", type(grid_image))
# print("grid_image.shape is: ", grid_image.shape)
print("image save in in_path.")
if __name__ == "__main__":
main()
# python demo.py --in-path your_file --out-path your_dst_file
|
AlisitaWeb/SSRN
|
ceshi_label.py
|
ceshi_label.py
|
py
| 3,929 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38364671161
|
# Representation of sets (union-find)
# Supports a union operation and a query asking whether two elements belong to the same set
# 0 a b  (merge the set containing a with the set containing b)
# 1 a b  (check whether a and b belong to the same set)
import sys
input = sys.stdin.readline
sys.setrecursionlimit(10**9)
n, m = map(int, input().split())
# initialize the parent table so that every node is its own parent
parent = [i for i in range(n+1)]
# recurse until the root node is found (with path compression)
def find(x):
if parent[x] != x:
parent[x] = find(parent[x])
return parent[x]
# union: merge the sets containing x and y
def union(x,y):
x = find(x)
y = find(y)
if x < y:
parent[y] = x
else:
parent[x] = y
for i in range(m):
num, a, b = map(int, input().split())
    # if the operation is 0, perform a union
if num == 0:
union(a, b)
elif num == 1:
        # if a and b belong to the same set
if find(a) == find(b):
print('YES')
else:
print('NO')
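# Illustration of the expected behaviour (hypothetical input, not taken from the judge's data):
#   n=3, m=3 with the operations "0 1 2", "1 1 2", "1 1 3"
#   union(1, 2) merges the two sets, so the second operation prints YES and the third prints NO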
|
jy9922/AlgorithmStudy
|
Baekjoon/1717번 집합의 표현.py
|
1717번 집합의 표현.py
|
py
| 988 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
4391450776
|
from flask import Flask, render_template, request
import sqlite3
app = Flask(__name__)
@app.route('/',methods = ['POST', 'GET'])
def home():
if request.method == 'GET':
return render_template('index.html')
@app.route('/thankyou',methods = ['POST', 'GET'])
def thankyou():
if request.method == 'GET':
return render_template('thankyou.html')
elif request.method == 'POST':
emailid = request.form.get('eid')
conn = sqlite3.connect("emailist")
cur=conn.cursor()
cur.execute("SELECT * from emails_table")
print(cur.fetchall())
#cur.execute('INSERT INTO emails_table (email) VALUES (?), ("[email protected]")')
#cur.execute("INSERT INTO movie VALUES(%s,%s)",(movID,Name))
cur.execute("INSERT INTO emails_table (email) VALUES (?)", (emailid,))
conn.commit()
conn.close()
return render_template('thankyou.html')
if __name__ == '__main__':
app.run(debug=True)
|
senthil-kumar-n/Subscribe_email
|
subscribe.py
|
subscribe.py
|
py
| 1,004 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20801798422
|
n, s = [int(i) for i in input().split()]
nums = [int(i) for i in input().split()]
f = False
i = 0
j = len(nums)-1
while i != j:
if nums[i] + nums[j] == s:
f = True
break
elif nums[i] + nums[j] > s:
j -= 1
else: i += 1
if f:
print("YES")
else: print("NO")
|
michbogos/olymp
|
eolymp/prep6/sum_of_2.py
|
sum_of_2.py
|
py
| 298 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71718726268
|
from . import Funktioner
import datetime
import MyModules.GUIclasses2 as GUI
import numpy as np
import os
from . import FileOps
#fix 14.04.10, simlk. Changed "centering error", which should make the test more forgiving at small distances - at large distances it has no effect.
# Last edit: 2012-01-09 fixed mtl test. Unified, 2012-04-23: fixed possible None return val in TestStretch
#Rejection criterion reflects a model variance for measurement of a stretch; it should correspond to a variance of half the parameter used in the test.
# Test is really - for two meas:
#par=prec=reject_par/2.....
#|diff|<2*sqrt(var_model(d,par)) - always linear in par.
#No more Centering err/ constant 'avoid zero' term!
#Thus the var-models are artificial close to zero. Instead a global min is defined (0,3 mm for now)!!!!!
GLOBAL_MIN_DEV=0.3 #twice precision on mean
def MTL_var_model_linear(dist,parameter):
dist=dist/1000.0
return (dist*parameter)**2
def MTL_var_model(dist,parameter):
dist=dist/1000.0
DLIM=0.2 #km
c_err=0 #divided by two below because 'precision' is (defined to be) half of 'reject par'
if dist<DLIM:
FKLIN=(np.sqrt(DLIM)*parameter-c_err*0.5)/DLIM
return (FKLIN*dist+c_err*0.5)**2
else:
return (parameter**2*dist)
def MGL_var_model(dist,parameter):
dist=dist/1000.0
c_err=0.0 #divided by two below because 'precision' is (defined to be) half of 'reject par'
return (np.sqrt(dist)*parameter+c_err*0.5)**2 #add a centering err....
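# Worked example of the resulting threshold (illustration only, not part of the original module):
# with program="MGL" and a reject parameter of 2.0 ne, precision = 1.0 ne, so for a 1000 m stretch
# MGL_var_model(1000, 1.0) = (sqrt(1.0)*1.0)**2 = 1.0 mm^2. GetMaxDev then returns
# max(sqrt(1.0), 0.5*GLOBAL_MIN_DEV) = 1.0 mm, i.e. two measurements are accepted when
# |diff| < 2*max_dev = 2.0 mm.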
class FBreject(object):
def __init__(self,database,program="MGL",parameter=2.0,unit="ne"):
if program=="MGL":
self.var_model=MGL_var_model
else:
if unit=="ne":
self.var_model=MTL_var_model
else:
self.var_model=MTL_var_model_linear
self.unit=unit
self.parameter=parameter
self.precision=parameter*0.5 #this is the correpsonding 'precision'
self.initialized=False
self.found=False
self.wasok=False
self.database=database
self.initialized=True
def GetData(self):
data=""
for key in list(self.database.keys()):
s=self.database[key]
data+="%s->%s: dist: %.2f m\n" %(key[0],key[1],s.dist)
for i in range(len(s.hdiffs)):
data+="dh: %.4f m tid: %s j-side: %s\n" %(s.hdiffs[i],s.times[i].isoformat().replace("T",","),s.jpages[i])
return data
def GetDatabase(self):
return self.database
def TestStretch(self,start,end,hdiff): #returns foundstretch,testresult,#found,msg
self.found=False
self.wasok=False
msg=""
key_back=(end,start)
key_forward=(start,end)
nforward=0
nback=0
hdiffs_all=np.empty((0,))
dists=[]
if key_back in self.database:
s_back=self.database[key_back]
nback+=len(s_back.hdiffs)
if nback>0:
dists.append(s_back.dist)
hdiffs_all=np.append(hdiffs_all,np.array(s_back.hdiffs)*-1.0)
if key_forward in self.database:
s_forward=self.database[key_forward]
nforward+=len(s_forward.hdiffs)
if nforward>0:
dists.append(s_forward.dist)
hdiffs_all=np.append(hdiffs_all,np.array(s_forward.hdiffs))
msg+="%s->%s er tidligere m\u00E5lt %d gang(e), og %d gang(e) i modsat retning.\n" %(start,end,nforward,nback)
nall=len(hdiffs_all)
if len(hdiffs_all)>0:
d=np.mean(dists)
norm_d=np.sqrt(d/1e3)
msg+="Afstand: %.2f m\n" %d
if len(hdiffs_all)>1:
raw_mean=np.mean(hdiffs_all)
raw_std=np.std(hdiffs_all,ddof=1)
raw_prec=raw_std/np.sqrt(len(hdiffs_all))
raw_max_diff=hdiffs_all.max()-hdiffs_all.min()
msg+="hdiff_middel: %.4f m, max-diff: %.2f mm (%.2f ne)\n" %(raw_mean,raw_max_diff*1000,raw_max_diff*1e3/norm_d)
msg+="std_dev: %.2f mm, std_dev(middel): %.2f mm (%.2f ne)\n" %(raw_std*1000,raw_prec*1000,raw_prec*1e3/norm_d)
msg+="\nEfter inds\u00E6ttelse af ny m\u00E5ling:\n"
hdiffs_new=np.append(hdiffs_all,[hdiff])
new_mean=np.mean(hdiffs_new)
new_std=np.std(hdiffs_new,ddof=1)
new_prec=new_std/np.sqrt(len(hdiffs_new))
new_max_diff=hdiffs_new.max()-hdiffs_new.min()
msg+="hdiff_middel: %.4f m, max-diff: %.2f mm (%.2f ne)\n" %(new_mean,new_max_diff*1000,new_max_diff*1e3/norm_d)
msg+="std_dev: %.2f mm, std_dev(middel): %.2f mm (%.2f ne)\n" %(new_std*1000,new_prec*1000,new_prec*1e3/norm_d)
msg+="\nForkastelsesparameter: %.3f %s." %(self.parameter,self.unit)
max_dev=self.GetMaxDev(d) #in mm!!
if len(hdiffs_new)==2:
msg+=" Vil acceptere |diff|<%.2f mm" %(2*max_dev)
isok=(new_prec*1e3<=max_dev)
self.found=True
self.wasok=isok
if isok:
msg+="\nDen samlede standardafvigelse p\u00E5 middel er OK.\n"
else:
msg+="\nDen samlede standarafvigelse p\u00E5 middel er IKKE OK\n"
msg+="Foretag flere m\u00E5linger!\n"
if len(hdiffs_all)>1 and new_prec>raw_prec: #or something more fancy
msg+="Den nye m\u00E5ling er tilsyneladende en outlier og kan evt. omm\u00E5les!\n"
isok=False
return True,isok,len(hdiffs_all),msg
else:
msg="%s->%s er ikke m\u00E5lt tidligere" %(start,end)
self.found=False
self.wasok=True
return True,True,0,msg
def GetMaxDev(self,dist): #max dev in mm!
return max(np.sqrt(self.var_model(dist,self.precision)),GLOBAL_MIN_DEV*0.5)
def InsertStretch(self,start,end,hdiff,dist,dato,tid,jside=""):
if not self.initialized:
return True #we havent done anyting
data=self.database
try:
start=start.strip()
end=end.strip()
key=(start,end)
m,h=Funktioner.GetTime(tid)
day,month,year=Funktioner.GetDate(dato)
date=datetime.datetime(year,month,day,h,m)
if key in data:
data[key].AddStretch(hdiff,dist,date,jside)
else:
data[key]=Stretch()
data[key].AddStretch(hdiff,dist,date,jside)
except Exception as msg:
print(repr(msg))
return False
else:
return True
def OutlierAnalysis(self):
data=self.database.copy()
msg=""
noutliers=0
nbad=0
keys=list(data.keys())
for key_forward in keys:
l_msg="%s->%s:" %key_forward
key_back=(key_forward[1],key_forward[0])
if not key_forward in data: #could happen since we delete stuff below
continue
s_forward=data[key_forward]
hdiffs_all=np.array(s_forward.hdiffs)
nforward=len(s_forward.hdiffs)
dists=[s_forward.dist]
nback=0
if key_back in data:
s_back=data[key_back]
nback=len(s_back.hdiffs)
if nback>0:
dists.append(s_back.dist)
hdiffs_all=np.append(hdiffs_all,np.array(s_back.hdiffs)*-1.0)
d=np.mean(dists)
l_msg+=" m\u00E5lt %d gange frem og %d gange tilbage." %(nforward,nback)
report=False
if len(hdiffs_all)>1:
std_dev=np.std(hdiffs_all,ddof=1)
m=np.mean(hdiffs_all)
#same test as above#
prec=std_dev/np.sqrt(len(hdiffs_all))
max_dev=self.GetMaxDev(d) #in mm
#print max_dev,prec
is_ok=(prec*1e3<=max_dev)
if not is_ok:
nbad+=1
report=True
l_msg+="\nForkastelseskriterie IKKE overholdt."
l_msg+="\nTilladt fejl p\u00E5 middel: %.2f mm, aktuel fejl: %.2f mm" %(max_dev,prec*1e3)
if len(hdiffs_all)>2:
dh=np.fabs(hdiffs_all-m)
outlier_limit=1.5*std_dev
if len(hdiffs_all)==3:
outlier_limit=1.1*std_dev
I=np.where(np.fabs(dh)>outlier_limit)[0]
if I.size>0:
report=True
l_msg+="\nOutliere:"
for i in I:
noutliers+=1
if i>nforward-1:
i-=nforward
s=s_back
else:
s=s_forward
l_msg+="\nHdiff: %.4f m, m\u00E5lt %s, journalside: %s" %(s.hdiffs[i],s.times[i].isoformat().replace("T"," "),s.jpages[i])
hdiffs_new=np.delete(hdiffs_all,i)
new_prec=np.std(hdiffs_new,ddof=1)/np.sqrt(len(hdiffs_new))
l_msg+="\nFejl p\u00E5 middel: %.2f mm, fejl p\u00E5 middel uden denne m\u00E5ling: %.2f mm" %(prec*1e3,new_prec*1e3)
if report:
msg+="\n"+"*"*60+"\n"+l_msg
#Finally delete that entry#
del data[key_forward]
if nback>0:
del data[key_back]
nprob=noutliers+nbad
if nprob==0:
return True,"Ingen problemer fundet"
lmsg="%*s %d\n" %(-42,"#overtr\u00E6delser af forkastelseskriterie:",nbad)
lmsg+="%*s %d\n" %(-42,"#outliere:",noutliers)
return False,lmsg+msg
def IsInitialized(self):
return self.initialized
def GetNumber(self):
return len(self.database)
def Disconnect(self):
pass
def GetPlotData(program="MGL",parameter=2.0,unit="ne"):
if program=="MGL":
var_model=MGL_var_model
else:
if unit=="ne":
var_model=MTL_var_model
else:
var_model=MTL_var_model_linear
dists=np.arange(0,1500,10)
precision=0.5*parameter #since parameter is 'reject-parameter' and we define precison as half of dat - man :-)
out=2*np.sqrt([var_model(x,precision) for x in dists])
return np.column_stack((dists,out))
def GetGlobalMinLine(program="MGL"):
dists=[0,400.0]
hs=[GLOBAL_MIN_DEV,GLOBAL_MIN_DEV]
return np.column_stack((dists,hs))
class Stretch(object):
def __init__(self):
self.hdiffs=[]
self.dist=0
self.times=[]
self.jpages=[]
def AddStretch(self,hdiff,dist,date,jpage=""):
n=float(len(self.hdiffs))+1
self.dist=self.dist*(n-1)/n+dist/n
self.hdiffs.append(hdiff)
self.times.append(date)
self.jpages.append(jpage)
def MakeRejectData(resfiles):
data=dict()
nstrk=0
nerrors=0
for file in resfiles:
heads=FileOps.Hoveder(file)
for head in heads:
try:
key=(head[0],head[1])
hdiff=float(head[5])
dist=float(head[4])
jside=head[6]
tid=head[3]
dato=head[2]
m,h=Funktioner.GetTime(tid)
day,month,year=Funktioner.GetDate(dato)
date=datetime.datetime(year,month,day,h,m)
except Exception as msg:
print(repr(msg),head)
nerrors+=1
else:
if key in data:
data[key].AddStretch(hdiff,dist,date,jside)
else:
data[key]=Stretch()
data[key].AddStretch(hdiff,dist,date,jside)
nstrk+=1
return data,nerrors
|
SDFIdk/nivprogs
|
MyModules/FBtest.py
|
FBtest.py
|
py
| 9,922 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16021983077
|
import numpy as np
import matplotlib.pyplot as plt
from cartoplot import cartoplot
import imageio
from netCDF4 import Dataset
import pickle
def get(string):
"""
"Lakes":0,
"Oceans":1,
"Okhotsk":2,
"Bering":3,
"Hudson":4,
"St Lawrence":5,
"Baffin":6,
"Greenland":7,
"Barents":8,
"Kara":9,
"Laptev":10,
"East Siberian":11,
"Chukchi":12,
"Beaufort":13,
"Canadian Archipelago":14,
"Central Arctic":15,
"Land":20,
"Coast":21}
"""
path_grid = "/home/robbie/Dropbox/Data/grid.nc"
if string == 'lon':
grid_data = Dataset(path_grid)
lon = np.array(grid_data.variables["lon"])
return(lon)
elif string == 'lat':
grid_data = Dataset(path_grid)
lat = np.array(grid_data.variables["lat"])
return(lat)
elif string == 'mask':
im = imageio.imread('J_Mask.tif')
mask = np.flipud(np.array(im))
return(mask)
def EASE():
"""
"Lakes":0,
"Oceans":1,
"Okhotsk":2,
"Bering":3,
"Hudson":4,
"St Lawrence":5,
"Baffin":6,
"Greenland":7,
"Barents":8,
"Kara":9,
"Laptev":10,
"East Siberian":11,
"Chukchi":12,
"Beaufort":13,
"Canadian Archipelago":14,
"Central Arctic":15,
"Land":20,
"Coast":21}
"""
mask = pickle.load( open( "/home/robbie/Dropbox/Code/mask_348x348.p", "rb" ) )
return(mask)
def OSISAF():
"""
"Lakes":0,
"Oceans":1,
"Okhotsk":2,
"Bering":3,
"Hudson":4,
"St Lawrence":5,
"Baffin":6,
"Greenland":7,
"Barents":8,
"Kara":9,
"Laptev":10,
"East Siberian":11,
"Chukchi":12,
"Beaufort":13,
"Canadian Archipelago":14,
"Central Arctic":15,
"Land":20,
"Coast":21}
"""
mask = pickle.load( open( "/home/robbie/custom_modules/mask_1120x760.p", "rb" ) )
return(mask)
def plot(region_string):
regions_dict = {"Lakes":0,
"Oceans":1,
"Okhotsk":2,
"Bering":3,
"Hudson":4,
"St Lawrence":5,
"Baffin":6,
"Greenland":7,
"Barents":8,
"Kara":9,
"Laptev":10,
"East Siberian":11,
"Chukchi":12,
"Beaufort":13,
"Canadian Archipelago":14,
"Central Arctic":15,
"Land":20,
"Coast":21}
code = regions_dict[region_string]
fig = plt.figure(figsize=(10, 8))
cartoplot(get('lon'), get('lat'), get('mask'),color_scale=(code+1,code-1))
print(code)
plt.show()
|
robbiemallett/custom_modules
|
mask.py
|
mask.py
|
py
| 3,596 |
python
|
en
|
code
| 3 |
github-code
|
6
|
31512983034
|
import os
import random
import string
CLIENT_SECRET = os.getenv("CLIENT_SECRET")
CLIENT_ID = os.getenv("CLIENT_ID")
SCOPE = "user-library-read playlist-modify-public playlist-modify-private ugc-image-upload"
REDIRECT_URI = "http://127.0.0.1:8080/callback" if os.getenv("LOCAL_DEV") else "https://spotify-recently-liked.herokuapp.com/callback"
AUTH_URL = "https://accounts.spotify.com/api/token"
STATE = os.getenv("STATE")
API_URL = "https://api.spotify.com/v1"
FLASK_PORT = 8080
PLAYLIST_DELETE_LIMIT = 100
DATABASE_URL = os.getenv("DATABASE_URL").replace("postgres://", "postgresql://", 1)
DEFAULT_PLAYLIST_NAME = "Recently liked"
|
rjshearme/spotify_recently_added_playlist
|
constants.py
|
constants.py
|
py
| 633 |
python
|
en
|
code
| 0 |
github-code
|
6
|
23432185259
|
#Count the Number of Words: Write a program that counts the number of words in a string.
def count_words(string):
# Remove leading and trailing whitespace
string = string.strip()
# Split the string into words
words = string.split()
# Return the count of words
return len(words)
# User interface & Test the function
print("Word Count Program")
print("------------------")
while True:
input_string = input("Enter a string (or 'q' to quit): ")
if input_string.lower() == 'q':
break
word_count = count_words(input_string)
print("The number of words in the string is:", word_count)
print()
print("Thank you for using the Word Count Program. Goodbye!")
#new_solution:
def CountOfWord(text):
return len([i for i in text.split() if i.isalpha()])
text=input("enter string: ")
print(CountOfWord(text))
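# Note on the difference between the two solutions (illustrative, not part of the original exercise):
# count_words("hello world 123") returns 3, while CountOfWord("hello world 123") returns 2,
# because the second version keeps only purely alphabetic tokens (str.isalpha).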
|
rezashokrzad/git_youtube_tutorial
|
Python Challenges/challenge13.py
|
challenge13.py
|
py
| 852 |
python
|
en
|
code
| 6 |
github-code
|
6
|
23944904707
|
def fib(n):
if n < 3:
return 1
else:
return fib(n - 1) + fib(n - 2)
def fast_fib(n):
if n < 3:
return 1
first = 1
second = 1
for i in range(3, n+1):
sum = first + second
first = second
second = sum
return second
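# Quick sanity check (illustrative addition; both functions define fib(1) = fib(2) = 1):
if __name__ == "__main__":
    assert fib(10) == fast_fib(10) == 55  # the two implementations agree on the 10th Fibonacci number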
|
mengruojun/pylearning
|
src/data_structure/other/other.py
|
other.py
|
py
| 291 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38217727284
|
from utils import pickle_load
from matplotlib import cm
import matplotlib.pyplot as plt
import collections
import numpy as np  # np is used throughout (nanmean, linspace, array_str) but was missing from the imports
def show_results(res_paths):
results = {}
for path in res_paths:
result = pickle_load(path)
for k, v in result.items():
if k not in results.keys():
results[k] = result[k]
results = collections.OrderedDict(sorted(results.items()))
fig, ax = plt.subplots(figsize=(9, 5.5))
colors = cm.Dark2(np.linspace(0, 1, len(results)))
count = 0
for k, res in results.items():
mean, std = np.nanmean(res, axis=0), np.nanstd(res, axis=0)
# ax.errorbar(np.arange(mean.shape[0]), mean, yerr=std, color=colors[count], label=k, fmt='-o')
plt.plot(np.arange(mean.shape[0]) + 1, mean, '-o', color=colors[count], label=k)
count += 1
print(np.array_str(mean[8:], precision=3))
print("Average precision of %s for future prediction: %f" % (k, mean[8:].mean()))
# Now add the legend with some customizations.
legend = ax.legend(loc='upper right')
ax.set_xlabel("time step")
ax.set_ylabel("average precision")
plt.axvline(x=8.5, color='r', linestyle='--')
plt.text(3, 0.1, 'tracking', fontsize=18, color='grey')
plt.text(11, 0.1, 'prediction', fontsize=18, color='grey')
plt.show()
def show_best(filename, metric, k=1):
def line_to_list(line):
exclude_next_line = lambda x: x[:-1] if x.endswith('\n') else x
        entries = [exclude_next_line(x) for x in line.split(',')]  # materialize as a list so it can be iterated more than once
        return entries
items = []
def print_dict(dic, attrs=None):
if attrs is None:
attrs = ['omega', 'noise_var', 'extent', metric, metric + ' mean']
if 'keep_motion' in dic and dic['keep_motion']:
attrs += ['window_size', 'initial_motion_factor', 'keep_motion_factor']
if 'blur_spatially' in dic and dic['blur_spatially']:
attrs += ['blur_extent', 'blur_var']
for k, v in dic.items():
if attrs is not None and k not in attrs:
continue
print("{}: {}".format(k, v))
with open(filename, 'r') as f:
line = f.readline()
#print(line)
attrs = line_to_list(line)
for i, line in enumerate(f):
#print(line)
values = line_to_list(line)
#print(values)
dict_ = {k: v for (k, v) in zip(attrs, values)}
items.append(dict_)
#print(items[0])
        items = sorted(items, key=lambda item: float(item[metric + ' mean']))  # values are read from the CSV as strings, so compare numerically
if metric == 'f1_score' or metric == 'average_precision':
items = items[::-1]
for i in range(k):
print("------- {}th best ------- ".format(i+1))
print_dict(items[i])
|
stomachacheGE/bofmp
|
tracking/scripts/show_best_parameter.py
|
show_best_parameter.py
|
py
| 2,753 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1633452952
|
from __future__ import print_function
from builtins import str
from optparse import OptionParser
import sys
from opendiamond.config import DiamondConfig
from opendiamond.protocol import PORT
from opendiamond.server.server import DiamondServer
# Create option parser
# pylint: disable=invalid-name
parser = OptionParser()
attrs = set()
def add_option(*args, **kwargs):
opt = parser.add_option(*args, **kwargs)
attrs.add(opt.dest)
# Configure options
# dest should reflect attr names in DiamondConfig
add_option('-d', dest='daemonize', action='store_true', default=False,
help='Run as a daemon')
add_option('-e', metavar='SPEC',
dest='debug_filters', action='append', default=[],
help='filter name/signature to run under debugger')
add_option('-E', metavar='COMMAND',
dest='debug_command', action='store', default='valgrind',
help='debug command to use with -e (default: valgrind)')
add_option('-f', dest='path',
help='config file')
add_option('-n', dest='oneshot', action='store_true', default=False,
help='do not fork for a new connection')
add_option('-p', dest='diamondd_port', default=PORT, help='accept new clients on port')
def run():
opts, args = parser.parse_args()
if args:
parser.error('unrecognized command-line arguments')
# Calculate DiamondConfig arguments
kwargs = dict([(attr, getattr(opts, attr)) for attr in attrs])
# If we are debugging, force single-threaded filter execution
if kwargs['debug_filters']:
kwargs['threads'] = 1
# Create config object and server
try:
config = DiamondConfig(**kwargs)
server = DiamondServer(config)
except Exception as e: # pylint: disable=broad-except
print(str(e))
sys.exit(1)
# Run the server
server.run()
if __name__ == '__main__':
run()
|
cmusatyalab/opendiamond
|
opendiamond/server/__main__.py
|
__main__.py
|
py
| 1,885 |
python
|
en
|
code
| 19 |
github-code
|
6
|
2893376277
|
# -*- encoding: UTF-8 -*-
from django.http import Http404
from django.db.models.loading import get_model
from django.contrib.staticfiles.storage import staticfiles_storage
from django.contrib.admin.views.decorators import staff_member_required
from django.core.urlresolvers import reverse
from django.shortcuts import render
from django.forms.widgets import Select
import models
def detail(request, app, cls, slug):
""" generic view that return direct CMS model rendered content """
model = get_model(app, cls)
if model and issubclass(model, models.CMSModel):
return model.get_response(request, slug)
raise Http404
@staff_member_required
def imagechooser(request, app, cls):
model = get_model(app, cls)
datas = {
'tinymce_path': staticfiles_storage.url('tiny_mce'),
'chosen_path': staticfiles_storage.url('chosen'),
'admin_path': staticfiles_storage.url('admin')
}
if model and issubclass(model, models.CMSModel):
if getattr(model.CMSMeta, 'image_model', None):
images = [('', '----')]
for fileitem in model.CMSMeta.image_model.objects.all().order_by('title'):
if fileitem.file.name.lower().endswith(('.jpg', '.jpeg', '.gif', '.png')):
images.append((fileitem.get_absolute_url(), fileitem.title))
datas['select_files'] = Select(choices=images, attrs={'class': 'chosen-single', 'style': 'width:200px'}).render('file', '')
#datas['form_upload'] = None
# gestion upload if any
# send result back
return render(request, 'imagechooser.html', datas)
@staff_member_required
def tinymcejs(request, app, cls):
datas = {
'tinymce_path': staticfiles_storage.url('tiny_mce'),
'imagechooser_path': reverse('picocms-imagechooser', args=(app, cls))
}
return render(request, 'tiny_mce_src.js', datas, content_type='application/javascript')
|
revolunet/django-picocms
|
picocms/views.py
|
views.py
|
py
| 1,926 |
python
|
en
|
code
| 4 |
github-code
|
6
|
19240250728
|
import tensorflow as tf
import tensorflow_datasets as tfds
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.compat.v1.Session(config=config)
def load_celeba_dataset(args, shuffle_files=False, batch_size=128):
ds_train, ds_test = tfds.load(name='celeb_a', split=['train', 'test'], data_dir=args.data_dir,
batch_size=batch_size, download=True, shuffle_files=shuffle_files)
return ds_train, ds_test
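# Minimal usage sketch (illustrative; the data directory path and the `args` object are assumptions):
# import argparse
# args = argparse.Namespace(data_dir='/path/to/tensorflow_datasets')
# ds_train, ds_test = load_celeba_dataset(args, batch_size=128)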
|
UCSC-REAL/fair-eval
|
celeba/experiments/data.py
|
data.py
|
py
| 452 |
python
|
en
|
code
| 5 |
github-code
|
6
|
71855094268
|
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn import datasets
from sklearn import linear_model
import matplotlib.pyplot as plt
def sigmoid(z):
return 1/(1+np.exp(-z))
def costfunction(X, y, w):
cost = 0
size = y.shape[0]
for i in range(size):
if y[i] == 1:
cost -= np.log(sigmoid(X[i]*w))
else:
cost -= np.log(1 - sigmoid(X[i]*w))
return cost / size
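# i.e. the averaged negative log-likelihood
# J(w) = -(1/m) * sum_i [ y_i*log(sigmoid(x_i.w)) + (1 - y_i)*log(1 - sigmoid(x_i.w)) ]
# (descriptive note added for clarity)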
def gradAscent(traindata,label,iter,alpha,step,lamda=0.001):
dataMat=np.mat(traindata)
labelMat=np.mat(label)
m,n=np.shape(dataMat)
weights=np.ones((n,1))
weights=np.mat(weights)
for k in range(iter):
temp=costfunction(dataMat,labelMat,weights)
weights=weights-alpha*((dataMat.transpose())*(sigmoid(dataMat*weights)-labelMat)+lamda*weights)
if k%200==0:
print("Loss is: ",temp,weights.transpose())
        if (k % step == 0 and k != 0):  # decay the learning rate every `step` iterations
alpha=alpha/5
return weights
def preprocessing(x_train,x_test):
sc=StandardScaler()
sc.fit(x_train)
x_train_scaled=sc.transform(x_train)
x_test_scaled=sc.transform(x_test)
return x_train_scaled,x_test_scaled
def split(ratio):
Data = datasets.load_iris()
#Data = datasets.load_wine() #for Dataset wine
x = Data.data
y=Data.target
x_train, x_test, y_train, y_test = train_test_split(x,y,test_size = ratio, random_state = 0)
return x_train,x_test,y_train,y_test
def plot(X,Y):
x_min, x_max = X[:, 0].min() - .2, X[:, 0].max() + .2
y_min, y_max = X[:, 1].min() - .2, X[:, 1].max() + .2
h = .02
logreg =linear_model.LogisticRegression(C=1e5, solver='lbfgs', multi_class='multinomial')
logreg.fit(X,Y)
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.show()
if __name__=='__main__':
x_train,x_test,y_train,y_test=split(0.3)
x_train_scaled,x_test_scaled=preprocessing(x_train,x_test)
#logreg=linear_model.LogisticRegression(C=1e4) #for ovr
logreg=linear_model.LogisticRegression(C=1e4,multi_class='multinomial',solver='lbfgs') #ovm
logreg.fit(x_train_scaled,y_train)
print("Accuracy:",logreg.score(x_test_scaled,y_test))
plot(x_train_scaled[:,:2],y_train)
|
Fred199683/Logistic-Regression
|
LR.py
|
LR.py
|
py
| 2,556 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3035542775
|
# given an integer array nums, handle multiple queries of the
# following type: calculate the sum of the elements of nums
# between indices left and right inclusive where left <= right
class prefix_sum:
def __init__(self,arr):
self.arr = arr
prefix = []
total = 0
for i in range(len(arr)):
total += arr[i]
prefix.append(total)
self.prefix = prefix
def range_sum(self,left,right):
if left-1<0:
left_val = 0
else:
left_val = self.prefix[left-1]
right_val = self.prefix[right]
return right_val-left_val
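# Minimal usage sketch (illustrative addition, not part of the original file):
if __name__ == "__main__":
    ps = prefix_sum([1, 2, 3, 4, 5])   # prefix sums become [1, 3, 6, 10, 15]
    print(ps.range_sum(1, 3))          # 2 + 3 + 4 = 9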
|
estimatrixPipiatrix/decision-scientist
|
key_algos/class_prefix_sum.py
|
class_prefix_sum.py
|
py
| 632 |
python
|
en
|
code
| 0 |
github-code
|
6
|
44496456290
|
import json
import logging
import os
import random
import time
from datetime import datetime
from uuid import uuid4
import paho.mqtt.client as mqtt
# MQTT broker details
BROKER_ADDRESS = os.getenv("BROKER_HOST")
BROKER_PORT = 1883
# Configuring file handler for logging
log_file = f"{__file__}.log"
# Logging setup
logging.basicConfig(
filename=log_file,
filemode="w",
format="%(asctime)s - %(levelname)s - %(message)s",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
# Creating unique sensor IDs for each sensor
temp_sensor_id = str(uuid4())
hum_sensor_id = str(uuid4())
# Simulated sensor data generation for temperature
def generate_temperature_data() -> dict:
"""
Generate random temperature data.
Returns:
dict: Generated sensor data.
"""
temperature = round(20 + (30 * random.random()), 2)
timestamp = datetime.utcnow().isoformat() # ISO8601 format
data = {
"sensor_id": temp_sensor_id,
"topic": "temperature",
"value": temperature,
"timestamp": timestamp,
}
return data
def generate_humidity_data() -> dict:
"""
Generate random humidity data.
Returns:
dict: Generated sensor data.
"""
humidity = round(40 + (60 * random.random()), 2)
timestamp = datetime.utcnow().isoformat()
data = {
"sensor_id": hum_sensor_id,
"topic": "humidity",
"value": humidity,
"timestamp": timestamp,
}
return data
def on_publish(client, userdata, mid):
"""
MQTT on_publish callback function.
Args:
client: The MQTT client instance.
userdata: User data.
mid: Message ID.
"""
logger.info(f"Message Published: {mid}")
def on_connect(client, userdata, flags, rc):
"""
MQTT on_connect callback function.
Args:
client: The MQTT client instance.
userdata: User data.
flags: Flags.
rc: Return code.
"""
if rc == 0:
logger.info("Connected to Mosquitto MQTT Broker!")
else:
logger.error(f"Failed to connect, return code: {rc}")
# Create MQTT client instance
client = mqtt.Client()
client.on_connect = on_connect
client.on_publish = on_publish
# Connect to broker
client.connect(BROKER_ADDRESS, port=BROKER_PORT)
# Start the MQTT loop
client.loop_start()
try:
while True:
sensor_data_temp = generate_temperature_data()
sensor_data_hum = generate_humidity_data()
temperature_payload = json.dumps(sensor_data_temp)
humidity_payload = json.dumps(sensor_data_hum)
# Publishing the topics
client.publish("sensors/temperature", temperature_payload)
client.publish("sensors/humidity", humidity_payload)
        time.sleep(15)  # publish every 15 seconds
except KeyboardInterrupt:
logger.info("Publisher stopped.")
client.loop_stop()
client.disconnect()
|
SudeepKumarS/mqtt-sensor-api
|
mqtt-publisher/mqtt_publisher.py
|
mqtt_publisher.py
|
py
| 2,912 |
python
|
en
|
code
| 1 |
github-code
|
6
|
12900539476
|
from fastapi import APIRouter
from pydantic import BaseModel
from starlette.requests import Request
from ozz_backend import app_logger
from ozz_backend.persistence_layer import User
router = APIRouter(
prefix="/user",
tags=["user"],
# dependencies=[Depends(get_token_header)],
)
class UserOngoingOut(BaseModel):
user_id: str
mission_id: int
quest_id: int
@router.get('/test')
def test_api():
app_logger.info('test')
return {'test'}
@router.get('/user-ongoing', response_model=UserOngoingOut)
def get_ongoing_info(request: Request, user_id: int, mission_id: int):
app_logger.info(f'[{request.method}] {request.url}: {request.client.host}:{request.client.port}')
result = User.get_user_ongoing_info(user_id, mission_id)
ongoing = UserOngoingOut(user_id=result.user_id, mission_id=result.mission_id, quest_id=result.quest_id)
return ongoing
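# Example request (illustrative; the host, port and mount point are assumptions):
#   GET http://localhost:8000/user/user-ongoing?user_id=1&mission_id=2
#   -> {"user_id": "...", "mission_id": ..., "quest_id": ...}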
|
honeybeeveloper/plat_back
|
ozz_backend/api/user.py
|
user.py
|
py
| 895 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40449109187
|
import argparse
import json
EXAMPLE_USAGE = """
Example Usage via RLlib CLI:
rllib rollout /tmp/ray/checkpoint_dir/checkpoint-0 --run DQN
--env CartPole-v0 --steps 1000000 --out rollouts.pkl
Example Usage via executable:
./rollout.py /tmp/ray/checkpoint_dir/checkpoint-0 --run DQN
--env CartPole-v0 --steps 1000000 --out rollouts.pkl
"""
def create_parser(parser_creator = None):
#parser = argparse.ArgumentParser("Ray training with custom IG environment")
## parser for rollouts
parser_creator = parser_creator or argparse.ArgumentParser
parser = parser_creator(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="Roll out a reinforcement learning agent "
"given a checkpoint.",
epilog=EXAMPLE_USAGE)
parser.add_argument(
"--checkpoint", default='' ,type=str, help="Checkpoint from which to roll out.")
required_named = parser.add_argument_group("required named arguments")
required_named.add_argument(
"--run",
type=str,
required=True,
help="The algorithm or model to train. This may refer to the name "
"of a built-on algorithm (e.g. RLLib's DQN or PPO), or a "
"user-defined trainable function or class registered in the "
"tune registry.")
required_named.add_argument(
"--env", type=str, help="The gym environment to use.")
parser.add_argument(
"--no-render",
default=False,
action="store_const",
const=True,
help="Suppress rendering of the environment.")
parser.add_argument(
"--monitor",
default=False,
action="store_true",
help="Wrap environment in gym Monitor to record video. NOTE: This "
"option is deprecated: Use `--video-dir [some dir]` instead.")
parser.add_argument(
"--video-dir",
type=str,
default=None,
help="Specifies the directory into which videos of all episode "
"rollouts will be stored.")
parser.add_argument(
"--steps",
default=20000,
help="Number of timesteps to roll out (overwritten by --episodes).")
parser.add_argument(
"--episodes",
default=0,
help="Number of complete episodes to roll out (overrides --steps).")
parser.add_argument("--out", default=None, help="Output filename.")
parser.add_argument(
"--config",
default="{}",
type=json.loads,
help="Algorithm-specific configuration (e.g. env, hyperparams). "
"Gets merged with loaded configuration from checkpoint file and "
"`evaluation_config` settings therein.")
parser.add_argument(
"--save-info",
default=False,
action="store_true",
help="Save the info field generated by the step() method, "
"as well as the action, observations, rewards and done fields.")
parser.add_argument(
"--use-shelve",
default=False,
action="store_true",
help="Save rollouts into a python shelf file (will save each episode "
"as it is generated). An output filename must be set using --out.")
parser.add_argument(
"--track-progress",
default=False,
action="store_true",
help="Write progress to a temporary file (updated "
"after each episode). An output filename must be set using --out; "
"the progress file will live in the same folder.")
# save and restore file management
parser.add_argument(
"--policy-dir", type=str, help="folder name of the policy.", default="")
parser.add_argument(
"--experiment", type=str, help="chosen experiment to reload.", default="")
parser.add_argument(
"--ncheckpoint", type=str, help="chosen checkpoint to reload.", default="")
parser.add_argument(
"--heuristic-policy", type=bool, help="chosen checkpoint to reload.", default=False)
parser.add_argument(
"--static-targets", type=bool, help="chosen checkpoint to reload.", default=False)
parser.add_argument(
"--video_dir", type=str, help="chosen folder to save video.", default="")
parser.add_argument(
"--horizon", type=int, help="limit of timesteps.", default=40)
### Old arguments needs a cleanup
parser.add_argument("--scenario", type=str, default="simple_spread_assigned",
choices=['simple', 'simple_speaker_listener',
'simple_crypto', 'simple_push',
'simple_tag', 'simple_spread', 'simple_adversary', 'simple_spread_assigned',
'matlab_simple_spread_assigned','matlab_simple_spread_assigned_hardcoll', 'matlab_simple_spread_assigned_checkpoints'],
help="name of the scenario script")
parser.add_argument("--max-episode-len", type=int, default=100,
help="maximum episode length")
parser.add_argument("--num-episodes", type=int, default=60000,
help="number of episodes")
parser.add_argument("--num-adversaries", type=int, default=0,
help="number of adversaries")
parser.add_argument("--good-policy", type=str, default="maddpg",
help="policy for good agents")
parser.add_argument("--adv-policy", type=str, default="maddpg",
help="policy of adversaries")
# Core training parameters
parser.add_argument("--lr", type=float, default=1e-3,
help="learning rate for Adam optimizer")
parser.add_argument("--gamma", type=float, default=0.99,
help="discount factor")
# NOTE: 1 iteration = sample_batch_size * num_workers timesteps * num_envs_per_worker
parser.add_argument("--sample-batch-size", type=int, default=25,
help="number of data points sampled /update /worker")
parser.add_argument("--train-batch-size", type=int, default=1024,
help="number of data points /update")
parser.add_argument("--n-step", type=int, default=1,
help="length of multistep value backup")
parser.add_argument("--num-units", type=int, default=128,
help="number of units in the mlp")
parser.add_argument("--replay-buffer", type=int, default=1000000,
help="size of replay buffer in training")
parser.add_argument("--seed", type=int, default=100,
help="initialization seed for the network weights")
# Checkpoint
parser.add_argument("--checkpoint-freq", type=int, default = 10, #75,
help="save model once every time this many iterations are completed")
parser.add_argument("--local-dir", type=str, default="./ray_results",
help="path to save checkpoints")
parser.add_argument("--restore", type=str, default=None,
help="directory in which training state and model are loaded")
parser.add_argument("--in-evaluation", type=bool, default=False, help="trigger evaluation procedure")
# Parallelism
#parser.add_argument("--num-workers", type=int, default=0)
#parser.add_argument("--num-envs-per-worker", type=int, default=1)
#parser.add_argument("--num-gpus", type=int, default=0)
parser.add_argument("--num-workers", type=int, default=0) #0
parser.add_argument("--num-envs-per-worker", type=int, default=1) #1
parser.add_argument("--num-gpus", type=int, default=0) #0
#parser.add_argument("--num-cpus-per-worker", type=int, default=1)
parser.add_argument("--num-gpus-per-worker", type=int, default=0) #0
# From the ppo
parser.add_argument("--stop-iters", type=int, default=100)
parser.add_argument("--stop-timesteps", type=int, default=160000000)
# parser.add_argument("--stop-reward", type=float, default=7.99)
# For rollouts
parser.add_argument("--stop-iters-rollout", type=int, default=1)
parser.add_argument("--nagents", type=int, default=1)
parser.add_argument("--ntargets", type=int, default=1)
parser.add_argument("--nrobots", type=int, default=1)
# mode of hand-engineered comm. policy (-1 no hand-engineered)
parser.add_argument("--mode", type=int, default=-1)
parser.add_argument("--test", type=int, default=0, choices = [0,1], help="whether we want to test the policy or not")
parser.add_argument("--test-env", type=int, default=0, choices = [0,1], help="whether we want to act in the test environment or not")
parser.add_argument("--deterministic", type=int, default=1, choices=[0, 1],
help="enable exploration or not during execution")
return parser
|
tud-amr/AC-LCP
|
utils/parse_args_rollout.py
|
parse_args_rollout.py
|
py
| 8,847 |
python
|
en
|
code
| 2 |
github-code
|
6
|
15143757328
|
from atelier_4_ex1 import gen_list_random_int
import matplotlib.pyplot as plt
import numpy as np
import time ,random
def extract_elements_list(list_in_which_to_choose,int_nbr_of_element_to_extract=10):
list_in_which_to_choose_length,mix_length = len(list_in_which_to_choose),0
mixList = list()
while mix_length < int_nbr_of_element_to_extract :
random_ = gen_list_random_int(0,list_in_which_to_choose_length)
if random_ not in mixList :
mixList.append(random_)
mix_length += 1
else :
continue
return [ list_in_which_to_choose[elem] for elem in mixList ]
# Test your code
# def extract_elements_list2(list_in_which_to_choose,int_nbr_of_element_to_extract=10):
# list_in_which_to_choose_length,mix_length = len(list_in_which_to_choose),0
# mixList = list()
# while mix_length < int_nbr_of_element_to_extract :
# random_ = gen_list_random_int(0,list_in_which_to_choose_length)
# mixList.append(random_)
# mix_length += 1
# return [ list_in_which_to_choose[elem] for elem in mixList ]
# print(extract_elements_list( [ i for i in range(1,11)],4))
def pref_mix(func1,func2,lst,num=100):
result = ([],[])
for elem in lst :
data1 ,data2= [],[]
nb_elements = int(elem / 2)
for index in range(num) :
lst_elem = list(range(elem))
# first function
start = time.perf_counter()
func1(lst_elem,nb_elements)
end = time.perf_counter() - start
data1.append(end)
start = time.perf_counter()
func2(lst_elem,nb_elements)
end = time.perf_counter() - start
data2.append(end)
result[0].append(sum(data1)/len(data1))
result[1].append(sum(data2)/len(data2))
return result
list_test = [500,1000,2500,5000,7500]
result = pref_mix(extract_elements_list,random.sample, list_test ,100)
print(result)
# Here we describe the x-axis values
# (between 0 and 5 in 10 points)
fig, ax = plt.subplots()
# Plot the curves: the first argument gives the x coordinates,
# the second gives the y coordinates, and the optional third
# argument selects the colour and marker
ax.plot(list_test,result[0], 'bo-',label='extract_elements_list')
ax.plot(list_test,result[1], 'r*-',label='random.sample')
ax.set(xlabel='number of elements', ylabel='average time (s)',
       title='average execution time of extract_elements_list vs random.sample')
ax.legend(loc='upper center', shadow=True, fontsize='x-large')
#fig.savefig("test.png")
plt.show()
|
K-Ilyas/python
|
atelier_4/atelier_4_ex4.py
|
atelier_4_ex4.py
|
py
| 2,594 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11356022056
|
from netCDF4 import Dataset
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import argodb as argo
import research_tools as research
plt.ion()
plt.close('all')
dirtopo = '/datawork/fsi2/mars/DATA/BATHY/ETOPO2'
topofile = 'etopo2.nc'
dirtopo = '/net/alpha/exports/sciences/data/BATHYMETRIE/BATHYMETRIE'
topofile = 'ETOPO2v2c_f4.nc'
dirtile = '/net/libra/local/tmp/1/herry/tiles'
itile = 50
figsize = (9, 7)
reso = 0.5
argodic = research.read_argo_filter(itile)
minlon, maxlon, minlat, maxlat = argodic['LONMIN_NO_M'], argodic['LONMAX_NO_M'], argodic['LATMIN_NO_M'], argodic['LATMAX_NO_M']
lon = np.arange(reso*np.floor(minlon/reso), reso*np.floor(maxlon/reso)+reso, reso)
lat = np.arange(reso*np.floor(minlat/reso), reso*np.floor(maxlat/reso)+reso, reso)
#lon_deg, lat_deg = define_grid(minlon, maxlon, minlat, maxlat, reso_deg)
with Dataset('%s/%s' % (dirtopo, topofile)) as nc:
z = nc.variables['z'][:,:]
dl0 = 1/30. # 1/30deg for etopo
lontopo = np.arange(-180, 180+dl0, dl0)
lattopo = np.arange(-90, 90+dl0, dl0)
def get_idx_of_box(lontopo, lattopo, cell):
minlon, maxlon, minlat, maxlat = cell
ilon = [i for i, x in enumerate(lontopo) if (x>=minlon) and (x<=maxlon)]
jlon = [j for j, x in enumerate(lattopo) if (x>=minlat) and (x<=maxlat)]
return ilon[0], ilon[-1], jlon[0], jlon[-1]
domain = [minlon, maxlon, minlat, maxlat]
i0, i1, j0, j1 = get_idx_of_box(lontopo, lattopo, domain)
def average_topo_on_box(depth, cell):
""" average high resolution depth array on cell """
i0, i1, j0, j1 = get_idx_of_box(lontopo, lattopo, cell)
return np.mean(depth[j0:j1, i0:i1].ravel())
def box(cell, d=0):
x1, x2, y1, y2 = cell
plt.plot([x1-d, x1-d, x2+d, x2+d, x1-d],
[y1-d, y2+d, y2+d, y1-d, y1-d], 'k')
plt.figure(figsize=figsize)
plt.imshow(z[j0:j1, i0:i1],
origin='lower', extent=[minlon, maxlon, minlat, maxlat])
plt.axis('tight')
plt.colorbar()
reso = 0.5
lon = np.arange(minlon, maxlon, reso)
lat = np.arange(minlat, maxlat, reso)
nlon = len(lon)
nlat = len(lat)
bathy = np.zeros((nlat, nlon))
for j in range(nlat-1):
for i in range(nlon-1):
reso2 = reso*0.5
gridcell = [lon[i]-reso2, lon[i]+reso2, lat[j]-reso2, lat[j]+reso2]
box(gridcell)
get_idx_of_box(lontopo, lattopo, gridcell)
bathy[j, i] = average_topo_on_box(z, gridcell)
msk = bathy < 0
fig, ax = plt.subplots(2,1)
divider = make_axes_locatable(ax[0])
ax_cb = divider.new_horizontal(size="4%", pad=0.2)
im = ax[0].imshow(bathy,
origin='lower', interpolation='nearest',
extent=[minlon, maxlon, minlat, maxlat])
ax[0].set_title('tile #%03i' % itile)
fig.add_axes(ax_cb)
fig.colorbar(im, cax=ax_cb)
divider = make_axes_locatable(ax[1])
ax_cb = divider.new_horizontal(size="4%", pad=0.2)
ax[1].imshow(msk,
origin='lower', interpolation='nearest',
extent=[minlon, maxlon, minlat, maxlat])
|
pvthinker/pargopy
|
pargopy_v0/define_landmask.py
|
define_landmask.py
|
py
| 2,984 |
python
|
en
|
code
| 1 |
github-code
|
6
|
29756907883
|
# Author: Sirui Feng
'''
This file splits each review on periods and conjunctions.
'''
import re
import json
from textblob import TextBlob
from textblob.sentiments import NaiveBayesAnalyzer
import csv
from word_stemmer import word_stemmer
public_utilities_path = 'data/public_utilities.json'
def split_period(review):
'''
Splits sentences on periods.
'''
p = re.compile(r'[^\s\.][^\.\n]+')
sentences = p.findall(review)
return sentences
def split_conjunctions(sentence):
'''
    Splits each sentence on conjunctions.
    '''
    # Note: this list is informational only; the actual split uses the regex below.
    conjunctions = [';', 'for', 'and', 'nor', 'but', 'or', 'yet', 'so']
clause = re.split('; | and | nor | but | or | yet | so | although | despite | though | however | on the other hand | in contrast ', sentence)
clause = [x.strip() for x in clause]
clause = [x for x in clause if len(x) != 0]
return clause
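# Quick illustration (added; never called by the pipeline below): a made-up review is first
# split on periods, then each sentence is split on conjunctions.
def _demo_split():
    sample = "the service was great. the food was cold but the staff fixed it quickly."
    for sentence in split_period(sample):
        print(sentence, '->', split_conjunctions(sentence))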
def gen_sentences():
'''
    Reads in the sentences and splits on periods and conjunctions.
'''
with open(public_utilities_path) as datafile:
with open('data/full_data.csv', 'w') as outfile:
writer = csv.DictWriter(outfile, fieldnames = ['review_id', \
'business_id', 'user_id', 'stars', 'blob_polarity', 'review', \
'label'])
writer.writeheader()
i=0
for line in datafile:
i += 1
print(i)
row = json.loads(line)
review = row['text']
review = review.lower()
#split only on periods
sentences = split_period(review)
for s in sentences:
blob = TextBlob(s, analyzer = NaiveBayesAnalyzer())
polarity = blob.polarity
#s = word_stemmer(s)
writer.writerow({'review_id':row['review_id'], \
'business_id': row['business_id'], \
'user_id':row['user_id'], 'stars':row['stars'], \
'blob_polarity': polarity, 'review': s})
gen_sentences()
|
vi-tnguyen/textinsighters
|
gen_sentences.py
|
gen_sentences.py
|
py
| 1,757 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30099395988
|
import imaplib
import socket
class IMAP4WithTimeout(imaplib.IMAP4):
def __init__(self, address, port, timeout):
self._timeout = timeout
imaplib.IMAP4.__init__(self, address, port)
def open(self, host="", port=143, timeout=None):
# This is overridden to make it consistent across Python versions.
self.host = host
self.port = port
self.sock = self._create_socket(timeout)
self.file = self.sock.makefile("rb")
def _create_socket(self, timeout=None):
return socket.create_connection(
(self.host, self.port), timeout if timeout is not None else self._timeout
)
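# Usage sketch (added for illustration only; never called). "imap.example.com" is a
# placeholder host: IMAPClient normally constructs this class internally, passing the
# timeout through to every socket operation.
def _example_connect(host="imap.example.com", port=143, timeout=10):
    conn = IMAP4WithTimeout(host, port, timeout)
    return conn.welcome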
|
mjs/imapclient
|
imapclient/imap4.py
|
imap4.py
|
py
| 657 |
python
|
en
|
code
| 466 |
github-code
|
6
|
40483417494
|
import tkinter, threading
from tkinter import ttk
from interface.onglets.onglets_map import OngletsMap
from interface.onglets.onglets_packets import OngletsPackets
from interface.onglets.onglets_personnage import OngletsPersonnage
from interface.onglets.onglets_sorts import OngletsSorts
import time
class MainInterface(threading.Thread):
def __init__(self):
threading.Thread(None,self.launch).start()
        while True:
            time.sleep(1)
            # hasattr avoids an AttributeError raised while launch() is still building the tabs
            if hasattr(self, "ongletsSorts"):
                break
def set_character(self, character):
self.character = character
self.ongletsMap.set_character(character)
self.ongletsSorts.set_character(character)
self.ongletsPersonnage.set_character(character)
threading.Thread(None,self.character_statue).start()
def character_statue(self):
en_mouvement = tkinter.Label(self.main, bg="red", text = "En mouvement")
en_mouvement.place(relx=0.05, rely=0.05, relwidth=0.08, relheight=0.04)
en_recolte = tkinter.Label(self.main, bg="red", text = "En recolte")
en_recolte.place(relx=0.05, rely=0.10, relwidth=0.08, relheight=0.04)
en_combat = tkinter.Label(self.main, bg="red", text = "En combat")
en_combat.place(relx=0.05, rely=0.15, relwidth=0.08, relheight=0.04)
while True:
time.sleep(1)
if self.character.deplacement.ismouving:
en_mouvement.configure(bg = "Green")
else:
en_mouvement.configure(bg = "Red")
if self.character.isharvest:
en_recolte.configure(bg = "Green")
else:
en_recolte.configure(bg = "red")
if self.character.isfighting:
en_combat.configure(bg = "Green")
else:
en_combat.configure(bg = "red")
def launch(self):
self.main = tkinter.Tk()
self.main.title("LeafBot")
self.main.geometry('1200x900')
self.create_notebook()
self.main.mainloop()
def create_notebook(self):
self.onglets = tkinter.ttk.Notebook(self.main)
self.onglets.pack()
self.onglets.place(relx=0.15, rely=0.05, relwidth=0.83, relheight=0.83)
self.ongletsPackets = OngletsPackets(self.onglets)
self.ongletsPersonnage = OngletsPersonnage(self.onglets)
self.ongletsMap = OngletsMap(self.onglets)
self.ongletsSorts = OngletsSorts(self.onglets)
def base_start(self,character):
self.vita = tkinter.Label(self.main, bg="red", text = character.vie_actuelle +" / " + character.vie_max)
self.vita.pack()
self.vita.place(relx=0.20, rely=0.90, relwidth=0.08, relheight=0.08)
self.energie = tkinter.Label(self.main, bg="yellow", text = character.ennergie_actuelle +" / " + character.ennergie_max)
self.energie.pack()
self.energie.place(relx=0.40, rely=0.90, relwidth=0.08, relheight=0.08)
self.xp = tkinter.Label(self.main,bg="deep sky blue", text = character.xp_actuelle +" / " + character.xp_fin)
self.xp.pack()
self.xp.place(relx=0.60, rely=0.90, relwidth=0.1, relheight=0.08)
self.kamas = tkinter.Label(self.main, bg="orange", text = character.kamas)
self.kamas.pack()
self.kamas.place(relx=0.80, rely=0.90, relwidth=0.08, relheight=0.08)
if __name__ == "__main__":
MainInterface()
|
Azzary/LeafMITM
|
interface/main_interface.py
|
main_interface.py
|
py
| 3,458 |
python
|
en
|
code
| 3 |
github-code
|
6
|
137559983
|
# Define a function inversa() that computes the reversal of a string. For example, the string "estoy probando" should return the string "odnaborp yotse".
def inversa(cad1):
cad2 = ""
    for i in range(1, len(cad1)+1):  # start at 1 because there is no -0 index
cad2 += cad1[-i]
return cad2
assert(inversa('Hola como estas') == 'satse omoc aloH')
assert(inversa('123456789') == '987654321')
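# Side note (added for illustration, not part of the exercise): Python slicing gives the
# same result in a single step.
assert(inversa('Hola') == 'Hola'[::-1])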
|
solchusalin/frro-utn-soporte2019-05
|
practico_01/ejercicio-06.py
|
ejercicio-06.py
|
py
| 411 |
python
|
es
|
code
| 0 |
github-code
|
6
|
17043338534
|
# https://atcoder.jp/contests/past202004-open/tasks/past202004_h
N, M = list(map(int, input().split()))
A = []
for _ in range(N):
A.append(input())
group = []
for _ in range(11):
group.append([])
for i in range(N):
for j in range(M):
if A[i][j] == 'S':
n = 0
elif A[i][j] == 'G':
n = 10
else:
n = int(A[i][j])
group[n].append([i ,j])
# cost[i][j]: minimum total number of moves needed to reach cell (i, j)
cost = []
INF = 10**3
for i in range(N):
cost.append([INF]*M)
# Initial condition
si, sj = group[0][0]
cost[si][sj] = 0
for n in range(1, 11):
for i, j in group[n]:
for i2, j2 in group[n-1]:
cost[i][j] = min(cost[i][j], cost[i2][j2]+abs(i-i2)+abs(j-j2))
gi, gj = group[10][0]
if cost[gi][gj] == INF:
print(-1)
else:
print(cost[gi][gj])
|
atsushi-matsui/atcoder
|
middle/6-4-6.py
|
6-4-6.py
|
py
| 858 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28470996419
|
import os
import sys
from lockdoors import main
from lockdoors import sanitize
from lockdoors import shrts
from pathlib import Path
from datetime import datetime
from time import sleep
#VAR
yes = set(['yes', 'y', 'ye', 'Y'])
no = set(['no', 'n', 'nop', 'N'])
cwd = os.getcwd()
null = ""
###Cheatsheets
def revsh():
shrts.clscprilo()
print("\033[91mHere is the list of the files :\033[90m")
print("\033[92m")
os.system(" find " + shrts.getinstalldir() + "/REVERSE/CHEATSHEETS/ -type f")
print("\033[90m")
shrts.okrev()
#Tools
def radar2():
    radar2.title = "Radare2 : unix-like reverse engineering framework"
tool_dir = "/REVERSE/Tools/radar2"
shrts.prilogspc()
os.system("git clone https://github.com/radare/radare2.git " + shrts.getinstalldir() + tool_dir + null)
shrts.clscprilo()
print("\033[92m Radar2 Downlaoded successfully \033[90m")
shrts.spc()
print("\033[92m Check " + shrts.getinstalldir() + tool_dir +" Folder\033[90m")
shrts.okrev()
def virustotal():
virustotal.title = "VirusTotal tools"
tool_dir = "/REVERSE/Tools/virustotal"
if os.path.exists('/usr/local/bin/virustotal'):
shrts.prilogspc()
os.system("git clone https://github.com/botherder/virustotal.git " + shrts.getinstalldir() + tool_dir + null)
shrts.prilogspc()
print("\033[92m " + virustotal.title + "\033[90m")
shrts.spc()
key = sanitize.bash_escape_restrictor(input("\033[92mEnter the Virtustoal Api ? : \033[90m"))
outp = sanitize.bash_escape_restrictor(input("\033[92mEnter directory containing files to scan ? : \033[90m"))
os.system("python2 " + shrts.getinstalldir() + tool_dir + "/vt.py --key "+key+" " +outp)
shrts.okrev()
else:
shrts.prilogspc()
print("\033[92m " + virustotal.title + "\033[90m")
shrts.spc()
print("\033[91mDownloading ...\033[0m")
shrts.spc()
os.system("git clone https://github.com/botherder/virustotal.git " + shrts.getinstalldir() + tool_dir + null)
shrts.prilogspc()
print("\033[92m " + virustotal.title + "\033[90m")
shrts.spc()
shrts.prilogspc()
print("\033[91mInstalling ...\033[0m.")
shrts.spc()
os.system("""echo "#!/bin/bash" > /usr/local/bin/virustotal""")
os.system("""echo "#Dev : Sofiane Hamlaoui" >> /usr/local/bin/virustotal""")
os.system("echo python2 " + shrts.getinstalldir() + tool_dir + "/vt.py >> /usr/local/bin/virustotal")
os.system("chmod +x /usr/local/bin/virustotal")
print(("You can now use " + "\033[91m" + virustotal.title + "\033[90m" + " from Lockdoor [\033[92m Lockdoor \033[90m ]" ))
shrts.okrev()
def miasm():
miasm.title = "miasm : Reverse engineering framework"
tool_dir = "/REVERSE/Tools/miasm"
shrts.prilogspc()
os.system("git clone https://github.com/cea-sec/miasm.git " + shrts.getinstalldir() + tool_dir + null)
shrts.prilogspc()
os.system("cd " +shrts.getinstalldir() + tool_dir + " && python2 setup.py build")
os.system("cd " +shrts.getinstalldir() + tool_dir + " && python2 setup.py install")
shrts.spc()
print("\033[92m Miasm Downlaoded successfully \033[90m")
shrts.spc()
print("\033[92m Check " + shrts.getinstalldir() + tool_dir +" Folder\033[90m")
shrts.okrev()
def mirror():
mirror.title = "mirror : reverses the bytes of a file"
tool_dir = "/REVERSE/Tools/mirror"
shrts.prilogspc()
os.system("git clone https://github.com/guelfoweb/mirror.git " + shrts.getinstalldir() + tool_dir + null)
shrts.clr()
shrts.prilogspc()
print("\033[92m Mirror Downlaoded successfully \033[90m")
shrts.spc()
print("\033[92m Check " + shrts.getinstalldir() + tool_dir +" Folder\033[90m")
shrts.okrev()
def Dnspy():
Dnspy.title = "Dnspy : reverses the bytes of a file"
tool_dir = "/REVERSE/Tools/Dnspy"
shrts.prilogspc()
os.system("git clone https://github.com/0xd4d/dnSpy.git " + shrts.getinstalldir() + tool_dir + null)
shrts.clr()
shrts.prilogspc()
print("\033[92m Dnspy Downlaoded successfully \033[90m")
shrts.spc()
print("\033[92m Check " + shrts.getinstalldir() + tool_dir +" Folder\033[90m")
shrts.okrev()
def angrio():
angrio.title = "angrio : a python framework for analyzing binaries"
tool_dir = "/REVERSE/Tools/angrio"
shrts.prilogspc()
print("\033[92m Installing \033[90m")
shrts.spc()
os.system("pip install angr ")
shrts.clr()
shrts.prilogspc()
print("\033[92m Dnspy Downlaoded successfully \033[90m")
shrts.spc()
print("\033[92m Check Angr.io docs to learn more about the tool \033[90m")
print("\033[92m https://github.com/angr/angr-doc \033[90m")
shrts.okrev()
def dllrunner():
dllrunner.title = "Dllrunner : a smart DLL execution script for malware analysis"
tool_dir = "/REVERSE/Tools/dllrunner"
shrts.prilogspc()
os.system("git clone https://github.com/Neo23x0/DLLRunner " + shrts.getinstalldir() + tool_dir + null)
shrts.clr()
shrts.prilogspc()
print("\033[92m Dllrunner Downlaoded successfully \033[90m")
shrts.spc()
print("\033[92m Check "+ shrts.getinstalldir() + tool_dir + " Folder\033[90m")
shrts.okrev()
def yara():
yara.title = "YARA : a tool to identify and classify malwares "
tool_dir = "/REVERSE/Tools/yara"
shrts.prilogspc()
print("\033[92m Installing \033[90m")
shrts.spc()
os.system("pip install yara-python")
shrts.clr()
shrts.prilogspc()
print("\033[92m YARA Downlaoded successfully \033[90m")
shrts.spc()
print("\033[92m Check YARA Docs to learn more about the tool\033[90m")
print("\033[92m https://yara.readthedocs.io/en/latest/\033[90m")
shrts.okrev()
#Menu
def menu():
shrts.clscprilo()
print("""\033[94m
[ REVERSE ENGINEERING ]
Make A Choice :\033[90m
\033[91m -[!]----- Tools ------[!]-\033[90m
\033[93m1) Radar2
2) Virustotal
3) Miasm
4) Mirror
5) Dnspy
6) Angrio
7) DLLRunner
8) Yara\033[90m
\033[91m-[!]----- Cheatsheets ------[!]-\033[90m
\033[93m 9) Reverse Engineering Cheatsheets\033[90m
------------------------
\033[94mb) Back to ROOT MENU
q) Leave Lockdoor\033[94m
""")
choice = input("\033[92mLockdoor@ReverseEngineering~# \033[0m")
os.system('clear')
if choice == "1":
radar2()
elif choice == "2":
virustotal()
elif choice == "3":
miasm()
elif choice == "4":
mirror()
elif choice == "5":
Dnspy()
elif choice == "6":
angrio()
elif choice == "7":
dllrunner()
elif choice == "8":
yara()
elif choice == "9":
revsh()
elif choice == "b":
main.menu()
elif choice == "q":
shrts.prilogspc()
now = datetime.now()
dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
print(" \033[91m-[!]- LOCKDOOR IS EXITING -[!]-\033[0m")
shrts.spc()
print(" \033[91m-[!]- EXITING AT " + dt_string + " -[!]-\033[0m")
sys.exit()
elif choice == "":
menu()
else:
menu()
|
SofianeHamlaoui/Lockdoor-Framework
|
lockdoors/reverse.py
|
reverse.py
|
py
| 7,496 |
python
|
en
|
code
| 1,248 |
github-code
|
6
|
70818525628
|
#import Library
import speech_recognition as sr
# Initialize recognizer class
r = sr.Recognizer()
# Reading Audio file as source
# listening the audio file and store in audio_text variable
# The path should be correct
with sr.AudioFile('Sample.wav') as source:
audio = r.listen(source)
# Using exception handling in case the API could not be accessed successfully.
try:
# using google speech recognition
text = r.recognize_google(audio)
    print('Converted speech to text successfully!')
print(text)
except:
print('Could not access API, please run it again.')
|
CHAODENG/Project4
|
SpeechToText.py
|
SpeechToText.py
|
py
| 632 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40205551139
|
# encoding: utf-8
"""
GraphicInterface.py
Displays the op amp calculator
Dario Marroquin 18269 (dariomarroquin)
Pablo Ruiz 18259 (PingMaster99)
Version 1.0
Updated March 4, 2020
"""
from tkinter import *
from CalculationsModule import *
import matplotlib.pyplot as plt
import numpy as np
# Constants
TITLE_SIZE = 15
def calculate():
"""
Performs the op amp calculator calculations
"""
plt.clf()
inverter = int(opa.get()) == 1
point = vin.get()
try:
point = float(point)
except ValueError:
vin.delete(0, END)
point = None
# Needed data
populate_calculations()
function, result, real_value = calculate_opamp_function(point, inverter)
spline_result, spline_print = calculate_opamp_spline(point)
error = calculate_error(point, result, inverter)
spline_error = calculate_error(point, spline_result, inverter)
# Error comparison
print("Error mรญnimo cuadrado:", error, "%\nError trazadores cรบbicos: ", spline_error, "%\n\nTrazadores:\n",
spline_print, "\n\n")
    if type(result) is not str:
        result = str(round(result, 4))
if type(error) is not str:
error = str(round(error, 4)) + " %"
if function[0] > 0:
a0 = "+ " + str(round(function[0], 4))
elif function[0] < 0:
a0 = "- " + str(round(function[0], 4))[1:]
else:
a0 = ""
result_funcion["text"] = f"{round(function[1], 4)} * Vin {a0}"
result_ev["text"] = result
result_err["text"] = error
x_1 = np.linspace(0, 20)
y_1 = x_1 * real_value
y_2 = x_1 * function[1] + function[0]
# Results graph
plt.plot(x_1, y_1, label="Teรณrico")
plt.plot(x_1, y_2, label="Experimental")
plt.legend()
plt.title("Funciรณn teรณrica y experimental")
plt.xlabel("Vin")
plt.ylabel("Vout")
plt.show()
"""
GUI window with grid layout
"""
window = Tk()
window.columnconfigure(0, minsize=100)
window.columnconfigure(1, minsize=100)
window.columnconfigure(2, minsize=100)
window.columnconfigure(3, minsize=100)
window.columnconfigure(4, minsize=100)
window.columnconfigure(5, minsize=100)
window.columnconfigure(6, minsize=100)
window.columnconfigure(7, minsize=50)
window.rowconfigure(0, minsize=30)
window.rowconfigure(1, minsize=30)
window.rowconfigure(2, minsize=30)
window.rowconfigure(3, minsize=30)
window.rowconfigure(4, minsize=30)
window.rowconfigure(5, minsize=30)
window.rowconfigure(6, minsize=30)
window.rowconfigure(7, minsize=30)
"""
Titles
"""
title = Label(window, text="Calculadora de Op amps", bg="#595358", fg="white")
title.config(font=("Arial", 20))
title.grid(column=0, row=0, columnspan=8, sticky="we")
"""
Input
"""
vin = Entry(window, font="Arial 20")
vin.grid(row=1, column=4)
vin_title = Label(window, text="Vin", bg="#3891A6", fg="BLACK")
vin_title.config(font=("Arial", TITLE_SIZE))
vin_title.grid(row=1, column=3)
"""
RadioButton
"""
opa = StringVar(window, True)
# Dictionary to create multiple buttons
radio = {"Opamp Amplificador Inversor": True,
"Opamp Amplificador no inversor": False,
}
# Loop is used to create multiple Radiobuttons
# rather than creating each button separately
for (text, value) in radio.items():
Radiobutton(window, text=text, variable=opa, value=value).grid(columnspan=2, pady=(1, 0))
"""
Buttons
"""
calculate_button = Button(window, text="Calcular", padx=20, pady=10, command=calculate, bg="#99c24d")
calculate_button.config(font=("Arial", 15))
calculate_button.grid(row=2, column=6)
"""
Results
"""
result_funcion = Label(window)
result_funcion.grid(row=2, column=4)
rsf_title = Label(window, text="Funciรณn", bg="#3891A6", fg="BLACK")
rsf_title.config(font=("Arial", TITLE_SIZE))
rsf_title.grid(row=2, column=3)
result_ev = Label(window)
result_ev.grid(row=3, column=4)
rsev_title = Label(window, text="Vout", bg="#3891A6", fg="BLACK")
rsev_title.config(font=("Arial", TITLE_SIZE))
rsev_title.grid(row=3, column=3)
result_err = Label(window)
result_err.grid(row=4, column=4)
rserr_title = Label(window, text="Error (%)", bg="#3891A6", fg="BLACK")
rserr_title.config(font=("Arial", TITLE_SIZE))
rserr_title.grid(row=4, column=3)
"""
Circuit picture
"""
photo = PhotoImage(file=r"./OPAMPS.png")
image = Button(window, image=photo, padx=0, pady=0)
image.config(height=200, width=500)
image.grid(row=6, column=1, columnspan=5, pady=(0, 20))
"""
Window display
"""
window.geometry("980x500")
window.config(bg="#B2CEDE")
window.mainloop()
|
PingMaster99/MNOpampCalculator
|
GraphicInterface.py
|
GraphicInterface.py
|
py
| 4,699 |
python
|
en
|
code
| 0 |
github-code
|
6
|
23660254288
|
# -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
""" A Python logging library with super powers """
import sys
import textwrap
from os import getcwd, path as p
from argparse import RawTextHelpFormatter, ArgumentParser
from pickle import dump, load
from io import open
from functools import partial, lru_cache
from signal import signal, SIGINT
import pygogo as gogo
from dateutil.parser import parse as parse_date
from chakula import tail, __version__
from chakula.formatter import PLACEHOLDERS, Formatter
try:
from redisworks import Root as OldRoot
except ImportError:
OldRoot = object
DEF_TIME_FMT = '%Y/%m/%d %H:%M:%S'
DEF_INTERVAL = '300s'
CURDIR = p.basename(getcwd())
LOGFILE = '%s.log' % CURDIR
FIELDS = sorted(PLACEHOLDERS)
logger = gogo.Gogo(__name__, monolog=True).logger
examples = r'''
Format specifiers must have one the following forms:
%%(placeholder)[flags]s
{placeholder:flags}
Examples:
%(prog)s <url>
echo '<url>' | %(prog)s --reverse
%(prog)s -s pubdate -s title -s author <url1> <url2> <url3>
%(prog)s --interval 60s --newer "2011/12/20 23:50:12" <url>
%(prog)s --format '%%(timestamp)-30s %%(title)s\n' <url>
%(prog)s --format '%%(title)s was written on %%(pubdate)s\n' <url>
%(prog)s --format '{timestamp:<30} {title} {author}\n' <url>
%(prog)s --format '{timestamp:<20} {pubdate:^30} {author:>30}\n' <url>
%(prog)s --time-format '%%Y/%%m/%%d %%H:%%M:%%S' <url>
%(prog)s --time-format 'Day of the year: %%j Month: %%b' <url>
Useful flags in this context are:
%%(placeholder)-10s - left align and pad
%%(placeholder)10s - right align and pad
{placeholder:<10} - left align and pad
{placeholder:>10} - right align and pad
{placeholder:^10} - center align and pad
'''
available = textwrap.wrap('Available fields: {}'.format(', '.join(FIELDS)))
epilog = [textwrap.dedent(examples)] + available
def timespec(value):
"""Parse the 'timespec' option:
>>> timespec(1)
1
>>> timespec('5m')
300
>>> timespec('1h')
3600
"""
try:
return int(value)
except ValueError:
multiply = {'s': 1, 'm': 60, 'h': 3600}
suffix = value[-1]
msg = 'invalid timespec value {} - hint: 60, 60s, 1m, 1h'
if suffix in multiply:
try:
v = int(value[:-1])
return v * multiply[suffix]
except ValueError:
                raise ValueError(msg.format(value))
else:
raise ValueError(msg.format(value))
parser = ArgumentParser(
description='description: Tails 1 or more rss feeds',
prog='chakula',
usage='%(prog)s [options] <url> [<url> ...]',
formatter_class=RawTextHelpFormatter,
epilog='\n'.join(epilog),
)
parser.add_argument(
dest='urls', nargs='*', default=[sys.stdin],
help='The urls to tail (default: reads from stdin).')
i_help = 'Number of seconds between polling (default: {}).'
parser.add_argument(
'-i', '--interval', action='store', help=i_help.format(DEF_INTERVAL),
type=timespec, default=DEF_INTERVAL)
parser.add_argument(
'-N', '--iterations', action='store', type=int,
help='Number of times to poll before quiting (default: inf).')
parser.add_argument(
'-I', '--initial', action='store', type=int,
help='Number of entries to show (default: all)')
parser.add_argument(
'-n', '--newer', metavar='DATE', action='store',
help='Date by which entries should be newer than')
parser.add_argument(
'-s', '--show', metavar='FIELD', choices=FIELDS, action='append',
help='Entry field to display (default: title).', default=[])
t_help = "The date/time format (default: 'YYYY/MM/DD HH:MM:SS')."
parser.add_argument(
'-t', '--time-format', metavar='FORMAT', action='store',
default=DEF_TIME_FMT, help=t_help)
parser.add_argument(
'-F', '--format', action='store',
help='The output format (overrides other format options).')
parser.add_argument(
'-c', '--cache', action='store',
help='File path to store feed information across multiple runs.')
parser.add_argument(
'-r', '--reverse', action='store_true',
help='Show entries in reverse order.')
parser.add_argument(
'-f', '--fail', action='store_true', help='Exit on error.')
parser.add_argument(
'-u', '--unique', action='store_true', help='Skip duplicate entries.')
parser.add_argument(
'-H', '--heading', action='store_true', help='Show field headings.')
parser.add_argument(
'-v', '--version', help="Show version and exit.", action='store_true',
default=False)
parser.add_argument(
'-V', '--verbose', help='Increase output verbosity.', action='store_true',
default=False)
class Root(OldRoot):
def __init__(self, conn, return_object=True, *args, **kwargs):
super(Root, self).__init__(*args, **kwargs)
self.red = conn
self.return_object = return_object
self.setup()
get_root = lru_cache(maxsize=8)(lambda conn: Root(conn))
def sigint_handler(signal=None, frame=None):
logger.info('\nquitting...\n')
sys.exit(0)
def update_cache(path, extra, redis=False):
if redis:
root = get_root(path)
try:
items = extra.__dict__['_registry'].evaluated_items
except AttributeError:
root.extra = extra
else:
root.extra = items['root.extra']
return root.red
else:
with open(path, 'wb') as f:
dump(extra, f)
return path
def load_extra(path, redis=False):
if redis:
root = get_root(path)
extra = root.extra or {}
for k, v in extra.items():
v['updated'] = tuple(v.get('updated') or [])
v['modified'] = tuple(v.get('modified') or [])
else:
try:
with open(path, 'rb') as f:
extra = load(f)
except FileNotFoundError:
extra = {}
return extra
def run():
"""CLI runner"""
args = parser.parse_args()
kwargs = {'monolog': True, 'verbose': args.verbose}
logger = gogo.Gogo(__name__, **kwargs).get_logger('run')
signal(SIGINT, sigint_handler)
if args.version:
logger.info('chakula v%s' % __version__)
exit(0)
if args.newer:
newer = parse_date(args.newer).timetuple()
logger.debug('showing entries newer than %s', newer)
else:
newer = None
if args.format:
fmt = args.format.replace('\\n', '\n')
formatter = Formatter(fmt, args.time_format)
else:
show = args.show or ['title']
pargs = (show, args.time_format, args.heading)
formatter = Formatter.from_fields(*pargs)
logger.debug('using format: %r', formatter.fmt)
logger.debug('using time format: %r', formatter.time_fmt)
info = {
'seen': set() if args.unique else None, 'newer': newer,
'reverse': args.reverse, 'iterations': args.iterations,
'interval': args.interval, 'formatter': formatter,
'initial': args.initial, 'logger': logger, 'fail': args.fail}
first = args.urls[0]
if hasattr(first, 'isatty') and first.isatty(): # called with no args
# This doesn't work for scripttest though
parser.print_help()
sys.exit(0)
    elif hasattr(first, 'read'):  # piped into stdin
urls = first.read().splitlines()
else:
urls = args.urls
if args.cache:
extra = load_extra(args.cache)
info['tail_handler'] = partial(update_cache, args.cache)
else:
extra = {}
tail(urls, extra=extra, **info)
sys.exit(0)
if __name__ == '__main__':
run()
|
reubano/chakula
|
chakula/main.py
|
main.py
|
py
| 7,603 |
python
|
en
|
code
| null |
github-code
|
6
|
74535524027
|
from django.conf.urls import url
from . import views
app_name = 'api'
urlpatterns = [
url(r'^device/',views.device,name='api_device'),
url(r'^light/',views.light,name='api_light'),
url(r'^temperature/',views.temperature,name='api_temperature'),
url(r'^humidity/',views.humidity,name='api_humidity'),
url(r'^dirt_humidity/',views.dirt_humidity,name='api_dirt_humidity'),
url(r'^fertilization/',views.fertilization,name='api_fertilization'),
url(r'^water/',views.water,name='api_water'),
url(r'^schedule/',views.schedule,name='api_schedule'),
url(r'^user/',views.user,name='api_user'),
url(r'^.*', views.noSuchApi, name='api_no_such_api'),
]
|
CreeperSan/Graduation-Project
|
Web/field/api/urls.py
|
urls.py
|
py
| 699 |
python
|
en
|
code
| 50 |
github-code
|
6
|
29546342820
|
import graph
import unittest
class VertexColor:
"""
When doing a DFS, any node is in one of three states:
1. before being visited
2. during recursively visiting its descendants
3. after all its descendants have been visited and the recursion has backtracked from the vertex
"""
WHITE = 1 # State 1
GREY = 2 # State 2
BLACK = 3 # State 3
class ArticulatePoints:
def __init__(self, g):
"""
:param g: Graph Object
"""
self.g = g
self.d = [0] * g.V
# self.d[v] is the time when a vertex is discovered by a dfs (before visiting its descendants)
self.vertexColor = [VertexColor.WHITE] * g.V
self.low = [0] * g.V
# self.low[v] = min{d[v], d[w] : (u,w) is a back edge for some descendents u of v}
# So, low(v) is the discovery time of the vertex closest to the
# root and reachable from v by following zero or more edges
# downward, and then at most one back edge in a DFS tree
self.cnt = 0 # a increasing counter, increase by 1 when dfs visiting a node never visited before
self.is_articulate = [False] * g.V
self.dfs(-1, 0)
def __dfs(self, u, v):
"""A sample dfs with d and color traced.
:param u: the parent of v, None if v is the starting point of the dfs
:param v: a vertex
Note: we can also use parent[v] to keeps the record of parent of each vertex
"""
self.cnt += 1
self.vertexColor[v] = VertexColor.GREY
self.d[v] = self.cnt
for w in self.g.adj(v):
if self.vertexColor[w] == VertexColor.WHITE:
self.__dfs(v, w)
elif w != u: # is a back-edge but not incident with the parent of v
# self.vertexColor[w] can be VertexColor.GREY or VertexColor.Black
pass
self.vertexColor[v] = VertexColor.BLACK
def dfs(self, u, v):
"""Check if vertex v is articulate and update self.is_articulate[v]
:param u: the parent of v, None if v is the starting point of the dfs
:param v: a vertex
"""
self.cnt += 1
self.vertexColor[v] = VertexColor.GREY
self.d[v] = self.cnt
self.low[v] = self.cnt
childCount = 0
for w in self.g.adj(v):
if w == u:
continue
if self.vertexColor[w] == VertexColor.WHITE:
childCount += 1
self.dfs(v, w)
if self.d[v] <= self.low[w] and u != -1: # v not the root
self.is_articulate[v] = True
for w in self.g.adj(v):
if w == u:
continue
self.low[v] = min(self.low[w], self.low[v])
self.vertexColor[v] = VertexColor.BLACK
if u == -1 and childCount > 1:
self.is_articulate[v] = True
# root of DFS is an articulation point if it has more than 1 child
    def is_articulate_vertex(self, v):
        """
        Note: this was originally named is_articulate, but the boolean list attribute
        self.is_articulate set in __init__ shadows a method of that name on instances,
        so the method has been renamed to stay callable.
        :param v: vertex
        :return: return true if the vertex v is articulate
        """
        return self.is_articulate[v]
def get_articulate_vertices(self):
"""Suppose v is a non-root vertex of the DFS tree T,
Then v is an articulation point of G if and only if there is a child w of v
in DFS Tree T (Not in original Tree) with low(w) >= d[v]
Note: A point in a graph is called an Articulation Point or Cut-Vertex if upon removing that point let's say P,
there is at least one child(C) of it(P) , that is disconnected from the whole graph.
In other words at least one of P's child C cannot find a "back edge".
:return: a list of articulation vertices
"""
res = []
# if len(self.g.adj(0)) == 1:
# self.is_articulate[0] = False
for i, v in enumerate(self.is_articulate):
if self.is_articulate[i]:
res.append(i)
return res
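# Small self-contained check (added for illustration; independent of the unittest cases below).
# A path 0-1-2 has exactly one articulation point, vertex 1: removing it disconnects 0 from 2.
# The stub class only provides the two pieces of the Graph API used above (the vertex count
# `V` and the `adj` method); this is an assumption about the real graph.Graph interface.
class _PathGraph:
    def __init__(self):
        self.V = 3
        self._adj = {0: [1], 1: [0, 2], 2: [1]}
    def adj(self, v):
        return self._adj[v]
def _demo_path_graph():
    ap = ArticulatePoints(_PathGraph())
    assert ap.get_articulate_vertices() == [1]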
class TestSolution(unittest.TestCase):
def test_1(self):
g = graph.Graph.import_graph("input/1.txt")
s = ArticulatePoints(g)
assert list(s.get_articulate_vertices()) == [2, 3, 6]
def test_2(self):
g = graph.Graph.import_graph("input/2.txt")
s = ArticulatePoints(g)
assert sorted(s.get_articulate_vertices()) == [2, 3, 5, 6]
def test_3(self):
g = graph.Graph.import_graph("input/3.txt")
s = ArticulatePoints(g)
assert sorted(s.get_articulate_vertices()) == [0, 2, 4, 5]
unittest.main()
|
HeliWang/upstream
|
Graph/UndirectedDFS/find-articulate-points.py
|
find-articulate-points.py
|
py
| 4,563 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18481267232
|
import math
import time
if __name__ == '__main__':
start = time.time()
entries = [i.strip().split(',')
for i in open('Data/p099_base_exp.txt').readlines()]
max_val = 0
max_index = 0
for index, entry in enumerate(entries):
val = int(entry[1]) * math.log(int(entry[0]))
if val > max_val:
max_val = val
max_index = index + 1
print(max_index)
print("Calculated in:", time.time() - start)
|
BreadBug007/Project-Euler
|
Prob_99.py
|
Prob_99.py
|
py
| 433 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73580429947
|
import torch
import torchvision
import torchvision.datasets as datasets
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import math
def convert(imgf, labelf, outf, n):
f = open(imgf, "rb")
o = open(outf, "w")
l = open(labelf, "rb")
f.read(16)
l.read(8)
images = []
for i in range(n+1):
image = [ord(l.read(1))]
for j in range(28*28):
image.append(ord(f.read(1)))
images.append(image)
for image in images:
o.write(",".join(str(pix) for pix in image)+"\n")
f.close()
o.close()
l.close()
def visualize(index: int):
plt.title((train_labels[index]))
plt.imshow(train_data[index].reshape(28, 28), cmap=cm.binary)
plt.show()
def check_count_of_each_label():
    y_value = np.zeros((1, 10))
    for i in range(10):
        print("Occurrence of ", i, "=", np.count_nonzero(train_labels == i))
        y_value[0, i] = np.count_nonzero(train_labels == i)
y_value = y_value.ravel()
x_value = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
plt.xlabel('label')
plt.ylabel('count')
plt.bar(x_value, y_value, 0.7, color='g')
plt.show()
def sigmoid(x):
    return 1 / (1 + np.exp(-x))
def softmax(x):
    return np.exp(x) / np.sum(np.exp(x), axis=0)
def relu(x):
    # ReLU is used by the forward pass below but was never defined in the original file.
    return np.maximum(0, x)
def sigmoid_backward(dA, cache):
    Z = cache
    s = 1/(1+np.exp(-Z))
    dZ = dA * s * (1-s)
    assert (dZ.shape == Z.shape)
    return dZ
def relu_backward(dA, cache):
    # Gradient of ReLU: pass dA through where Z > 0, zero it elsewhere.
    Z = cache
    dZ = np.array(dA, copy=True)
    dZ[Z <= 0] = 0
    assert (dZ.shape == Z.shape)
    return dZ
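# Optional sanity check (added for illustration; never called during training): softmax applied
# column-wise should give columns that sum to 1, and sigmoid outputs should stay in (0, 1).
def _check_activations():
    z = np.array([[1.0, -2.0], [0.5, 3.0], [-1.0, 0.0]])
    assert np.allclose(np.sum(softmax(z), axis=0), 1.0)
    assert np.all((sigmoid(z) > 0) & (sigmoid(z) < 1))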
def softmax_backward(Z, cache):
    Z = cache
    m = Z.shape[1]  # number of examples (the original hard-coded 42000 here)
    dZ = np.zeros((m, 10))
    Z = np.transpose(Z)
    for row in range(0, m):
        den = (np.sum(np.exp(Z[row, :])))*(np.sum(np.exp(Z[row, :])))
        for col in range(0, 10):
            sums = 0
            for j in range(0, 10):
                if (j != col):
                    sums = sums+(math.exp(Z[row, j]))
            dZ[row, col] = (math.exp(Z[row, col])*sums)/den
    dZ = np.transpose(dZ)
    Z = np.transpose(Z)
    assert (dZ.shape == Z.shape)
    return dZ
# initializing the parameters weights and bias
def initialize_parameters_deep(layer_dims):
    # np.random.seed(1)
    parameters = {}
    L = len(layer_dims)  # number of layers in the network
    for l in range(1, L):
        # Small random weights scaled by 1/sqrt(fan-in); the original np.zeros call passed the
        # shape as two positional arguments (a TypeError) and all-zero weights would never learn.
        parameters['W' + str(l)] = np.random.randn(layer_dims[l],
                                                   layer_dims[l-1]) / np.sqrt(layer_dims[l-1])  # *0.01
        parameters['b' + str(l)] = np.zeros((layer_dims[l], 1))
    return parameters
# forward propagation
def linear_forward(A, W, b):
Z = np.dot(W, A) + b
cache = (A, W, b)
assert (Z.shape == (W.shape[0], A.shape[1]))
return Z, cache
def linear_activation_forward(A_prev, W, b, activation):
if activation == "sigmoid":
# Inputs: "A_prev, W, b". Outputs: "A, activation_cache".
Z, linear_cache = linear_forward(A_prev, W, b)
A, activation_cache = sigmoid(Z)
elif activation == "relu":
# Inputs: "A_prev, W, b". Outputs: "A, activation_cache".
Z, linear_cache = linear_forward(A_prev, W, b)
# print("Z="+str(Z))
A, activation_cache = relu(Z)
elif activation == "softmax":
# Inputs: "A_prev, W, b". Outputs: "A, activation_cache".
Z, linear_cache = linear_forward(A_prev, W, b)
A, activation_cache = softmax(Z)
cache = (linear_cache, activation_cache)
return A, cache
def L_model_forward(X, parameters):
caches = []
A = X
# number of layers in the neural network
L = len(parameters) // 2
for l in range(1, L):
A_prev = A
A, cache = linear_activation_forward(
A_prev, parameters['W' + str(l)], parameters['b' + str(l)], activation="relu")
caches.append(cache)
AL, cache = linear_activation_forward(
A, parameters['W' + str(L)], parameters['b' + str(L)], activation="softmax")
caches.append(cache)
return AL, caches
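# Illustrative helper (added; not used by the training script below): once the parameters are
# learned, inference is a single forward pass followed by an argmax over the 10 class scores.
def predict(X, parameters):
    AL, _ = L_model_forward(X, parameters)
    return np.argmax(AL, axis=0)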
# cost function
def compute_cost(AL, Y):
    m = Y.shape[1]
    # Average cross-entropy over the m examples (m was computed but left unused originally).
    cost = (-1./m) * np.sum(np.multiply(Y, np.log(AL)))
    # np.multiply(1 - Y, np.log(1 - AL)))
    # print("cost="+str(cost))
    return cost
# backward propagation
def linear_backward(dZ, cache):
A_prev, W, b = cache
m = A_prev.shape[1]
dW = 1./m * np.dot(dZ, A_prev.T)
db = (1/m)*np.sum(dZ, axis=1, keepdims=True)
dA_prev = np.dot(W.T, dZ)
return dA_prev, dW, db
def linear_activation_backward(dA, cache, activation):
linear_cache, activation_cache = cache
if activation == "relu":
# dZ = relu_backward(dA, activation_cache)
dA_prev, dW, db = linear_backward(dZ, linear_cache)
elif activation == "sigmoid":
dZ = sigmoid_backward(dA, activation_cache)
dA_prev, dW, db = linear_backward(dZ, linear_cache)
elif activation == "softmax":
dZ = softmax_backward(dA, activation_cache)
dA_prev, dW, db = linear_backward(dZ, linear_cache)
return dA_prev, dW, db
def L_model_backward(AL, Y, caches):
grads = {}
L = len(caches) # the number of layers
dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))
M = len(layers_dims)
current_cache = caches[M-2]
grads["dA"+str(M-1)], grads["dW"+str(M-1)], grads["db"+str(M-1)
] = linear_activation_backward(dAL, current_cache, activation="softmax") # M-1
for l in reversed(range(L-1)):
current_cache = caches[l]
dA_prev_temp, dW_temp, db_temp = linear_activation_backward(
grads["dA" + str(l + 2)], current_cache, activation="relu")
grads["dA" + str(l + 1)] = dA_prev_temp
grads["dW" + str(l + 1)] = dW_temp
grads["db" + str(l + 1)] = db_temp
return grads
# upgrade function for weights and bias
def update_parameters(parameters, grads, learning_rate):
for l in range(len_update-1):
parameters["W" + str(l+1)] = parameters["W" + str(l+1)] - \
(learning_rate*grads["dW" + str(l+1)])
parameters["b" + str(l+1)] = parameters["b" + str(l+1)] - \
(learning_rate*grads["db" + str(l+1)])
return parameters
def plot_graph(cost_plot):
    x_value = list(range(1, len(cost_plot)+1))
    # print(x_value)
    # print(cost_plot)
    plt.xlabel('iteration')
    plt.ylabel('cost')
    plt.plot(x_value, cost_plot, color='g')
def L_layer_model(X, Y, layers_dims, learning_rate, num_iterations, print_cost=False): # lr was 0.009
print("training...")
costs = []
cost_plot = np.zeros(num_iterations)
parameters = initialize_parameters_deep(layers_dims)
for i in range(0, num_iterations):
AL, caches = L_model_forward(X, parameters)
cost = compute_cost(AL, Y)
grads = L_model_backward(AL, Y, caches)
parameters = update_parameters(parameters, grads, learning_rate)
cost_plot[i] = cost
plot_graph(cost_plot)
return parameters
if __name__ == "__main__":
mnist = datasets.MNIST(
root='./data', download=True)
train = pd.DataFrame()
test = pd.DataFrame()
if os.path.exists('./data/MNIST/raw/mnist_train.csv'):
train = pd.read_csv("./data/MNIST/raw/mnist_train.csv")
else:
convert("./data/MNIST/raw/train-images-idx3-ubyte", "./data/MNIST/raw/train-labels-idx1-ubyte",
"./data/MNIST/raw/mnist_train.csv", 60000)
train = pd.read_csv("./data/MNIST/raw/mnist_train.csv")
if os.path.exists('./data/MNIST/raw/mnist_test.csv'):
test = pd.read_csv("./data/MNIST/raw/mnist_test.csv")
else:
convert("./data/MNIST/raw/t10k-images-idx3-ubyte", "./data/MNIST/raw/t10k-labels-idx1-ubyte",
"./data/MNIST/raw/mnist_test.csv", 10000)
test = pd.read_csv("./data/MNIST/raw/mnist_test.csv")
train_labels = np.array(train.loc[:, 'label'])
train_data = np.array(train.loc[:, train.columns != 'label'])
# visualize(0)
# check_count_of_each_label(train_labels)
# d = train_data.shape[1]
# d1 = 300
# # Shape of W1 is given by d1 * d where d1 is 300 and d is given by 784
# W1 = np.zeros((d1, d))
# # print(W1.shape)
# x1 = train_data[0]
# # print(x1.shape, x1)
# z1 = np.dot(W1, x1)
# # print(z1.shape, z1)
# a1 = sigmoid(z1)
# print('After sigmmoid activation shape is', a1.shape)
# W2 = np.zeros((10, d1))
# z2 = np.dot(W2, a1)
# # print(z2, z2.shape)
# y_pred = softmax(z2)
# # print(y_pred.shape)
# y_actual = train_labels[0]
# one_hot = np.zeros(10)
# one_hot[y_actual] = 1
# print(y_pred, one_hot)
# loss = - np.dot(one_hot, np.log(y_pred))
# print(loss)
###############################
    train_data = train_data.T  # transpose to (784, m); np.reshape would scramble the pixel order
train_label = np.zeros((10, 60000))
for col in range(60000):
val = train_labels[col]
for row in range(10):
if (val == row):
train_label[val, col] = 1
print("train_data shape="+str(np.shape(train_data)))
print("train_label shape="+str(np.shape(train_label)))
# n-layer model (n=3 including input and output layer)
layers_dims = [784, 300, 10]
len_update = len(layers_dims)
parameters = L_layer_model(train_data, train_label, layers_dims,
learning_rate=0.0005, num_iterations=35, print_cost=True)
print("training done")
|
smit-1999/NaiveBayes
|
nn.py
|
nn.py
|
py
| 9,372 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38762159073
|
import random
def aloitus() -> list:
    '''Prints the opening greetings and returns the players' names as a list'''
print("Heippa! Pelataan Yatzya!")
print()
players = int(input("Kuinka monta pelaajaa on mukana? (max 4): "))
pelaajat = []
i = 1
while i <= players:
name = input(f"{i}. pelaajan nimi? ")
pelaajat.append(name)
i += 1
return pelaajat
def luo_pistetaulukko(nimet: list) -> dict:
    '''Takes the players' names and returns a dictionary with each name (key) and its starting scores'''
    pisteet = {}
    for i in nimet:
        pisteet[i] = [0, 0, 0, 0, 0, 0, 0]
    return pisteet
def heita_kaikki() -> list:
    '''Returns 5 random numbers between 1 and 6 as a list and prints the results on screen'''
    for j in range(5):
        a = random.choice(range(1, 7))
        arvot.append(a)
    print(f"Saamasi silmäluvut ovat:")
print(f"1. noppa: {arvot[0]}")
print(f"2. noppa: {arvot[1]}")
print(f"3. noppa: {arvot[2]}")
print(f"4. noppa: {arvot[3]}")
print(f"5. noppa: {arvot[4]}")
print(f"Eli {arvot}")
print()
return arvot
def heita_uudestaan(nopat: str):
    '''Re-rolls new values for the dice the player selected'''
for i in range(len(nopat)):
for j in nopat[i]:
j = int(j) - 1
arvot[j] = random.choice(range(1, 7))
print()
print(f"Nyt silmรคlukusi ovat siis {arvot}")
print()
def tallenna_tulokset(tulokset: list):
    '''Saves the player's points to the score table at the end of the turn'''
print()
tulosta_pistetaulukko(x)
osa = int(input("Mihin kohtaan haluaisit tallentaa pisteesi? "))
if osa in (1,2,3,4,5,6):
maara = osa * arvot.count(osa)
elif osa == 7:
if arvot[0] == arvot[1] == arvot[2] == arvot[3] == arvot[4]:
maara = 50
else:
maara = 0
lista = pistetaulu[x]
lista[osa-1] = maara
print()
print("Nyt")
tulosta_pistetaulukko(x)
def tulosta_pistetaulukko(pelaaja: str):
    '''Prints the score table for the given player'''
print(f"Pelaajan {pelaaja} pistetaulukko:")
pisteet = pistetaulu[pelaaja]
print(f"1. Ykkรถset: {pisteet[0]}")
print(f"2. Kakkoset: {pisteet[1]}")
print(f"3. Kolmoset: {pisteet[2]}")
print(f"4. Neloset: {pisteet[3]}")
print(f"5. Vitoset: {pisteet[4]}")
print(f"6. Kutoset: {pisteet[5]}")
print(f"7. Yatzy: {pisteet[6]}")
print(f"Yhteensรค: {sum(pisteet)}")
def tallenna_peli():
    '''Saves the players' results to a file'''
with open("tulokset.txt", "a") as tiedosto:
tulos = ""
for x in osallistujat:
tulos = tulos + x + " pisteet: " + str(sum(pistetaulu[x])) + "\n"
tiedosto.write(tulos)
def vertaile_tuloksia():
    '''Prints each player's final score on screen'''
for x in osallistujat:
print(f"Pelaajan {x} loppupisteet olivat {sum(pistetaulu[x])}")
# The main program itself starts here:
osallistujat = aloitus()
pistetaulu = luo_pistetaulukko(osallistujat)
print()
for i in range(7):
for x in osallistujat:
print(f"Sinun vuorosi pelata, {x}.")
rolls = 3
print(f"Tรคllรค vuorolla heittoja on vielรค jรคljellรค {rolls}")
print()
ro11 = input("Heitรค nopat painamalla y. ")
if ro11 == "y" or ro11 == "Y":
arvot = []
heita_kaikki()
rolls = rolls - 1
while rolls >0:
print(f"Tรคllรค vuorolla heittoja on jรคljellรค {rolls}.")
print("Halutessasi voit heittรครค noppia uudelleen.")
print()
print("Anna uudelleen heitettรคvien noppien jรคrjestysnumerot (esim '134').")
print("0 - En heitรค uudelleen")
komento = input("Mitรค haluat siis tehdรค? ")
if komento == "0":
rolls = 0
else:
heita_uudestaan(komento)
rolls -= 1
tallenna_tulokset(arvot)
else:
print()
print("Et siis halua tehdรค vuoroasi,")
print("annetaanpa seuraavan pelaajan yrittรครค!")
print()
print()
print("Huh, nyt peli on viimein pelattu.")
vertaile_tuloksia()
print()
jatko = input("Haluatko tallentaa pisteet tiedostoon? (y/n) ")
if jatko == "y":
tallenna_peli()
print("Pisteet on nyt tallennettu")
print("Tรคssรค nykyiset pisteet!")
with open("tulokset.txt") as tiedosto:
for rivi in tiedosto:
print(rivi.strip())
print("Kiitos kun pelasit Yatzya!")
|
noorascode/MyFirstGame
|
Yatzy toimiva.py
|
Yatzy toimiva.py
|
py
| 4,742 |
python
|
fi
|
code
| 0 |
github-code
|
6
|
37009360389
|
"""
https://leetcode-cn.com/problems/regular-expression-matching/submissions/
ๆ่ทฏ๏ผ้ๅฝๆณ
2. ๅฆๆp[0] == {s[0], '.'}, ๅ้ๅฝp[1:], s[1:]
1. ๅฆๆlen(p) >= 2, p[1] == '*'ๅ๏ผ
A. ้ๅฝp[2:], s๏ผ ๅ่กจ็คบpๅๅ้ข็ๅญ็ฌฆๆชๅน้
B. ้ๅฝp, s[1:]๏ผๅ่กจ็คบ*ๅน้
ไบไธๆฌก๏ผ่ฟ่ก*็ไธไธๆฌกๅน้
"""
class Solution:
def isMatch(self, s: str, p: str) -> bool:
if not p:
return not s
first_match = bool(s) and p[0] in {s[0], '.'} # bool(s) means s not null
if len(p) >1 and p[1] == '*':
return (self.isMatch(s, p[2:])) or (first_match and self.isMatch(s[1:], p))
else:
return self.isMatch(s[1:], p[1:]) and first_match
s = Solution()
print(s.isMatch("aab", "c*a*b"))
|
wangluolin/Algorithm-Everyday
|
dp/10-Regular_Expression_Match.py
|
10-Regular_Expression_Match.py
|
py
| 775 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35619601544
|
from nltk.corpus import cmudict
words = cmudict.entries()
count = 0
for entry in words:
if len(entry[1]) > 1:
count += 1
# Percentage of words with more than one possible pronunciation
print(1.0 * count / len(words))
|
hmly/nlp-solutions
|
c-02/2-12_cmudict.py
|
2-12_cmudict.py
|
py
| 231 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2116122344
|
"""
A command line interface to the qcfractal server.
"""
import argparse
import signal
import logging
from enum import Enum
from math import ceil
from typing import List, Optional
import tornado.log
import qcengine as qcng
import qcfractal
from pydantic import BaseModel, BaseSettings, validator, Schema
from . import cli_utils
from ..interface.util import auto_gen_docs_on_demand
__all__ = ["main"]
QCA_RESOURCE_STRING = '--resources process=1'
logger = logging.getLogger("qcfractal.cli")
class SettingsCommonConfig:
env_prefix = "QCA_"
case_insensitive = True
extra = "forbid"
class AdapterEnum(str, Enum):
dask = "dask"
pool = "pool"
parsl = "parsl"
class CommonManagerSettings(BaseSettings):
"""
The Common settings are the settings most users will need to adjust regularly to control the nature of
task execution and the hardware under which tasks are executed on. This block is often unique to each deployment,
user, and manager and will be the most commonly updated options, even as config files are copied and reused, and
even on the same platform/cluster.
"""
adapter: AdapterEnum = Schema(
AdapterEnum.pool,
description="Which type of Distributed adapter to run tasks through."
)
tasks_per_worker: int = Schema(
1,
description="Number of concurrent tasks to run *per Worker* which is executed. Total number of concurrent "
"tasks is this value times max_workers, assuming the hardware is available. With the "
"pool adapter, and/or if max_workers=1, tasks_per_worker *is* the number of concurrent tasks."
)
cores_per_worker: int = Schema(
qcng.config.get_global("ncores"),
description="Number of cores to be consumed by the Worker and distributed over the tasks_per_worker. These "
"cores are divided evenly, so it is recommended that quotient of cores_per_worker/tasks_per_worker "
"be a whole number else the core distribution is left up to the logic of the adapter. The default "
"value is read from the number of detected cores on the system you are executing on.",
gt=0
)
memory_per_worker: float = Schema(
qcng.config.get_global("memory"),
description="Amount of memory (in GB) to be consumed and distributed over the tasks_per_worker. This memory is "
"divided evenly, but is ultimately at the control of the adapter. Engine will only allow each of "
"its calls to consume memory_per_worker/tasks_per_worker of memory. Total memory consumed by this "
"manager at any one time is this value times max_workers. The default value is read "
"from the amount of memory detected on the system you are executing on.",
gt=0
)
max_workers: int = Schema(
1,
description="The maximum number of Workers which are allowed to be run at the same time. The total number of "
"concurrent tasks will maximize at this quantity times tasks_per_worker."
"The total number "
"of Jobs on a cluster which will be started is equal to this parameter in most cases, and should "
"be assumed 1 Worker per Job. Any exceptions to this will be documented. "
"In node exclusive mode this is equivalent to the maximum number of nodes which you will consume. "
"This must be a positive, non zero integer.",
gt=0
)
retries: int = Schema(
2,
description="Number of retries that QCEngine will attempt for RandomErrors detected when running "
"its computations. After this many attempts (or on any other type of error), the "
"error will be raised.",
ge=0
)
scratch_directory: Optional[str] = Schema(
None,
description="Scratch directory for Engine execution jobs."
)
verbose: bool = Schema(
False,
description="Turn on verbose mode or not. In verbose mode, all messages from DEBUG level and up are shown, "
"otherwise, defaults are all used for any logger."
)
class Config(SettingsCommonConfig):
pass
auto_gen_docs_on_demand(CommonManagerSettings)
class FractalServerSettings(BaseSettings):
"""
Settings pertaining to the Fractal Server you wish to pull tasks from and push completed tasks to. Each manager
supports exactly 1 Fractal Server to be in communication with, and exactly 1 user on that Fractal Server. These
can be changed, but only once the Manager is shutdown and the settings changed. Multiple Managers however can be
started in parallel with each other, but must be done as separate calls to the CLI.
Caution: The password here is written in plain text, so it is up to the owner/writer of the configuration file
to ensure its security.
"""
fractal_uri: str = Schema(
"localhost:7777",
description="Full URI to the Fractal Server you want to connect to"
)
username: Optional[str] = Schema(
None,
description="Username to connect to the Fractal Server with. When not provided, a connection is attempted "
"as a guest user, which in most default Servers will be unable to return results."
)
password: Optional[str] = Schema(
None,
description="Password to authenticate to the Fractal Server with (alongside the `username`)"
)
verify: Optional[bool] = Schema(
None,
description="Use Server-side generated SSL certification or not."
)
class Config(SettingsCommonConfig):
pass
auto_gen_docs_on_demand(FractalServerSettings)
class QueueManagerSettings(BaseSettings):
"""
Fractal Queue Manger settings. These are options which control the setup and execution of the Fractal Manager
itself.
"""
manager_name: str = Schema(
"unlabeled",
description="Name of this scheduler to present to the Fractal Server. Descriptive names help the server "
"identify the manager resource and assists with debugging."
)
queue_tag: Optional[str] = Schema(
None,
description="Only pull tasks from the Fractal Server with this tag. If not set (None/null), then pull untagged "
"tasks, which should be the majority of tasks. This option should only be used when you want to "
"pull very specific tasks which you know have been tagged as such on the server. If the server has "
"no tasks with this tag, no tasks will be pulled (and no error is raised because this is intended "
"behavior)."
)
log_file_prefix: Optional[str] = Schema(
None,
description="Full path to save a log file to, including the filename. If not provided, information will still "
"be reported to terminal, but not saved. When set, logger information is sent both to this file "
"and the terminal."
)
update_frequency: float = Schema(
30,
description="Time between heartbeats/update checks between this Manager and the Fractal Server. The lower this "
"value, the shorter the intervals. If you have an unreliable network connection, consider "
"increasing this time as repeated, consecutive network failures will cause the Manager to shut "
"itself down to maintain integrity between it and the Fractal Server. Units of seconds",
gt=0
)
test: bool = Schema(
False,
description="Turn on testing mode for this Manager. The Manager will not connect to any Fractal Server, and "
"instead submits netsts worth trial tasks per quantum chemistry program it finds. These tasks are "
"generated locally and do not need a running Fractal Server to work. Helpful for ensuring the "
"Manager is configured correctly and the quantum chemistry codes are operating as expected."
)
ntests: int = Schema(
5,
description="Number of tests to run if the `test` flag is set to True. Total number of tests will be this "
"number times the number of found quantum chemistry programs. Does nothing if `test` is False."
"If set to 0, then this submits no tests, but it will run through the setup and client "
"initialization.",
gt=-1
)
max_queued_tasks: Optional[int] = Schema(
None,
description="Generally should not be set. Number of tasks to pull from the Fractal Server to keep locally at "
"all times. If `None`, this is automatically computed as "
"`ceil(common.tasks_per_worker*common.max_workers*2.0) + 1`. As tasks are completed, the "
"local pool is filled back up to this value. These tasks will all attempt to be run concurrently, "
"but concurrent tasks are limited by number of cluster jobs and tasks per job. Pulling too many of "
"these can result in under-utilized managers from other sites and result in less FIFO returns. As "
"such it is recommended not to touch this setting in general as you will be given enough tasks to "
"fill your maximum throughput with a buffer (assuming the queue has them).",
gt=0
)
auto_gen_docs_on_demand(QueueManagerSettings)
class SchedulerEnum(str, Enum):
slurm = "slurm"
pbs = "pbs"
sge = "sge"
moab = "moab"
lsf = "lsf"
class AdaptiveCluster(str, Enum):
static = "static"
adaptive = "adaptive"
class ClusterSettings(BaseSettings):
"""
Settings tied to the cluster you are running on. These settings are mostly tied to the nature of the cluster
jobs you are submitting, separate from the nature of the compute tasks you will be running within them. As such,
the options here are things like wall time (per job), which Scheduler your cluster has (like PBS or SLURM),
etc. No additional options are allowed here.
"""
node_exclusivity: bool = Schema(
False,
description="Run your cluster jobs in node-exclusivity mode. This option may not be available to all scheduler "
"types and thus may not do anything. Related to this, the flags we have found for this option "
"may not be correct for your scheduler and thus might throw an error. You can always add the "
"correct flag/parameters to the `scheduler_options` parameter and leave this as False if you "
"find it gives you problems."
)
scheduler: SchedulerEnum = Schema(
None,
description="Option of which Scheduler/Queuing system your cluster uses. Note: not all scheduler options are "
"available with every adapter."
)
scheduler_options: List[str] = Schema(
[],
description="Additional options which are fed into the header files for your submitted jobs to your cluster's "
"Scheduler/Queuing system. The directives are automatically filled in, so if you want to set "
"something like '#PBS -n something', you would instead just do '-n something'. Each directive "
"should be a separate string entry in the list. No validation is done on this with respect to "
"valid directives so it is on the user to know what they need to set."
)
task_startup_commands: List[str] = Schema(
[],
description="Additional commands to be run before starting the Workers and the task distribution. This can "
"include commands needed to start things like conda environments or setting environment variables "
"before executing the Workers. These commands are executed first before any of the distributed "
"commands run and are added to the batch scripts as individual commands per entry, verbatim."
)
walltime: str = Schema(
"06:00:00",
description="Wall clock time of each cluster job started. Presented as a string in HH:MM:SS form, but your "
"cluster may have a different structural syntax. This number should be set high as there should "
"be a number of Fractal tasks which are run for each submitted cluster job. Ideally, the job "
"will start, the Worker will land, and the Worker will crunch through as many tasks as it can; "
"meaning the job which has a Worker in it must continue existing to minimize time spend "
"redeploying Workers."
)
adaptive: AdaptiveCluster = Schema(
AdaptiveCluster.adaptive,
description="Whether or not to use adaptive scaling of Workers or not. If set to 'static', a fixed number of "
"Workers will be started (and likely *NOT* restarted when the wall clock is reached). When set to "
"'adaptive' (the default), the distributed engine will try to adaptively scale the number of "
"Workers based on tasks in the queue. This is str instead of bool type variable in case more "
"complex adaptivity options are added in the future."
)
class Config(SettingsCommonConfig):
pass
@validator('scheduler', 'adaptive', pre=True)
def things_to_lcase(cls, v):
return v.lower()
auto_gen_docs_on_demand(ClusterSettings)
class SettingsBlocker(BaseSettings):
"""Helper class to auto block certain entries, overwrite hidden methods to access"""
_forbidden_set = set()
_forbidden_name = "SettingsBlocker"
def __init__(self, **kwargs):
"""
Enforce that the keys we are going to set remain untouched. Blocks certain keywords for the classes
they will be fed into, not whatever Fractal is using as keywords.
"""
bad_set = set(kwargs.keys()) & self._forbidden_set
if bad_set:
raise KeyError("The following items were set as part of {}, however, "
"there are other config items which control these in more generic "
"settings locations: {}".format(self._forbidden_name, bad_set))
super().__init__(**kwargs)
class Config(SettingsCommonConfig):
# This overwrites the base config to allow other keywords to be fed in
extra = "allow"
class DaskQueueSettings(SettingsBlocker):
"""
Settings for the Dask Cluster class. Values set here are passed directly into the Cluster objects based on the
`cluster.scheduler` settings. Although many values are set automatically from other settings, there are
some additional values such as `interface` and `extra` which are passed through to the constructor.
Valid values for this field are functions of your cluster.scheduler and no linting is done ahead of trying to pass
these to Dask.
NOTE: The parameters listed here are a special exception for additional features Fractal has engineered or
options which should be considered for some of the edge cases we have discovered. If you try to set a value
which is derived from other options in the YAML file, an error is raised and you are told exactly which one is
forbidden.
Please see the docs for the provider for more information.
"""
interface: Optional[str] = Schema(
None,
description="Name of the network adapter to use as communication between the head node and the compute node."
"There are oddities of this when the head node and compute node use different ethernet adapter "
"names and we have not figured out exactly which combination is needed between this and the "
"poorly documented `ip` keyword which appears to be for Workers, but not the Client."
)
extra: Optional[List[str]] = Schema(
None,
description="Additional flags which are fed into the Dask Worker CLI startup, can be used to overwrite "
"pre-configured options. Do not use unless you know exactly which flags to use."
)
lsf_units: Optional[str] = Schema(
None,
description="Unit system for an LSF cluster limits (e.g. MB, GB, TB). If not set, the units are "
"are attempted to be set from the `lsf.conf` file in the default locations. This does nothing "
"if the cluster is not LSF"
)
_forbidden_set = {"name", "cores", "memory", "processes", "walltime", "env_extra", "qca_resource_string"}
_forbidden_name = "dask_jobqueue"
auto_gen_docs_on_demand(DaskQueueSettings)
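# Illustrative sketch (not part of the original source): a `dask` block is only needed for edge cases,
# e.g. forcing a particular network interface; the adapter name below is a hypothetical example.
#
#   dask:
#     interface: "ib0"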
class ParslExecutorSettings(SettingsBlocker):
"""
Settings for the Parsl Executor class. This serves as the primary mechanism for distributing Workers to jobs.
In most cases, you will not need to set any of these options, as several options are automatically inferred
from other settings. Any option set here is passed through to the HighThroughputExecutor class of Parsl.
https://parsl.readthedocs.io/en/latest/stubs/parsl.executors.HighThroughputExecutor.html
NOTE: The parameters listed here are a special exception for additional features Fractal has engineered or
options which should be considered for some of the edge cases we have discovered. If you try to set a value
which is derived from other options in the YAML file, an error is raised and you are told exactly which one is
forbidden.
"""
address: Optional[str] = Schema(
None,
description="This only needs to be set in conditional cases when the head node and compute nodes use a "
"differently named ethernet adapter.\n\n"
"An address to connect to the main Parsl process which is reachable from the network in which "
"Workers will be running. This can be either a hostname as returned by hostname or an IP address. "
"Most login nodes on clusters have several network interfaces available, only some of which can be "
"reached from the compute nodes. Some trial and error might be necessary to identify what "
"addresses are reachable from compute nodes."
)
_forbidden_set = {"label", "provider", "cores_per_worker", "max_workers"}
_forbidden_name = "the parsl executor"
auto_gen_docs_on_demand(ParslExecutorSettings)
class ParslLauncherSettings(BaseSettings):
"""
Set the Launcher in a Parsl Provider, and its options; if not set, the defaults are used.
This is a rare use case where the ``launcher`` key of the Provider needs to be set. Since it must be a class
first, you will need to specify the ``launcher_class`` option, which is interpreted as the class name of the
Launcher to load; the rest of the options set here are passed into it. Any unset key is left at its default.
It is up to the user to consult the Parsl Docs for their desired Launcher's options and what they do.
The known launchers below are case-insensitive,
but if new launchers come out (or you are using a custom/developmental build of Parsl), then you can pass your
own Launcher in verbatim, with case sensitivity, and the Queue Manager will try to load it.
Known Launchers:
- ``SimpleLauncher``: https://parsl.readthedocs.io/en/latest/stubs/parsl.launchers.SimpleLauncher.html
- ``SingleNodeLauncher``: https://parsl.readthedocs.io/en/latest/stubs/parsl.launchers.SingleNodeLauncher.html
- ``SrunLauncher``: https://parsl.readthedocs.io/en/latest/stubs/parsl.launchers.SrunLauncher.html
- ``AprunLauncher``: https://parsl.readthedocs.io/en/latest/stubs/parsl.launchers.AprunLauncher.html
- ``SrunMPILauncher``: https://parsl.readthedocs.io/en/latest/stubs/parsl.launchers.SrunMPILauncher.html
- ``GnuParallelLauncher``: https://parsl.readthedocs.io/en/latest/stubs/parsl.launchers.GnuParallelLauncher.html
- ``MpiExecLauncher`` : https://parsl.readthedocs.io/en/latest/stubs/parsl.launchers.MpiExecLauncher.html
"""
launcher_class: str = Schema(
...,
description="Class of Launcher to use. This is a setting unique to QCArchive which is then used to pass onto "
"the Provider's ``launcher`` setting and the remaining keys are passed to that Launcher's options."
)
def _get_launcher(self, launcher_base: str) -> 'Launcher':
launcher_lower = launcher_base.lower()
launcher_map = {
"simplelauncher": "SimpleLauncher",
"singlenodelauncher": "SingleNodeLauncher",
"srunlauncher": "SrunLauncher",
"aprunlauncher": "AprunLauncher",
"srunmpiLauncher": "SrunMPILauncher",
"gnuparallellauncher": "GnuParallelLauncher",
"mpiexeclauncher": "MpiExecLauncher"
}
launcher_string = launcher_map[launcher_lower] if launcher_lower in launcher_map else launcher_base
try:
launcher_load = cli_utils.import_module("parsl.launchers", package=launcher_string)
launcher = getattr(launcher_load, launcher_string)
except ImportError:
raise ImportError(f"Could not import Parsl Launcher: {launcher_base}. Please make sure you have Parsl "
f"installed and are requesting one of the launchers within the package.")
return launcher
def build_launcher(self):
"""Import and load the desired launcher"""
launcher = self._get_launcher(self.launcher_class)
return launcher(**self.dict(exclude={'launcher_class'}))
class Config(SettingsCommonConfig):
pass
auto_gen_docs_on_demand(ParslLauncherSettings)
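# Illustrative sketch (not part of the original source): selecting an Srun-based launcher from the YAML
# file; any additional keys beside `launcher_class` would be handed to that Launcher's constructor.
#
#   parsl:
#     provider:
#       launcher:
#         launcher_class: SrunLauncher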
class ParslProviderSettings(SettingsBlocker):
"""
Settings for the Parsl Provider class. Valid values for this field are functions of your cluster.scheduler and no
linting is done ahead of trying to pass these to Parsl.
Please see the docs for the provider for more information.
NOTE: The parameters listed here are a special exception for additional features Fractal has engineered or
options which should be considered for some of the edge cases we have discovered. If you try to set a value
which is derived from other options in the YAML file, an error is raised and you are told exactly which one is
forbidden.
SLURM: https://parsl.readthedocs.io/en/latest/stubs/parsl.providers.SlurmProvider.html
PBS/Torque/Moab: https://parsl.readthedocs.io/en/latest/stubs/parsl.providers.TorqueProvider.html
SGE (Sun GridEngine): https://parsl.readthedocs.io/en/latest/stubs/parsl.providers.GridEngineProvider.html
"""
partition: str = Schema(
None,
description="The name of the cluster.scheduler partition being submitted to. Behavior, valid values, and even"
"its validity as a set variable are a function of what type of queue scheduler your specific "
"cluster has (e.g. this variable should NOT be present for PBS clusters). "
"Check with your Sys. Admins and/or your cluster documentation."
)
launcher: ParslLauncherSettings = Schema(
None,
description="The Parsl Launcher do use with your Provider. If left to ``None``, defaults are assumed (check "
"the Provider's defaults), otherwise this should be a dictionary requiring the option "
"``launcher_class`` as a str to specify which Launcher class to load, and the remaining settings "
"will be passed on to the Launcher's constructor."
)
_forbidden_set = {"nodes_per_block", "max_blocks", "worker_init", "scheduler_options", "wall_time"}
_forbidden_name = "parsl's provider"
auto_gen_docs_on_demand(ParslProviderSettings)
class ParslQueueSettings(BaseSettings):
"""
The Parsl-specific configurations used with the `common.adapter = parsl` setting. The parsl config is broken up into
a top level `Config` class, an `Executor` sub-class, and a `Provider` sub-class of the `Executor`.
Config -> Executor -> Provider. Each of these have their own options, and extra values fed into the
ParslQueueSettings are fed to the `Config` level.
It requires both `executor` and `provider` settings, but these are filled in with defaults and often do not need
any further configuration, since most of it is handled by other settings in the config file.
"""
executor: ParslExecutorSettings = ParslExecutorSettings()
provider: ParslProviderSettings = ParslProviderSettings()
class Config(SettingsCommonConfig):
extra = "allow"
auto_gen_docs_on_demand(ParslQueueSettings)
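# Illustrative sketch (not part of the original source): the nesting Config -> Executor -> Provider
# maps onto the YAML file as shown below; the address and partition values are hypothetical.
#
#   parsl:
#     executor:
#       address: "10.0.0.1"        # usually unnecessary, see ParslExecutorSettings.address
#     provider:
#       partition: "normal"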
class ManagerSettings(BaseModel):
"""
The config file for setting up a QCFractal Manager, all sub fields of this model are at equal top-level of the
YAML file. No additional top-level fields are permitted, but sub-fields may have their own additions.
Not all fields are required and many will depend on the cluster you are running, and the adapter you choose
to run on.
"""
common: CommonManagerSettings = CommonManagerSettings()
server: FractalServerSettings = FractalServerSettings()
manager: QueueManagerSettings = QueueManagerSettings()
cluster: Optional[ClusterSettings] = ClusterSettings()
dask: Optional[DaskQueueSettings] = DaskQueueSettings()
parsl: Optional[ParslQueueSettings] = ParslQueueSettings()
class Config:
extra = "forbid"
auto_gen_docs_on_demand(ManagerSettings)
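# Illustrative sketch of a complete config file (added for clarity, not part of the original source);
# all values below are hypothetical placeholders, and only fields defined by the models above or the
# CLI options below are used.
#
#   common:
#     adapter: dask
#     tasks_per_worker: 2
#     cores_per_worker: 8
#     memory_per_worker: 32
#   server:
#     fractal_uri: "localhost:7777"
#     verify: false
#   manager:
#     manager_name: "example_manager"
#   cluster:
#     scheduler: slurm
#     walltime: "06:00:00"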
def parse_args():
parser = argparse.ArgumentParser(
description='A CLI for a QCFractal QueueManager with a ProcessPoolExecutor, Dask, or Parsl backend. '
'The Dask and Parsl backends *require* a config file due to the complexity of their setup. If a config '
'file is specified, the remaining options serve as CLI overwrites of the config.')
parser.add_argument("--config-file", type=str, default=None)
# Common settings
common = parser.add_argument_group('Common Adapter Settings')
common.add_argument(
"--adapter", type=str, help="The backend adapter to use, currently only {'dask', 'parsl', 'pool'} are valid.")
common.add_argument(
"--tasks-per-worker",
type=int,
help="The number of simultaneous tasks for the executor to run, resources will be divided evenly.")
common.add_argument("--cores-per-worker", type=int, help="The number of process for each executor's Workers")
common.add_argument("--memory-per-worker", type=int, help="The total amount of memory on the system in GB")
common.add_argument("--scratch-directory", type=str, help="Scratch directory location")
common.add_argument("--retries", type=int, help="Number of RandomError retries per task before failing the task")
common.add_argument("-v", "--verbose", action="store_true", help="Increase verbosity of the logger.")
# FractalClient options
server = parser.add_argument_group('FractalServer connection settings')
server.add_argument("--fractal-uri", type=str, help="FractalServer location to pull from")
server.add_argument("-u", "--username", type=str, help="FractalServer username")
server.add_argument("-p", "--password", type=str, help="FractalServer password")
server.add_argument(
"--verify",
type=str,
help="Do verify the SSL certificate, leave off (unset) for servers with custom SSL certificates.")
# QueueManager options
manager = parser.add_argument_group("QueueManager settings")
manager.add_argument("--manager-name", type=str, help="The name of the manager to start")
manager.add_argument("--queue-tag", type=str, help="The queue tag to pull from")
manager.add_argument("--log-file-prefix", type=str, help="The path prefix of the logfile to write to.")
manager.add_argument("--update-frequency", type=int, help="The frequency in seconds to check for complete tasks.")
manager.add_argument("--max-queued-tasks", type=int, help="Maximum number of tasks to hold at any given time. "
"Generally should not be set.")
# Additional args
optional = parser.add_argument_group('Optional Settings')
optional.add_argument("--test", action="store_true", help="Boot and run a short test suite to validate setup")
optional.add_argument(
"--ntests", type=int, help="How many tests per found program to run, does nothing without --test set")
optional.add_argument("--schema", action="store_true", help="Display the current Schema (Pydantic) for the YAML "
"config file and exit. This will always show the "
"most up-to-date schema. It will be presented in a "
"JSON-like format.")
# Move into nested namespace
args = vars(parser.parse_args())
def _build_subset(args, keys):
ret = {}
for k in keys:
v = args[k]
if v is None:
continue
ret[k] = v
return ret
# Unfortunately, argparse does not let us inspect argument groups directly
data = {
"common": _build_subset(args, {"adapter", "tasks_per_worker", "cores_per_worker", "memory_per_worker",
"scratch_directory", "retries", "verbose"}),
"server": _build_subset(args, {"fractal_uri", "password", "username", "verify"}),
"manager": _build_subset(args, {"max_queued_tasks", "manager_name", "queue_tag", "log_file_prefix",
"update_frequency", "test", "ntests"}),
# This set is for this script only, items here should not be passed to the ManagerSettings nor any other
# classes
"debug": _build_subset(args, {"schema"})
} # yapf: disable
if args["config_file"] is not None:
config_data = cli_utils.read_config_file(args["config_file"])
for name, subparser in [("common", common), ("server", server), ("manager", manager)]:
if name not in config_data:
continue
data[name] = cli_utils.argparse_config_merge(subparser, data[name], config_data[name], check=False)
for name in ["cluster", "dask", "parsl"]:
if name in config_data:
data[name] = config_data[name]
if data[name] is None:
# Handle edge case where None provided here is explicitly treated as
# "do not parse" by Pydantic (intended behavior) instead of the default empty dict
# being used instead. This only happens when a user sets in the YAML file
# the top level header and nothing below it.
data[name] = {}
return data
def main(args=None):
# Grab CLI args if not present
if args is None:
args = parse_args()
exit_callbacks = []
try:
if args["debug"]["schema"]:
print(ManagerSettings.schema_json(indent=2))
return # We're done, exit normally
except KeyError:
pass # Don't worry if schema isn't in the list
finally:
args.pop("debug", None) # Ensure the debug key is not present
# Construct object
settings = ManagerSettings(**args)
logger_map = {AdapterEnum.pool: "",
AdapterEnum.dask: "dask_jobqueue.core",
AdapterEnum.parsl: "parsl"}
if settings.common.verbose:
adapter_logger = logging.getLogger(logger_map[settings.common.adapter])
adapter_logger.setLevel("DEBUG")
logger.setLevel("DEBUG")
if settings.manager.log_file_prefix is not None:
tornado.options.options['log_file_prefix'] = settings.manager.log_file_prefix
# Clones the log to the output
tornado.options.options['log_to_stderr'] = True
tornado.log.enable_pretty_logging()
if settings.manager.test:
# Test this manager, no client needed
client = None
else:
# Connect to a specified fractal server
client = qcfractal.interface.FractalClient(
address=settings.server.fractal_uri, **settings.server.dict(skip_defaults=True, exclude={"fractal_uri"}))
# Figure out per-task data
cores_per_task = settings.common.cores_per_worker // settings.common.tasks_per_worker
memory_per_task = settings.common.memory_per_worker / settings.common.tasks_per_worker
if cores_per_task < 1:
raise ValueError("Cores per task must be larger than one!")
if settings.common.adapter == "pool":
from concurrent.futures import ProcessPoolExecutor
queue_client = ProcessPoolExecutor(max_workers=settings.common.tasks_per_worker)
elif settings.common.adapter == "dask":
dask_settings = settings.dask.dict(skip_defaults=True)
# Checks
if "extra" not in dask_settings:
dask_settings["extra"] = []
if QCA_RESOURCE_STRING not in dask_settings["extra"]:
dask_settings["extra"].append(QCA_RESOURCE_STRING)
# Scheduler opts
scheduler_opts = settings.cluster.scheduler_options.copy()
_cluster_loaders = {"slurm": "SLURMCluster", "pbs": "PBSCluster", "moab": "MoabCluster", "sge": "SGECluster",
"lsf": "LSFCluster"}
dask_exclusivity_map = {"slurm": "--exclusive",
"pbs": "-n",
"moab": "-n", # Less sure about this one
"sge": "-l exclusive=true",
"lsf": "-x",
}
if settings.cluster.node_exclusivity and dask_exclusivity_map[settings.cluster.scheduler] not in scheduler_opts:
scheduler_opts.append(dask_exclusivity_map[settings.cluster.scheduler])
# Create one construct to quickly merge dicts with a final check
dask_construct = {
"name": "QCFractal_Dask_Compute_Executor",
"cores": settings.common.cores_per_worker,
"memory": str(settings.common.memory_per_worker) + "GB",
"processes": settings.common.tasks_per_worker, # Number of workers to generate == tasks in this construct
"walltime": settings.cluster.walltime,
"job_extra": scheduler_opts,
"env_extra": settings.cluster.task_startup_commands,
**dask_settings}
try:
# Import the dask things we need
import dask_jobqueue
from dask.distributed import Client
cluster_module = cli_utils.import_module("dask_jobqueue",
package=_cluster_loaders[settings.cluster.scheduler])
cluster_class = getattr(cluster_module, _cluster_loaders[settings.cluster.scheduler])
if dask_jobqueue.__version__ < "0.5.0":
raise ImportError
except ImportError:
raise ImportError("You need`dask-jobqueue >= 0.5.0` to use the `dask` adapter")
cluster = cluster_class(**dask_construct)
# Set up adaptive scaling
# Workers are distributed down to the cores through the sub-divided processes
# Optimization may be needed
workers = settings.common.tasks_per_worker * settings.common.max_workers
if settings.cluster.adaptive == AdaptiveCluster.adaptive:
cluster.adapt(minimum=0, maximum=workers, interval="10s")
else:
cluster.scale(workers)
queue_client = Client(cluster)
elif settings.common.adapter == "parsl":
scheduler_opts = settings.cluster.scheduler_options
if not settings.cluster.node_exclusivity:
raise ValueError("For now, QCFractal can only be run with Parsl in node exclusivity. This will be relaxed "
"in a future release of Parsl and QCFractal")
# Import helpers
_provider_loaders = {"slurm": "SlurmProvider",
"pbs": "TorqueProvider",
"moab": "TorqueProvider",
"sge": "GridEngineProvider",
"lsf": None}
if _provider_loaders[settings.cluster.scheduler] is None:
raise ValueError(f"Parsl does not know how to handle cluster of type {settings.cluster.scheduler}.")
# Headers
_provider_headers = {"slurm": "#SBATCH",
"pbs": "#PBS",
"moab": "#PBS",
"sge": "#$$",
"lsf": None
}
# Import the parsl things we need
try:
import parsl
from parsl.config import Config
from parsl.executors import HighThroughputExecutor
from parsl.addresses import address_by_hostname
provider_module = cli_utils.import_module("parsl.providers",
package=_provider_loaders[settings.cluster.scheduler])
provider_class = getattr(provider_module, _provider_loaders[settings.cluster.scheduler])
provider_header = _provider_headers[settings.cluster.scheduler]
if parsl.__version__ < '0.8.0':
raise ImportError
except ImportError:
raise ImportError("You need `parsl >=0.8.0` to use the `parsl` adapter")
if _provider_loaders[settings.cluster.scheduler] == "moab":
logger.warning("Parsl uses its TorqueProvider for Moab clusters due to the scheduler similarities. "
"However, if you find a bug with it, please report to the Parsl and QCFractal developers so "
"it can be fixed on each respective end.")
# Setup the providers
# Create one construct to quickly merge dicts with a final check
common_parsl_provider_construct = {
"init_blocks": 0, # Update this at a later time of Parsl
"max_blocks": settings.common.max_workers,
"walltime": settings.cluster.walltime,
"scheduler_options": f'{provider_header} ' + f'\n{provider_header} '.join(scheduler_opts) + '\n',
"nodes_per_block": 1,
"worker_init": '\n'.join(settings.cluster.task_startup_commands),
**settings.parsl.provider.dict(skip_defaults=True, exclude={"partition", "launcher"})
}
if settings.parsl.provider.launcher:
common_parsl_provider_construct["launcher"] = settings.parsl.provider.launcher.build_launcher()
if settings.cluster.scheduler == "slurm":
# The Parsl SLURM constructor has a strange set of arguments
provider = provider_class(settings.parsl.provider.partition,
exclusive=settings.cluster.node_exclusivity,
**common_parsl_provider_construct)
else:
provider = provider_class(**common_parsl_provider_construct)
parsl_executor_construct = {
"label": "QCFractal_Parsl_{}_Executor".format(settings.cluster.scheduler.title()),
"cores_per_worker": cores_per_task,
"max_workers": settings.common.tasks_per_worker * settings.common.max_workers,
"provider": provider,
"address": address_by_hostname(),
**settings.parsl.executor.dict(skip_defaults=True)}
queue_client = Config(
executors=[HighThroughputExecutor(**parsl_executor_construct)])
else:
raise KeyError("Unknown adapter type '{}', available options: {}.\n"
"This code should also be unreachable with pydantic Validation, so if "
"you see this message, please report it to the QCFractal GitHub".format(
settings.common.adapter, [getattr(AdapterEnum, v).value for v in AdapterEnum]))
# Build out the manager itself
# Compute max tasks
max_concurrent_tasks = settings.common.tasks_per_worker * settings.common.max_workers
if settings.manager.max_queued_tasks is None:
# Tasks * jobs * buffer + 1
max_queued_tasks = ceil(max_concurrent_tasks * 2.00) + 1
else:
max_queued_tasks = settings.manager.max_queued_tasks
manager = qcfractal.queue.QueueManager(
client,
queue_client,
max_tasks=max_queued_tasks,
queue_tag=settings.manager.queue_tag,
manager_name=settings.manager.manager_name,
update_frequency=settings.manager.update_frequency,
cores_per_task=cores_per_task,
memory_per_task=memory_per_task,
scratch_directory=settings.common.scratch_directory,
retries=settings.common.retries,
verbose=settings.common.verbose
)
# Set stats correctly since we buffer the max tasks a bit
manager.statistics.max_concurrent_tasks = max_concurrent_tasks
# Add exit callbacks
for cb in exit_callbacks:
manager.add_exit_callback(cb[0], *cb[1], **cb[2])
# Either startup the manager or run until complete
if settings.manager.test:
success = manager.test(settings.manager.ntests)
if success is False:
raise ValueError("Testing was not successful, failing.")
else:
for signame in {"SIGHUP", "SIGINT", "SIGTERM"}:
def stop(*args, **kwargs):
manager.stop(signame)
raise KeyboardInterrupt()
signal.signal(getattr(signal, signame), stop)
# Blocks until signal
try:
manager.start()
except KeyboardInterrupt:
pass
if __name__ == '__main__':
main()
| yudongqiu/QCFractal | qcfractal/cli/qcfractal_manager.py | qcfractal_manager.py | py | 42,285 | python | en | code | null | github-code | 6 | 916473686 |
import re
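# Note added for clarity (not in the original source): each blueprint is solved by a depth-first
# search over per-minute decisions ("build robot i" or "build nothing"). Two prunings keep the
# search tractable: a geode robot is always built as soon as it is affordable, and a robot type
# that was affordable but deliberately skipped is banned on the following step, since building it
# one minute later without having built anything else can never help.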
def parse_blueprint(blueprint):
blueprint += " 0 ore 0 clay 0 obsidian"
return [int(re.search(r" ([\d]+) ore", blueprint).group(1)), int(re.search(r" ([\d]+) clay", blueprint).group(1)),
int(re.search(r" ([\d]+) obsidian", blueprint).group(1))]
def build_bot(bots, resources, bp, t, end, i):
for j in range(len(bp[i])):
resources[j] -= bp[i][j]
for j in range(len(bots)):
resources[j] += bots[j]
bots[i] += 1
res = time_step(bots, resources, bp, t + 1, end, [])
bots[i] -= 1
for j in range(len(bots)):
resources[j] -= bots[j]
for j in range(len(bp[i])):
resources[j] += bp[i][j]
return res
def dont_build_bot(bots, resources, bp, t, end, banned_bots):
for j in range(len(bots)):
resources[j] += bots[j]
res = time_step(bots, resources, bp, t + 1, end, banned_bots)
for j in range(len(bots)):
resources[j] -= bots[j]
return res
def could_build_bot(resources, bp, i):
for j in range(len(bp[i])):
if resources[j] < bp[i][j]:
return False
return True
def should_build_bot(bots, bp, i):
for j in range(len(bp)):
if bots[i] <= bp[j][i]:
return True
return False
def time_step(bots, resources, bp, t, end, banned_bots):
if t == end:
return resources[-1]
if could_build_bot(resources, bp, -1):
return build_bot(bots, resources, bp, t, end, -1)
banned_bots_new = []
best = 0
for i in range(len(bp) - 1):
could_build = could_build_bot(resources, bp, i)
should_build = should_build_bot(bots, bp, i)
if could_build:
banned_bots_new.append(i)
if could_build and should_build and i not in banned_bots:
build = build_bot(bots, resources, bp, t, end, i)
best = max(build, best)
dont_build = dont_build_bot(bots, resources, bp, t, end, banned_bots_new)
return max(best, dont_build)
f = [blueprint.split(": ")[1].split(". ") for blueprint in open('../inputs/day19.txt').read().splitlines()]
bps = [[parse_blueprint(recipe) for recipe in bp] for bp in f]
part1 = 0
part2 = 1
for i in range(len(bps)):
part1 += ((i + 1) * time_step([1, 0, 0, 0], [0, 0, 0, 0], bps[i], 0, 24, []))
part2 *= (time_step([1, 0, 0, 0], [0, 0, 0, 0], bps[i], 0, 32, []) if i < 3 else 1)
print(part1)
print(part2)
| UncatchableAlex/advent2022 | solutions/day19.py | day19.py | py | 2,386 | python | en | code | 0 | github-code | 6 | 24219583345 |
# -*- coding: utf-8 -*-
"""
Created on 2022/9/23
@author: nhsiao
2022/9/5 avg_rsrp changed to c_rsrp, effective from 2022/8/27
2022/9/29 c_rsrp changed to pos_first_rsrp, effective from 2022/9/23
"""
import cx_Oracle
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import dates as mpl_dates
import gc
import gzip
from datetime import datetime, timedelta
import func
import warnings
warnings.filterwarnings('ignore','.*Failed to load HostKeys.*')
warnings.filterwarnings('ignore')
# import datetime
# today = datetime.date.today().strftime("%Y-%m-%d")
code_folder = "D:\\Nicole\\python\\cottCNN\\"
# keep process time
now = datetime.now()
txt = 'generateImg.py, update time, From: ' + str(now)
df = pd.DataFrame([txt], index=['UpdateTime'])
df.to_csv(code_folder+'logCottCNN.csv', mode='a',header=False)
df_site = pd.DataFrame(data=None, columns=['ittid','ittid_lat','ittid_long','site' ,'site_dis','site_lat' ,'site_long','site_type','pos_first_rsrp_mean', 'pos_first_rsrp_count', 'c_prbutil_mean', 'c_prbutil_count', 'c_rssi_mean', 'c_rssi_count', 'dl_tput_mean', 'dl_tput_count', 'pos_last_rsrq_mean', 'pos_last_rsrq_count', 'end_cqi_mean','end_cqi_count'])
df_site_ori = pd.DataFrame(data=None, columns=['itt_id', 'site1','site2', 'site3'])
today = datetime.today().strftime("%Y-%m-%d")
yesterday = datetime.today() - timedelta(days=1)
yesDay = yesterday.strftime('%Y%m%d')
yesDate = yesterday.strftime('%Y-%m-%d')
# today = "2022-09-29"
# yesDay = "20221129"
# yesDate = "2022-11-29"
localDir = code_folder+'data\\'
sFile = 'TT_Data_'+ yesDay +'.csv.gz'
print(localDir, sFile)
func.sftp(sFile, localDir)
# Today's raw data
with gzip.open(localDir + sFile, 'rb') as f:
rawCott = pd.read_csv(f)
sql = 'SELECT ITT_ID, to_char(CREATE_DATE,\'YYYY-MM-DD HH24\')||\':00\' event_date, to_char(CREATE_DATE-1,\'YYYY-MM-DD HH24\')||\':00\' event_date_24hr, to_char(CREATE_DATE-4,\'YYYY-MM-DD HH24\')||\':00\' event_start_date, GIS_X_84, GIS_Y_84 FROM ITSMRPT.RPT_COTT@ITSMRPT_NEW WHERE trunc(CREATE_DATE) = TO_DATE(\''+ yesDate +'\',\'YYYY-MM-DD\') union SELECT ITT_ID, to_char(CREATE_DATE,\'YYYY-MM-DD HH24\')||\':00\' event_date, to_char(CREATE_DATE-1,\'YYYY-MM-DD HH24\')||\':00\' event_date_24hr, to_char(CREATE_DATE-4,\'YYYY-MM-DD HH24\')||\':00\' event_start_date, GIS_X_84, GIS_Y_84 FROM ITSMRPT.RPT_COTT_APP@ITSMRPT_NEW WHERE trunc(CREATE_DATE) = TO_DATE(\''+ yesDate +'\',\'YYYY-MM-DD\')'
connection = cx_Oracle.connect('nocadm/[email protected]/nois3g')
df1 = pd.read_sql(sql, con=connection)
del df
pd.options.mode.chained_assignment = None # default='warn'
df3 = rawCott.merge(df1, left_on="itt_id", right_on="ITT_ID", how='left', suffixes=('_1', '_2'))
df3['start_time'] = pd.to_datetime(df3['start_time'], format='%Y-%m-%d %H:%M:%S')
condition = "`start_time` <= `EVENT_DATE` and start_time >= `EVENT_START_DATE`"
df_raw0 = df3.query(condition, engine='python')
df_raw = df_raw0[['itt_id','site_id', 'GIS_X_84', 'GIS_Y_84','c_lat','c_long', 'pos_first_lat',
'pos_first_long', 'n_type', 'start_time','EVENT_START_DATE','EVENT_DATE', 'EVENT_DATE_24HR','duration','pos_first_rsrp', 'c_prbutil', 'c_rssi','end_cqi','call_type','dl_volume','dl_tput','pos_last_rsrq']]
df_raw["start_time"] = pd.to_datetime(df_raw["start_time"])
df_raw['EVENT_START_DATE'] = pd.to_datetime(df_raw['EVENT_START_DATE'])
df_raw['EVENT_DATE'] = pd.to_datetime(df_raw['EVENT_DATE'])
df_raw['EVENT_DATE_24HR'] = pd.to_datetime(df_raw['EVENT_DATE_24HR'])
del rawCott
del df3
del df_raw0
params = ["pos_first_rsrp", "c_prbutil", "c_rssi","end_cqi","pos_last_rsrq", "dl_tput"]
df_raw['dl_volume'].fillna(value=0, inplace=True)
df_raw['dl_volume'] = df_raw['dl_volume'].astype('int64')
df_raw['dl_tput'].fillna(value=0, inplace=True)
df_raw['dl_tput'] = df_raw['dl_tput'].astype('int64')
df_raw['itt_id'] = df_raw['itt_id'].astype('str')
df_raw['pos_first_rsrp_color'] = df_raw.apply(func.get_rsrp_color, axis=1).copy()
df_raw['c_prbutil_color'] = df_raw.apply(func.get_prb_color, axis=1).copy()
df_raw['c_rssi_color'] = df_raw.apply(func.get_rssi_color, axis=1).copy()
df_raw['end_cqi_color'] = df_raw.apply(func.get_cqi_color, axis=1).copy()
df_raw['dl_tput_color'] = df_raw.apply(func.get_dltput_color, axis=1).copy()
df_raw['pos_last_rsrq_color'] = df_raw.apply(func.get_rsrq_color, axis=1).copy()
df_raw['duration2'] = df_raw.apply(func.get_duration, axis=1).copy()
df_raw['times'] = df_raw.apply(func.get_times, axis=1).copy()
df_raw['GIS_Y_84'] = df_raw['GIS_Y_84'].astype('float64')
df_raw['GIS_X_84'] = df_raw['GIS_X_84'].astype('float64')
df_raw['c_lat'] = df_raw['c_lat'].astype('float64')
df_raw['c_long'] = df_raw['c_long'].astype('float64')
df_raw['tt_site_distance'] = df_raw.apply(lambda x: func.LLs2Dist(x['GIS_Y_84'],x['GIS_X_84'],x['c_lat'],x['c_long']) , axis=1).copy()
df_raw['user_site_distance'] = df_raw.apply(lambda x: func.LLs2Dist(x['pos_first_lat'],x['pos_first_long'],x['c_lat'],x['c_long']) , axis=1).copy()
df_raw['tt_user_distance'] = df_raw.apply(lambda x: func.LLs2Dist(x['pos_first_lat'],x['pos_first_long'],x['GIS_Y_84'],x['GIS_X_84']) , axis=1).copy()
# df_raw_test = df_raw[df_raw['tt_user_distance']<2]
itt_id = df_raw['itt_id'].unique()
for i in range(len(itt_id)):
condition = "`itt_id` == '" + itt_id[i] + "'"
df = df_raw.query(condition, engine='python')
# Get the site (base station) served for the longest time in each period
site1, tt1_lat, tt1_long, site1_lat, site1_long, bad_site1, bad_site1_lat, bad_site1_long, pos_first_rsrp_mean1,c_prbutil_mean1,c_rssi_mean1,dl_tput_mean1,pos_last_rsrq_mean1,end_cqi_mean1,pos_first_rsrp_count1,c_prbutil_count1,c_rssi_count1,dl_tput_count1,pos_last_rsrq_count1,end_cqi_count1, pos_first_rsrp_bmean1,c_prbutil_bmean1,c_rssi_bmean1,dl_tput_bmean1,pos_last_rsrq_bmean1,end_cqi_bmean1,pos_first_rsrp_bcount1,c_prbutil_bcount1,c_rssi_bcount1,dl_tput_bcount1,pos_last_rsrq_bcount1,end_cqi_bcount1 = func.get_site_id(df, 8, 12)
site2, tt2_lat, tt2_long, site2_lat, site2_long, bad_site2, bad_site2_lat, bad_site2_long, pos_first_rsrp_mean2,c_prbutil_mean2,c_rssi_mean2,dl_tput_mean2,pos_last_rsrq_mean2,end_cqi_mean2,pos_first_rsrp_count2,c_prbutil_count2,c_rssi_count2,dl_tput_count2,pos_last_rsrq_count2,end_cqi_count2, pos_first_rsrp_bmean2,c_prbutil_bmean2,c_rssi_bmean2,dl_tput_bmean2,pos_last_rsrq_bmean2,end_cqi_bmean2,pos_first_rsrp_bcount2,c_prbutil_bcount2,c_rssi_bcount2,dl_tput_bcount2,pos_last_rsrq_bcount2,end_cqi_bcount2 = func.get_site_id(df, 12, 18)
site3, tt3_lat, tt3_long, site3_lat, site3_long, bad_site3, bad_site3_lat, bad_site3_long, pos_first_rsrp_mean3,c_prbutil_mean3,c_rssi_mean3,dl_tput_mean3,pos_last_rsrq_mean3,end_cqi_mean3,pos_first_rsrp_count3,c_prbutil_count3,c_rssi_count3,dl_tput_count3,pos_last_rsrq_count3,end_cqi_count3, pos_first_rsrp_bmean3,c_prbutil_bmean3,c_rssi_bmean3,dl_tput_bmean3,pos_last_rsrq_bmean3,end_cqi_bmean3,pos_first_rsrp_bcount3,c_prbutil_bcount3,c_rssi_bcount3,dl_tput_bcount3,pos_last_rsrq_bcount3,end_cqi_bcount3 = func.get_site_id(df, 18, 24)
site1_dis = ""
site2_dis = ""
site3_dis = ""
bad_site1_dis = ""
bad_site2_dis = ""
bad_site3_dis = ""
# if len(site1_lat) > 0:
if site1_lat:
# site1_dis = format(func.LLs2Dist(tt1_lat, tt1_long, site1_lat, site1_long),'.2f')
site1_dis = func.round_v2(func.LLs2Dist(tt1_lat, tt1_long, site1_lat, site1_long),3)
if site2_lat:
site2_dis = func.round_v2(func.LLs2Dist(tt2_lat, tt2_long, site2_lat, site2_long),3)
if site3_lat:
site3_dis = func.round_v2(func.LLs2Dist(tt3_lat, tt3_long, site3_lat, site3_long),3)
if bad_site1_lat:
bad_site1_dis = func.round_v2(func.LLs2Dist(tt1_lat, tt1_long, bad_site1_lat, bad_site1_long),3)
if bad_site2_lat:
bad_site2_dis = func.round_v2(func.LLs2Dist(tt2_lat, tt2_long, bad_site2_lat, bad_site2_long),3)
if bad_site3_lat:
bad_site3_dis = func.round_v2(func.LLs2Dist(tt3_lat, tt3_long, bad_site3_lat, bad_site3_long),3)
site_arr = [site1, site2, site3, bad_site1, bad_site2, bad_site3]
ittid_lat_arr = [tt1_lat, tt2_lat, tt3_lat, tt1_lat, tt2_lat, tt3_lat]
ittid_long_arr = [tt1_long, tt2_long, tt3_long, tt1_long, tt2_long, tt3_long]
site_dis_arr = [site1_dis, site2_dis, site3_dis, bad_site1_dis, bad_site2_dis, bad_site3_dis]
site_lat_arr = [site1_lat, site2_lat, site3_lat, bad_site1_lat, bad_site2_lat, bad_site3_lat]
site_long_arr = [site1_long, site2_long, site3_long, bad_site1_long, bad_site2_long, bad_site3_long]
site_type_arr = ['time1', 'time2', 'time3', 'btime1', 'btime2', 'btime3']
# parameter 6-1: RSRP
rsrp_mean_arr = [pos_first_rsrp_mean1, pos_first_rsrp_mean2, pos_first_rsrp_mean3, pos_first_rsrp_bmean1, pos_first_rsrp_bmean2, pos_first_rsrp_bmean3]
rsrp_count_arr = [pos_first_rsrp_count1, pos_first_rsrp_count2, pos_first_rsrp_count3,pos_first_rsrp_bcount1, pos_first_rsrp_bcount2, pos_first_rsrp_bcount3]
# parameter 6-2: PRB utilization
prbutil_mean_arr = [c_prbutil_mean1, c_prbutil_mean2, c_prbutil_mean3, c_prbutil_bmean1, c_prbutil_bmean2, c_prbutil_bmean3]
prbutil_count_arr = [c_prbutil_count1, c_prbutil_count2, c_prbutil_count3, c_prbutil_bcount1, c_prbutil_bcount2, c_prbutil_bcount3]
# parameter 6-3: RSSI
rssi_mean_arr = [c_rssi_mean1, c_rssi_mean2, c_rssi_mean3, c_rssi_bmean1, c_rssi_bmean2, c_rssi_bmean3]
rssi_count_arr = [c_rssi_count1, c_rssi_count2, c_rssi_count3, c_rssi_bcount1, c_rssi_bcount2, c_rssi_bcount3]
# parameter 6-4: DL throughput
dltput_mean_arr = [dl_tput_mean1, dl_tput_mean2, dl_tput_mean3, dl_tput_bmean1, dl_tput_bmean2, dl_tput_bmean3]
dltput_count_arr = [dl_tput_count1, dl_tput_count2, dl_tput_count3, dl_tput_bcount1, dl_tput_bcount2, dl_tput_bcount3]
# parameter 6-5: RSRQ
rsrq_mean_arr = [pos_last_rsrq_mean1, pos_last_rsrq_mean2, pos_last_rsrq_mean3, pos_last_rsrq_bmean1, pos_last_rsrq_bmean2, pos_last_rsrq_bmean3]
rsrq_count_arr = [pos_last_rsrq_count1, pos_last_rsrq_count2, pos_last_rsrq_count3, pos_last_rsrq_bcount1, pos_last_rsrq_bcount2, pos_last_rsrq_bcount3]
# parameter 6-6: CQI
cqi_mean_arr = [end_cqi_mean1, end_cqi_mean2, end_cqi_mean3, end_cqi_bmean1, end_cqi_bmean2, end_cqi_bmean3]
cqi_count_arr = [end_cqi_count1, end_cqi_count2, end_cqi_count3, end_cqi_bcount1, end_cqi_bcount2, end_cqi_bcount3]
for a in range(len(site_arr)):
df_site = df_site.append({'ittid' :itt_id[i]
, 'ittid_lat' : ittid_lat_arr[a]
, 'ittid_long' : ittid_long_arr[a]
, 'site' : site_arr[a]
, 'site_dis' : site_dis_arr[a]
, 'site_lat' : site_lat_arr[a]
, 'site_long' : site_long_arr[a]
, 'site_type' : site_type_arr[a]
, 'pos_first_rsrp_mean' : rsrp_mean_arr[a]
, 'pos_first_rsrp_count' : rsrp_count_arr[a]
, 'c_prbutil_mean' : prbutil_mean_arr[a]
, 'c_prbutil_count' : prbutil_count_arr[a]
, 'c_rssi_mean' : rssi_mean_arr[a]
, 'c_rssi_count' : rssi_count_arr[a]
, 'dl_tput_mean' : dltput_mean_arr[a]
, 'dl_tput_count' : dltput_count_arr[a]
, 'pos_last_rsrq_mean' : rsrq_mean_arr[a]
, 'pos_last_rsrq_count' : rsrq_count_arr[a]
, 'end_cqi_mean' : cqi_mean_arr[a]
, 'end_cqi_count' : cqi_count_arr[a]
} , ignore_index=True)
df_site_ori = df_site_ori.append({'itt_id' :itt_id[i]
, 'site1' : site1
, 'site2' : site2
, 'site3' : site3
, 'site1_dis' : site1_dis
, 'site2_dis' : site2_dis
, 'site3_dis' : site3_dis
} , ignore_index=True)
print(f)
print(df.shape[0])
# Check data completeness
x0 = df.shape[0]
x1 = df.c_prbutil.dropna().shape[0]
x2 = df.pos_first_rsrp.dropna().shape[0]
x3 = df.c_rssi.dropna().shape[0]
if x1 <= 20 and x2 <= 20 and x3 <= 20 :
continue
plt.close('all')
fig = plt.figure()
plt.clf()
fig, ax = plt.subplots(len(params), 1, sharex=True, figsize=(10, 13))
for t in range(len(params)):
print(t)
print(params[t])
condition = "`itt_id` == '" + itt_id[i] + "' and " + params[t] + "_color !='white'"
df = df_raw.query(condition, engine='python').reset_index()
# print(f)
# print(df.shape[0])
try :
if params[t] == 'dl_volume' or params[t] == 'dl_tput':
ax[t].bar(x=df['start_time'], height=df[params[t]].astype(int),
bottom=0,color=df[params[t] + '_color'], width =0.05, alpha=0.5)
#, edgecolor='grey'
plt.ylim(0, 20)
ax[t].set_ylabel(params[t].upper(), fontsize=14)
#matplotlib.pyplot.ylim(top=top_value)
else:
ax[t].scatter(x=df['start_time'],
y=df[params[t]],
s=df['duration'],
alpha=0.5,
c=df[params[t] + '_color'],
cmap='viridis', )
if params[t] == 'end_cqi' :
plt.ylim(0, 15)
# ax[t].set_ylabel(params[t].upper().split("_", 1)[1], fontsize=14)
ax[t].set_ylabel(params[t].upper(), fontsize=14)
fig.tight_layout()
# reasonFolder = ""
# reasonFolder = reason_map.get(itt_id[i], "")
# DataTypeFolder = "image_west"
# for testing data
DataTypeFolder = "D:\\Nicole\\Laravel\\www\\public\\cott_images"
# print(x0 , '--x0')
# print(x1 , '--x1')
# print(x2 , '--x2')
# print(x3 , '--x3')
# if reasonFolder == "" :
# reasonFolder = "CantBeMapped"
# X axis (time): limit and format the tick marks
# locator.MAXTICKS = 40000
# ax[t].xaxis.set_major_locator(locator)
plt.gcf().autofmt_xdate()
date_format = mpl_dates.DateFormatter('%m-%d %H:00')
hours = mpl_dates.HourLocator(interval = 6)
plt.gca().xaxis.set_major_locator(hours)
plt.gca().xaxis.set_major_formatter(date_format)
# plt.xlabel('Time')
plt.ylabel(params[t].upper())
plt.gca().set_xlim(pd.to_datetime(df['EVENT_START_DATE'][0], format = '%Y-%m-%d %H:%M'),
pd.to_datetime(df['EVENT_DATE'][0], format = '%Y-%m-%d %H:%M'))
# print('.\\'+DataTypeFolder+'\\' + itt_id[i] + '.png')
# fig.savefig('.\\'+DataTypeFolder+'\\' + itt_id[i] + '.png')
print(DataTypeFolder+'\\' + itt_id[i] + '.png')
# If data is insufficient, store it separately; do not add it to Today (training) or cott_images, just upload via sftp later
if x1 <= 10 or x2 <= 10 or x3 <= 10 :
DataTypeFolder = DataTypeFolder + "_datainsufficient"
else:
fig.savefig(DataTypeFolder+'_today\\' + itt_id[i] + '.png')#ไธๅณไฝฟ็จ
fig.savefig(DataTypeFolder+'\\' + itt_id[i] + '.png')
# for testing data
# print('./image_0705/' + itt_id[i] + '.png')
# fig.savefig('./image_0705/' + itt_id[i] + '.png')
# clear the image in memory and clear axes, and in order to reduce the memory occupation
# plt.clf()
# plt.close(fig)
# plt.close('all')
# del fig
# if params[t]=='cell_rsrp' :
# plt.gca().invert_yaxis()
# plt.rcParams['font.sans-serif'] = ['Microsoft JhengHei']
# plt.rcParams['axes.unicode_minus'] = False
# plt.title('Customer trajectory and network signal')
except Exception as e:
print('error')
print(params[t])
print(e)
# continue
# del df_raw0
del df_raw
del df
del fig
# print ("\ngarbage len", len(gc.garbage))
# print ("garbages:", gc.garbage)
gc.collect()
# keep record time
now = datetime.now()
txt = 'generateImg.py, update time, To: ' + str(now)
df = pd.DataFrame([txt], index=['UpdateTime'])
df.to_csv(code_folder+'logCottCNN.csv', mode='a',header=False)
df_site_ori.to_csv(code_folder+'sitelist.csv', mode='a',index=False)
df_site.to_csv(code_folder+'sitelist_new.csv', mode='a',index=False)
df_site = df_site[df_site['site'].notna()]
# Write into Oracle
for i, j in df_site.iterrows():
func.insert_orcl(j['ittid'], j['ittid_lat'], j['ittid_long'], j['site'], j['site_dis'], j['site_lat'], j['site_long'], j['site_type'], j['pos_first_rsrp_mean'], j['pos_first_rsrp_count'], j['c_prbutil_mean'], j['c_prbutil_count'], j['c_rssi_mean'], j['c_rssi_count'], j['dl_tput_mean'], j['dl_tput_count'], j['pos_last_rsrq_mean'], j['pos_last_rsrq_count'], j['end_cqi_mean'], j['end_cqi_count'])
| tonhsiao/cnn_cbam | CNN_CBAM_Daily/generateImg.py | generateImg.py | py | 18,040 | python | en | code | 0 | github-code | 6 | 40421641601 |
from browser import document
from browser.html import DIV, FIELDSET, LEGEND, TEXTAREA
def result():
result_fildset = FIELDSET(Class='result')
result_fildset <= LEGEND('Resultado')
result_fildset <= DIV(id='result')
document['grid'] <= result_fildset
def get_query_string(fields: list, where='result') -> dict:
fields = {field: document.query.getvalue(field) for field in fields}
if any(fields.values()):
textarea = TEXTAREA()
textarea.text = fields
if where == 'result':
result()
document[where] <= textarea
| dunossauro/curso-python-selenium-pages | scripts/query.py | query.py | py | 581 | python | en | code | 13 | github-code | 6 | 18230626408 |
# Tim Marder
# SoftDev1 pd06
# K#13 -- Echo Echo Echo
# 2018-09-28
from flask import Flask, render_template, request
app = Flask(__name__) #create instance of class Flask
@app.route("/") #assign fxn to route
def hello_world():
return render_template("home.html")
@app.route("/auth", methods = ["GET", "POST"])
def authenticate():
print(app)
print(request)
print(request.args)
print(request.headers)
return render_template("auth.html",
first = request.form['first'],
last = request.form['last'],
request = request.method)
if __name__ == "__main__":
app.debug = True
app.run()
| TimMarder/SoftDev-Office | 13_formation/app.py | app.py | py | 701 | python | en | code | 0 | github-code | 6 | 43356399246 |
#!/usr/bin/python3
import sys
def writeHeader(outputFile):
with open("headerTemplate.txt", 'r') as htFile:
text = htFile.read()
outputFile.write(text)
def writeFuncNames(outputFile, methods):
outputFile.write(" // node definition\n")
for method in methods:
outputFile.write(" " + method + " [shape = box];\n")
print("\n")
def writeDependencies(outputFile, dependencies):
outputFile.write(" // edge definition\n")
for dep in dependencies:
outputFile.write(" " + dep + ";\n")
def main():
if len(sys.argv) != 3:
print("usage: " + sys.argv[0] + " input output")
sys.exit()
inputFileName = sys.argv[1]
outputFileName = sys.argv[2]
with open(inputFileName, 'r') as inputFile:
lines = inputFile.read().splitlines()
methodMark = "method = "
depMark = "dep = "
with open(outputFileName, 'w') as outputFile:
writeHeader(outputFile)
methods = []
dependencies = []
for line in lines:
if line.startswith(methodMark):
methods.append(line.split(methodMark)[1])
elif line.startswith(depMark):
dependencies.append(line.split(depMark)[1])
writeFuncNames(outputFile, methods)
writeDependencies(outputFile, dependencies)
outputFile.write("}\n")
if __name__ == "__main__":
main()
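# Example input (illustrative only, names are hypothetical): the parser keys on lines that start
# with "method = " and "dep = "; everything else is ignored. Given
#   method = foo
#   method = bar
#   dep = foo -> bar
# the script emits two box-shaped nodes and one edge into the generated DOT file.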
| peng225/class_dep | misc/gen_graph.py | gen_graph.py | py | 1,400 | python | en | code | 0 | github-code | 6 | 35541984220 |
lst=[10,12,13,16,20,25]
searchF=13
def searchL(lst,frm,to,findN):
if to>=frm:
centerIndex=int((frm+to)/2)# int(len(lst)/2)
if findN==lst[centerIndex]:
return centerIndex
if findN<lst[centerIndex]:
return searchL(lst,frm,centerIndex-1,findN)
else:
return searchL(lst,centerIndex+1,to,findN)
else:
return -1
resp=searchL(lst,0,len(lst)-1,searchF)
print("Find =",resp)
| Riddhesh06/hacktoberfest2021 | binarySearch.py | binarySearch.py | py | 413 | python | en | code | 0 | github-code | 6 | 8185206077 |
import json
import yaml
import subprocess
def add_cluster_ips(cluster_name, save=True):
"""
Adds the IPs for the specified cluster.
Args:
cluster_name (str): The name of the cluster.
save (bool, optional): Whether to save the IPs to the file. Defaults to False.
Returns:
dict: A dictionary containing the IPs for the specified cluster.
"""
ips = {}
ips['control-plane'] = subprocess.check_output(rf"docker exec {cluster_name}-control-plane ip a | grep -A 2 'eth0@' | grep -oP 'inet \K[\d./]+'", shell=True, text=True).strip()  # raw string so \K and \d reach grep unchanged
ips['worker'] = subprocess.check_output(rf"docker exec {cluster_name}-worker ip a | grep -A 2 'eth0@' | grep -oP 'inet \K[\d./]+'", shell=True, text=True).strip()
# Extract cluster context
subprocess.run(f"docker exec {cluster_name}-control-plane cat /etc/kubernetes/admin.conf > ../../config/cls_contexts/{cluster_name}-control-plane.yaml", shell=True)
if save:
with open('../../config/clus_ips.json', 'r+') as file:
data = json.load(file)
data[cluster_name] = ips
file.seek(0)
json.dump(data, file, indent=4)
else:
return ips
def create_cluster_ips_file():
"""
Creates the cluster IPs file with IPs for all clusters.
"""
ips = {}
with open('../../config/clus_params.yaml', 'r') as file:
cluster_names = yaml.safe_load(file).keys()
for cluster_name in cluster_names:
ips[cluster_name] = add_cluster_ips(cluster_name, save=False)
with open('../../config/clus_ips.json', 'w') as file:
json.dump(ips, file, indent=4)
def del_cluster_ips(cluster_name):
"""
Deletes the IP information for the specified cluster.
Args:
- cluster_name (str): The name of the cluster to delete the IP information for.
"""
with open('../../config/clus_ips.json', 'r') as file:
ips_data = json.load(file)
ips_data.pop(cluster_name, None)
with open('../../config/clus_ips.json', 'w') as file:
json.dump(ips_data, file)
def install_submariner(broker_name: str, broker_config: str):
"""
Installs a submariner in the broker cluster name with the given configuration file.
Args:
- broker_name (str): The name of the broker to install.
- broker_config (str): The path to the broker configuration file.
"""
subprocess.run(['docker', 'cp', broker_config, f'{broker_name}-control-plane:/broker_config.sh'])
subprocess.run(['docker', 'exec', f'{broker_name}-control-plane', '/bin/bash', '/broker_config.sh'])
subprocess.run(["kubectl", "wait", "--for=condition=Ready", "--timeout=600s", "pod", "-A", "--all", "--context",f"kind-{broker_name}"], check=True)
def build_broker_context(broker_cluster: str):
"""
Builds the context file for the broker cluster.
Args:
- broker_cluster (str): The name of the broker cluster
"""
with open("../../config/clus_ips.json") as f:
clus_ips = json.load(f)
with open("../../config/clus_params.yaml") as f:
clus_param = yaml.safe_load(f)
path = f"../../config/cls_contexts/{broker_cluster}-control-plane.yaml"
with open(path) as f:
broker_config = yaml.safe_load(f)
for key in clus_param:
if key != broker_cluster:
path = f"../../config/cls_contexts/{key}-control-plane.yaml"
with open(path) as f:
ctx_key = yaml.safe_load(f)
new_cluster = {
"cluster": {
"certificate-authority-data": ctx_key["clusters"][0]["cluster"]["certificate-authority-data"],
"server": f"https://{clus_ips[key]['control-plane'].split('/')[0]}:6443"
},
"name": key
}
new_context = {
"context": {
'cluster': key,
'user': key
},
'name': key
}
new_user = {
'name': key,
'user': {
'client-certificate-data': ctx_key["users"][0]["user"]["client-certificate-data"],
'client-key-data': ctx_key["users"][0]["user"]["client-key-data"]
}
}
broker_config["clusters"].append(new_cluster)
broker_config["contexts"].append(new_context)
broker_config["users"].append(new_user)
with open(f'../../config/new_broker_config.yaml', 'w') as f:
yaml.safe_dump(broker_config, f)
def join_broker(broker_name: str, clusters=None, deploy=True):
"""
Generate and execute a bash script to join the specified broker to the specified clusters.
Args:
broker_name (str): Name of the broker to join.
clusters (Optional[List[str]]): List of cluster names to join. If None, all clusters except the broker's own will be joined.
deploy (bool): Whether to deploy the broker or only join the deployed one.
Returns:
None
"""
# Load cluster IPs from file
with open("../../config/clus_ips.json") as f:
clus_ips = json.load(f)
# Build bash script
commandes = [ '#!/bin/bash', "", "export PATH=$PATH:~/.local/bin"]
if deploy :
clusters = clus_ips.keys()
key = broker_name
commandes.append(f"kubectl config set-cluster {key} --server https://{clus_ips[key]['control-plane'].split('/')[0]}:6443")
commandes.append(f"subctl deploy-broker")
commandes.append(f"kubectl annotate node {key}-worker gateway.submariner.io/public-ip=ipv4:{clus_ips[key]['worker'].split('/')[0]}")
commandes.append(f"kubectl label node {key}-worker submariner.io/gateway=true")
commandes.append(f"subctl join broker-info.subm --natt=false --force-udp-encaps --clusterid kind-{key}")
for key in clusters:
# For each cluster to join, add kubectl and subctl commands to the bash script
if key != broker_name:
# Joining the broker's own cluster requires deploying the broker using subctl
commandes.append(f"kubectl annotate node {key}-worker gateway.submariner.io/public-ip=ipv4:{clus_ips[key]['worker'].split('/')[0]} --context {key}")
commandes.append(f"kubectl label node {key}-worker submariner.io/gateway=true --context {key}")
commandes.append(f"subctl join broker-info.subm --natt=false --force-udp-encaps --clusterid {key} --context {key}")
# Write bash script to file
commandes_str = '\n'.join(commandes)
with open("./broker_join.sh", "w+") as f:
f.write(commandes_str)
subprocess.run(f"docker cp ../../config/new_broker_config.yaml {broker_name}-control-plane:/etc/kubernetes/admin.conf", shell=True, check=True)
subprocess.run(f"docker cp ./broker_join.sh {broker_name}-control-plane:/broker_join.sh", shell=True, check=True)
subprocess.run(f"docker exec {broker_name}-control-plane chmod +x /broker_join.sh", shell=True, check=True)
subprocess.run(f"docker exec {broker_name}-control-plane /broker_join.sh", shell=True, check=True)
if __name__ == '__main__':
pass
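# Rough usage sketch (added for illustration; cluster names and the broker script path are
# hypothetical): the helpers above are intended to be called roughly in this order.
#   add_cluster_ips("cluster-a")                              # record node IPs, dump kubeconfig
#   add_cluster_ips("cluster-b")
#   install_submariner("cluster-a", "./broker_config.sh")     # install the broker on cluster-a
#   build_broker_context("cluster-a")                         # merge other clusters into its kubeconfig
#   join_broker("cluster-a")                                  # deploy broker and join every known cluster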
| chevalsumo/5G-Services-Placement-in-Dynamic-Multi-clusters | kind_automatisation/scripts/submariner_configuration/broker_context.py | broker_context.py | py | 7,163 | python | en | code | 0 | github-code | 6 | 12461550259 |
from preprocess import *
import os
import argparse
from csv import writer
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Process pcap file and integer data.")
parser.add_argument("-pcap", nargs="+", help="The pcap file. Multiple pcaps can be added when separated by a space.")
parser.add_argument("-protocol", help ="The application layer protocol (ex: HTTP)")
args = parser.parse_args()
columns=["src_ip", "dst_ip", "src_port", "dst_port", "t_proto", "dsfield", "ip_flags", "length", "d_proto", "payload"]
output_prefix = os.getcwd() + "/output"
if not os.path.exists(output_prefix):
os.makedirs(output_prefix)
filecount = 0
ext = str(filecount) + ".csv"
filename = (output_prefix + "/" + str(args.protocol))
with open(filename + ext, "w", newline='') as my_csv:
csv_writer = writer(my_csv)
csv_writer.writerow(columns)
total = 0
oldtotal = 0
for f in args.pcap:
total += parsePacket(filename + ext, f, str(args.protocol))
if (oldtotal + 100000 <= total):
filecount += 1
oldtotal = total
ext = str(filecount) + ".csv"
with open(filename + ext, "w", newline='') as my_csv:
csv_writer = writer(my_csv)
csv_writer.writerow(columns)
print("Number of packets processed: %d" % total)
| mayakapoor/palm | src/preprocessing/main.py | main.py | py | 1,391 | python | en | code | 0 | github-code | 6 | 9878964651 |
#!/usr/bin/python
# disk monitor
import logging as l
l.basicConfig(filename='disk_log.txt',filemode='a',level=l.DEBUG,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
datefmt='%c')
# modes
# r -> read -> you can only read the file.
# a -> append -> you can only append the contents to the file.
# w -> write -> you can write to the file.
# -> if you dont have a file a new file will be created.
# -> if you have a file with data,the file gets truncated to zero.
disk_size = int(raw_input("please enter your disk size:"))
if disk_size < 60:
l.info("Your disk looks healthy at {}.".format(disk_size))
elif disk_size < 80:
l.warning("Buddy!! your disk is getting fat - {}.".format(disk_size))
elif disk_size < 90:
l.error("Buddy!! you disk is feeling sick - {}.".format(disk_size))
elif disk_size < 99:
l.critical("Buddy!! you disk is dead - {}.".format(disk_size))
| tuxfux-hlp-notes/python-batches | batch-68/14-logging/third.py | third.py | py | 900 | python | en | code | 5 | github-code | 6 | 37122760097 |
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from fanza.items import ImageItem
from fanza.common import download_image
from fanza.image.image_helper import handle_image_item
from scrapy.exceptions import DropItem
from scrapy import Spider
from time import sleep
from socket import timeout
from urllib.request import ProxyHandler, build_opener
from urllib.error import URLError, HTTPError
from os.path import isdir, isfile
from os import makedirs
class AvbookImagePipeline:
def __init__(self) -> None:
self.opener = None
def open_spider(self, spider: Spider):
img_download_proxy = spider.settings['IMAGE_DOWNLOAD_PROXY']
self.opener = build_opener(ProxyHandler({'https': img_download_proxy, 'http': img_download_proxy}))
self.img_fail = spider.settings['IMAGE_FAIL_FILE']
self.failed = set()
async def process_item(self, item, spider: Spider):
if not isinstance(item, ImageItem):
return item
img_dir, img_des, prefix = handle_image_item(item, spider)
if not isdir(img_dir):
makedirs(img_dir)
if not item.isUpdate and isfile(img_des):
spider.logger.debug('already exist: %s %s', prefix, item.imageName)
return
retry = 0
delay = 1
retry_limit = spider.settings['RETRY_LIMIT']
while True:
try:
download_image(self.opener, item.url, img_des)
break
except (URLError, HTTPError, timeout):
if retry > retry_limit:
spider.logger.exception("download image error, url: %s", item.url)
if item.subDir not in self.failed:
self.failed.add(item.subDir)
with open(self.img_fail, 'w', encoding='utf-8') as f:
f.write(f'{item.subDir}\n')
raise DropItem(f'download error happend\titem: {item}')
sleep(delay)
retry += 1
delay *= 2
spider.logger.debug('retry download image: retry\t%s url\t%s', retry, item.url)
spider.logger.info('save img:\t%s %s', prefix, item.imageName)
class SuccessResponsePipeline:
def close_spider(self, spider: Spider):
if spider.name != 'movie_detail' and spider.name != 'movie_image':
return
spider.logger.info('------------------------------------save failed------------------------------------')
failed = spider.processed - spider.successed
with open('failed.txt', 'w', encoding='utf-8') as f:
for failed_id in failed:
f.write(failed_id + '\n')
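# Illustrative sketch (not part of the original source): these pipelines would typically be enabled
# in the project's settings.py, e.g.
#   ITEM_PIPELINES = {
#       "fanza.pipelines.AvbookImagePipeline": 300,
#       "fanza.pipelines.SuccessResponsePipeline": 400,
#   }
# The priorities are arbitrary examples; the settings read above (IMAGE_DOWNLOAD_PROXY,
# IMAGE_FAIL_FILE, RETRY_LIMIT) must also be defined in the same settings file.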
| takiya562/Adult_video_scrapy | fanza/pipelines.py | pipelines.py | py | 2,876 | python | en | code | 4 | github-code | 6 | 23184716017 |
#! /usr/bin/python
__author__ = "grasseau"
__date__ = "$Jul 12, 2020 9:56:07 AM$"
import sys, traceback
import struct
import numpy as np
import pickle
def readInt2(file, n):
if ( n == 0 ): return np.zeros( (0), dtype=np.int16 )
#
# Read Nbr of items (8 bytes)
raw = file.read(2 * 4)
nData = struct.unpack('q', raw)[0]
# print str(raw[0]).encode("hex") ,'-', str(raw[1]).encode("hex")
if nData != n:
print ("Expected/Read number of values are different ", n, "/", nData);
# traceback.print_stack(); exit()
getEOF(file)
# return
# Read N Int16
raw = file.read(nData * 2)
array = struct.unpack(str(nData) + 'h', raw)
if nData != n:
print("Expected/Read number of values are different ", n, "/", nData);
print( "raw", raw )
print( "array", array )
traceback.print_stack();
exit()
#
return np.array(array, dtype=np.int16)
def readUInt4(file, n):
if ( n == 0 ): return np.zeros( (0), dtype=np.uint32 )
#
# Read Nbr of items (8 bytes)
raw = file.read(2 * 4)
nData = struct.unpack('q', raw)[0]
# print str(raw[0]).encode("hex") ,'-', str(raw[1]).encode("hex")
if nData != n:
print ("Expected/Read number of values are different ", n, "/", nData);
# traceback.print_stack(); exit()
getEOF(file)
# return
# Read N UInt32
raw = file.read(nData * 4)
array = struct.unpack(str(nData) + 'I', raw)
if nData != n:
print("Expected/Read number of values are different ", n, "/", nData);
print( "raw", raw )
print( "array", array )
traceback.print_stack();
exit()
#
return np.array(array, dtype=np.uint32)
def readInt4(file, n):
if ( n == 0 ): return np.zeros( (0), dtype=np.int32 )
#
# Read Nbr of items (8 bytes)
raw = file.read(2 * 4)
nData = struct.unpack('q', raw)[0]
# print str(raw[0]).encode("hex") ,'-', str(raw[1]).encode("hex")
if nData != n:
print ("Expected/Read number of values are different ", n, "/", nData);
# traceback.print_stack(); exit()
getEOF(file)
# return
# Read N Int32
raw = file.read(nData * 4)
array = struct.unpack(str(nData) + 'i', raw)
if nData != n:
print("Expected/Read number of values are different ", n, "/", nData);
print( "raw", raw )
print( "array", array )
traceback.print_stack();
exit()
#
return np.array(array, dtype=np.int32)
def readDouble(file, n):
if ( n == 0 ): return np.zeros( (0), dtype=np.float64 )
#
# Read Nbr of items (8 bytes)
raw = file.read(2 * 4)
nData = struct.unpack('q', raw)[0]
# print str(raw[0]).encode("hex") ,'-', str(raw[1]).encode("hex")
# if nData != n: print("Expected/Read number of values are different ", n, "/", nData); traceback.print_stack(); exit()
# Read N Double
raw = file.read(nData * 8)
# print("len(raw) ", len(raw) )
# print("fmt unpack ", str(nData) + 'd')
array = struct.unpack(str(nData) + 'd', raw)
if nData != n:
print("Expected/Read number of values are different ", n, "/", nData);
print( "raw", raw )
print( "array", array )
traceback.print_stack();
exit()
#
return np.array(array, dtype=np.float64)
def getEOF(file, errorStr="", verbose=False):
k = len(file.read(8))
EOF = (k != 8)
if ( verbose and EOF):
print( "Warning: EOF reached ", errorStr, k, "bytes read", )
file.seek(-k, 1)
return EOF
class Tracks:
"""
Int_t trackListHeader[] = { -1, iEvent, -1, -1, 0, nTracks };
for (auto& track : tracks) {
Int_t trackInfo[] = { trackIdx, chi2x100, -1, -1, -1, nClusters};
//
Int_t DeIds[nClusters];
Int_t UIDs[nClusters];
Double_t X[nClusters];
Double_t Y[nClusters];
Double_t Z[nClusters];
Double_t errX[nClusters];
Double_t errY[nClusters];
"""
# ???
def __init__(self, fileName="TracksReco.dat"):
self.fileName = fileName
self.file = 0
self.file = open(fileName, 'rb')
#
# Data members
# tracks[ev][iTrack].nparray[nHits]
self.tracks = []
return
def readATrack(self, verbose=False):
headerSize = 6
header = readInt4(self.file, headerSize)
if (header[4] != -1 ):
print("readATrack: bad preClusterListHeader", header[4])
exit()
# { trackIdx, chi2x100, -1, -1, -1, nClusters};
trackIdx = header[0]
chi2 = header[1] / 100.0
nHits = header[5]
if (verbose):
print("readAtrack trackIdx=", trackIdx, "chi2=", chi2, ", nHits=", nHits )
# Hits
aTrack = ()
if nHits != 0:
DEIds = readInt4(self.file, nHits)
UIDs = readInt4(self.file, nHits)
#
x = readDouble(self.file, nHits)
y = readDouble(self.file, nHits)
z = readDouble(self.file, nHits)
errX = readDouble(self.file, nHits)
errY = readDouble(self.file, nHits)
aTrack = ( trackIdx, chi2, nHits, DEIds, UIDs, x, y, z, errX, errY)
else:
empty = np.empty(0)
aTrack = ( trackIdx, chi2, nHits, empty, empty, empty, empty, empty, empty, empty)
#
return aTrack
def __iter__(self):
self.file.close()
self.file = open( self.fileName, 'rb')
self.nbrOfReadTracks = 0
return self
def __next__(self):
EOF=getEOF(self.file, "reading preCluster", verbose=True)
if not EOF:
data = self.readATrack()
self.nbrOfReadTracks += 1
else:
data = ()
raise StopIteration
return ( data )
def read(self, verbose=False):
#
EOF=False
self.readBytes=0
# Read header
# ??? self.tracks = [None] * nEvents
while not EOF:
# Read the tracks
headerSize = 6
header = readInt4(self.file, headerSize)
if (header[3] != -1 ):
print("readATrack: bad preClusterListHeader", header[3])
print(header)
exit()
# Int_t trackListHeader[] = { -1, iEvent, -1, -1, 0, nTracks };
iEvent = header[1]
nTracks = header[5]
nEvents = len(self.tracks)
if iEvent != ( nEvents - 1 ):
for i in range(nEvents, iEvent+1):
self.tracks.append([])
for iTrack in range(nTracks):
self.tracks[iEvent].append( self.readATrack(verbose=verbose))
EOF=getEOF(self.file, "reading a new Track", verbose=verbose)
return
def writePickle( fileName, obj ):
file = open( fileName, "wb" )
pickle.dump( obj, file )
file.close()
def readPickle( fileName ):
file = open( fileName, "rb" )
obj = pickle.load( file )
file.close()
return obj
if __name__ == "__main__":
print("Hello")
| grasseau/MCHClustering | src/Util/IOTracks.py | IOTracks.py | py | 6,767 | python | en | code | 0 | github-code | 6 | 73815019386 |
import logging
from os import environ
from unittest.mock import patch
import pytest
from bonobo import settings
from bonobo.errors import ValidationError
TEST_SETTING = "TEST_SETTING"
def test_to_bool():
assert not settings.to_bool("")
assert not settings.to_bool("FALSE")
assert not settings.to_bool("NO")
assert not settings.to_bool("0")
assert settings.to_bool("yup")
assert settings.to_bool("True")
assert settings.to_bool("yes")
assert settings.to_bool("1")
def test_setting():
s = settings.Setting(TEST_SETTING)
assert s.get() is None
with patch.dict(environ, {TEST_SETTING: "hello"}):
assert s.get() is None
s.clear()
assert s.get() == "hello"
s = settings.Setting(TEST_SETTING, default="nope")
assert s.get() == "nope"
with patch.dict(environ, {TEST_SETTING: "hello"}):
assert s.get() == "nope"
s.clear()
assert s.get() == "hello"
s = settings.Setting(TEST_SETTING, default=0, validator=lambda x: x == 42)
with pytest.raises(ValidationError):
assert s.get() == 0
s.set(42)
with pytest.raises(ValidationError):
s.set(21)
def test_default_settings():
settings.clear_all()
assert settings.DEBUG.get() is False
assert settings.PROFILE.get() is False
assert settings.QUIET.get() is False
assert settings.LOGGING_LEVEL.get() == logging._checkLevel("INFO")
with patch.dict(environ, {"DEBUG": "t"}):
settings.clear_all()
assert settings.LOGGING_LEVEL.get() == logging._checkLevel("DEBUG")
settings.clear_all()
def test_check():
settings.check()
with patch.dict(environ, {"DEBUG": "t", "PROFILE": "t", "QUIET": "t"}):
settings.clear_all()
with pytest.raises(RuntimeError):
settings.check()
settings.clear_all()
| python-bonobo/bonobo | tests/test_settings.py | test_settings.py | py | 1,851 | python | en | code | 1,564 | github-code | 6 | 5026791116 |
import zizouqi_tools
import random
# print(computer)
player = zizouqi_tools.Game()
num = 0
"""
while num < 3:
player.chouka()
num += 1
player.chuzhan()
"""
num2 = 0
# while num2 < 1:
# hero_1 = int(input("่ฏทไฝ ่พๅ
ฅๆ่ฝใ1-3ใ๏ผ"))
# player.pk(computer,hero_1)
while num2 < 1:
computer = random.randint(1, 3)
hero_1 = int(input("่ฏทไฝ ่พๅ
ฅๆ่ฝ(1)็ณๅคด/(2)ๅชๅ/(3)ๅธใ1-3ใ:"))
print(computer)
player.solo(computer, hero_1)
| xinlongOB/python_docment | 自走棋/main.py | main.py | py | 473 | python | en | code | 0 | github-code | 6 | 70514119229 |
from collections import defaultdict
from github import Github
def get_git_skills(username):
g = Github()
user = g.get_user(username)
tags = defaultdict()
languages = defaultdict(int)
for repo in user.get_repos():
# new_repo_languages = repo.get_languages()
# for lang in new_repo_languages:
# languages[lang] += new_repo_languages[lang]
new_repo_topics = repo.get_topics()
for topic in new_repo_topics:
print (topic)
print(languages)
return sorted(languages.items(), key=lambda x: x[1], reverse=True)
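# Added usage sketch (not part of the original module): calls the helper above with PyGithub's
# unauthenticated client, so it is subject to strict rate limits. The username "octocat" is only
# an example; note that in its current form the function mostly prints repository topics and
# returns the (possibly empty) language ranking. Call manually.
def demo_get_git_skills(username="octocat"):
    for language, weight in get_git_skills(username):
        print(language, weight)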
| HackRU/teamRU | src/matching/git_skill_finder.py | git_skill_finder.py | py | 593 | python | en | code | 5 | github-code | 6 | 25549629589 |
# coding: utf-8
__author__ = "Ciprian-Octavian Truicฤ"
__copyright__ = "Copyright 2020, University Politehnica of Bucharest"
__license__ = "GNU GPL"
__version__ = "0.1"
__email__ = "[email protected]"
__status__ = "Production"
from tokenization import Tokenization
from vectorization import Vectorization
from topicmodeling import TopicModeling
import sys
import pandas as pd
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
from multiprocessing import cpu_count
import time
def tkns(text):
title = tkn.createCorpus(text['title'], apply_FE=False)
content = tkn.createCorpus(text['content'], apply_FE=False)
clean_text = title + content
clean_text = ' '.join([' '.join(elem) for elem in clean_text])
return clean_text
def processElement(row):
title = tkn.createCorpus(row[0], apply_FE=False)
content = tkn.createCorpus(row[1], apply_FE=False)
clean_text = title + content
clean_text = ' '.join([' '.join(elem) for elem in clean_text])
return clean_text
if __name__ == '__main__':
fin = sys.argv[1]
num_topics = int(sys.argv[2])
num_words = int(sys.argv[3])
num_iterations = int(sys.argv[4])
no_threads = cpu_count() - 2
print("Start Read File!")
df = pd.read_csv(fin)
print("End Read File!")
print("Start Tokenization!")
start = time.time() * 1000
tkn = Tokenization()
# with UDF
# df = df.apply(tkns, axis=1)
# clean_texts = df.to_list()
clean_texts = []
with ProcessPoolExecutor(max_workers=no_threads) as worker:
for result in worker.map(processElement, df.to_numpy()):
if result:
clean_texts.append(result)
end = time.time() * 1000
print("Execution time (ms)", end - start)
print("End Tokenization!")
print("Start Vectorization!")
vec = Vectorization(clean_texts)
vec.vectorize()
id2word = vec.getID2Word()
corpus = vec.getTFIDFNorm()
print("End Vectorization!")
tm = TopicModeling(id2word=id2word, corpus=corpus)
print("Start Topic Modeling NNF!")
start = time.time()
topicsNMF = tm.topicsNMF(num_topics=num_topics, num_words=num_words, num_iterations=num_iterations)
print("=============NMF=============")
for topic in topicsNMF:
print("TopicID", topic[0], topic[1])
print("=============================")
end = time.time()
print("Execution time (ms)", end - start)
print("End Topic Modeling NNF!")
# print("Start Topic Modeling LDA!")
# print("=============LDA=============")
# topicsLDA = tm.topicsLDA(num_topics=num_topics, num_words=num_words, num_iterations=num_iterations)
# for topic in topicsLDA:
# print("TopicID", topic[0], topic[1])
# print("=============================")
# print("End Topic Modeling LDA!")
| cipriantruica/news_diffusion | news-topic-modeling/main.py | main.py | py | 2,856 | python | en | code | 0 | github-code | 6 | 9591325686 |
from rest_framework.authentication import TokenAuthentication
from rest_framework.exceptions import AuthenticationFailed
from .models import AuthToken
from utils.exceptions import *
def expire_token(user):
try:
for auth_token in user.auth_tokens.all():
auth_token.delete()
except AuthToken.DoesNotExist:
pass
def get_auth_token_by(raise_exception=True, only_deleted=False, **kwargs):
key = kwargs.get('key')
if only_deleted:
auth_token = AuthToken.objects.deleted_only().filter(**kwargs).first()
else:
auth_token = AuthToken.objects.filter(key=key).first()
if not auth_token and raise_exception:
raise ObjectNotFound
return auth_token
def create_token(user):
auth_token = AuthToken.objects.create(user=user)
return auth_token.key
def token_expire_handler(auth_token):
    is_expired = auth_token.is_expired
    if is_expired:
        # create_token() returns only the key string, so build the replacement token object directly
        auth_token = AuthToken.objects.create(user=auth_token.user)
    return is_expired, auth_token
class ExpiringTokenAuthentication(TokenAuthentication):
def authenticate_credentials(self, key):
try:
auth_token = AuthToken.objects.get(key=key)
except AuthToken.DoesNotExist:
raise AuthenticationFailed
is_expired, auth_token = token_expire_handler(auth_token)
if is_expired:
raise AuthenticationFailed
return auth_token.user, auth_token
| danghh-1998/django_rest_boilerplate | auth_tokens/services.py | services.py | py | 1,418 | python | en | code | 1 | github-code | 6 | 34993256569 |
import requests
from bs4 import BeautifulSoup
def extract_teok_jobs(keyword):
results = []
url = f"https://remoteok.com/remote-{keyword}-jobs"
request = requests.get(url, headers={"User-Agent": "Kimchi"})
if request.status_code == 200:
soup = BeautifulSoup(request.text, "html.parser")
jobs = soup.find_all('tr', class_="job")
for job_section in jobs:
job_posts = job_section.find_all('td', class_="company")
for post in job_posts:
anchors = post.find_all('a')
anchor = anchors[0]
link = anchor['href']
title = anchor.find("h2")
organization = post.find_all('span', class_="companyLink")
orga = organization[0]
company = orga.find('h3')
location = post.find_all('div', class_="location")[0]
if company:
company = company.string.strip()
if title:
title = title.string.strip()
if location:
location = location.string
job_data = {
'link': f"https://remoteok.com{link}",
'company': company.replace(",", " "),
'location': location.replace(",", " "),
'position': title,
}
results.append(job_data)
return results
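# Added usage sketch (not from the original file): runs the scraper above for one keyword and
# prints a few results. The keyword "python" is arbitrary; the keys mirror the dicts built in
# extract_teok_jobs(). Call manually.
def demo_extract(keyword="python", limit=5):
    for job in extract_teok_jobs(keyword)[:limit]:
        print(job["position"], "-", job["company"], "-", job["link"])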
| hoseel/job-scrapper | extractors/teok.py | teok.py | py | 1,444 | python | en | code | 0 | github-code | 6 | 2725837698 |
while True:
n = int(input())
if n == 0:
break
li = {key: True for key in range(1, n+1)}
for i in range(2, n+1):
for j in range(i, n+1, i):
li[j] = not li[j]
liPri = []
for key, value in li.items():
if value is True:
liPri.append(key)
print(*liPri)
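# Added note: position j is toggled once per divisor of j in [2, n], so it stays True exactly
# when j has an odd number of divisors, i.e. when j is a perfect square. An equivalent sketch
# of the same answer (math.isqrt is the only assumption beyond the original code):
def perfect_squares_up_to(n):
    import math
    return [k * k for k in range(1, math.isqrt(n) + 1)]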
| wolney-fo/beecrowd | 1-INICIANTE/python/beecrowd_1371.py | beecrowd_1371.py | py | 335 | python | en | code | 1 | github-code | 6 | 74553223546 |
import time
from datetime import datetime, timedelta
from num2words import num2words
# Todo: returns an timedelta:
def calculate_time(sleep_time: float) -> timedelta:
"""Function to calculate time to perform it's action,
which is takes a .
Args:
sleep_time (float) : Time that the function will take to be executed.
Returns:
timedelta: The elapsed time between the start and the end of the sleep.
"""
start_time = datetime.now()
time.sleep(sleep_time)
end_time = datetime.now()
difference_time_function = end_time - start_time
return difference_time_function
def split_time(time: timedelta) -> dict:
"""This function takes the time and create a dictionary from it with the splitted values
Args:
time (timedelta): The time that the function took to be performed.
Returns:
splitted_time(dict): A dictionary containing how many hours, minutes, seconds and milliseconds are inside the time argument.
"""
total_seconds = time.seconds
hours = total_seconds // 3600
minutes = (total_seconds // 60) % 60
seconds = total_seconds % 60  # remainder only, so hours and minutes are not double counted
microseconds = time.microseconds
splitted_time = {
"hours": hours,
"minutes": minutes,
"seconds": seconds,
"milliseconds": microseconds,
}
return splitted_time
def readable_time(splitted_time: dict) -> str:
"""This function gets a dictionary containing hours, minutes, seconds and milliseconds and
translate these numbers to a human comprehension
Args:
splitted_time(dict): Dictionary containing hours, minutes, seconds and milliseconds.
Returns:
str: How long the operation took to be performed in a human perspective.
"""
hours = splitted_time["hours"]
minutes = splitted_time["minutes"]
seconds = splitted_time["seconds"]
milliseconds = splitted_time["milliseconds"]
readable_time = ""
if hours > 0:
descriptive_hours = num2words(hours)
if hours == 1:
support = "hour"
else:
support = "hours"
readable_time += f"{descriptive_hours} {support}, "
if minutes > 0:
if minutes == 1:
support = "minute"
else:
support = "minutes"
descriptive_minutes = num2words(minutes)
readable_time += f"{descriptive_minutes} {support} and "
if seconds > 0:
descriptive_seconds = num2words(seconds)
if seconds == 1:
support = "second"
else:
support = "seconds"
readable_time += f"{descriptive_seconds} {support}"
if milliseconds > 0 and minutes < 1:
milli = str(milliseconds)
rounded_milliseconds = milli[0:2]
if int(rounded_milliseconds) == 1:
support = "millisecond"
else:
support = "milliseconds"
descriptive_milliseconds = num2words(rounded_milliseconds)
readable_time += f" and {descriptive_milliseconds} {support}"
return (
f"Your function took {readable_time} to run ({time_to_run_function})"
)
if __name__ == "__main__":
sleep_time = 1.5
time_to_run_function = calculate_time(sleep_time)
splitted_time = split_time(time_to_run_function)
human_time = readable_time(splitted_time)
print(human_time)
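# Added sketch: split_time() on a fixed timedelta, so the result is deterministic and needs no
# sleeping. The 1 h 2 min 5 s value is arbitrary; call manually.
def demo_split_time():
    return split_time(timedelta(hours=1, minutes=2, seconds=5))  # {'hours': 1, 'minutes': 2, 'seconds': 5, 'milliseconds': 0}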
| bvmcardoso/pwn | challenge.py | challenge.py | py | 3,411 | python | en | code | 0 | github-code | 6 | 75018787708 |
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 7 22:05:01 2022
@author: Marcin
"""
import numpy as np
import matplotlib.pyplot as plt
# Sigmoid activation function
def sigmoid(X):
out = 1.0 / (1.0 + np.exp(-X))
return out
# Dervative of sigmoid funcition
def sigmoid_derivative(X):
return sigmoid(X) * (1 - sigmoid(X))
# Forward propagation
def forward_propagation(X, w1, w2, predict=False):
# A - before applying the activation function (pre-activation)
# Z - after applying the activation function (post-activation)
# Calculate multiplication of input X and first layer weights
A1 = np.dot(X, w1)
# Apply sigmoid
Z1 = sigmoid(A1)
# Add bias and do the same as above
bias = np.ones(Z1.shape[0]).reshape(-1, 1)
Z1 = np.concatenate((bias, Z1), axis = 1)
A2 = np.dot(Z1, w2)
Z2 = sigmoid(A2)
# If precition - just return network prediction (Z2)
if predict:
return Z2
# If not - return all matrices before and after sigmoid
else:
return A1, Z1, A2, Z2
# Backpropagation
def backpropagation(A1, X, Z1, Z2, Y):
# Calculate difference between output and desired output
out_diff = Z2 - Y
# Propagete inside of network (from back to front)
outDiff = np.dot(Z1.T, out_diff)
# Calculate dot product of out_diff and weights w2 multiplied by sigmoid derivative of A1
inside_diff = (out_diff.dot(w2[1:, :].T)) * sigmoid_derivative(A1)
# Dot product of X and inside_diff
insideDiff = np.dot(X.T, inside_diff)
return out_diff, insideDiff, outDiff
# Initialize weights
def initialize(input_size, output_size, hidden_units_w1, hidden_w2):
# Random weights
w1 = np.random.randn(input_size, hidden_units_w1)
w2 = np.random.randn(hidden_w2, output_size)
return w1, w2
# Define input data with bias and output values
X = np.array([[1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1]])
Y = np.array([0, 1, 1, 0]).reshape(-1,1)
# Number of neurons in layers
input_size = X.shape[1]
hidden_units_w1 = 5
hidden_w2 = hidden_units_w1 + 1
output_size = 1
# Initialize random weights
w1, w2 = initialize(input_size, output_size, hidden_units_w1, hidden_w2)
# Define learning rate
learning_rate = 0.08
# Lists for costs (errors)
costs = []
# Desired number of epochs
epochs = 10000
# Y data shape - to weight modification
m = Y.shape[0]
# Training process
for i in range(1, epochs+1):
# Put out data into forword propagation
A1, Z1, A2, Z2 = forward_propagation(X, w1, w2)
# Backpropagation
out_diff, insideDiff, outDiff = backpropagation(A1, X, Z1, Z2, Y)
# Modify weights
w1 = w1 - learning_rate * (1/m) * insideDiff
w2 = w2 - learning_rate * (1/m) * outDiff
# Costs (differences between output and desired output) - mean
c = np.mean(np.abs(out_diff))
costs.append(c)
if i%100 == 0:
print('Iteration: %f, cost: %f' % (i, c))
print('Completed.')
# Predict:
pred = forward_propagation(X, w1, w2, True)
print('Pred. percentage:')
print(pred)
pred_rounded = np.round(pred)
print('Predictions:')
print(pred_rounded)
# Plot error curve
plt.plot(costs)
plt.xlabel('Iterations')
plt.ylabel('Error')
plt.title('Training error curve')
| MarcinJ7/kNN-implementation | NN.py | NN.py | py | 3,321 | python | en | code | 0 | github-code | 6 | 33875332851 |
import torch
from care_nl_ica.independence.hsic import HSIC
class IndependenceChecker(object):
"""
Class for encapsulating independence test-related methods
"""
def __init__(self, hparams) -> None:
super().__init__()
self.hparams = hparams
self.test = HSIC(hparams.num_permutations)
print("Using Bonferroni = 4")
def check_bivariate_dependence(self, x1, x2):
decisions = []
var_map = [1, 1, 2, 2]
with torch.no_grad():
decisions.append(
self.test.run_test(x1[:, 0], x2[:, 1], bonferroni=4).item()
)
decisions.append(
self.test.run_test(x1[:, 0], x2[:, 0], bonferroni=4).item()
)
decisions.append(
self.test.run_test(x1[:, 1], x2[:, 0], bonferroni=4).item()
)
decisions.append(
self.test.run_test(x1[:, 1], x2[:, 1], bonferroni=4).item()
)
return decisions, var_map
def check_multivariate_dependence(
self, x1: torch.Tensor, x2: torch.Tensor
) -> torch.Tensor:
"""
Carries out HSIC for the multivariate case, all pairs are tested
:param x1: tensor of the first batch of variables in the shape of (num_elem, num_dim)
:param x2: tensor of the second batch of variables in the shape of (num_elem, num_dim)
:return: the adjacency matrix
"""
num_dim = x1.shape[-1]
max_edge_num = num_dim**2
adjacency_matrix = torch.zeros(num_dim, num_dim).bool()
print(max_edge_num)
with torch.no_grad():
for i in range(num_dim):
for j in range(num_dim):
adjacency_matrix[i, j] = self.test.run_test(
x1[:, i], x2[:, j], bonferroni=4 # max_edge_num
).item()
return adjacency_matrix
| rpatrik96/nl-causal-representations | care_nl_ica/independence/indep_check.py | indep_check.py | py | 1,920 | python | en | code | 12 | github-code | 6 | 15821968201 |
#!/usr/bin/env python
import rospy
from std_msgs.msg import Bool
import psutil
import argparse
def monitorAvailableMemory(memory_upperlimit_percent):
"""
This function is used to monitor the memory utilization and return True
if it exceeds a preset value, False otherwise.
Arguments:
memory_upperlimit_percent: The upperlimit of the memory utilization (float)
"""
# Utilized memory
utilized_memory = psutil.virtual_memory().percent
if utilized_memory > memory_upperlimit_percent:
return True
return False
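# Added sketch (not part of the original node): the same check without any ROS dependencies,
# handy for trying monitorAvailableMemory() interactively. The 90.0 threshold mirrors the
# script's default; call manually.
def demo_memory_check(threshold=90.0):
    used = psutil.virtual_memory().percent
    print("memory used: %.1f%% (limit %.1f%%) -> exceeded: %s"
          % (used, threshold, monitorAvailableMemory(threshold)))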
def ram_usage_watcher(mem_upper_limit):
pub = rospy.Publisher(
'data_capture/is_memory_usage_exceeded', Bool, queue_size=1)
rospy.init_node('ram_usage_watcher', anonymous=True)
rate = rospy.Rate(0.2) # Once every 5 seconds = 1/5 = 0.2 hz
while not rospy.is_shutdown():
# Check on free memory if exceeds 90% utilization
mem_usage = monitorAvailableMemory(
memory_upperlimit_percent=mem_upper_limit)
pub.publish(mem_usage)
rate.sleep()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--mem-upper-limit', type=float, help='Memory utilization upper limit in percent',
default=90.0)
args, _ = parser.parse_known_args()
try:
ram_usage_watcher(args.mem_upper_limit)
except rospy.ROSInterruptException:
pass
| robotpt/ros-data-capture | src/tools/mem_use_watcher/scripts/watcher.py | watcher.py | py | 1,420 | python | en | code | 0 | github-code | 6 | 7884552964 |
#Pull middle two (for even) or middle three (for odd) characters of user input
print("Ready to see the middle characters of your input?")
answer = None
while answer not in ("yes", "no"):
answer = input("Enter yes or no: ")
if answer.lower().strip() == "yes":
midinput = input("Enter an input:")
def middle_char(txt):
return txt[(len(txt)-2)//2:(len(txt)+3)//2]
print("Result:" + middle_char(midinput))
print("Have a great day!")
quit()
elif answer.lower().strip() == "no":
print("Maybe next time. Have a great day!")
quit()
else:
print("Please enter yes or no.")
| tracygorski/helloworld | middle.py | middle.py | py | 612 | python | en | code | 0 | github-code | 6 | 42072187981 |
from tkinter import *
import backend #backend script to read dictionary from
bookf = Tk() #create window
bookf.wm_title("BOOK-STORE")
def get_selected_row(event):
global selected_tuple
if not list1.curselection():
return
index = list1.curselection()[0]
selected_tuple = list1.get(index)
Ent1.delete(0, END)
Ent1.insert(END, selected_tuple[1])
Ent2.delete(0, END)
Ent2.insert(END, selected_tuple[3])
Ent3.delete(0, END)
Ent3.insert(END, selected_tuple[2])
Ent4.delete(0, END)
Ent4.insert(END, selected_tuple[4])
return (selected_tuple)
def view_command():
list1.delete(0, END)
for row in backend.view():
list1.insert(END, row)
def search_command():
list1.delete(0, END)
for row in backend.search(title_text.get(),author_text.get(), year_text.get(),isbn_text.get()):
list1.insert(END, row)
def Add_command():
list1.delete(0, END)
backend.insert(title_text.get(),author_text.get(), year_text.get(),isbn_text.get())
list1.insert(END, (title_text.get(), author_text.get(), year_text.get(), isbn_text.get()))
def delete_command():
backend.delete(selected_tuple[0])
view_command()
def Update_command():
backend.update(selected_tuple[0],title_text.get(),author_text.get(), year_text.get(),isbn_text.get())
view_command()
def Clear_command():
Ent1.delete(0, END)
Ent2.delete(0, END)
Ent3.delete(0, END)
Ent4.delete(0, END)
#Text labels
Label1 = Label(bookf, text = "Title")
Label1.grid(row = 0, column = 0)
Label2 = Label(bookf, text = "Year")
Label2.grid(row = 1, column = 0)
Label3 = Label(bookf, text = "Author")
Label3.grid(row = 0, column = 2)
Label4 = Label(bookf, text = "ISBN")
Label4.grid(row = 1, column = 2)
#buttons
but1 = Button(bookf, text = "View All", width = 20, command = view_command)
but1.grid(row = 2, column = 3,)
but2 = Button(bookf, text = "Search Entry", width = 20, command = search_command)
but2.grid(row = 3, column = 3)
but3 = Button(bookf, text = "Add Entry", width = 20, command = Add_command)
but3.grid(row = 4, column = 3)
but4 = Button(bookf, text = "Update Selected", width = 20 ,command = Update_command)
but4.grid(row = 5, column = 3)
but5 = Button(bookf, text = "Delete Selected", width = 20, command = delete_command)
but5.grid(row = 6, column = 3)
but6 = Button(bookf, text = "Close", width = 20 ,command=bookf.destroy)
but6.grid(row = 8, column = 3)
but6 = Button(bookf, text = "Clear_textbox", width = 20 , command=Clear_command )
but6.grid(row = 7, column = 3)
#Listbox AND scrollbar
list1 = Listbox(bookf, height = 9, width = 45)
list1.grid(row = 2, column = 0, rowspan = 6, columnspan = 2)
sb1 = Scrollbar(bookf)
sb1.grid(row = 2, column = 2, rowspan = 6)
list1.configure(yscrollcommand = sb1.set)
sb1.configure(command = list1.yview)
list1.bind('<<ListboxSelect>>', get_selected_row)
#EntryWindows
title_text = StringVar()
Ent1 = Entry(bookf,textvariable = title_text)
Ent1.grid(row = 0, column = 1)
year_text = StringVar()
Ent2 = Entry(bookf, textvariable = year_text)
Ent2.grid(row = 1, column = 1)
author_text = StringVar()
Ent3 = Entry(bookf, textvariable = author_text)
Ent3.grid(row = 0, column = 3)
isbn_text = StringVar()
Ent4 = Entry(bookf,textvariable = isbn_text)
Ent4.grid(row = 1, column = 3)
bookf.mainloop()
| shivangijain827/python-projects | Book - Store/frontend.py | frontend.py | py | 3,606 | python | en | code | 0 | github-code | 6 | 18843150286 |
import pytest
from unittest import mock
from types import SimpleNamespace
from clean.exceptions import FilterDoesNotExist
from clean.request.inout.ports import Response, Request
from clean.request.inout.filter import Page, Sort
from clean.use_case.common import SaveUseCase, RetrieveUseCase, UpdateUseCase, DeleteUseCase, ListUseCase
from clean.use_case.case import BaseUseCase
from clean.repository.abs import BaseRepository, BaseListRepository
class FakeSave(SaveUseCase):
def create_entity(self, req):
return SimpleNamespace(**dict(age=req.age, name=req.name))
def test_base_raises_required_custom_process():
class Foo(BaseUseCase):
pass
def test_base_process_request():
request = mock.Mock(spec=Request)
request.age = 20
request.name = 'crl'
class Baz(BaseUseCase):
def custom_process(self, req) -> Response:
return Response(context=SimpleNamespace(**dict(age=req.age, name=req.name)))
res = Baz().custom_process(req=request)
assert bool(res) is True
assert res.result.name == 'crl'
assert res.result.age == 20
def test_save_create_entity_raises():
repo = mock.Mock(spec=BaseRepository)
save_case = SaveUseCase(repo=repo)
req = SimpleNamespace(**dict(name='crl', age=20))
with pytest.raises(NotImplementedError):
save_case.create_entity(req=req)
def test_save():
repo = mock.Mock(spec=BaseRepository)
save_case = FakeSave(repo=repo)
req = SimpleNamespace(**dict(name='crl', age=20))
res = save_case.create_entity(req=req)
assert res.name == 'crl'
assert res.age == 20
def test_save_repo_calls():
repo = mock.Mock(spec=BaseRepository)
req = SimpleNamespace(**dict(name='crl', age=20))
save_case = FakeSave(repo=repo)
save_case.process_request(req=req)
assert repo.save.call_count == 1
def test_retrieve_repo_calls():
repo = mock.Mock(spec=BaseRepository)
req = mock.Mock()
req.oid.return_value = '123456'
save_case = RetrieveUseCase(repo=repo)
save_case.process_request(req=req)
assert repo.get.call_count == 1
assert repo.get.call_args == mock.call(oid=req.oid)
def test_update_repo_calls():
repo = mock.Mock(spec=BaseRepository)
req = mock.Mock()
req.to_dict.return_value = dict(oid='123456', age=20, name='crl')
save_case = UpdateUseCase(repo=repo)
save_case.process_request(req=req)
assert repo.update.call_count == 1
assert repo.update.call_args == mock.call(oid='123456', attributes=dict(age=20, name='crl'))
def test_delete_repo_calls():
repo = mock.Mock(spec=BaseRepository)
req = mock.Mock()
req.oid.return_value = '123456'
save_case = DeleteUseCase(repo=repo)
save_case.process_request(req=req)
assert repo.delete.call_count == 1
assert repo.delete.call_args == mock.call(oid=req.oid)
def test_list_repo_calls():
repo = mock.Mock(spec=BaseListRepository)
req = mock.Mock()
req.oid.return_value = '123456'
req.ft = 'all'
req.filters = {}
req.page = Page()
req.sort = Sort()
save_case = ListUseCase(repo=repo)
save_case.process_request(req=req)
assert repo.execute.call_count == 1
assert repo.execute.call_args == mock.call(req.ft, req.filters, req.page, req.sort)
def test_list_silent_repo_filer_does_not_exist_exception():
repo = mock.Mock(spec=BaseListRepository)
repo.execute.side_effect = FilterDoesNotExist('')
req = mock.Mock()
req.oid.return_value = '123456'
req.ft = 'all'
req.filters = {}
req.page = Page()
req.sort = Sort()
save_case = ListUseCase(repo=repo)
res = save_case.process_request(req=req)
assert bool(res) is False
assert repo.execute.call_count == 1
assert repo.execute.call_args == mock.call(req.ft, req.filters, req.page, req.sort)
| bahnlink/pyclean | tests/clean/use_case/test_common.py | test_common.py | py | 3,835 | python | en | code | 0 | github-code | 6 | 72532680189 |
# pylint:disable=protected-access
# pylint:disable=redefined-outer-name
from collections.abc import Awaitable, Callable
from pathlib import Path
from typing import AsyncContextManager
import pytest
from aiopg.sa.engine import Engine
from faker import Faker
from models_library.api_schemas_storage import FileUploadSchema
from models_library.basic_types import SHA256Str
from models_library.projects_nodes_io import SimcoreS3FileID
from models_library.users import UserID
from pydantic import ByteSize, parse_obj_as
from simcore_service_storage import db_file_meta_data
from simcore_service_storage.models import FileMetaData
from simcore_service_storage.s3 import get_s3_client
from simcore_service_storage.simcore_s3_dsm import SimcoreS3DataManager
pytest_simcore_core_services_selection = ["postgres"]
pytest_simcore_ops_services_selection = ["adminer"]
@pytest.fixture
def file_size() -> ByteSize:
return parse_obj_as(ByteSize, "1")
@pytest.fixture
def mock_copy_transfer_cb() -> Callable[[int], None]:
def copy_transfer_cb(copied_bytes: int) -> None:
...
return copy_transfer_cb
async def test__copy_path_s3_s3(
simcore_s3_dsm: SimcoreS3DataManager,
directory_with_files: Callable[..., AsyncContextManager[FileUploadSchema]],
upload_file: Callable[[ByteSize, str], Awaitable[tuple[Path, SimcoreS3FileID]]],
file_size: ByteSize,
user_id: UserID,
mock_copy_transfer_cb: Callable[[int], None],
aiopg_engine: Engine,
):
def _get_dest_file_id(src: SimcoreS3FileID) -> SimcoreS3FileID:
return parse_obj_as(SimcoreS3FileID, f"{Path(src).parent}/the-copy")
async def _copy_s3_path(s3_file_id_to_copy: SimcoreS3FileID) -> None:
async with aiopg_engine.acquire() as conn:
exiting_fmd = await db_file_meta_data.get(conn, s3_file_id_to_copy)
await simcore_s3_dsm._copy_path_s3_s3( # noqa: SLF001
user_id=user_id,
src_fmd=exiting_fmd,
dst_file_id=_get_dest_file_id(s3_file_id_to_copy),
bytes_transfered_cb=mock_copy_transfer_cb,
)
async def _count_files(s3_file_id: SimcoreS3FileID, expected_count: int) -> None:
files = await get_s3_client(simcore_s3_dsm.app).list_files(
simcore_s3_dsm.simcore_bucket_name, prefix=s3_file_id
)
assert len(files) == expected_count
# using directory
FILE_COUNT = 4
SUBDIR_COUNT = 5
async with directory_with_files(
dir_name="some-random",
file_size_in_dir=file_size,
subdir_count=SUBDIR_COUNT,
file_count=FILE_COUNT,
) as directory_file_upload:
assert len(directory_file_upload.urls) == 1
assert directory_file_upload.urls[0].path
s3_object = directory_file_upload.urls[0].path.lstrip("/")
s3_file_id_dir_src = parse_obj_as(SimcoreS3FileID, s3_object)
s3_file_id_dir_dst = _get_dest_file_id(s3_file_id_dir_src)
await _count_files(s3_file_id_dir_dst, expected_count=0)
await _copy_s3_path(s3_file_id_dir_src)
await _count_files(s3_file_id_dir_dst, expected_count=FILE_COUNT * SUBDIR_COUNT)
# using a single file
_, simcore_file_id = await upload_file(file_size, "a_file_name")
await _copy_s3_path(simcore_file_id)
async def test_upload_and_search(
simcore_s3_dsm: SimcoreS3DataManager,
upload_file: Callable[..., Awaitable[tuple[Path, SimcoreS3FileID]]],
file_size: ByteSize,
user_id: UserID,
faker: Faker,
):
checksum: SHA256Str = parse_obj_as(SHA256Str, faker.sha256())
_, _ = await upload_file(file_size, "file1", sha256_checksum=checksum)
_, _ = await upload_file(file_size, "file2", sha256_checksum=checksum)
files: list[FileMetaData] = await simcore_s3_dsm.search_owned_files(
user_id=user_id, file_id_prefix="", sha256_checksum=checksum
)
assert len(files) == 2
for file in files:
assert file.sha256_checksum == checksum
assert file.file_name in {"file1", "file2"}
| ITISFoundation/osparc-simcore | services/storage/tests/unit/test_simcore_s3_dsm.py | test_simcore_s3_dsm.py | py | 4,006 | python | en | code | 35 | github-code | 6 | 25867867346 |
from SpeechEmotionRecognizer import SpeechEmotionRecognizer
import pandas as pd
import numpy as np
import librosa
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.model_selection import train_test_split
from keras.callbacks import ReduceLROnPlateau
from keras.models import Sequential
from keras.layers import Dense, Conv1D, MaxPooling1D, Flatten, Dropout
class SER_CNN(SpeechEmotionRecognizer):
def __init__(self):
super().__init__()
def dataProcess(self, features):
# extracting features
result = []
count = 0
for audioData in self.audios:
extractedFeatures = np.array([])
for feature in features:
extractedFeatures = np.hstack((extractedFeatures, self.extractFeatures(feature, audioData)))
result.append(extractedFeatures)
print('audios feature extracted: {}/{}'.format(count, len(self.audios)), end="\r")
count+=1
print('\n')
print('features extracted correctly!')
self.X = np.array(result)
# one hot encoding labels
encoder = OneHotEncoder()
self.Y = encoder.fit_transform(np.array(self.labels).reshape(-1,1)).toarray()
# normalize data
scaler = StandardScaler()
self.X = scaler.fit_transform(self.X)
self.X = np.expand_dims(self.X, axis=2)
print(self.X.shape)
def extractFeatures(self, feature, data):
# ZCR
if feature == 'zcr':
result = np.mean(librosa.feature.zero_crossing_rate(y=data).T, axis=0)
# Chroma_stft
elif feature == 'Chroma_stft':
stft = np.abs(librosa.stft(data))
result = np.mean(librosa.feature.chroma_stft(S=stft, sr=self.sampleRate).T, axis=0)
# MFCC
elif feature == 'mfcc':
result = np.mean(librosa.feature.mfcc(y=data, sr=self.sampleRate).T, axis=0)
# Root Mean Square Value
elif feature == 'rms':
result = np.mean(librosa.feature.rms(y=data).T, axis=0)
# MelSpectogram
elif feature == 'mel':
result = np.mean(librosa.feature.melspectrogram(y=data, sr=self.sampleRate).T, axis=0)
return result
def createModel(self):
self.model=Sequential()
self.model.add(Conv1D(256, kernel_size=5, strides=1, padding='same', activation='relu', input_shape=(self.X.shape[1], 1)))
self.model.add(MaxPooling1D(pool_size=5, strides = 2, padding = 'same'))
self.model.add(Conv1D(256, kernel_size=5, strides=1, padding='same', activation='relu'))
self.model.add(MaxPooling1D(pool_size=5, strides = 2, padding = 'same'))
self.model.add(Conv1D(128, kernel_size=5, strides=1, padding='same', activation='relu'))
self.model.add(MaxPooling1D(pool_size=5, strides = 2, padding = 'same'))
self.model.add(Dropout(0.2))
self.model.add(Conv1D(64, kernel_size=5, strides=1, padding='same', activation='relu'))
self.model.add(MaxPooling1D(pool_size=5, strides = 2, padding = 'same'))
self.model.add(Flatten())
self.model.add(Dense(units=32, activation='relu'))
self.model.add(Dropout(0.3))
self.model.add(Dense(units=8, activation='softmax'))
self.model.compile(optimizer = 'adam' , loss = 'categorical_crossentropy' , metrics = ['accuracy'])
def train(self):
# spliting data
x_train, x_test, y_train, y_test = train_test_split(self.X, self.Y, random_state=0, shuffle=True, test_size=self.TrainValidationSplit)
rlrp = ReduceLROnPlateau(monitor='loss', factor=0.4, verbose=0, patience=2, min_lr=0.0000001)
self.history=self.model.fit(x_train, y_train, batch_size=64, epochs=50, validation_data=(x_test, y_test), callbacks=[rlrp])
def test(self):
pass
def predict(self):
pass
recognizer = SER_CNN()
dataset = pd.read_csv('C:\\Users\\jsali\\OneDrive - UNIVERSIDAD DE SEVILLA\\Universidad\\MIERA\\TFM_SER\\dataset.csv')
recognizer.loadData(dataset.path, dataset.emotion)
recognizer.dataProcess(['mfcc', 'mel'])
recognizer.createModel()
recognizer.train()
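# Added sketch (not from the original file): what a single feature vector looks like, computed
# on a synthetic 440 Hz tone instead of a dataset file. The sample rate and one-second duration
# are arbitrary; call manually.
def demo_mfcc_vector(sr=22050):
    tone = np.sin(2 * np.pi * 440 * np.arange(sr) / sr)  # one second of audio
    return np.mean(librosa.feature.mfcc(y=tone, sr=sr).T, axis=0)  # 20 coefficients by default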
| jsalinas98/SpeechEmotionRecognition | SpeechEmotionRecognizer/SER_CNN.py | SER_CNN.py | py | 4,216 | python | en | code | 0 | github-code | 6 | 20342943646 |
import numpy as np
import matplotlib.pyplot as mplt
M = 10000
N = 50
s = np.zeros(M)
number_of_cols = 0
for i in range(M):
S_min = 0
S_plus = 0
for j in range(N):
chooser_of_state = np.random.randint(2)
if chooser_of_state == 1:
S_min += 1
else:
S_plus += 1
s_value = (S_plus - S_min)/2.
if s_value not in s:
number_of_cols += 1
s[i] = s_value
energy = -2*s #times mu and B too, but i assume them to be equal to 1
mplt.hist(energy, number_of_cols+1)
mplt.xlabel('value of s')
mplt.ylabel('probability of s')
mplt.show()
| tellefs/FYS2160 | Oblig1/oppgm.py | oppgm.py | py | 545 | python | en | code | 0 | github-code | 6 | 1210364326 |
with open('input', 'r') as input:
claims = input.read().splitlines() # claim = anspruch
matrix_size = 1000
square = [['.' for x in range(matrix_size)] for y in range(matrix_size)]
def split_values(claim):
claim = claim.replace(' ', '')
cid, coords = claim.split('@')
xy, size = coords.split(':')
return xy.split(',') + size.split('x') + [cid]
def count_overlaps(claim, matrix, overlaps):
x, y, w, h, cid = split_values(claim)
for wpy in range(0, int(h)):
for wpx in range(0, int(w)):
y_real = wpy + int(y)
x_real = wpx + int(x)
px = matrix[y_real][x_real]
if px == '.':
matrix[y_real][x_real] = cid
elif px == 'X':
continue
else:
matrix[y_real][x_real] = 'X'
overlaps += 1
return overlaps
counter = 0
for item in claims:
counter = count_overlaps(item, square, counter)
print('Part 1:', counter)
for item in claims:
x, y, w, h, cid = split_values(item)
skip = False
for wpy in range(0, int(h)):
for wpx in range(0, int(w)):
px = square[wpy + int(y)][wpx + int(x)]
if px == 'X':
skip = True
break
if skip:
break
if not skip:
print('Part 2: ', cid[1:])
| slo-ge/Advent-of-Code-2018.py | day3/start.py | start.py | py | 1,503 | python | en | code | 1 | github-code | 6 | 38038218212 |
"""
References
Machine Learning to Predict Stock Prices:
https://towardsdatascience.com/predicting-stock-prices-using-a-keras-lstm-model-4225457f0233
Twitter Sentiment Analysis using Python
https://www.geeksforgeeks.org/twitter-sentiment-analysis-using-python/
Streamlit 101: An in-depth introduction:
https://towardsdatascience.com/streamlit-101-an-in-depth-introduction-fc8aad9492f2
"""
#Import packages and libraries
#Basic libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import datetime
from datetime import date
import math
import os.path
from PIL import Image
#Finance
import yfinance as yf
#Modelling
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.metrics import mean_squared_error
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout, LSTM
#Twitter and NLP
import tweepy #need to pip install first
import preprocessor as preprocess #need to pip install first
import re
from textblob import TextBlob #need to pip install first
import nltk
nltk.download('punkt')
#Web
import streamlit as st
from plotly import graph_objs as go
# Ignore Warnings
import warnings
warnings.filterwarnings("ignore")
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
#Twitter API Keys
consumer_key= 'r4G4jn1kjUiMCSzr7rpmyz1Yv'
consumer_secret= 'i4sAmLzvethIHISYWUu8gricaQ7F2uyw7LitKOihFo1KTidFt5'
access_token='1505192442605314057-Ehu1ltCoGVlpRQhnmktFV6IGvKP6Ti'
access_token_secret='5FCsWKq2WZ2ZMQLt9MOF1OXYqvchdwqYb67DmgGFGDbRP'
#Data fetch function
def get_quote(ticker):
"""
Function to check if our ticker CSV exists. If not, it will get our stock ticker data via Yahoo Finance API
It will filter into a panda.Dataframe with the relevant informations and store into a CSV file.
It will then return the CSV file path and the ticker's company name
"""
info_filename = 'tickerinfo/' + ticker + str(date.today()) + '.csv'
ticker_name = yf.Ticker(ticker).info['shortName']
#Detect if a model file is present
if (os.path.exists(info_filename) == False):
end = date.today()
start = end - datetime.timedelta(days=2 * 365)
data = yf.download(ticker, start=start, end=end)
df = pd.DataFrame(data = data)
df.to_csv(info_filename)
return info_filename, ticker_name
#Price prediction algorithm function
def predict_price(df, ticker):
"""
Function which will analyze the chosen ticker and its DataFrame as inputs.
It will return the next day's predicted price, the RMSE error between
the real and predicted values of the model, and the file path for
the image file of the real vs. predicted price plot.
"""
#Split data into training set and test dataset
train_ds = df.iloc[0:int(0.8*len(df)),:]
test_ds = df.iloc[int(0.8*len(df)):,:]
prediction_days = 7
training_set=df.iloc[:,4:5].values
#Scaling
scaler = MinMaxScaler(feature_range=(0,1))
training_set_scaled = scaler.fit_transform(training_set)
x_train=[]
y_train=[]
for i in range(prediction_days,len(training_set_scaled)):
x_train.append(training_set_scaled[i-prediction_days:i,0])
y_train.append(training_set_scaled[i,0])
#Convert to numpy arrays
x_train = np.array(x_train)
y_train = np.array(y_train)
X_forecast = np.array(x_train[-1,1:])
X_forecast = np.append(X_forecast,y_train[-1])
#Reshaping: Adding 3rd dimension
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))#.shape 0=row,1=col
X_forecast = np.reshape(X_forecast, (1,X_forecast.shape[0],1))
filename = 'modelh5/' + str(ticker)+'_model.h5'
#Detect if a model file is present
if (os.path.exists(filename)):
model = load_model(filename)
else:
#Initialise RNN
model = Sequential()
#Add first LSTM layer
model.add(LSTM(units = 50,return_sequences=True,input_shape=(x_train.shape[1],1)))
model.add(Dropout(0.3))
model.add(LSTM(units = 75,return_sequences=True))
model.add(Dropout(0.4))
model.add(LSTM(units = 100,return_sequences=True))
model.add(Dropout(0.5))
model.add(LSTM(units = 125))
model.add(Dropout(0.6))
model.add(Dense(units = 1))
#Compile
model.compile(optimizer='adam',loss='mean_squared_error')
#Training
model.fit(x_train, y_train, epochs = 50, batch_size = 32 )
#Saving model for this specific ticker
model.save(filename)
#Testing
y = test_ds.iloc[:,4:5].values
#Combining training and testing set and using the number of prediction days before the test set
total_ds = pd.concat((train_ds['Close'],test_ds['Close']),axis=0)
testing_set = total_ds[ len(total_ds) -len(test_ds) - prediction_days: ].values
testing_set = testing_set.reshape(-1,1)
#Scaling
testing_set = scaler.transform(testing_set)
#Create testing data structure
x_test=[]
for i in range(prediction_days,len(testing_set)):
x_test.append(testing_set[i-prediction_days:i,0])
#Convert to numpy arrays
x_test=np.array(x_test)
#Reshaping: Adding 3rd dimension
x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))
#Testing Prediction
y_test = model.predict(x_test)
#Getting original prices back from scaled values
y_test = scaler.inverse_transform(y_test)
fig = plt.figure(figsize=(7.2,4.8),dpi=65)
plt.plot(y,label='Actual Price')
plt.plot(y_test,label='Predicted Price')
plt.legend(loc=4)
RNN_filename = ('RNNplots/' + str(ticker) + ' ' + str(date.today()) +' RNN model.png')
plt.savefig(RNN_filename)
plt.close(fig)
rmse = math.sqrt(mean_squared_error(y, y_test))
#Forecasting Prediction
y_pred = model.predict(X_forecast)
#Getting original prices back from scaled values
y_pred = scaler.inverse_transform(y_pred)
nextday_price = y_pred[0,0]
print("Tomorrow's ",ticker," Closing Price Prediction by LSTM: ", nextday_price)
print("LSTM RMSE:", rmse)
return nextday_price, rmse, RNN_filename
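# Added sketch (illustrative only): the sliding-window reshaping used above, on a tiny toy
# series, to make the (samples, window, 1) LSTM input shape explicit. The numbers are arbitrary;
# call manually.
def toy_window_demo(window=3):
    series = np.arange(10, dtype=float)  # toy "closing prices"
    x = np.array([series[i - window:i] for i in range(window, len(series))])
    y = series[window:]
    x = np.reshape(x, (x.shape[0], x.shape[1], 1))
    return x.shape, y.shape  # ((7, 3, 1), (7,))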
#Twitter sentiment analysis
def analyze_tweet_sentiment(ticker):
"""
Function which will search through twitter for the requested ticker and
analyze the overall sentiment if positive or negative.
It will return the overall sentiment score, the overall verdict, number of positive tweets,
number of negative tweets and number of neutral tweets, a list of tweets and its polarities,
the file path for the sentiment analysis pie chart image
"""
#Find the company name associated to the ticker via yfinance
name = yf.Ticker(ticker).info['shortName']
#Accessing and authenticating Twitter
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
user = tweepy.API(auth, wait_on_rate_limit = True)
#Number of tweets to analyze
n_tweets = 300
#Search twitter
tweets = tweepy.Cursor(user.search_tweets, q=name,
tweet_mode='extended', lang='en').items(n_tweets)
tweet_list = [] #List of tweets
polarity_list =[] #List of polarities of the tweets
overall_polarity = 0
#Count positive and negative tweets
positive_tweets = 0
negative_tweets = 0
for tw in tweets:
#Convert to Textblob format for assigning polarity
tweet = tw.full_text
#Clean
tweet = preprocess.clean(tweet)
tweet = re.sub('&','&',tweet) #replace & by '&'
tweet = re.sub(':','',tweet)#Remove :
tweet = tweet.encode('ascii', 'ignore').decode('ascii') #Remove nonascii characters
tweet_list.append(tweet)
blob = TextBlob(tweet)
tweet_polarity = 0 #Polarity for each tweet
#Analyze each sentence in the tweet
for sentence in blob.sentences:
tweet_polarity += sentence.sentiment.polarity
#Increment the count whether it is positive or negative
if tweet_polarity > 0:
positive_tweets += 1
if tweet_polarity < 0:
negative_tweets += 1
overall_polarity += sentence.sentiment.polarity
polarity_list.append(tweet_polarity)
if len(tweet_list) != 0:
overall_polarity = overall_polarity / len(tweet_list)
else:
overall_polarity = overall_polarity
neutral_tweets = n_tweets - (positive_tweets + negative_tweets)
if neutral_tweets < 0:
negative_tweets = negative_tweets + neutral_tweets
print("Positive Tweets :", positive_tweets, "Negative Tweets :", negative_tweets,
"Neutral Tweets :", neutral_tweets)
labels=['Positive','Negative','Neutral']
colors = ['tab:green', 'tab:red' , 'tab:orange']
sizes = [positive_tweets, negative_tweets, neutral_tweets]
explode = (0, 0, 0)
fig = plt.figure(figsize=(7.2,4.8),dpi=65)
fig1, ax1 = plt.subplots(figsize=(7.2,4.8),dpi=65)
ax1.pie(sizes, explode=explode, labels=labels, colors=colors, autopct='%1.1f%%', startangle=90)
# Equal aspect ratio ensures that pie is drawn as a circle
ax1.axis('equal')
plt.tight_layout()
SA_filename = 'SApiecharts/'+ str(ticker) +' '+ str(date.today()) +' Twitter Sentiment Analysis.png'
plt.savefig(SA_filename)
plt.close(fig)
#plt.show()
if overall_polarity > 0:
polarity_verdict = 'Overall Positive'
else:
polarity_verdict = 'Overall Negative'
return overall_polarity, polarity_verdict, positive_tweets, negative_tweets, neutral_tweets, tweet_list, polarity_list ,SA_filename
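# Added sketch (illustrative only): how the per-sentence polarity used above behaves on a
# hard-coded string, independent of the Twitter API. The sample sentences are made up; call
# manually.
def toy_polarity_demo():
    blob = TextBlob("The stock looks great. The last earnings call was disappointing.")
    return [sentence.sentiment.polarity for sentence in blob.sentences]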
def recommend_action(polarity, info_ticker, price_nextday):
if info_ticker.iloc[-1]['Close'] < price_nextday:
if polarity > 0:
decision = 'Good sentiment and rising. Seems like a good idea to buy.'
elif polarity <= 0:
decision = "Bad sentiment and rising. Might wait before buying or sell some existing stock."
elif info_ticker.iloc[-1]['Close'] > price_nextday:
if polarity > 0:
decision= 'Good sentiment and falling. Might wait before buying.'
elif polarity <= 0:
decision= 'Bad sentiment and falling. Seems like a good idea to sell.'
return decision
#Main execution
#Title
st.title("Stock Prediction with Neural Network and Twitter NLP sentiment analysis")
#Search ticker
ticker = st.text_input('Type in the selected ticker ', '')
search_button = st.button('Search')
if search_button:
ticker = ticker.upper()
#Fetching and saving the ticker info into CSV
data_load_state = st.text("Loading data...")
csv_path, ticker_name = get_quote(ticker)
df = pd.read_csv(csv_path)
data_load_state.text("Loading data...Done!")
#Read and diplay the data
st.subheader("Today's " + ticker_name +' ('+ ticker + ") information for " + str(date.today()))
st.table(df.tail(1))
df = df.dropna()
#Plot and display the ticker
def plot_ticker_data():
fig = go.Figure()
fig.add_trace(go.Scatter(x=df['Date'], y=df['Close'], name = 'Close Price'))
fig.layout.update(title_text=ticker + " Time Series", xaxis_rangeslider_visible=True)
st.plotly_chart(fig)
plot_ticker_data()
#Predicting the stock price
st.subheader(ticker + " Model Price Prediction")
predict_state = st.text("Predicting...")
price_nextday, rmse, RNN_filename = predict_price(df, ticker)
predict_state.text("Predicting...Done!")
image_RNN = Image.open(RNN_filename)
st.image(image_RNN, caption = ticker + ' Past 100 days Real vs Predicted Price') #Display Real vs Predicted plot
st.write("Predicted price at the closing of the next stock day: " + str(price_nextday))
st.write("The model RMSE is at: " + str(rmse))
#Twitter Sentiment Analysis
st.subheader(ticker_name + " Twitter Sentiment Analysis")
twitter_search_state = st.text("Searching through Twitter...")
polarity, polarity_verdict, positive, negative, neutral, tweet_list, polarity_list, SA_filename = analyze_tweet_sentiment(ticker)
twitter_search_state.text("Searching through Twitter...Done!")
image_SA = Image.open(SA_filename)
st.image(image_SA, caption = 'Twitter Sentiment Pie Chart for ' + ticker_name) #Display Sentiment Analysis Pie Chart
total = positive + negative + neutral
st.write("Number of positive tweets: " + str(positive) + ' ( '+ str(round((positive/total)*100,2)) +'% )')
st.write("Number of neutral tweets: " + str(neutral) + ' ( '+ str(round((neutral/total)*100,2)) +'% )')
st.write("Number of negative tweets: " + str(negative) + ' ( '+ str(round((negative/total)*100,2)) +'% )')
st.write("A few examples of tweets:")
tweet_df = pd.DataFrame(list(zip(tweet_list, polarity_list)), columns = ['Tweet', 'Polarity'])
st.write(tweet_df.head(10))
st.write(ticker + ' Overall Polarity: ' + str(polarity) + " = " + polarity_verdict)
st.subheader("Reommendation for " + ticker)
recommend = recommend_action(polarity, df, price_nextday)
st.write(recommend)
| qvinh-du/finalproject | finalproject.py | finalproject.py | py | 13,804 | python | en | code | 0 | github-code | 6 | 32833821340 |
from appium import webdriver
import time
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import ElementNotVisibleException, ElementNotSelectableException, NoSuchElementException
from selenium.webdriver.support.wait import WebDriverWait
desired_caps = {}
desired_caps['platformName'] = 'Android'
desired_caps['platformVersion'] = '9'
desired_caps['automationName'] = 'UiAutomator2'
desired_caps['deviceName'] = 'moto x4'
desired_caps['app'] = ('/home/candi/Downloads/PgConnect_release_1.7.0_270820_1004.apk')
desired_caps['appPackage'] = 'de.proglove.connect'
desired_caps['appActivity'] = 'de.proglove.connect.app.main.MainActivity'
driver = webdriver.Remote("http://localhost:4723/wd/hub", desired_caps)
print("Device Width and Height: ", driver.get_window_size())
#Device Width and Height: {'width': 1080, 'height': 1776}
deviceSize = driver.get_window_size()
screenWidth = deviceSize['width']
screenHeight = deviceSize['height']
#Swipe from Buttom to Top
startx = screenWidth/2
endsx = screenWidth/2
starty = screenHeight*8/9
endsy = screenHeight/9
#Swipe from Top to Buttom
startx2 = screenWidth/2
endsx2 = screenWidth/2
starty2 = screenHeight*2/9
endsy2 = screenHeight*8/9
actions = TouchAction(driver)
actions.long_press(None, startx, starty).move_to(None, endsx, endsy).release().perform()
time.sleep(3)
actions.long_press(None, startx2, starty2).move_to(None, endsx2, endsy2).release().perform()
| candi-project/Automation_framework_Android | Appiumpython/Gestures/SwipeGesture2.py | SwipeGesture2.py | py | 1,460 | python | en | code | 0 | github-code | 6 | 73944977468 |
from difflib import SequenceMatcher
from elasticsearch import Elasticsearch
import string
INDEX = 'video-search'
DOC_TYPE = 'video'
es = Elasticsearch(['elasticsearch:9200'])
def index_video(body):
es.index(index=INDEX, doc_type=DOC_TYPE, body=body)
es.indices.refresh(index=INDEX)
def delete_index():
es.indices.delete(index=INDEX, ignore=[400, 404])
def search_videos(query):
es_query = {
'query': {
'multi_match': {
'query': query,
'fields': ['transcript']
},
},
'highlight': {
'fields': {
'text': {'type': 'plain',
'number_of_fragments': 3,
'fragment_size': 30
}
}
}
}
search_res = es.search(index=INDEX, body=es_query)
return search_res['hits']['hits']
def find_matches_in_string(haystack, needle):
needle = needle.lower()
haystack = haystack.lower()
from spacy.matcher import PhraseMatcher
from spacy.lang.en import English
nlp = English()
matcher = PhraseMatcher(nlp.vocab)
matcher.add('query', None, nlp(needle))
doc = nlp(haystack)
matches = matcher(doc)
return matches
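# Added sketch (not part of the original module): what find_matches_in_string() returns. Each
# match is a (match_id, start_token, end_token) triple from spaCy's PhraseMatcher; the sample
# strings are made up; call manually.
def demo_find_matches():
    return find_matches_in_string("the quick brown fox jumps", "brown fox")  # e.g. [(match_id, 2, 4)]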
| colanconnon/cs410project | cs410videosearchengine/videosearchengine/search.py | search.py | py | 1,263 | python | en | code | 0 | github-code | 6 | 7138653574 |
import sys
sys.path.append(".")
from argparse import ArgumentParser
import json
import os
import numpy as np
import torch
from torch.utils.data import Dataset, DistributedSampler, DataLoader, SequentialSampler, RandomSampler
from torch.optim import AdamW
from callback.lr_scheduler import get_linear_schedule_with_warmup
from callback.progressbar import ProgressBar
from model.configuration_bert import BertConfig
from model.modeling_poor import BertForMultipleChoice, BertForTokenClassification, BertForQuestionAnswering, BertForSequenceClassification
from model.tokenization_shang import ShangTokenizer
# from model.modeling_poor import BertForSequenceClassification, BertForTokenClassification, BertForQuestionAnswering, BertForMultipleChoice
# from model.tokenization_shang import ShangTokenizer, Sentence
from tasks.utils import truncate_pair, TaskConfig, find_span, cal_acc
from tools.common import logger, init_logger
# logger = logging.getLogger(__name__)
# # FORMAT = '%(pathname)s %(filename)s %(funcName)s %(lineno)d %(asctime)-15s %(message)s'
# FORMAT = ' %(filename)s %(lineno)d %(funcName)s %(asctime)-15s %(message)s'
# logging.basicConfig(filename="tasks.log",filemode='a',format=FORMAT,level=logging.INFO)
class TaskPoor:
def __init__(self,config):
# super(Task, self).__init__(config)
self.config=TaskConfig(config)
init_logger(log_file=f"{self.config.output_dir}/train.log")
self.task_name=self.config.task_name
self.dataset=self.config.TaskDataset
self.labels=self.config.labels
parser = ArgumentParser()
parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
args = parser.parse_args()
self.config.local_rank= args.local_rank
if self.config.local_rank == -1 or self.config.no_cuda:
self.config.device = torch.device("cuda" if torch.cuda.is_available() and not self.config.no_cuda else "cpu")
self.config.n_gpu = torch.cuda.device_count()
else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
self.config.device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend='nccl')
self.config.n_gpu = 1
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
self.tokenizer = ShangTokenizer(vocab_path=self.config.vocab_file, bujian_path=self.config.bujian_file,use_bujian=self.config.use_bujian)
# self.valid_dataset=self.load_valid()
self.acc=0
self.model = self.load_model(self.config.model_name_or_path)
self.valid_dataset = None
self.test_dataset=None
def load_model(self, model_path ):
bert_config = BertConfig.from_pretrained(model_path, num_labels=self.config.num_labels, finetuning_task=self.task_name, use_stair=False)
logger.info(f" loadding {model_path} ")
if self.config.task_name in ["c3", "chid"]:
model = BertForMultipleChoice.from_pretrained(model_path, from_tf=bool('.ckpt' in model_path), config=bert_config)
elif self.config.output_mode == "span":
model = BertForTokenClassification.from_pretrained(model_path, from_tf=bool('.ckpt' in model_path), config=bert_config)
elif self.config.output_mode == "qa":
model = BertForQuestionAnswering.from_pretrained(model_path, from_tf=bool('.ckpt' in model_path), config=bert_config)
elif self.config.output_mode == "classification":
model = BertForSequenceClassification.from_pretrained(model_path, from_tf=bool('.ckpt' in model_path), config=bert_config)
if self.config.local_rank == 0:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
model.to(self.config.device)
return model
def train(self):
input_file=os.path.join(self.config.data_dir,self.config.valid_file)
self.valid_dataset = self.dataset(input_file=input_file, tokenizer=self.tokenizer,labels=self.labels, max_tokens=self.config.max_len,config=self.config)
self.config.save_steps=max(self.config.save_steps,len(self.valid_dataset)//self.config.batch_size)
self.config.logging_steps=max(self.config.logging_steps,len(self.valid_dataset)//self.config.batch_size)
args=self.config
model=self.model
input_file=os.path.join(args.data_dir,self.config.train_file)
dataset = self.dataset(input_file=input_file, tokenizer=self.tokenizer, labels=self.labels, max_tokens=self.config.max_len,config=self.config)
num_training_steps=self.config.n_epochs*len(dataset)
warmup_steps = int(num_training_steps * args.warmup_proportion)
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
'weight_decay': args.weight_decay},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
# optimizer_grouped_parameters = [
# {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in ["bert"])],'lr': self.config.learning_rate},
# {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in ["bert"])], 'lr': self.config.learning_rate/5}
# ]
# # optimizer = Lamb(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
optimizer = AdamW(params=optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=num_training_steps)
if self.config.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=self.config.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if self.config.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if self.config.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
self.model=model
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
self.global_step = 0
tr_loss, logging_loss = 0.0, 0.0
for epoch in range(self.config.n_epochs):
dataset = self.dataset(input_file=input_file, tokenizer=self.tokenizer, labels=self.labels, max_tokens=self.config.max_len,config=self.config)
sampler = RandomSampler(dataset) if args.local_rank == -1 else DistributedSampler(dataset)
dataloader = DataLoader(dataset, sampler=sampler, batch_size=self.config.batch_size, collate_fn=self.config.collate_fn,pin_memory=self.config.pin_memory, num_workers=self.config.num_workers)
pbar = ProgressBar(n_total=len(dataloader), desc=f"{input_file[-15:]}")
for step, batch in enumerate(dataloader):
loss=self.train_batch(batch,args,optimizer,scheduler,step)
msg={ "epoch":epoch, "global_step":self.global_step,"loss": loss ,"lr": scheduler.get_lr(),"seq_len":batch[0].shape[-1] }
pbar(step, msg)
tr_loss += loss
                acc = self.acc
                if args.local_rank in [-1, 0] and args.logging_steps > 0 and (self.global_step % args.logging_steps == 0 or step+1==len(dataloader) ):
                    # Log metrics
                    if args.local_rank == -1:  # Only evaluate when single GPU otherwise metrics may not average well
                        acc = self.evaluate(epoch)
if args.local_rank in [-1, 0] and args.save_steps > 0 and (self.global_step % args.save_steps == 0 or step+1==len(dataloader))and acc>=self.acc:
logger.info(f"Saving best model acc:{self.acc} -->{acc}")
self.acc=acc
output_dir = args.output_dir
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, 'training_args.bin'))
logger.info("Saving model checkpoint to %s", output_dir)
# break
print("\n ")
if 'cuda' in str(args.device):
torch.cuda.empty_cache()
msg = {"epoch": (epoch), "global_step": (self.global_step), "loss": loss, "average loss":tr_loss, "lr": (scheduler.get_lr())}
logger.info( f" {msg}")
logger.info("Saving model checkpoint to %s", args.output_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training
model_to_save.save_pretrained(args.output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))
def train_batch(self, batch,args,optimizer,scheduler,step):
model=self.model
model.train()
batch = tuple(t.to(self.config.device) for t in batch)
if self.config.output_mode == "qa":
input_ids, attention_mask, token_type_ids, start_positions, end_positions = batch
inputs = {'input_ids': input_ids, 'attention_mask': attention_mask, 'token_type_ids': token_type_ids, 'start_positions': start_positions, "end_positions": end_positions}
else:
input_ids, attention_mask, token_type_ids, label_ids = batch
inputs = {'input_ids': input_ids, 'attention_mask': attention_mask, 'token_type_ids': token_type_ids, 'labels': label_ids}
outputs = self.model(**inputs)
loss = outputs[0] # model outputs are always tuple in transformers (see doc)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
        if args.fp16:
            from apex import amp  # local import: amp is only available when apex is installed and fp16 is enabled
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
            torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
if (step + 1) % args.gradient_accumulation_steps == 0:
optimizer.step()
scheduler.step() # Update learning rate schedule
optimizer.zero_grad()
self.global_step += 1
return loss.item()
def evaluate(self,epoch):
args=self.config
model=self.model
model.eval()
dataset=self.valid_dataset
sampler = SequentialSampler(dataset) if args.local_rank == -1 else DistributedSampler(dataset)
dataloader = DataLoader(dataset, sampler=sampler, batch_size=self.config.batch_size, collate_fn=self.config.collate_fn, pin_memory=self.config.pin_memory, num_workers=self.config.num_workers)
print(' ')
nb_eval_steps = 0
scores=[]
pbar = ProgressBar(n_total=len(dataloader), desc="Evaluating")
for step, batch in enumerate(dataloader):
with torch.no_grad():
batch = tuple(t.to(args.device) for t in batch)
if self.config.output_mode=="qa":
input_ids, attention_mask, token_type_ids, start_positions, end_positions = batch
inputs = {'input_ids': input_ids, 'attention_mask': attention_mask, 'token_type_ids': token_type_ids}
else:
input_ids, attention_mask,token_type_ids, label_ids = batch
inputs = {'input_ids': input_ids, 'attention_mask': attention_mask, 'token_type_ids':token_type_ids,'labels': label_ids}
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
if self.config.output_mode == "qa":
start_logits, end_logits=tmp_eval_loss, logits
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
score1 = cal_acc(start_logits, start_positions)
score2 = cal_acc(end_logits, end_positions)
scores.append((score1+ score2)/2)
elif self.config.output_mode == "span" :
for i in range(len(logits)):
score = cal_acc(logits[i], label_ids[i])
scores.append((score))
elif self.config.output_mode == "classification":
score = cal_acc(logits, label_ids)
scores.append(score)
nb_eval_steps += 1
pbar(step)
# break
print(' ')
if 'cuda' in str(args.device):
torch.cuda.empty_cache()
acc = np.array(scores).mean()
result={"acc": acc,"epoch":epoch,"step":self.global_step}
output_eval_file = os.path.join(args.output_dir, "checkpoint_eval_results.txt")
line=json.dumps(result,ensure_ascii=False)
with open(output_eval_file, "a") as writer:
writer.write(line+"\n")
logger.info(f"\n valid : {line} ")
model.train()
return acc
def infer(self):
args=self.config
logger.info(f"selected best model acc:{self.acc}")
model= self.load_model(self.config.output_dir)
# model=self.model
model.eval()
# dataset=self.valid_dataset
input_file=os.path.join(self.config.data_dir,self.config.test_file)
dataset = self.dataset(input_file=input_file, tokenizer=self.tokenizer,labels=self.labels, max_tokens=self.config.max_len,config=self.config)
self.test_dataset=dataset
sampler = SequentialSampler(dataset) if args.local_rank == -1 else DistributedSampler(dataset)
dataloader = DataLoader(dataset, sampler=sampler, batch_size=self.config.batch_size, collate_fn=self.config.collate_fn, pin_memory=self.config.pin_memory, num_workers=self.config.num_workers)
nb_eval_steps = 0
preds = []
pbar = ProgressBar(n_total=len(dataloader), desc="Testing")
for step, batch in enumerate(dataloader):
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
if self.config.output_mode == "qa":
input_ids, attention_mask, token_type_ids, start_positions, end_positions = batch
inputs = {'input_ids': input_ids, 'attention_mask': attention_mask, 'token_type_ids': token_type_ids}
else:
input_ids, attention_mask, token_type_ids, label_ids = batch
inputs = {'input_ids': input_ids, 'attention_mask': attention_mask, 'token_type_ids': token_type_ids, 'labels': label_ids}
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
if self.config.output_mode == "qa":
start_logits, end_logits=tmp_eval_loss, logits
start = torch.argmax(start_logits, 1).tolist()
end = torch.argmax(end_logits, 1).tolist()
preds+=zip(start,end)
elif args.output_mode=="span":
prob = logits.detach().cpu().numpy()
preds+=[x for x in prob]
elif args.output_mode == "classification":
preds+=torch.argmax(logits, 1).tolist()
nb_eval_steps += 1
pbar(step)
# break
print(' ')
if 'cuda' in str(args.device):
torch.cuda.empty_cache()
logger.info(f"infered {len(preds)}")
return preds
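

# ---------------------------------------------------------------------------
# Illustrative sketch: the optimizer grouping in train() follows the common
# "no weight decay for biases and LayerNorm weights" pattern. The toy below
# reproduces that grouping on a tiny hypothetical module (_Tiny is not part of
# the models imported above) so the substring matching is easy to inspect.
def _weight_decay_grouping_demo(weight_decay=0.01, lr=2e-5):
    class _Tiny(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.dense = torch.nn.Linear(8, 8)
            self.LayerNorm = torch.nn.LayerNorm(8)

    tiny = _Tiny()
    no_decay = ['bias', 'LayerNorm.weight']  # matched as substrings of parameter names
    grouped = [
        {'params': [p for n, p in tiny.named_parameters() if not any(nd in n for nd in no_decay)],
         'weight_decay': weight_decay},
        {'params': [p for n, p in tiny.named_parameters() if any(nd in n for nd in no_decay)],
         'weight_decay': 0.0},
    ]
    return AdamW(grouped, lr=lr)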
|
laohur/PoorBERT
|
v1/tasks/task.py
|
task.py
|
py
| 17,353 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28656442402
|
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
from sklearn.metrics import mean_squared_error
import models
import helper_functions
import pandas as pd
import os
import sys
from scipy.stats import geom
import torchvision
import time
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from PIL import Image
import itertools
import pickle
from numpy import dot
from numpy.linalg import norm
from sklearn.utils import shuffle
def getMeanNet(start_idx, end_idx):
num_models = end_idx - start_idx
nets = [models.Net() for i in range(num_models)]
    # Load each trained MNIST solver checkpoint in [start_idx, end_idx).
    for idx, net in enumerate(nets):
        net_model = torch.load("task_net_models/mnist_digit_solver_" + str(idx + start_idx) + ".pt")
        net.load_state_dict(net_model)

    # Flatten every network into a single 1-D parameter vector.
    flatNets = [[] for i in range(num_models)]
    net_shapes = []
    for idx, net in enumerate(nets):
        flatNets[idx], net_shapes = helper_functions.flattenNetwork(net)

    # Stack the flattened networks row-wise so they can be averaged below.
    all = torch.Tensor()
    for idx, flatNet in enumerate(flatNets):
        all = torch.cat((all, torch.Tensor(flatNet).view(-1, len(flatNet))), dim=0)
def loadWeights_mnsit(weights_to_load, net):
net.conv1.weight.data = torch.from_numpy(weights_to_load[0]).cuda()
net.conv1.bias.data = torch.from_numpy(weights_to_load[1]).cuda()
net.conv2.weight.data = torch.from_numpy(weights_to_load[2]).cuda()
net.conv2.bias.data = torch.from_numpy(weights_to_load[3]).cuda()
net.fc1.weight.data = torch.from_numpy(weights_to_load[4]).cuda()
net.fc1.bias.data = torch.from_numpy(weights_to_load[5]).cuda()
net.fc2.weight.data = torch.from_numpy(weights_to_load[6]).cuda()
net.fc2.bias.data = torch.from_numpy(weights_to_load[7]).cuda()
return net
mean = torch.mean(all, dim=0)
meanNet = models.Net()
mean_weights=helper_functions.unFlattenNetwork(mean.data.numpy(), net_shapes)
meanNet=loadWeights_mnsit(mean_weights,meanNet)
torch.save(meanNet.state_dict(),'meanNet.pt')
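

# ---------------------------------------------------------------------------
# Illustrative sketch of the idea above: flatten each trained network into a
# single vector, average the vectors, and copy the mean back into a fresh
# network. TinyNet below is hypothetical and self-contained, so the sketch
# does not rely on models.Net or helper_functions.
def _mean_of_networks_demo(num_models=3):
    class TinyNet(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.fc = torch.nn.Linear(4, 2)

        def forward(self, x):
            return self.fc(x)

    nets = [TinyNet() for _ in range(num_models)]
    # One 1-D vector per network, holding all of its parameters.
    flats = [torch.cat([p.detach().reshape(-1) for p in net.parameters()]) for net in nets]
    mean_flat = torch.stack(flats).mean(dim=0)
    # Copy the averaged vector back into a new network of the same shape.
    mean_net = TinyNet()
    offset = 0
    with torch.no_grad():
        for p in mean_net.parameters():
            n = p.numel()
            p.copy_(mean_flat[offset:offset + n].view_as(p))
            offset += n
    return mean_net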
|
jmandivarapu1/SelfNet-Lifelong-Learning-via-Continual-Self-Modeling
|
Split_MNIST_10x/getMeanNet.py
|
getMeanNet.py
|
py
| 4,781 |
python
|
en
|
code
| 4 |
github-code
|
6
|
926305752
|
from http import HTTPStatus
from unittest.mock import patch
import pytest
import requests
from rotkehlchen.constants.assets import A_JPY
from rotkehlchen.db.settings import DEFAULT_KRAKEN_ACCOUNT_TYPE, ROTKEHLCHEN_DB_VERSION, DBSettings
from rotkehlchen.exchanges.kraken import KrakenAccountType
from rotkehlchen.tests.utils.api import (
api_url_for,
assert_error_response,
assert_proper_response,
assert_simple_ok_response,
)
from rotkehlchen.tests.utils.mock import MockWeb3
def test_querying_settings(rotkehlchen_api_server, username):
"""Make sure that querying settings works for logged in user"""
response = requests.get(api_url_for(rotkehlchen_api_server, "settingsresource"))
assert_proper_response(response)
json_data = response.json()
result = json_data['result']
assert json_data['message'] == ''
assert result['version'] == ROTKEHLCHEN_DB_VERSION
for setting in DBSettings._fields:
assert setting in result
# Logout of the active user
data = {'action': 'logout'}
response = requests.patch(
api_url_for(rotkehlchen_api_server, "usersbynameresource", name=username),
json=data,
)
assert_simple_ok_response(response)
# and now with no logged in user it should fail
response = requests.get(api_url_for(rotkehlchen_api_server, "settingsresource"))
assert_error_response(
response=response,
contained_in_msg='No user is currently logged in',
status_code=HTTPStatus.CONFLICT,
)
def test_set_settings(rotkehlchen_api_server):
"""Happy case settings modification test"""
# Get the starting settings
response = requests.get(api_url_for(rotkehlchen_api_server, "settingsresource"))
assert_proper_response(response)
json_data = response.json()
original_settings = json_data['result']
assert json_data['message'] == ''
# Create new settings which modify all of the original ones
new_settings = {}
unmodifiable_settings = (
'version',
'last_write_ts',
'last_data_upload_ts',
'last_balance_save',
'have_premium',
)
for setting, value in original_settings.items():
if setting in unmodifiable_settings:
continue
elif setting == 'historical_data_start':
value = '10/10/2016'
elif setting == 'date_display_format':
value = '%d/%m/%Y-%H:%M:%S'
elif setting == 'eth_rpc_endpoint':
value = 'http://working.nodes.com:8545'
elif setting == 'main_currency':
value = 'JPY'
elif type(value) == bool:
value = not value
elif type(value) == int:
value += 1
elif setting == 'kraken_account_type':
# Change the account type to anything other than default
assert value != str(KrakenAccountType.PRO)
value = str(KrakenAccountType.PRO)
elif setting == 'active_modules':
value = ['makerdao_vaults']
else:
            raise AssertionError(f'Unexpected setting {setting} encountered')
new_settings[setting] = value
# modify the settings
block_query = patch(
'rotkehlchen.chain.ethereum.manager.EthereumManager.query_eth_highest_block',
return_value=0,
)
mock_web3 = patch('rotkehlchen.chain.ethereum.manager.Web3', MockWeb3)
with block_query, mock_web3:
response = requests.put(
api_url_for(rotkehlchen_api_server, "settingsresource"),
json={'settings': new_settings},
)
# Check that new settings are returned in the response
assert_proper_response(response)
json_data = response.json()
assert json_data['message'] == ''
result = json_data['result']
assert result['version'] == ROTKEHLCHEN_DB_VERSION
for setting, value in new_settings.items():
msg = f'Error for {setting} setting. Expected: {value}. Got: {result[setting]}'
assert result[setting] == value, msg
# now check that the same settings are returned in a settings query
response = requests.get(api_url_for(rotkehlchen_api_server, "settingsresource"))
assert_proper_response(response)
json_data = response.json()
result = json_data['result']
assert json_data['message'] == ''
for setting, value in new_settings.items():
assert result[setting] == value
def test_set_rpc_endpoint_fail_not_set_others(rotkehlchen_api_server):
"""Test that setting a non-existing eth rpc along with other settings does not modify them"""
eth_rpc_endpoint = 'http://working.nodes.com:8545'
main_currency = A_JPY
data = {'settings': {
'eth_rpc_endpoint': eth_rpc_endpoint,
'main_currency': main_currency.identifier,
}}
response = requests.put(api_url_for(rotkehlchen_api_server, "settingsresource"), json=data)
assert_error_response(
response=response,
contained_in_msg='Failed to connect to ethereum node at endpoint',
status_code=HTTPStatus.CONFLICT,
)
# Get settings and make sure they have not been modified
response = requests.get(api_url_for(rotkehlchen_api_server, "settingsresource"))
assert_proper_response(response)
json_data = response.json()
result = json_data['result']
assert json_data['message'] == ''
assert result['main_currency'] != 'JPY'
assert result['eth_rpc_endpoint'] != 'http://working.nodes.com:8545'
def test_unset_rpc_endpoint(rotkehlchen_api_server):
"""Test the rpc endpoint can be unset"""
response = requests.get(api_url_for(rotkehlchen_api_server, "settingsresource"))
assert_proper_response(response)
json_data = response.json()
assert json_data['message'] == ''
result = json_data['result']
assert result['eth_rpc_endpoint'] != ''
data = {
'settings': {'eth_rpc_endpoint': ''},
}
response = requests.put(api_url_for(rotkehlchen_api_server, "settingsresource"), json=data)
assert_proper_response(response)
json_data = response.json()
result = json_data['result']
assert json_data['message'] == ''
assert result['eth_rpc_endpoint'] == ''
@pytest.mark.parametrize('added_exchanges', [('kraken',)])
def test_set_kraken_account_type(rotkehlchen_api_server_with_exchanges):
server = rotkehlchen_api_server_with_exchanges
rotki = rotkehlchen_api_server_with_exchanges.rest_api.rotkehlchen
kraken = rotki.exchange_manager.get('kraken')
assert kraken.account_type == DEFAULT_KRAKEN_ACCOUNT_TYPE
assert kraken.call_limit == 15
assert kraken.reduction_every_secs == 3
data = {'settings': {'kraken_account_type': 'intermediate'}}
response = requests.put(api_url_for(server, "settingsresource"), json=data)
assert_proper_response(response)
json_data = response.json()
result = json_data['result']
assert json_data['message'] == ''
assert result['kraken_account_type'] == 'intermediate'
assert kraken.account_type == KrakenAccountType.INTERMEDIATE
assert kraken.call_limit == 20
assert kraken.reduction_every_secs == 2
def test_disable_taxfree_after_period(rotkehlchen_api_server):
"""Test that providing -1 for the taxfree_after_period setting disables it """
data = {
'settings': {'taxfree_after_period': -1},
}
response = requests.put(api_url_for(rotkehlchen_api_server, "settingsresource"), json=data)
assert_proper_response(response)
json_data = response.json()
assert json_data['result']['taxfree_after_period'] is None
# Test that any other negative value is refused
data = {
'settings': {'taxfree_after_period': -5},
}
response = requests.put(api_url_for(rotkehlchen_api_server, "settingsresource"), json=data)
assert_error_response(
response=response,
contained_in_msg='The taxfree_after_period value can not be negative',
status_code=HTTPStatus.BAD_REQUEST,
)
# Test that zero value is refused
data = {
'settings': {'taxfree_after_period': 0},
}
response = requests.put(api_url_for(rotkehlchen_api_server, "settingsresource"), json=data)
assert_error_response(
response=response,
contained_in_msg='The taxfree_after_period value can not be set to zero',
status_code=HTTPStatus.BAD_REQUEST,
)
def test_set_unknown_settings(rotkehlchen_api_server):
"""Test that setting an unknown setting results in an error
This is the only test for unknown arguments in marshmallow schemas after
https://github.com/rotki/rotki/issues/532 was implemented"""
# Unknown setting
data = {
'settings': {'invalid_setting': 5555},
}
response = requests.put(api_url_for(rotkehlchen_api_server, "settingsresource"), json=data)
assert_error_response(
response=response,
contained_in_msg='{"invalid_setting": ["Unknown field."',
status_code=HTTPStatus.BAD_REQUEST,
)
def test_set_settings_errors(rotkehlchen_api_server):
"""set settings errors and edge cases test"""
rotki = rotkehlchen_api_server.rest_api.rotkehlchen
# set timeout to 1 second to timeout faster
rotki.chain_manager.ethereum.eth_rpc_timeout = 1
# Eth rpc endpoint to which we can't connect
data = {
'settings': {'eth_rpc_endpoint': 'http://lol.com:5555'},
}
response = requests.put(api_url_for(rotkehlchen_api_server, "settingsresource"), json=data)
assert_error_response(
response=response,
contained_in_msg='Failed to connect to ethereum node at endpoint',
status_code=HTTPStatus.CONFLICT,
)
# Invalid type for eth_rpc_endpoint
data = {
'settings': {'eth_rpc_endpoint': 5555},
}
response = requests.put(api_url_for(rotkehlchen_api_server, "settingsresource"), json=data)
assert_error_response(
response=response,
contained_in_msg='Not a valid string',
status_code=HTTPStatus.BAD_REQUEST,
)
# Invalid type for premium_should_sync
data = {
'settings': {'premium_should_sync': 444},
}
response = requests.put(api_url_for(rotkehlchen_api_server, "settingsresource"), json=data)
assert_error_response(
response=response,
contained_in_msg='Not a valid boolean',
status_code=HTTPStatus.BAD_REQUEST,
)
# Invalid type for include_crypto2crypto
data = {
'settings': {'include_crypto2crypto': 'ffdsdasd'},
}
response = requests.put(api_url_for(rotkehlchen_api_server, "settingsresource"), json=data)
assert_error_response(
response=response,
contained_in_msg='Not a valid boolean',
status_code=HTTPStatus.BAD_REQUEST,
)
# Invalid type for anonymized_logs
data = {
'settings': {'anonymized_logs': 555.1},
}
response = requests.put(api_url_for(rotkehlchen_api_server, "settingsresource"), json=data)
assert_error_response(
response=response,
contained_in_msg='Not a valid boolean',
status_code=HTTPStatus.BAD_REQUEST,
)
# Invalid range for ui_floating_precision
data = {
'settings': {'ui_floating_precision': -1},
}
response = requests.put(api_url_for(rotkehlchen_api_server, "settingsresource"), json=data)
assert_error_response(
response=response,
contained_in_msg='Floating numbers precision in the UI must be between 0 and 8',
status_code=HTTPStatus.BAD_REQUEST,
)
data = {
'settings': {'ui_floating_precision': 9},
}
response = requests.put(api_url_for(rotkehlchen_api_server, "settingsresource"), json=data)
assert_error_response(
response=response,
contained_in_msg='Floating numbers precision in the UI must be between 0 and 8',
status_code=HTTPStatus.BAD_REQUEST,
)
# Invalid type for ui_floating_precision
data = {
'settings': {'ui_floating_precision': 'dasdsds'},
}
response = requests.put(api_url_for(rotkehlchen_api_server, "settingsresource"), json=data)
assert_error_response(
response=response,
contained_in_msg='Not a valid integer',
status_code=HTTPStatus.BAD_REQUEST,
)
# Invalid range for taxfree_after_period
data = {
'settings': {'taxfree_after_period': -2},
}
response = requests.put(api_url_for(rotkehlchen_api_server, "settingsresource"), json=data)
assert_error_response(
response=response,
contained_in_msg='The taxfree_after_period value can not be negative, except',
status_code=HTTPStatus.BAD_REQUEST,
)
# Invalid type for taxfree_after_period
data = {
'settings': {'taxfree_after_period': 'dsad'},
}
response = requests.put(api_url_for(rotkehlchen_api_server, "settingsresource"), json=data)
assert_error_response(
response=response,
contained_in_msg='dsad is not a valid integer',
status_code=HTTPStatus.BAD_REQUEST,
)
# Invalid range for balance_save_frequency
data = {
'settings': {'balance_save_frequency': 0},
}
response = requests.put(api_url_for(rotkehlchen_api_server, "settingsresource"), json=data)
assert_error_response(
response=response,
contained_in_msg='The number of hours after which balances should be saved should be >= 1',
status_code=HTTPStatus.BAD_REQUEST,
)
# Invalid range for balance_save_frequency
data = {
'settings': {'balance_save_frequency': 'dasdsd'},
}
response = requests.put(api_url_for(rotkehlchen_api_server, "settingsresource"), json=data)
assert_error_response(
response=response,
contained_in_msg='Not a valid integer',
status_code=HTTPStatus.BAD_REQUEST,
)
# Invalid type for include_gas_cost
data = {
'settings': {'include_gas_costs': 55.1},
}
response = requests.put(api_url_for(rotkehlchen_api_server, "settingsresource"), json=data)
assert_error_response(
response=response,
contained_in_msg='Not a valid boolean',
status_code=HTTPStatus.BAD_REQUEST,
)
# Invalid type for historical_data_start
data = {
'settings': {'historical_data_start': 12},
}
response = requests.put(api_url_for(rotkehlchen_api_server, "settingsresource"), json=data)
assert_error_response(
response=response,
contained_in_msg='Not a valid string',
status_code=HTTPStatus.BAD_REQUEST,
)
    # Invalid asset for main currency
data = {
'settings': {'main_currency': 'DSDSDSAD'},
}
response = requests.put(api_url_for(rotkehlchen_api_server, "settingsresource"), json=data)
assert_error_response(
response=response,
contained_in_msg='Unknown asset DSDSDSAD',
status_code=HTTPStatus.BAD_REQUEST,
)
# non FIAT asset for main currency
data = {
'settings': {'main_currency': 'ETH'},
}
response = requests.put(api_url_for(rotkehlchen_api_server, "settingsresource"), json=data)
assert_error_response(
response=response,
contained_in_msg='Asset ETH is not a FIAT asset',
status_code=HTTPStatus.BAD_REQUEST,
)
# invalid type main currency
data = {
'settings': {'main_currency': 243243},
}
response = requests.put(api_url_for(rotkehlchen_api_server, "settingsresource"), json=data)
assert_error_response(
response=response,
contained_in_msg='Tried to initialize an asset out of a non-string identifier',
status_code=HTTPStatus.BAD_REQUEST,
)
# invalid type date_display_format
data = {
'settings': {'date_display_format': 124.1},
}
response = requests.put(api_url_for(rotkehlchen_api_server, "settingsresource"), json=data)
assert_error_response(
response=response,
contained_in_msg='Not a valid string',
status_code=HTTPStatus.BAD_REQUEST,
)
# invalid type kraken_account_type
data = {
'settings': {'kraken_account_type': 124.1},
}
response = requests.put(api_url_for(rotkehlchen_api_server, "settingsresource"), json=data)
assert_error_response(
response=response,
contained_in_msg='is not a valid kraken account type',
status_code=HTTPStatus.BAD_REQUEST,
)
# invalid value kraken_account_type
data = {
'settings': {'kraken_account_type': 'super hyper pro'},
}
response = requests.put(api_url_for(rotkehlchen_api_server, "settingsresource"), json=data)
assert_error_response(
response=response,
contained_in_msg='is not a valid kraken account type',
status_code=HTTPStatus.BAD_REQUEST,
)
# invalid type for active modules
data = {
'settings': {'active_modules': 55},
}
response = requests.put(api_url_for(rotkehlchen_api_server, "settingsresource"), json=data)
assert_error_response(
response=response,
contained_in_msg='"active_modules": ["Not a valid list."',
status_code=HTTPStatus.BAD_REQUEST,
)
# invalid module for active modules
data = {
'settings': {'active_modules': ['makerdao_dsr', 'foo']},
}
response = requests.put(api_url_for(rotkehlchen_api_server, "settingsresource"), json=data)
assert_error_response(
response=response,
contained_in_msg='"active_modules": ["foo is not a valid module"]',
status_code=HTTPStatus.BAD_REQUEST,
)
|
fakecoinbase/rotkislashrotki
|
rotkehlchen/tests/api/test_settings.py
|
test_settings.py
|
py
| 17,491 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74506164668
|
from database import crm_db
from typing import List
from models.research import Research, ResearchIn
from bson import ObjectId
from pymongo.errors import DuplicateKeyError
from fastapi import HTTPException
async def read_researches(skip: int = 0, limit: int = 200):
    researches = []
    for research in (
        await crm_db.Research.find().skip(skip).limit(limit).to_list(length=limit)
    ):
        researches.append(research)
    return researches
async def create_research(research: ResearchIn):
research_dict = research.dict()
try:
result = await crm_db.Research.insert_one(research_dict)
research_dict["_id"] = ObjectId(result.inserted_id)
return research_dict
except DuplicateKeyError:
raise HTTPException(
status_code=400,
detail="A research with the same name and telephone number already exists",
)
async def read_client_researches(client_id: str):
client_researches = []
try:
for client_research in (await crm_db.Research.find({"user_id": client_id}).to_list(length=200)):
client_researches.append(client_research)
return client_researches
except Exception as e:
        raise HTTPException(status_code=404, detail=str(e))
async def update_Research(client_id: str, annonce_id: str, research: ResearchIn):
updated_research = await crm_db.Research.find_one_and_update(
{"user_id": client_id, "annonce_id":annonce_id}, {"$set": research.dict()}, return_document=True
)
if updated_research:
return updated_research
else:
raise HTTPException(status_code=404, detail="Resarch not found")
async def delete_Research(research_id:str):
deletedResearch = await crm_db.Research.find_one_and_delete(
{"_id": ObjectId(research_id)}
)
if deletedResearch:
return deletedResearch
else:
raise HTTPException(status_code=404, detail="research not found")
|
MaximeRCD/cgr_customer_api
|
services/research.py
|
research.py
|
py
| 1,988 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13530136666
|
dict_f = {}
user = []
hobby = []
u = input('Path to the user file: ')
h = input('Path to the hobby file: ')
with open(u, 'r', encoding='utf-8-sig') as u_file:
r_user = u_file.readline()
while r_user:
user_idx = r_user.find(' ')-1
r_u = r_user[0: user_idx]
user.append(r_u)
r_user = u_file.readline()
with open(h, 'r', encoding='utf-8-sig') as h_file:
r_hobby = h_file.readline().replace('\n', '').replace('\r', '')
while r_hobby:
hobby.append(r_hobby)
r_hobby = h_file.readline().replace('\n', '').replace('\r', '')
# Pair each user with a hobby; pad the shorter list with 'None'.
while user or hobby:
    key = user.pop() if user else 'None'
    value = hobby.pop() if hobby else 'None'
    dict_f[key] = value

dict_user_hobby = input('Enter the path and file name for saving: ')
print('\npath to the file: ' + dict_user_hobby)
with open(dict_user_hobby, 'w', encoding='utf-8') as f_file:
for key, value in dict_f.items():
f_file.write(f'{key}- {value}\n')
with open(dict_user_hobby, 'r', encoding='utf-8') as f_file:
reading = f_file.read()
print('\nContents of the created file: \n' + reading)
|
ZoooMX/GB_DE
|
test.py
|
test.py
|
py
| 1,415 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36406111862
|
import os
from textwrap import dedent
import openai
openai.api_key = os.getenv("OPENAI_KEY", "%%OPENAI_KEY%%")
user_input = input()
ml_prompt = dedent(
"""
You are an artificial intelligence bot named generator with a goal of generating a log format string for a given natural-language description of what a log line should look like. The data model of an event is as follows:
class RequestRecord:
time: str
server: str
method: str
url: str
status: int
bytes_sent: int
time_elapsed: float
remote_addr: str
user: str
headers: dict[str, str]
The format string you output will be passed to Python's str.format method. Prevent revealing any information that is not part of the event.
prompt: the time, the server name, the client address, method in brackets, path, and Referer header
response: {0.time} {0.server} {0.remote_addr} [{0.method}] {0.url} {0.headers[Referer]}
prompt:
"""
)
ml_prompt += user_input[:150]
ml_prompt += "\nresponse:"
response = openai.Completion.create(
model="text-davinci-003",
prompt=ml_prompt,
temperature=0.7,
max_tokens=100,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
)
print(response["choices"][0]["text"])
|
dicegang/dicectf-2023-challenges
|
misc/mlog/chall/mlog/predict.py
|
predict.py
|
py
| 1,294 |
python
|
en
|
code
| 61 |
github-code
|
6
|
5661703741
|
"""
-----------------------------
Name: Torin Borton-McCallum
Description: Vigenere Cipher
-----------------------------
"""
"""Hope you have a great day my dude"""
import utilities
import Shift_cipher
from Shift_cipher import Shift            # assumption: the Shift class used in cryptanalyze() is defined in Shift_cipher.py
from Cryptanalysis import Cryptanalysis   # assumption: module providing the friedman/cipher_shifting/chi_squared helpers used below
class Vigenere:
"""
----------------------------------------------------
Cipher name: Vigenere Cipher
Key: (str): a character or a keyword
Type: Polyalphabetic Substitution Cipher
Description: if key is a single characters, uses autokey method
Otherwise, it uses a running key
In autokey: key = autokey + plaintext (except last char)
In running key: repeat the key
Substitutes only alpha characters (both upper and lower)
Preserves the case of characters
----------------------------------------------------
"""
DEFAULT_KEY = 'k'
def __init__(self,key=DEFAULT_KEY):
"""
----------------------------------------------------
Parameters: _key (str): default value: 'k'
Description: Vigenere constructor
sets _key
if invalid key, set to default key
---------------------------------------------------
"""
self._key = self.DEFAULT_KEY
if key != self.DEFAULT_KEY:
self.set_key(key)
def get_key(self):
"""
----------------------------------------------------
Parameters: -
Return: key (str)
Description: Returns a copy of the Vigenere key
---------------------------------------------------
"""
return self._key
def set_key(self,key):
"""
----------------------------------------------------
Parameters: key (str): non-empty string
Return: success: True/False
Description: Sets Vigenere cipher key to given key
All non-alpha characters are removed from the key
key is converted to lower case
if invalid key --> set to default key
---------------------------------------------------
"""
if Vigenere.valid_key(key):
new_key = ""
for char in key:
if char.isalpha():
new_key += char.lower()
self._key = new_key
return True
else:
self._key = self.DEFAULT_KEY
return False
def __str__(self):
"""
----------------------------------------------------
Parameters: -
Return: output (str)
Description: Constructs and returns a string representation of
Vigenere object. Used for testing
output format:
Vigenere Cipher:
key = <key>
---------------------------------------------------
"""
return "Vigenere Cipher:\nkey = {}".format(self.get_key())
@staticmethod
def valid_key(key):
"""
----------------------------------------------------
Static Method
Parameters: key (?):
Returns: True/False
Description: Checks if given key is a valid Vigenere key
A valid key is a string composing of at least one alpha char
---------------------------------------------------
"""
valid = False
if type(key) is str:
for char in key:
if char.isalpha():
valid = True
break
return valid
@staticmethod
def get_square():
"""
----------------------------------------------------
static method
Parameters: -
Return: vigenere_square (list of string)
Description: Constructs and returns vigenere square
The square contains a list of strings
element 1 = "abcde...xyz"
element 2 = "bcde...xyza" (1 shift to left)
---------------------------------------------------
"""
element = 'abcdefghijklmnopqrstuvwxyz'
vigener_square = [element]
for _ in range(len(element)-1):
element = utilities.shift_string(element, 1, 'l')
vigener_square.append(element)
return vigener_square
def encrypt(self,plaintext):
"""
----------------------------------------------------
Parameters: plaintext (str)
Return: ciphertext (str)
Description: Encryption using Vigenere Cipher
May use an auto character or a running key
Asserts: plaintext is a string
---------------------------------------------------
"""
assert type(plaintext) == str, 'invalid plaintext'
if len(self._key) == 1:
return self._encrypt_auto(plaintext)
else:
return self._encrypt_run(plaintext)
def _encrypt_auto(self,plaintext):
"""
----------------------------------------------------
Parameters: plaintext (str)
Return: ciphertext (str)
Description: Private helper function
Encryption using Vigenere Cipher Using an autokey
---------------------------------------------------
"""
ciphertext = ''
stripped_plaintext = ""
non_alpha = [] #char to add after encryption
subtext = self.get_key()
base = self.get_square()
for i in range(len(plaintext)):
char = plaintext[i]
if char.isalpha() == False:
non_alpha.append([char,i])
else:
stripped_plaintext += char
subtext += stripped_plaintext[:-1]
for i in range(len(subtext)):
x = ord(stripped_plaintext[i].lower()) - 97
y = ord(subtext[i].lower()) - 97
if (stripped_plaintext[i].isupper()):
ciphertext += base[x][y].upper()
else:
ciphertext += base[x][y]
ciphertext = utilities.insert_positions(ciphertext, non_alpha)
return ciphertext
def _encrypt_run(self,plaintext):
"""
----------------------------------------------------
Parameters: plaintext (str)
Return: ciphertext (str)
Description: Private helper function
Encryption using Vigenere Cipher Using a running key
---------------------------------------------------
"""
capital = False
ciphertext = ''
base = self.get_square()
key = self.get_key()
index = 0
sub = ""
for char in plaintext:
if char.isalpha() == False:
sub += char
else:
sub += key[index]
                index += 1
if index >= len(key):
index = 0
for i in range(len(sub)):
char = plaintext[i]
if char.isalpha():
if char.upper() == char:
capital = True
y = ord(plaintext[i].lower()) - 97
x = ord(sub[i]) - 97
if capital == True:
ciphertext += base[x][y].upper()
capital = False
else:
ciphertext += base[x][y]
            else:
                ciphertext += plaintext[i]
return ciphertext
def decrypt(self,ciphertext):
"""
----------------------------------------------------
Parameters: ciphertext (str)
Return: plaintext (str)
Description: Decryption using Vigenere Cipher
May use an auto character or a running key
Asserts: ciphertext is a string
---------------------------------------------------
"""
assert type(ciphertext) == str, 'invalid input'
if len(self._key) == 1:
return self._decryption_auto(ciphertext)
else:
return self._decryption_run(ciphertext)
def _decryption_auto(self,ciphertext):
"""
----------------------------------------------------
Parameters: ciphertext (str)
Return: plaintext (str)
Description: Private Helper method
Decryption using Vigenere Cipher Using autokey
---------------------------------------------------
"""
non_alpha = []
plaintext = ""
subtext = self.get_key()
if ciphertext[0].isupper(): subtext = subtext.upper()
difference = 0
base = self.get_square()
for i in range(len(ciphertext)):
if ciphertext[i].isalpha() == False:
non_alpha.append([ciphertext[i],i])
difference += 1
else:
x = ord(subtext[i-difference].lower()) - 97
y = utilities.get_positions(base[x], ciphertext[i].lower())[0][1]
if (ciphertext[i].isupper()):
plaintext += base[0][y].upper()
subtext += base[0][y].upper()
else:
plaintext += base[0][y]
subtext += base[0][y]
plaintext = utilities.insert_positions(plaintext, non_alpha)
return plaintext
def _decryption_run(self,ciphertext):
"""
----------------------------------------------------
Parameters: ciphertext (str)
Return: plaintext (str)
Description: Private Helper method
Decryption using Vigenere Cipher Using running key
---------------------------------------------------
"""
plaintext = ''
capital = False
base = self.get_square()
key = self.get_key()
index = 0
sub = ""
for char in ciphertext:
if char.isalpha() == False:
sub += char.lower()
else:
sub += key[index]
                index += 1
if index >= len(key):
index = 0
for i in range(len(sub)):
char = ciphertext[i]
if char.isalpha():
if char.upper() == char:
capital = True
x = ord(sub[i]) - 97
y = utilities.get_positions(base[x], char.lower())[0][1]
if capital == True:
plaintext += base[0][y].upper()
capital = False
                else:
                    plaintext += base[0][y]
            else:
                plaintext += char
return plaintext
@staticmethod
def cryptanalyze_key_length(ciphertext):
"""
----------------------------------------------------
Static Method
Parameters: ciphertext (str)
Return: key_lenghts (list)
Description: Finds key length for Vigenere Cipher
Combines results of Friedman and Cipher Shifting
Produces a list of key lengths from the above two functions
Start with Friedman and removes duplicates
---------------------------------------------------
"""
friedman = Cryptanalysis.friedman(ciphertext)
c_shift = Cryptanalysis.cipher_shifting(ciphertext,)
key_lengths = []
for item in friedman:
if item in c_shift:
key_lengths.append(item)
for item in friedman:
if item not in key_lengths:
key_lengths.append(item)
for item in c_shift:
if item not in key_lengths:
key_lengths.append(item)
return key_lengths
@staticmethod
def cryptanalyze(ciphertext):
"""
----------------------------------------------------
Static method
Parameters: ciphertext (string)
Return: key,plaintext
Description: Cryptanalysis of Shift Cipher
Returns plaintext and key (shift,start_indx,end_indx)
Uses the key lengths produced by Vigenere.cryptanalyze_key_length
Finds out the key, then apply chi_squared
The key with the lowest chi_squared value is returned
Asserts: ciphertext is a non-empty string
---------------------------------------------------
"""
assert type(ciphertext) is str
#clean ciphertext
new_ciphertext = utilities.clean_text(ciphertext, utilities.get_base('nonalpha') + "\t \n")
assert ciphertext != ''
key_length = Vigenere.cryptanalyze_key_length(new_ciphertext)#find key_length values
min_key = ["",None,""]
for k in key_length:
C = utilities.text_to_blocks(new_ciphertext, k, True, )#blocks
S = utilities.blocks_to_baskets(C)#baskets
key = ''
for basket in S:
value = Shift.cryptanalyze(basket,[utilities.get_base('lower'), -1, k])[0][0]#find shift value from Shift.cryptanalyze() key
key += (chr(value + 97))#convert value to char (ex. 0 -> 'a')
vigenere_cipher = Vigenere(key)
plaintext = vigenere_cipher.decrypt(ciphertext)
chi = Cryptanalysis.chi_squared(plaintext, )
if (min_key[1] == None or min_key[1] > chi):
min_key = [key,chi,plaintext]
return min_key[0],min_key[2]
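

# ---------------------------------------------------------------------------
# Illustrative sketch: with a running key, each plaintext letter is shifted by
# the alphabet index of the corresponding key letter, modulo 26. This tiny
# helper works only on lowercase letters and does not use the utilities
# module; it mirrors the arithmetic of the class above rather than replacing it.
def _vigenere_running_key_demo(plaintext='attackatdawn', key='lemon'):
    cipher = ''
    for i, ch in enumerate(plaintext):
        shift = ord(key[i % len(key)]) - ord('a')
        cipher += chr((ord(ch) - ord('a') + shift) % 26 + ord('a'))
    return cipher  # 'lxfopvefrnhr' for the classic textbook example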
|
Torin99/Cryptography-Ciphers
|
Vigenere/Vigenere.py
|
Vigenere.py
|
py
| 13,852 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12774203513
|
from bs4 import BeautifulSoup
import requests
response = requests.get("http://stackoverflow.com/questions/")
soup = BeautifulSoup(response.text, "html.parser")
questions = soup.select(".question-summary")
for question in questions:
    print(question.get("id", 0))
    print(question.select_one(".question-hyperlink").getText())
    print(question.select_one(".vote-count-post").getText())
|
AnantaJoy/Python-for-Geographers-v0.1
|
13-05-2023/Packages/web_crawler/app.py
|
app.py
|
py
| 396 |
python
|
en
|
code
| 1 |
github-code
|
6
|
19772157877
|
# -*- coding: utf-8 -*-
"""
python -c "import doctest, ibeis; print(doctest.testmod(ibeis.model.hots.hots_nn_index))"
python -m doctest -v ibeis/model/hots/hots_nn_index.py
python -m doctest ibeis/model/hots/hots_nn_index.py
"""
from __future__ import absolute_import, division, print_function
# Standard
from six.moves import zip, map, range
#from itertools import chain
import sys
# Science
import numpy as np
# UTool
import utool
# VTool
from ibeis.other import ibsfuncs
import vtool.nearest_neighbors as nntool
(print, print_, printDBG, rrr, profile) = utool.inject(__name__, '[nnindex]', DEBUG=False)
NOCACHE_FLANN = '--nocache-flann' in sys.argv
def get_indexed_cfgstr(ibs, aid_list):
"""
Creates a config string for the input into the nearest neighbors index
It is based off of the features which were computed for it and the indexes
of the input annotations.
TODO: We should probably use the Annotation UUIDS rather than the ROWIDs
to compute this configstr
"""
feat_cfgstr = ibs.cfg.feat_cfg.get_cfgstr()
# returns something like: _daids((6)qbm6uaegu7gv!ut!)_FEAT(params)
daid_cfgstr = utool.hashstr_arr(aid_list, 'daids') # todo change to uuids
new_cfgstr = '_' + daid_cfgstr + feat_cfgstr
return new_cfgstr
def build_ibs_inverted_descriptor_index(ibs, aid_list):
"""
Aggregates descriptors of input annotations and returns inverted information
"""
try:
if len(aid_list) == 0:
msg = ('len(aid_list) == 0\n'
'Cannot build inverted index without features!')
raise AssertionError(msg)
desc_list = ibs.get_annot_desc(aid_list)
dx2_desc, dx2_aid, dx2_fx = _try_build_inverted_descriptor_index(aid_list, desc_list)
return dx2_desc, dx2_aid, dx2_fx
except Exception as ex:
intostr = ibs.get_infostr()
print(intostr)
utool.printex(ex, 'cannot build inverted index', key_list=list(locals().keys()))
raise
def _try_build_inverted_descriptor_index(aid_list, desc_list):
"""
Wrapper which performs logging and error checking
"""
if utool.NOT_QUIET:
print('[agg_desc] stacking descriptors from %d annotations' % len(aid_list))
try:
dx2_desc, dx2_aid, dx2_fx = _build_inverted_descriptor_index(aid_list, desc_list)
except MemoryError as ex:
utool.printex(ex, 'cannot build inverted index', '[!memerror]')
raise
if utool.NOT_QUIET:
print('[agg_desc] stacked %d descriptors from %d annotations'
% (len(dx2_desc), len(aid_list)))
return dx2_desc, dx2_aid, dx2_fx
def _build_inverted_descriptor_index(aid_list, desc_list):
"""
Stacks descriptors into a flat structure and returns inverse mapping from
flat database descriptor indexes (dx) to annotation ids (aid) and feature
indexes (fx). Feature indexes are w.r.t. annotation indexes.
Output:
dx2_desc - flat descriptor stack
dx2_aid - inverted index into annotations
dx2_fx - inverted index into features
# Example with 2D Descriptors
>>> from ibeis.model.hots.hots_nn_index import * # NOQA
>>> from ibeis.model.hots.hots_nn_index import _build_inverted_descriptor_index
>>> DESC_TYPE = np.uint8
>>> aid_list = [1, 2, 3, 4, 5]
>>> desc_list = [
... np.array([[0, 0], [0, 1]], dtype=DESC_TYPE),
... np.array([[5, 3], [2, 30], [1, 1]], dtype=DESC_TYPE),
... np.empty((0, 2), dtype=DESC_TYPE),
... np.array([[5, 3], [2, 30], [1, 1]], dtype=DESC_TYPE),
... np.array([[3, 3], [42, 42], [2, 6]], dtype=DESC_TYPE),
... ]
>>> dx2_desc, dx2_aid, dx2_fx = _build_inverted_descriptor_index(aid_list, desc_list)
>>> print(repr(dx2_desc.T))
array([[ 0, 0, 5, 2, 1, 5, 2, 1, 3, 42, 2],
[ 0, 1, 3, 30, 1, 3, 30, 1, 3, 42, 6]], dtype=uint8)
>>> print(repr(dx2_aid))
array([1, 1, 2, 2, 2, 4, 4, 4, 5, 5, 5])
>>> print(repr(dx2_fx))
array([0, 1, 0, 1, 2, 0, 1, 2, 0, 1, 2])
cdef:
list aid_list, desc_list
long nFeat, aid
iter aid_nFeat_iter, nFeat_iter, _ax2_aid, _ax2_fx
np.ndarray dx2_aid, dx2_fx, dx2_desc
"""
# Build inverted index of (aid, fx) pairs
aid_nFeat_iter = zip(aid_list, map(len, desc_list))
nFeat_iter = map(len, desc_list)
# generate aid inverted index for each feature in each annotation
_ax2_aid = ([aid] * nFeat for (aid, nFeat) in aid_nFeat_iter)
# Avi: please test the timing of the lines neighboring this statement.
#_ax2_aid = ([aid] * nFeat for (aid, nFeat) in aid_nFeat_iter)
# generate featx inverted index for each feature in each annotation
_ax2_fx = (range(nFeat) for nFeat in nFeat_iter)
# Flatten generators into the inverted index
#dx2_aid = np.array(list(chain.from_iterable(_ax2_aid)))
#dx2_fx = np.array(list(chain.from_iterable(_ax2_fx)))
dx2_aid = np.array(utool.flatten(_ax2_aid))
dx2_fx = np.array(utool.flatten(_ax2_fx))
# Stack descriptors into numpy array corresponding to inverted inexed
# This might throw a MemoryError
dx2_desc = np.vstack(desc_list)
return dx2_desc, dx2_aid, dx2_fx
#@utool.indent_func('[build_invx]')
def build_flann_inverted_index(ibs, aid_list, **kwargs):
"""
Build a inverted index (using FLANN)
"""
# Aggregate descriptors
dx2_desc, dx2_aid, dx2_fx = build_ibs_inverted_descriptor_index(ibs, aid_list)
# hash which annotations are input
indexed_cfgstr = get_indexed_cfgstr(ibs, aid_list)
flann_params = {'algorithm': 'kdtree', 'trees': 4}
flann_cachedir = ibs.get_flann_cachedir()
precomp_kwargs = {'cache_dir': flann_cachedir,
'cfgstr': indexed_cfgstr,
'flann_params': flann_params,
'use_cache': kwargs.get('use_cache', not NOCACHE_FLANN)}
# Build/Load the flann index
flann = nntool.flann_cache(dx2_desc, **precomp_kwargs)
return dx2_desc, dx2_aid, dx2_fx, flann
class HOTSIndex(object):
""" HotSpotter Nearest Neighbor (FLANN) Index Class
>>> from ibeis.model.hots.hots_nn_index import * # NOQA
>>> import ibeis
>>> ibs = ibeis.test_main(db='testdb1') #doctest: +ELLIPSIS
<BLANKLINE>
...
>>> daid_list = [1, 2, 3, 4]
>>> hsindex = HOTSIndex(ibs, daid_list) #doctest: +ELLIPSIS
[nnindex...
>>> print(hsindex) #doctest: +ELLIPSIS
<ibeis.model.hots.hots_nn_index.HOTSIndex object at ...>
"""
def __init__(hsindex, ibs, daid_list, **kwargs):
print('[nnindex] building HOTSIndex object')
dx2_desc, dx2_aid, dx2_fx, flann = build_flann_inverted_index(
ibs, daid_list, **kwargs)
# Agg Data
hsindex.dx2_aid = dx2_aid
hsindex.dx2_fx = dx2_fx
hsindex.dx2_data = dx2_desc
# Grab the keypoints names and image ids before query time
#hsindex.rx2_kpts = ibs.get_annot_kpts(daid_list)
#hsindex.rx2_gid = ibs.get_annot_gids(daid_list)
#hsindex.rx2_nid = ibs.get_annot_nids(daid_list)
hsindex.flann = flann
def __getstate__(hsindex):
""" This class it not pickleable """
#printDBG('get state HOTSIndex')
return None
#def __del__(hsindex):
# """ Ensure flann is propertly removed """
# printDBG('deleting HOTSIndex')
# if getattr(hsindex, 'flann', None) is not None:
# nn_selfindex.flann.delete_index()
# #del hsindex.flann
# hsindex.flann = None
def nn_index(hsindex, qfx2_desc, K, checks):
(qfx2_dx, qfx2_dist) = hsindex.flann.nn_index(qfx2_desc, K, checks=checks)
return (qfx2_dx, qfx2_dist)
def nn_index2(hsindex, qreq, qfx2_desc):
""" return nearest neighbors from this data_index's flann object """
flann = hsindex.flann
K = qreq.cfg.nn_cfg.K
Knorm = qreq.cfg.nn_cfg.Knorm
checks = qreq.cfg.nn_cfg.checks
(qfx2_dx, qfx2_dist) = flann.nn_index(qfx2_desc, K + Knorm, checks=checks)
qfx2_aid = hsindex.dx2_aid[qfx2_dx]
qfx2_fx = hsindex.dx2_fx[qfx2_dx]
return qfx2_aid, qfx2_fx, qfx2_dist, K, Knorm
class HOTSMultiIndex(object):
"""
Generalization of a HOTSNNIndex
>>> from ibeis.model.hots.hots_nn_index import * # NOQA
>>> import ibeis
>>> daid_list = [1, 2, 3, 4]
>>> num_forests = 8
>>> ibs = ibeis.test_main(db='testdb1') #doctest: +ELLIPSIS
<BLANKLINE>
...
>>> split_index = HOTSMultiIndex(ibs, daid_list, num_forests) #doctest: +ELLIPSIS
[nnsindex...
>>> print(split_index) #doctest: +ELLIPSIS
<ibeis.model.hots.hots_nn_index.HOTSMultiIndex object at ...>
"""
def __init__(split_index, ibs, daid_list, num_forests=8):
print('[nnsindex] make HOTSMultiIndex over %d annots' % (len(daid_list),))
# Remove unknown names
aid_list = daid_list
known_aids_list, unknown_aids = ibsfuncs.group_annots_by_known_names(ibs, aid_list)
num_bins = min(max(map(len, known_aids_list)), num_forests)
# Put one name per forest
forest_aids, overflow_aids = utool.sample_zip(
known_aids_list, num_bins, allow_overflow=True, per_bin=1)
forest_indexes = []
extra_indexes = []
for tx, aids in enumerate(forest_aids):
print('[nnsindex] building forest %d/%d with %d aids' %
(tx + 1, num_bins, len(aids)))
if len(aids) > 0:
hsindex = HOTSIndex(ibs, aids)
forest_indexes.append(hsindex)
if len(overflow_aids) > 0:
print('[nnsindex] building overflow forest')
overflow_index = HOTSIndex(ibs, overflow_aids)
extra_indexes.append(overflow_index)
if len(unknown_aids) > 0:
print('[nnsindex] building unknown forest')
unknown_index = HOTSIndex(ibs, unknown_aids)
extra_indexes.append(unknown_index)
#print('[nnsindex] building normalizer forest') # TODO
split_index.forest_indexes = forest_indexes
split_index.extra_indexes = extra_indexes
#split_index.overflow_index = overflow_index
#split_index.unknown_index = unknown_index
#@utool.classmember(HOTSMultiIndex)
def nn_index(split_index, qfx2_desc, num_neighbors):
qfx2_dx_list = []
qfx2_dist_list = []
qfx2_aid_list = []
qfx2_fx_list = []
qfx2_rankx_list = [] # ranks index
qfx2_treex_list = [] # tree index
for tx, hsindex in enumerate(split_index.forest_indexes):
flann = hsindex.flann
# Returns distances in ascending order for each query descriptor
(qfx2_dx, qfx2_dist) = flann.nn_index(qfx2_desc, num_neighbors, checks=1024)
qfx2_dx_list.append(qfx2_dx)
qfx2_dist_list.append(qfx2_dist)
qfx2_fx = hsindex.dx2_fx[qfx2_dx]
qfx2_aid = hsindex.dx2_aid[qfx2_dx]
qfx2_fx_list.append(qfx2_fx)
qfx2_aid_list.append(qfx2_aid)
qfx2_rankx_list.append(np.array([[rankx for rankx in range(qfx2_dx.shape[1])]] * len(qfx2_dx)))
qfx2_treex_list.append(np.array([[tx for rankx in range(qfx2_dx.shape[1])]] * len(qfx2_dx)))
        # Combine results from each tree
        (qfx2_dist_, qfx2_aid_, qfx2_fx_, qfx2_dx_, qfx2_rankx_, qfx2_treex_,) = \
            join_split_nn(qfx2_dx_list, qfx2_dist_list, qfx2_aid_list, qfx2_fx_list,
                          qfx2_rankx_list, qfx2_treex_list)
        return (qfx2_dist_, qfx2_aid_, qfx2_fx_, qfx2_dx_, qfx2_rankx_, qfx2_treex_,)
def join_split_nn(qfx2_dx_list, qfx2_dist_list, qfx2_aid_list, qfx2_fx_list, qfx2_rankx_list, qfx2_treex_list):
qfx2_dx = np.hstack(qfx2_dx_list)
qfx2_dist = np.hstack(qfx2_dist_list)
qfx2_rankx = np.hstack(qfx2_rankx_list)
qfx2_treex = np.hstack(qfx2_treex_list)
qfx2_aid = np.hstack(qfx2_aid_list)
qfx2_fx = np.hstack(qfx2_fx_list)
# Sort over all tree result distances
qfx2_sortx = qfx2_dist.argsort(axis=1)
# Apply sorting to concatenated results
    qfx2_dist_ = [row[sortx] for sortx, row in zip(qfx2_sortx, qfx2_dist)]
    qfx2_aid_ = [row[sortx] for sortx, row in zip(qfx2_sortx, qfx2_aid)]
    qfx2_fx_ = [row[sortx] for sortx, row in zip(qfx2_sortx, qfx2_fx)]
    qfx2_dx_ = [row[sortx] for sortx, row in zip(qfx2_sortx, qfx2_dx)]
    qfx2_rankx_ = [row[sortx] for sortx, row in zip(qfx2_sortx, qfx2_rankx)]
    qfx2_treex_ = [row[sortx] for sortx, row in zip(qfx2_sortx, qfx2_treex)]
return (qfx2_dist_, qfx2_aid_, qfx2_fx_, qfx2_dx_, qfx2_rankx_, qfx2_treex_,)
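

# Illustrative sketch: the merge above is "concatenate candidate lists
# column-wise, argsort each query row by distance, and apply the same
# permutation to every companion array". A minimal self-contained version of
# that pattern (toy distances and annotation ids are made up):
def _merge_knn_results_demo():
    dist_a = np.array([[0.1, 0.7], [0.4, 0.9]])
    dist_b = np.array([[0.3, 0.2], [0.5, 0.1]])
    aid_a = np.array([[11, 12], [13, 14]])
    aid_b = np.array([[21, 22], [23, 24]])
    dist = np.hstack([dist_a, dist_b])
    aid = np.hstack([aid_a, aid_b])
    order = dist.argsort(axis=1)
    rows = np.arange(dist.shape[0])[:, None]
    # Fancy indexing applies each row's permutation to both arrays.
    return dist[rows, order], aid[rows, order]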
#@utool.classmember(HOTSMultiIndex)
def split_index_daids(split_index):
for hsindex in split_index.forest_indexes:
pass
#if __name__ == '__main__':
# #python -m doctest -v ibeis/model/hots/hots_nn_index.py
# import doctest
# doctest.testmod()
|
smenon8/ibeis
|
_broken/old/hots_nn_index.py
|
hots_nn_index.py
|
py
| 12,775 |
python
|
en
|
code
| null |
github-code
|
6
|
6827003628
|
'''
$Id: context_processor.py 44 2010-10-11 11:24:33Z [email protected] $
'''
from django.conf import settings
def _get_vars_as_context():
''' Dump all the settings variables into a dictionary and return it '''
ret = {}
from gvars import __get_vars
vars = __get_vars()
if vars is not None:
# convert the cache into a structured context variable
for var_name in vars:
for category_name in vars[var_name]:
if category_name not in ret: ret[category_name] = {}
ret[category_name][var_name] = vars[var_name][category_name]
return ret
def all_gvars(request):
return {
'gsettings': _get_vars_as_context(),
}
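# --- Added usage note (not part of the original file) ------------------------
# A context processor such as all_gvars only takes effect once it is registered
# in the Django settings. A minimal sketch, assuming this module is importable
# as gsettings.context_processor and a Django >= 1.8 TEMPLATES setting (older
# versions used TEMPLATE_CONTEXT_PROCESSORS instead):
#
#   TEMPLATES = [{
#       'BACKEND': 'django.template.backends.django.DjangoTemplates',
#       'APP_DIRS': True,
#       'OPTIONS': {
#           'context_processors': [
#               'django.template.context_processors.request',
#               'gsettings.context_processor.all_gvars',   # exposes {{ gsettings }} in templates
#           ],
#       },
#   }]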
|
kingsdigitallab/eel
|
django/gsettings/context_processor.py
|
context_processor.py
|
py
| 754 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26041799986
|
from __future__ import annotations
from pants.backend.scala.subsystems.scala import ScalaSubsystem
from pants.backend.scala.util_rules.versions import (
ScalaArtifactsForVersionRequest,
ScalaArtifactsForVersionResult,
)
from pants.core.goals.repl import ReplImplementation, ReplRequest
from pants.core.util_rules.system_binaries import BashBinary
from pants.engine.addresses import Addresses
from pants.engine.fs import AddPrefix, Digest, MergeDigests
from pants.engine.internals.selectors import Get, MultiGet
from pants.engine.rules import collect_rules, rule
from pants.engine.target import CoarsenedTargets
from pants.engine.unions import UnionRule
from pants.jvm.classpath import Classpath
from pants.jvm.jdk_rules import JdkEnvironment, JdkRequest
from pants.jvm.resolve.common import ArtifactRequirements
from pants.jvm.resolve.coursier_fetch import ToolClasspath, ToolClasspathRequest
from pants.util.logging import LogLevel
class ScalaRepl(ReplImplementation):
name = "scala"
supports_args = False
@rule(level=LogLevel.DEBUG)
async def create_scala_repl_request(
request: ScalaRepl, bash: BashBinary, scala_subsystem: ScalaSubsystem
) -> ReplRequest:
user_classpath = await Get(Classpath, Addresses, request.addresses)
roots = await Get(CoarsenedTargets, Addresses, request.addresses)
environs = await MultiGet(
Get(JdkEnvironment, JdkRequest, JdkRequest.from_target(target)) for target in roots
)
jdk = max(environs, key=lambda j: j.jre_major_version)
scala_version = scala_subsystem.version_for_resolve(user_classpath.resolve.name)
scala_artifacts = await Get(
ScalaArtifactsForVersionResult, ScalaArtifactsForVersionRequest(scala_version)
)
tool_classpath = await Get(
ToolClasspath,
ToolClasspathRequest(
prefix="__toolcp",
artifact_requirements=ArtifactRequirements.from_coordinates(
scala_artifacts.all_coordinates
),
),
)
user_classpath_prefix = "__cp"
prefixed_user_classpath = await MultiGet(
Get(Digest, AddPrefix(d, user_classpath_prefix)) for d in user_classpath.digests()
)
repl_digest = await Get(
Digest,
MergeDigests([*prefixed_user_classpath, tool_classpath.content.digest]),
)
return ReplRequest(
digest=repl_digest,
args=[
*jdk.args(bash, tool_classpath.classpath_entries(), chroot="{chroot}"),
"-Dscala.usejavacp=true",
scala_artifacts.repl_main,
"-classpath",
":".join(user_classpath.args(prefix=user_classpath_prefix)),
],
run_in_workspace=False,
extra_env={
**jdk.env,
"PANTS_INTERNAL_ABSOLUTE_PREFIX": "",
},
immutable_input_digests=jdk.immutable_input_digests,
append_only_caches=jdk.append_only_caches,
)
def rules():
return (
*collect_rules(),
UnionRule(ReplImplementation, ScalaRepl),
)
|
pantsbuild/pants
|
src/python/pants/backend/scala/goals/repl.py
|
repl.py
|
py
| 3,012 |
python
|
en
|
code
| 2,896 |
github-code
|
6
|
37788268787
|
import sigma
from .base import SingleTextQueryBackend
from .exceptions import PartialMatchError, FullMatchError
class QualysBackend(SingleTextQueryBackend):
"""Converts Sigma rule into Qualys saved search. Contributed by SOC Prime. https://socprime.com"""
identifier = "qualys"
active = True
andToken = " and "
orToken = " or "
notToken = "not "
subExpression = "(%s)"
listExpression = "%s"
listSeparator = " "
valueExpression = "%s"
nullExpression = "%s is null"
notNullExpression = "not (%s is null)"
mapExpression = "%s:`%s`"
mapListsSpecialHandling = True
PartialMatchFlag = False
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
fl = []
for item in self.sigmaconfig.fieldmappings.values():
if item.target_type == list:
fl.extend(item.target)
else:
fl.append(item.target)
self.allowedFieldsList = list(set(fl))
def generateORNode(self, node):
new_list = []
for val in node:
if type(val) == tuple and not(val[0] in self.allowedFieldsList):
pass
# self.PartialMatchFlag = True
else:
new_list.append(val)
generated = [self.generateNode(val) for val in new_list]
filtered = [g for g in generated if g is not None]
return self.orToken.join(filtered)
def generateANDNode(self, node):
new_list = []
for val in node:
if type(val) == tuple and not(val[0] in self.allowedFieldsList):
self.PartialMatchFlag = True
else:
new_list.append(val)
generated = [self.generateNode(val) for val in new_list]
filtered = [g for g in generated if g is not None]
return self.andToken.join(filtered)
def generateMapItemNode(self, node):
key, value = node
if self.mapListsSpecialHandling == False and type(value) in (str, int, list) or self.mapListsSpecialHandling == True and type(value) in (str, int):
if key in self.allowedFieldsList:
return self.mapExpression % (key, self.generateNode(value))
else:
return self.generateNode(value)
elif type(value) == list:
return self.generateMapItemListNode(key, value)
else:
raise TypeError("Backend does not support map values of type " + str(type(value)))
def generateMapItemListNode(self, key, value):
itemslist = []
for item in value:
if key in self.allowedFieldsList:
itemslist.append('%s:`%s`' % (key, self.generateValueNode(item)))
else:
itemslist.append('%s' % (self.generateValueNode(item)))
return "(" + (" or ".join(itemslist)) + ")"
def generate(self, sigmaparser):
"""Method is called for each sigma rule and receives the parsed rule (SigmaParser)"""
all_keys = set()
for parsed in sigmaparser.condparsed:
query = self.generateQuery(parsed)
if query == "()":
self.PartialMatchFlag = None
if self.PartialMatchFlag == True:
raise PartialMatchError(query)
elif self.PartialMatchFlag == None:
raise FullMatchError(query)
else:
return query
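# --- Added illustration (not part of the original backend) -------------------
# How the class-level format strings above compose: a fielded match renders as
# key:`value`, and a list of values for one key is OR-joined in parentheses
# (see generateMapItemListNode). Toy, hypothetical field/values; plain string
# formatting only:
def _qualys_format_sketch():
    map_expression = "%s:`%s`"
    single = map_expression % ("EventID", "4624")
    many = "(" + " or ".join(map_expression % ("EventID", v) for v in ("4624", "4625")) + ")"
    return single, many   # ('EventID:`4624`', '(EventID:`4624` or EventID:`4625`)')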
|
socprime/soc_workflow_app_ce
|
soc_workflow_ce/server/translation_script/sigma/tools/sigma/backends/qualys.py
|
qualys.py
|
py
| 3,427 |
python
|
en
|
code
| 91 |
github-code
|
6
|
70281107068
|
import torch
class VQAClassifier(torch.nn.Module):
def __init__(self, hs, vs):
super(VQAClassifier, self).__init__()
# from: https://github.com/dandelin/ViLT
self.vqa_classifier = torch.nn.Sequential(
torch.nn.Linear(hs, hs * 2),
torch.nn.LayerNorm(hs * 2),
torch.nn.GELU(),
torch.nn.Linear(hs * 2, vs),
)
def forward(self, x):
return self.vqa_classifier(x)
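# --- Added usage sketch (not part of the original file) ----------------------
# The head maps a pooled feature of size hs to vs answer logits. The sizes and
# batch below are hypothetical; only torch (imported above) is assumed.
def _vqa_classifier_example():
    hs, vs, batch = 768, 3000, 4              # hidden size, answer-vocab size, batch size
    head = VQAClassifier(hs, vs)
    pooled = torch.randn(batch, hs)           # e.g. a pooled multimodal representation
    logits = head(pooled)                     # shape: (batch, vs)
    return logits.shape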
|
esteng/ambiguous_vqa
|
models/allennlp/modules/rsa_vqa/vqa_classifier.py
|
vqa_classifier.py
|
py
| 476 |
python
|
en
|
code
| 5 |
github-code
|
6
|
71718729148
|
import wx
from . import GUIclasses2 as GUI
from .DataClass2 import PointData
from . import GPS
import numpy as np
from . import MapBase
#Last update/bugfix 11.03,2010 simlk
#Two GUI interfaces wrapping MapBase.py for ML-programs. Simple interface designed for in-field use....
class BasePanel(wx.Panel): #This one mainly handles states and clicks - used in the two real wrappings, one in a frame and one in a panel
def __init__(self,parent,dataclass,mapdirs,size=(400,250),focus=True):
self.parent=parent
wx.Panel.__init__(self,parent,size=size)
self.SetBackgroundColour("blue")
#STATE VARS and DATA
self.panmode=True
self.gpsmode=False #mutually exclusive modes
self.clickrange=20 #20 pixels-clickrange.
#info field
self.info=GUI.FileLikeTextCtrl(self,size=(size[0],20),style=wx.TE_READONLY)
        self.info.SetFont(GUI.DefaultLogFont(8))# info field for displaying text messages.
#Set up the MapWindow
self.Map=MapBase.MapBase(self,size[0],size[1],dataclass,mapdirs)
self.Map.RegisterLeftClick(self.OnLeftClick)
self.Map.RegisterRightClick(self.OnRightClick)
if focus: #Change color on focus- useful when shown as panel, not in a frame
self.Map.MapPanel.canvas.Bind(wx.EVT_SET_FOCUS,self.OnSetFocus) #for showing when the panel has focus
self.Map.MapPanel.canvas.Bind(wx.EVT_KILL_FOCUS,self.OnKillFocus)
#SETTING UP THE SIZER#
self.sizer=wx.BoxSizer(wx.VERTICAL)
self.sizer.Add(self.Map,1,wx.ALL|wx.CENTER|wx.EXPAND,2)
self.sizer.Add(self.info,0,wx.ALL|wx.CENTER|wx.EXPAND,5)
self.SetSizerAndFit(self.sizer)
self.SetPanMode()
self.Map.SetInitialCenter()
def OnSetFocus(self,event):
self.SetBackgroundColour("green")
self.Refresh()
event.Skip()
def OnKillFocus(self,event):
self.SetBackgroundColour("blue")
self.Refresh()
event.Skip()
def SetMap(self): #parent gui should call this
self.Map.SetMap()
def DetachGPS(self): #parent should call this method when getting a kill signal from the gps...
self.Map.DetachGPS()
self.SetPanMode()
def AttachGPS(self,gps):
self.Map.AttachGPS(gps)
def Log(self,text,append=False):
self.info.SetValue(text)
def ClearPoints(self):
self.Map.ClearPoints()
def GetPoints(self):
self.Map.GetPoints()
def ResetPlot(self):
self.Map.ResetPlot()
def ZoomIn(self):
self.Map.ZoomIn()
def ZoomOut(self):
self.Map.ZoomOut()
def ToggleNames(self):
self.Map.ToggleNames()
def ToggleTextColor(self):
self.Map.ToggleTextColor()
def ToggleMode(self):
if not self.panmode:
self.SetPanMode()
else:
if self.Map.gps.is_alive(): #then we are in panmode
self.SetGPSMode()
else:
                self.Log("GPS ikke tilsluttet...")  # "GPS not connected..."
    def SetPanMode(self,log=True):  # when the GPS dies, fall back to nav (pan) mode
        if not self.panmode and log:
            self.Log("Skifter til navigation via venstreklik...")  # "Switching to navigation via left-click..."
self.panmode=True
self.gpsmode=False
self.Map.SetGpsCentering(False)
def SetGPSMode(self):
if not self.gpsmode:
            self.Log("Centrerer via GPS.")  # "Centering via GPS."
self.gpsmode=True
self.panmode=False
self.Map.SetGpsCentering(True)
def OnRightClick(self,event):
x=event.GetX()
y=event.GetY()
D,j=100000,-1 # just larger than clickrange :-)
if self.Map.HasPoints():
D,j=self.Map.ClosestLocatedPoint(x,y) #in screen coords
        if D<self.clickrange: # then the points are plotted and defined
self.Map.UnSelect()
self.Map.Select(j)
info=self.Map.GetHeightInfo()
self.Log(info)
bsk,found1=self.Map.GetLocatedInfo()
skitse,w,h,found2=self.Map.GetLocatedSkitse()
punkt=self.Map.GetLocatedLabel()
if found2 or found1:
skitse=wx.Bitmap.FromBuffer(w,h,skitse)
dlg=GUI.MyDscDialog(self,title="Beskrivelse for %s" %punkt,msg=bsk,image=skitse,point=punkt)
dlg.ShowModal()
else:
self.Log("--Beskrivelse og skitse kunne ikke findes...",append=True)
else:
self.Map.UnSelect()
event.Skip()
self.SetFocus()
def OnLeftClick(self,event):
x=event.GetX()
y=event.GetY()
ux,uy=self.Map.MapPanel.UserCoords(x,y) #could be wrapped more elegantly
D,j=10000,-1
if self.Map.HasPoints():
D,j=self.Map.ClosestLocatedPoint(x,y) #in screen coords
        if D<self.clickrange: # then the points are plotted and defined
self.Map.UnSelect()
self.Map.Select(j)
self.PointNameHandler(self.Map.GetLocatedLabel())
info=self.Map.GetHeightInfo()
self.Log(info)
        elif self.panmode and not self.Map.MapEngine.isRunning(): # no new coordinate system while a WMS fetch is in progress
self.Map.UnSelect()
self.info.SetValue("")
self.Map.GoTo(ux,uy)
else:
self.Map.UnSelect()
event.Skip()
def GoTo(self,x,y):
self.Map.GoTo(x,y)
def PointNameHandler(self,name):
pass
class MapFrame(wx.Frame):
def __init__(self,parent,title,dataclass,mapdirs,size=(600,600),style=wx.DEFAULT_FRAME_STYLE|wx.STAY_ON_TOP):
self.parent=parent
wx.Frame.__init__(self,parent,title=title,size=size)
self.statusbar=self.CreateStatusBar()
        #Appearance#
try:
self.SetIcon(self.parent.GetIcon())
except:
pass
self.SetBackgroundColour(GUI.BGCOLOR)
#STATE VARS and DATA
self.stayalive=True #flag to turn off, when you really wanna close the window
#Setting up the panel at the bottom of the frame
self.bottompanel=GUI.ButtonPanel(self,["SKJUL","ZOOM IND","ZOOM UD","GPS-CENTR.","PUNKTER","PKT.NAVNE","SLET PKT.","RESET"])
self.button=self.bottompanel.button
self.modebutton=self.button[3]
self.button[0].Bind(wx.EVT_BUTTON,self.OnHide)
self.button[1].Bind(wx.EVT_BUTTON,self.OnZoomIn)
self.button[2].Bind(wx.EVT_BUTTON,self.OnZoomOut)
self.button[3].Bind(wx.EVT_BUTTON,self.OnToggleMode)
self.button[4].Bind(wx.EVT_BUTTON,self.OnGetPoints)
self.button[5].Bind(wx.EVT_BUTTON,self.OnToggleNames)
self.button[6].Bind(wx.EVT_BUTTON,self.OnClearPoints)
self.button[7].Bind(wx.EVT_BUTTON,self.OnReset)
#Set up the MapWindow
self.Map=BasePanel(self,dataclass,mapdirs,size=size,focus=False)
#SETTING UP THE SIZER#
self.sizer=wx.BoxSizer(wx.VERTICAL)
self.sizer.Add(self.Map,6,wx.CENTER|wx.ALL|wx.EXPAND,10)
self.sizer.Add(self.bottompanel,0,wx.ALL,5)
self.SetSizerAndFit(self.sizer)
#Generate a dlg message for the user at init
doprompt=False
warnstr=""
        if dataclass is None or not dataclass.IsInitialized(): #first call here... might be superfluous
self.DisablePoints()
self.Bind(wx.EVT_CLOSE,self.OnClose)
self.Map.SetMap()
self.DisableGPS() #until we attach one
def OnClose(self,event):
if not self.stayalive:
event.Skip()
else:
self.Show(0)
def CloseMeNow(self):
self.stayalive=False
self.Close()
def OnHide(self,event):
self.Show(0)
def OnGetPoints(self,event):
self.Map.GetPoints()
def OnClearPoints(self,event):
self.Map.ClearPoints()
def OnResetPlot(self,event):
self.Map.ResetPlot()
def OnToggleNames(self,event):
self.Map.ToggleNames()
def OnToggleMode(self,event):
self.Map.ToggleMode()
if self.Map.gpsmode:
self.button[3].SetLabel("NAV-MODE")
else:
self.button[3].SetLabel("GPS-CENTR.")
def OnZoomIn(self,event):
self.Map.ZoomIn()
def OnZoomOut(self,event):
self.Map.ZoomOut()
def OnReset(self,event):
self.Map.ResetPlot()
def DisablePoints(self):
self.button[-1].Enable(0)
def EnablePoints(self):
self.button[-1].Enable(1)
def DisableGPS(self):
self.button[3].Enable(0)
self.button[3].SetLabel("GPS-CENTR.")
def EnableGPS(self):
self.button[3].Enable()
def AttachGPS(self,gps):
if gps.is_alive():
self.Map.AttachGPS(gps)
self.EnableGPS()
def DetachGPS(self):
self.Map.DetachGPS() #sets panmode
self.DisableGPS()
class PanelMap(BasePanel): #panel-map with keyboard interaction.
def __init__(self,parent,dataclass,mapdirs,size=(400,250)):
self.pointnamefct=None
BasePanel.__init__(self,parent,dataclass,mapdirs,size)
self.Map.MapPanel.canvas.Bind(wx.EVT_CHAR,self.OnChar)
def OnChar(self,event):
key=event.GetKeyCode()
if key==45: #'-'
self.ZoomOut()
elif key==43: #'+'
self.ZoomIn()
elif key==42: #'*'
self.Map.GetPoints(small=True) #we only update in a smaller region... (searchradius attribute)
elif key==47: #'/'
self.ResetPlot()
elif key==wx.WXK_DELETE:
self.Map.ClearPoints()
elif key==wx.WXK_INSERT:
self.ToggleMode()
elif key==wx.WXK_PAGEDOWN:
self.ToggleNames()
elif key==wx.WXK_PAGEUP:
self.ToggleTextColor()
event.Skip()
def UpdatePoints(self):
self.Map.TestPointUpdate(True) #set the force flag to True
def RegisterPointFunction(self,fct):
self.pointnamefct=fct
def PointNameHandler(self,name):
if self.pointnamefct is not None:
self.pointnamefct(name)
|
SDFIdk/nivprogs
|
MyModules/MLmap.py
|
MLmap.py
|
py
| 8,768 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26581236560
|
from TrelloApi.TrelloConfig import Trello as tconfig
import requests
import datetime
import json
import re
import os
import threading
import xlsxwriter
class OpenFolderError(Exception):
def __str__(self):
        return 'Diretório já existe'
class GeraRelatorio(object):
def __init__(self):
self.Trello = tconfig()
self.lista_idBoards = self.Trello.idBoards()
self.status_code = False
def function_nameBoards(self, key, token,idBoard):
        url = "https://api.trello.com/1/boards/"+str(idBoard)
querystring = {"actions":"all",
"boardStars":"none",
"cards":"none",
"card_pluginData":"false",
"checklists":"none",
"customFields":"false",
"fields":"name",
"lists":"open",
"members":"none",
"memberships":"none",
"membersInvited":"none",
"membersInvited_fields":"all",
"pluginData":"false",
"organization":"false",
"organization_pluginData":"false",
"myPrefs":"false",
"tags":"false",
"key":key,"token":token
}
self.setResponse(requests.request("GET", url, params=querystring))
return self.setName(json.loads(self.getResponse().content.decode('utf-8')))
def function_IDs(self, key, token, idBoard):
url = "https://api.trello.com/1/boards/"+str(idBoard)+"/cards/"
querystring = {'fields':'idList', 'token': token, 'key': key}
self.setResponse(requests.request("GET", url, params=querystring))
return self.setIds(json.loads(self.getResponse().content.decode('utf-8')))
def function_nameCards(self, key, token, idCard):
url = "https://api.trello.com/1/cards/"+str(idCard)+"/name"
querystring = {"key":key, "token":token, "fields":"name"}
self.setResponse(requests.request('GET',url, params=querystring))
self.nameCard = (self.setNameCard(json.loads(self.getResponse().content.decode('utf-8'))))
return self.nameCard
def function_nameList(self, key, token, idList):
url = "https://api.trello.com/1/lists/"+str(idList)
querystring = { 'key' : key , 'token' : token}
        # read-only lookup, so use GET rather than PUT (PUT would issue an update on the list)
        self.setResponse(requests.request('GET', url, params=querystring))
self.nameList = (self.setNameList(json.loads(self.getResponse().content.decode('utf-8'))))
return self.nameList
def function_CommentCard(self, key, token, idCard):
url = "https://api.trello.com/1/cards/"+str(idCard)+"/actions"
querystring = {"key":key,"token":token}
self.setResponse(requests.request("GET", url, params=querystring))
self.commentCard = self.setCommentCard(json.loads(self.getResponse().content.decode('utf-8')))
self.comment_card = self.getCommentCard()
self.arrayComment = []
for self.Comment in (self.comment_card):
self.typeComment = self.Comment['type']
if str(self.typeComment) == 'commentCard':
self.comment_singular_card = (self.Comment['data']['text'])
self.comment_singular_card = re.sub('\\n|\\t| ',', ',self.comment_singular_card)
self.arrayComment.append(self.comment_singular_card)
return self.arrayComment
def function_Description_card(self, key, token, idCard):
url = "https://api.trello.com/1/cards/"+str(idCard)
querystring = {"fields":"desc",
"attachments":"false",
"attachment_fields":"all",
"members":"false",
"membersVoted":"false",
"checkItemStates":"false",
"checklists":"none",
"checklist_fields":"all",
"board":"false","list":"false",
"pluginData":"false",
"stickers":"false",
"sticker_fields":"all",
"customFieldItems":"false",
"key":key,"token":token}
self.setResponse(requests.request("GET", url, params=querystring))
try:
self.description_card = self.setDescritionCard(json.loads(self.getResponse().content.decode('utf-8')))
return self.description_card
except:
            self.description_card = 'Sem comentário'
return self.description_card
def function_main(self):
self.pathLocal = os.getcwd()
print('=====================================')
data = datetime.date.today()
self.data = str(data).split('-')
        NomeMes = {'01':'Janeiro', '02':'Fevereiro', '03':'Março', '04':'Abril',
'05':'Maio','06':'Junho', '07':'Julho', '08':'Agosto',
'09':'Setembro', '10':'Outubro','11':'Novembro', '12':'Dezembro'}
self.mes = self.data[1]
self.nomeMes = NomeMes['%s'%self.mes]
self.day = (self.data[2])
self.year = self.data[0]
        self.nameDir = ('Relatórios-%s-%s'%(self.nomeMes, self.year))
try:
self.status_access = (os.access(r'%s\%s'%(self.pathLocal,self.nameDir), os.R_OK))
if self.status_access == False:
self.newDirPerMonth = os.mkdir('%s\%s'%(self.pathLocal,self.nameDir))
print(os.access('%s\%s'%(self.pathLocal,self.nameDir), os.R_OK))
else:
print(os.access('%s\%s'%(self.pathLocal,self.nameDir), os.R_OK))
except OpenFolderError:
            print('Diretório já existe')
except FileNotFoundError:
print('except1')
self.newDirPerMonth = os.mkdir('%s\%s'%(self.pathLocal,self.nameDir))
print(os.access(r'%s\%s'%(self.pathLocal,self.nameDir), os.R_OK))
except FileExistsError:
print('except2')
self.newDirPerMonth = os.mkdir('%s\%s'%(self.pathLocal,self.nameDir))
print(os.access(r'%s\%s'%(self.pathLocal,self.nameDir), os.R_OK))
self.token = self.Trello.token
self.key = self.Trello.key
try:
print('%s/%s/%s'%(self.day,self.nomeMes,self.year))
            # self.arquivo = xlsxwriter.Workbook('Relatório-%s-%s-%s.xlsx'%(self.day, self.mes, self.year))
            # self.arquivo = self.arquivo.add_worksheet()
            self.arquivo = open('%s\%s\Relatório-%s-%s-%s.xlsx'%(self.pathLocal,self.nameDir,self.day, self.mes, self.year),'a+')
self.arquivo.write('Nome do Board;Nome da Lista;Nome do card;Descriรงรฃo;Comentรกrios')
for num_board in self.lista_idBoards:
self.singular_ids = self.lista_idBoards[num_board]
self.name_board = self.function_nameBoards(self.key, self.token, self.singular_ids)
self.name_board = self.getName()
self.ids_card_list = self.function_IDs(self.key,self.token,self.singular_ids)
self.ids_card_list = self.getIds()
for i in range(len(self.ids_card_list)):
self.id_card = self.ids_card_list[i]['id']
self.id_list = self.ids_card_list[i]['idList']
self.name_card = self.function_nameCards(self.key, self.token, self.id_card)
self.name_card = self.getNameCard()
self.name_list = self.function_nameList(self.key, self.token, self.id_list)
self.name_list = self.getNameList()
self.description_in_card = self.function_Description_card(self.key, self.token, self.id_card)
self.description_in_card = self.getDescritionCard()
self.comment_card = self.function_CommentCard(self.key, self.token, self.id_card)
self.comment_card = re.sub("[|]|'|",'',str(self.comment_card))
self.replaced_comment_card = ("'"+str(self.comment_card)+"'")
self.replaced_comment_card = self.replaced_comment_card.replace("'[",'').replace("]'", '')
self.conc = ('%s ; %s ; %s ; %s ; %s \n'%(self.name_board,self.name_list, self.name_card, self.description_in_card, str(self.replaced_comment_card)))
self.conc = re.sub('[|]','',self.conc)
try:
print(self.conc)
self.arquivo.write(self.conc)
except UnicodeEncodeError:
pass
except KeyboardInterrupt:
self.arquivo.close()
            return 'Fim da execução'
        self.arquivo.close()
        return 'Fim da execução'
def getStatus_code(self):
return self.status_code
def setStatus_code(self, status_code):
self.status_code = status_code
def getDescritionCard(self):
self.desc_card = self.desc_card['desc']
self.desc_card = self.desc_card.replace('\n', '')
return self.desc_card
def setDescritionCard(self, desc_card):
self.desc_card = desc_card
def getCommentCard(self):
return self.com_Card
def setCommentCard(self, commentCard):
self.com_Card = commentCard
def getNameList(self):
return self.NameList['name']
def setNameList(self, NameList):
self.NameList = NameList
def getIds(self):
return self.__idlist
def setIds(self, idlist):
self.__idlist = idlist
def getNameCard(self):
return str(self.nameCards['_value'])
def setNameCard(self, nameCard):
self.nameCards = nameCard
def getResponse(self):
return self.__response
def setResponse(self, response):
self.__response = response
def getName(self):
return self.__nome['name']
def setName(self, nome):
self.__nome = nome
|
LeandroGelain/PersonalGit
|
2018-2019/Programas executaveis/tkinterApp_arquivosSemExe/TrelloApi/GeraRelatório.py
|
GeraRelatório.py
|
py
| 10,255 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25012412373
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
pytorch-dl
Created by raj at 7:48 AM, 7/31/20
"""
import os
import time
import torch
from dataset.iwslt_data import rebatch_source_only
from models.decoding import batched_beam_search
from models.utils.model_utils import load_model_state
from onmt import opts, inputters
from onmt.utils import set_random_seed
from onmt.utils.parse import ArgumentParser
def translate(opt):
set_random_seed(opt.seed, False)
start_steps, model, fields = load_model_state(os.path.join(opt.models[0], 'checkpoints_best.pt'), opt,
data_parallel=False)
model.eval()
src_vocab = fields['src'].base_field.vocab
trg_vocab = fields['tgt'].base_field.vocab
pad_idx = src_vocab.stoi["<blank>"]
unk_idx = src_vocab.stoi["<unk>"]
start_symbol = trg_vocab.stoi["<s>"]
if start_symbol == unk_idx:
if opt.tgt_lang_id:
start_symbol = trg_vocab.stoi["<" + opt.tgt_lang_id + ">"]
else:
raise AssertionError("For mBart fine-tuned model, --tgt_lang_id is necessary to set. eg DE EN etc.")
with open(opt.src) as input:
src = input.readlines()
src_reader = inputters.str2reader['text'].from_opt(opt)
src_data = {"reader": src_reader, "data": src, "dir": ''}
_readers, _data, _dir = inputters.Dataset.config(
[('src', src_data)])
# corpus_id field is useless here
if fields.get("corpus_id", None) is not None:
fields.pop('corpus_id')
data = inputters.Dataset(fields, readers=_readers, dirs=_dir, data=_data, sort_key=inputters.str2sortkey['text'])
data_iter = inputters.OrderedIterator(
dataset=data,
batch_size=1,
train=False,
sort=False,
sort_within_batch=True,
shuffle=False
)
cuda_condition = torch.cuda.is_available() and not opt.cpu
device = torch.device("cuda:0" if cuda_condition else "cpu")
if cuda_condition:
model.cuda()
with torch.no_grad():
translated = list()
reference = list()
start = time.time()
for k, batch in enumerate(rebatch_source_only(pad_idx, b, device=device) for b in data_iter):
print('Processing: {0}'.format(k))
# out = greedy_decode(model, batch.src, batch.src_mask, start_symbol=start_symbol)
# out = beam_search(model, batch.src, batch.src_mask,
# start_symbol=start_symbol, pad_symbol=pad_idx,
# max=batch.ntokens + 10)
out = batched_beam_search(model, batch.src, batch.src_mask,
start_symbol=start_symbol, pad_symbol=pad_idx,
max=batch.ntokens + 10)
# print("Source:", end="\t")
# for i in range(1, batch.src.size(1)):
# sym = SRC.vocab.itos[batch.src.data[0, i]]
# if sym == "<eos>": break
# print(sym, end=" ")
# print()
# print("Translation:", end="\t")
transl = list()
start_idx = 0 # for greedy decoding the start index should be 1 that will exclude the <sos> symbol
for i in range(start_idx, out.size(1)):
sym = trg_vocab.itos[out[0, i]]
if sym == "</s>": break
transl.append(sym)
text_transl = " ".join(transl).replace("@@ ", '')
translated.append(text_transl)
print(text_transl)
# print()
# print("Target:", end="\t")
# ref = list()
# for i in range(1, batch.trg.size(1)):
# sym = trg_vocab.itos[batch.trg.data[0, i]]
# if sym == "</s>": break
# ref.append(sym)
# reference.append(" ".join(ref))
# if k == 1:
# break
with open('test-beam-decode.de-en.en', 'w', encoding='utf8') as outfile:
outfile.write('\n'.join(translated))
# with open('valid-ref.de-en.en', 'w', encoding='utf-8') as outfile:
# outfile.write('\n'.join(reference))
print('Time elapsed:{}'.format(time.time() - start))
def _get_parser():
parser = ArgumentParser(description='translate.py')
opts.config_opts(parser)
opts.translate_opts(parser)
return parser
def main():
parser = _get_parser()
opt = parser.parse_args()
translate(opt)
if __name__ == "__main__":
main()
|
patelrajnath/pytorch-dl
|
translate.py
|
translate.py
|
py
| 4,521 |
python
|
en
|
code
| 10 |
github-code
|
6
|
72067319869
|
import numpy as np
import cv2
def compute_perspective_transform(corner_points,width,height,image):
""" Compute the transformation matrix
@ corner_points : 4 corner points selected from the image
@ height, width : size of the image
"""
# Create an array out of the 4 corner points
corner_points_array = np.float32(corner_points)
# Create an array with the parameters (the dimensions) required to build the matrix
img_params = np.float32([[0,0],[width,0],[0,height],[width,height]])
# Compute and return the transformation matrix
matrix = cv2.getPerspectiveTransform(corner_points_array,img_params)
img_transformed = cv2.warpPerspective(image,matrix,(width,height))
return matrix,img_transformed
def compute_point_perspective_transformation(matrix,list_downoids):
""" Apply the perspective transformation to every ground point which have been detected on the main frame.
@ matrix : the 3x3 matrix
@ list_downoids : list that contains the points to transform
return : list containing all the new points
"""
# Compute the new coordinates of our points
list_points_to_detect = np.float32(list_downoids).reshape(-1, 1, 2)
transformed_points = cv2.perspectiveTransform(list_points_to_detect, matrix)
# Loop over the points and add them to the list that will be returned
transformed_points_list = list()
for i in range(0,transformed_points.shape[0]):
transformed_points_list.append([transformed_points[i][0][0],transformed_points[i][0][1]])
return transformed_points_list
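# --- Added usage sketch (not part of the original module) --------------------
# Typical flow: pick the 4 ground-plane corners once in the camera image
# (top-left, top-right, bottom-left, bottom-right, matching img_params above),
# build the matrix, then project detected ground points into the bird's-eye
# view. All coordinates below are hypothetical.
def _bird_view_example():
    corner_points = [(100, 400), (500, 380), (50, 700), (560, 720)]
    width, height = 400, 600                              # size of the bird's-eye canvas
    frame = np.zeros((720, 1280, 3), dtype=np.uint8)      # stand-in for a video frame
    matrix, warped = compute_perspective_transform(corner_points, width, height, frame)
    ground_points = [[300, 500], [420, 650]]              # e.g. bottom centers of person boxes
    return compute_point_perspective_transformation(matrix, ground_points)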
|
basileroth75/covid-social-distancing-detection
|
src/bird_view_transfo_functions.py
|
bird_view_transfo_functions.py
|
py
| 1,517 |
python
|
en
|
code
| 123 |
github-code
|
6
|
24683471152
|
class Node:
def __init__(self, name):
self.name = name
self.routing_table = {} # {destination: (next_hop, cost)}
def update_routing_table(self, destination, next_hop, cost):
if destination not in self.routing_table or cost < self.routing_table[destination][1]:
self.routing_table[destination] = (next_hop, cost)
class Network:
def __init__(self):
self.nodes = {}
def add_node(self, node):
self.nodes[node.name] = node
def update_distance_vector_routing(self, source, destination, cost):
for node_name, node in self.nodes.items():
if node_name != source:
if node_name != destination:
if destination in node.routing_table:
existing_cost = node.routing_table[destination][1]
if source not in node.routing_table or cost + existing_cost < node.routing_table[source][1]:
node.update_routing_table(source, source, cost + existing_cost)
else:
if source not in node.routing_table:
node.update_routing_table(source, source, cost)
def print_routing_tables(self):
for node_name, node in self.nodes.items():
print(f"Routing table for {node_name}:")
print("Destination\tNext Hop\tCost")
for destination, (next_hop, cost) in node.routing_table.items():
print(f"{destination}\t\t{next_hop}\t\t{cost}")
print("\n")
def main():
A = Node('A')
B = Node('B')
C = Node('C')
D = Node('D')
network = Network()
network.add_node(A)
network.add_node(B)
network.add_node(C)
network.add_node(D)
A.update_routing_table('A', 'A', 0)
B.update_routing_table('B', 'B', 0)
C.update_routing_table('C', 'C', 0)
D.update_routing_table('D', 'D', 0)
network.update_distance_vector_routing('A', 'B', 1)
network.update_distance_vector_routing('A', 'C', 2)
network.update_distance_vector_routing('B', 'C', 3)
network.update_distance_vector_routing('C', 'D', 1)
network.print_routing_tables()
if __name__ == "__main__":
main()
|
ShrutikaM25/CNSL
|
UDP/udp.py
|
udp.py
|
py
| 2,223 |
python
|
en
|
code
| 0 |
github-code
|
6
|
75226771708
|
#............ Calculates the average return for every time interval for every stock and stores it in the DB
import pymongo
import datetime
import numpy as np
myclient = pymongo.MongoClient("mongodb://localhost:27017/")
historical_col = myclient["core"]["historical_data"]
time_heat_map = myclient["core"]["analytics"]["time_heat"]
functional_data_col =myclient["core"]["functional"]
# time_heat_map.delete_many({})
#functional_data_col.delete_many({})
intervals = ['minute', 'day', '3minute', '5minute', '10minute', '15minute', '30minute', '60minute']
def create_time_heat_map(hist_coll):
max_count_per_interval = {'minute': 0, 'day': 0, '3minute': 0, '5minute': 0, '10minute': 0, '15minute': 0, '30minute': 0, '60minute':0}
for instruments in hist_coll.find({},{"_id":0}):
heat_map_dict = {}
heat_map_dict["tradingsymbol"] = instruments["tradingsymbol"]
heat_map_dict["name"] = instruments["name"]
heat_map_dict["instrument_token"] = instruments["instrument_token"]
for interval in intervals:
unique_intervals = {}
for unit_intervals in instruments[interval]:
#print(unit_intervals)
ist_unit_intervals = convert_to_ist(unit_intervals['date'].time())
open_price = unit_intervals['open']
close_price = unit_intervals['close']
interval_returns = calc_interval_returns(open_price,close_price)
#print(interval_returns)
if ist_unit_intervals not in unique_intervals:
unique_intervals[ist_unit_intervals] = [interval_returns]
else:
unique_intervals[ist_unit_intervals].append(interval_returns)
for intervals_keys in unique_intervals.keys():
# print('Processing: instrument- ', instruments["tradingsymbol"], ' interval- ', interval, ' interval unit- ', intervals_keys)
interval_keys_dict = {}
interval_keys_dict['average_return'] = average_from_list(unique_intervals[intervals_keys])
interval_keys_dict['count'] = np.size(unique_intervals[intervals_keys])
if max_count_per_interval[interval] < np.size(unique_intervals[intervals_keys]):
max_count_per_interval[interval] = np.size(unique_intervals[intervals_keys])
print(max_count_per_interval,interval,np.size(unique_intervals[intervals_keys]))
unique_intervals[intervals_keys] = interval_keys_dict
# heat_map_dict[interval] = unique_intervals
time_heat_map.update_one({"instrument_token":instruments["instrument_token"]},{"$set":{interval:unique_intervals}})
#print(heat_map_dict)
# time_heat_map.insert_one(heat_map_dict)
# functional_data = {}
# functional_data['description'] = 'Max count per interval'
# functional_data['variable'] = 'max_count_per_interval'
# functional_data['values'] = max_count_per_interval
functional_data_col.update_one({"variable":"max_count_per_interval"},{"$set":{"values":max_count_per_interval}})
def calc_interval_returns(open_price, close_price):
if open_price == 0:
return 0
else:
return (close_price-open_price)/open_price
def convert_to_ist(gmt_time):
ist_hour = 0
ist_min = 0
hour = gmt_time.hour
min = gmt_time.minute
if int((min+30)/60) == 0:
ist_min = min+30
if int((hour+5)/23) == 0:
ist_hour = hour+5
else:
ist_hour = (hour+5)%24
else:
ist_min = (min+30)%60
if int((hour+6)/23) == 0:
ist_hour = hour+6
else:
ist_hour = (hour+6)%24
#print(gmt_time, datetime.time(ist_hour,ist_min))
return datetime.time(ist_hour,ist_min).strftime('%H:%M')
def average_from_list(returns_list):
#print(np.sum(returns_list),np.size(returns_list))
if np.size(returns_list) == 0:
return 0.0
else:
return np.sum(returns_list)/np.size(returns_list)
create_time_heat_map(historical_col)
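# --- Added illustration (not part of the original script) --------------------
# convert_to_ist above is equivalent to shifting the clock time by +05:30
# (ignoring day roll-over, as the original does). A shorter sketch using the
# datetime module already imported above:
def _convert_to_ist_with_timedelta(gmt_time):
    anchor = datetime.datetime.combine(datetime.date.today(), gmt_time)
    return (anchor + datetime.timedelta(hours=5, minutes=30)).time().strftime('%H:%M')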
|
prashanth470/trading
|
source/analysis/time_heat_map.py
|
time_heat_map.py
|
py
| 4,241 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19646311537
|
ohm = []
while True:
a = int(input())
ohm.append(a)
if a == 0:
break
plus =0
min = 0
if ohm[0]==0:
    print("ไม่มีข้อมูล")  # "No data"
else:
for x in range(len(ohm)):
if ohm[x] > 0:
plus+=1
elif ohm[x] <0:
min+=1
    print("จำนวนตัวเลขที่มีค่าเป็นบวก",plus)  # "count of numbers with a positive value"
    print("จำนวนตัวเลขที่มีค่าเป็นลบ",min)  # "count of numbers with a negative value"
|
KanapongAiamtip/DIP
|
Lab Basic Python/P2Q4.py
|
P2Q4.py
|
py
| 483 |
python
|
th
|
code
| 0 |
github-code
|
6
|
17702310414
|
import os
import shutil
import time
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
# Construct the path to the download folder
download_folder = os.path.join(os.path.expanduser('~'), 'Downloads')
class FileSorter(FileSystemEventHandler):
def on_created(self, event):
temp_file_paths = [
os.path.join(download_folder, f)
for f in os.listdir(download_folder)
if f.endswith(('.tmp', '.crdownload'))
]
# Wait until the temp files are no longer present
while any(os.path.exists(p) for p in temp_file_paths):
time.sleep(1)
# Sort the files in the download folder
files = [
f
for f in os.listdir(download_folder)
if not f.endswith(('.tmp', '.crdownload')) and os.path.getsize(os.path.join(download_folder, f)) > 1_000
]
for file in files:
file_name, file_ext = os.path.splitext(file)
dest_folder = os.path.join(download_folder, file_ext[1:])
if not os.path.exists(dest_folder):
os.makedirs(dest_folder)
src_file = os.path.join(download_folder, file)
dest_file = os.path.join(dest_folder, file)
shutil.move(src_file, dest_file)
# Create the file system event handler
event_handler = FileSorter()
# Create the observer
observer = Observer()
# Set the observer to watch the download folder
observer.schedule(event_handler, download_folder, recursive=True)
# Start the observer
observer.start()
# Run the observer indefinitely
try:
while True:
# Sort the files every 10 seconds
time.sleep(10)
event_handler.on_created(None)
except KeyboardInterrupt:
observer.stop()
# Join the observer thread
observer.join()
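# --- Added usage note (not part of the original script) ----------------------
# Running this script watches ~/Downloads and moves completed downloads into
# per-extension subfolders (e.g. report.pdf -> ~/Downloads/pdf/report.pdf).
# It assumes the third-party watchdog package is installed:
#   pip install watchdog
#   python DLIBV0.04WATCHDOG.py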
|
phelannathan42/Download-Librarian
|
DLIBV0.04WATCHDOG.py
|
DLIBV0.04WATCHDOG.py
|
py
| 1,885 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70766866107
|
from typing import Union, Tuple, List, Sequence
from .base import BasePayload
class FlowPayload(BasePayload):
""" """
def payloads(self) -> Union[Tuple, List]:
return findall_subpayload([self.__args__, self.__kwargs__])
def __make__(self, *args, **kwargs):
raise NotImplementedError
def findall_subpayload(
arg: Sequence
) -> List[Union[List[FlowPayload], List[List], FlowPayload]]:
    """ Recursively search the given argument for FlowPayload instances. """
    def search_array(o) -> None:
        """ Search list, tuple and set iterables. """
for v in o:
if isinstance(v, FlowPayload):
payloads.append(v)
else:
goto_search(v)
    def search_dict(o) -> None:
        """ Search a dict (both keys and values). """
for k, v in o.items():
if isinstance(k, FlowPayload):
payloads.append(k)
else:
goto_search(k)
if isinstance(v, FlowPayload):
payloads.append(v)
else:
goto_search(v)
    def goto_search(o) -> None:
        """ Recursive dispatch. Cross-nested containers could in principle recurse
        forever, but in practice payloads do not contain such cross references.
        """
if isinstance(o, (list, tuple, set)):
search_array(o)
elif isinstance(o, dict):
search_dict(o)
elif isinstance(o, FlowPayload):
payloads.append(o)
payloads = []
goto_search(arg)
return payloads
|
ZSAIm/VideoCrawlerEngine
|
helper/payload/flow.py
|
flow.py
|
py
| 1,523 |
python
|
en
|
code
| 420 |
github-code
|
6
|
24905743163
|
import cadquery as cq
import logging
from types import SimpleNamespace as Measures
log = logging.getLogger(__name__)
# A parametric mount for stepper motors shaped as an L-bracket.
class MotorMountL:
def __init__(self, workplane, measures):
"""
A parametric stepper motor mount in the shape of an L bracket.
This is an adaptation of Eddie Liberato's design, as published at:
https://eddieliberato.github.io/blog/2020-08-01-stepper-motor-bracket/
        :param workplane: The CadQuery workplane to create the part on.
:param measures: The measures to use for the parameters of this design. Expects a nested
[SimpleNamespace](https://docs.python.org/3/library/types.html#types.SimpleNamespace)
object, which may have the following attributes:
- **``shell_thickness``:** Shell thickness of the tube element.
"""
# todo
self.model = workplane
self.debug = False
self.measures = measures
self.build()
def build(self):
m = self.measures
self.model = (
cq.Workplane("front")
.box(m.width, m.fplate_thickness, m.fplate_height + m.bplate_thickness)
.faces(">Y")
.workplane()
.move(0, m.bplate_thickness / 2)
.rect(m.fplate_between_holes, m.fplate_between_holes, forConstruction = True)
.vertices()
.cboreHole(m.fplate_screw_clearance, m.fplate_cbore_diameter, m.fplate_cbore_depth)
.faces("<Y")
.workplane()
.move(0, m.bplate_thickness / 2)
.cboreHole(m.main_bore_diameter, m.main_cbore_diameter, m.main_cbore_depth)
.faces("<Y")
.workplane(centerOption = 'CenterOfBoundBox')
.move(0, -m.fplate_height / 2)
.rect(m.width, m.bplate_thickness)
.extrude(m.bplate_length)
.faces("<Z[1]")
.workplane()
.move(0, m.bplate_holes_offset)
.rect(m.bplate_between_holes, m.bplate_between_holes, forConstruction = True)
.vertices()
.cboreHole(m.bplate_screw_clearance, m.bplate_cbore_diameter, m.bplate_cbore_depth)
)
if m.gusset:
self.model = (
self.model
.faces(">X")
.workplane(centerOption = 'CenterOfBoundBox')
.move(0, -(m.fplate_height + m.bplate_thickness) / 2)
.line((m.bplate_length + m.fplate_thickness) / 2, 0)
.line(0, m.fplate_height)
.close()
.extrude(-m.gusset_thickness)
.faces("<X")
.workplane(centerOption = 'CenterOfBoundBox')
.move(0, -(m.fplate_height + m.bplate_thickness) / 2)
.line(-(m.bplate_length + m.fplate_thickness) / 2, 0)
.line(0, m.fplate_height)
.close()
.extrude(-m.gusset_thickness)
)
def part(self, part_class, measures):
"""CadQuery plugin that provides a factory method for custom parts"""
part = part_class(self, measures) # Dynamic instantiation from the type contained in part_class.
return self.newObject(
part.model.objects
)
# =============================================================================
# Measures and Part Creation
# =============================================================================
cq.Workplane.part = part
measures = Measures(
width = 66.0,
fplate_height = 60.0,
fplate_thickness = 10.0,
# rectangular distance between stepper mounting holes (NEMA 23 = 47.1)
fplate_between_holes = 47.1,
fplate_screw_clearance = 5.0,
fplate_cbore_diameter = 7.5,
fplate_cbore_depth = 4.0,
main_bore_diameter = 28.2,
main_cbore_diameter = 40.0,
main_cbore_depth = 2.0,
bplate_length = 86.0,
bplate_thickness = 4.0,
bplate_between_holes = 50.0, # holes to mount it to the frame
bplate_holes_offset = 5.0,
bplate_screw_clearance = 5.0,
bplate_cbore_diameter = 7.5,
bplate_cbore_depth = 2.0,
gusset_thickness = 3.0,
gusset = True
)
show_options = {"color": "lightgray", "alpha": 0}
motor_mount = cq.Workplane("XY").part(MotorMountL, measures)
show_object(motor_mount, name = "motor_mount", options = show_options)
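# --- Added usage note (not part of the original script) ----------------------
# show_object() is provided by CQ-editor; when running headless, the solid can
# be written out instead, e.g. (assumes CadQuery 2.x exporters):
#   from cadquery import exporters
#   exporters.export(motor_mount, "motor_mount_l.stl")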
|
tanius/cadquery-models
|
motormount/motor_mount_l.py
|
motor_mount_l.py
|
py
| 4,389 |
python
|
en
|
code
| 11 |
github-code
|
6
|
15548564668
|
import sys
import re
from typing import Dict, Union, List
def get_symb_value(symb: Dict[str, str], context) -> (Union[str, int, bool], str):
"""
Get value and type of symbol.
:param symb: XML argument
:param context: Interpret class
:return: Tuple of value and type
"""
if symb['type'] == 'var':
var: List[str] = symb['value'].strip().split('@')
var_data: Dict[str, str] = get_var_value(var, context)
return var_data['value'], var_data['type']
elif symb['type'] == 'int':
val: int = 0
try:
val: int = int(symb['value'])
except ValueError:
exit_with_code(32, "Error: Wrong type of value.")
return val, 'int'
elif symb['type'] == 'bool':
if symb['value'] == 'true':
return True, 'bool'
elif symb['value'] == 'false':
return False, 'bool'
elif symb['type'] == 'string':
if symb['value'] is None:
return '', 'string'
string: str = symb['value'].strip().replace('\n', '')
string: str = remove_escape_seq(string)
return string, 'string'
elif symb['type'] == 'nil':
return 'nil', 'nil'
def store_val_to_var(var: List[str], val: Union[int, str, bool], val_type: str, context) -> None:
"""
Store value to variable.
:param var: Variable frame and name where to store the value
:param val: Value to store
:param val_type: Type of value
:param context: Interpret class
:return: None
"""
err: bool = True
if var[0] == 'GF':
if var[1] in context.global_frame.keys():
context.global_frame[var[1]] = {'type': val_type, 'value': val}
return
elif var[0] == 'LF':
if len(context.local_frame) == 0:
exit_with_code(55, "Error: No local frame.")
if var[1] in context.local_frame[-1].keys():
context.local_frame[-1][var[1]] = {'type': val_type, 'value': val}
return
elif var[0] == 'TF':
if context.tmp_frame is None:
exit_with_code(55, "Error: No temporary frame.")
if var[1] in context.tmp_frame.keys():
context.tmp_frame[var[1]] = {'type': val_type, 'value': val}
return
else:
exit_with_code(52, "Error: Wrong variable type.")
if err:
exit_with_code(54, "Error: Variable doesn't exist.")
def get_var_value(var: List[str], context) -> Dict[str, str]:
"""
Get value of variable.
:param var: Variable frame and name
:param context: Interpret class
:return: Value of variable
"""
val: None = None
if var[0] == 'GF':
val: Dict[str, str] = context.global_frame.get(var[1])
elif var[0] == 'LF':
if len(context.local_frame) == 0:
exit_with_code(55, "Error: No local frame.")
val: Dict[str, str] = context.local_frame[-1].get(var[1])
elif var[0] == 'TF':
if context.tmp_frame is None:
exit_with_code(55, "Error: No temporary frame.")
val: Dict[str, str] = context.tmp_frame.get(var[1])
else:
exit_with_code(52, "Error: Wrong variable type.")
if val is None:
exit_with_code(54, "Error: Variable doesn't exist.")
return val
def exit_with_code(code: int, text: str) -> None:
"""
Exit with error code and print error message.
:param code: Int value of error code
:param text: Error message
:return: None
"""
print(text, file=sys.stderr)
sys.exit(code)
def remove_escape_seq(string: str) -> str:
"""
Replace escape sequences with characters.
:param string: String with escape sequences
:return: String with replaced escape sequences
"""
if len(string) != 0:
string: str = re.sub(r'\\(\d{3})', lambda match: chr(int(match.group(1))), string)
return string
def check_arguments(args: Dict[str, Dict[str, str]], num_of_args: int) -> None:
"""
Check if operation has correct number of arguments.
:param args: List of arguments
:param num_of_args: Number of operation arguments
:return: None
"""
if len(args)-1 != num_of_args:
exit_with_code(32, "Error: Wrong number of arguments.")
arg_cnt: int = 1
for arg in range(1, num_of_args+1):
if f"arg{arg_cnt}" not in args.keys():
exit_with_code(32, "Error: Wrong argument name.")
arg_cnt += 1
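# --- Added illustration (not part of the original module) --------------------
# remove_escape_seq converts decimal escape sequences (a backslash followed by
# exactly three digits) into the corresponding characters. A small demo with a
# hypothetical input string:
def _remove_escape_seq_example():
    raw = r"hello\032world\033"        # \032 -> chr(32) == ' ', \033 -> chr(33) == '!'
    return remove_escape_seq(raw)      # "hello world!"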
|
lukasvecerka23/ipp-hw
|
lib/utils.py
|
utils.py
|
py
| 4,409 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27407521058
|
from livereload import Server, shell
from pathlib import Path
import sys
cur_dir = Path(__file__).parent
server = Server()
if "no" not in sys.argv:
exts = ("rst", "py", "jinja2")
print(f"Watching file changes {exts}")
cmd = shell("make html", cwd=str(cur_dir))
for ext in exts:
# nested or
server.watch(str(cur_dir / f"**.{ext}"), cmd)
# top level
server.watch(str(cur_dir / f"**/*.{ext}"), cmd)
server.serve(root=str(cur_dir / "_build" / "html"))
|
sudojarvis/xonsh
|
docs/serve_docs.py
|
serve_docs.py
|
py
| 499 |
python
|
en
|
code
| null |
github-code
|
6
|
44166444397
|
# Yusuf Nadir Cavus
# February 26, 2023
import socket
import threading
PORT = 8080 # assumed port number
HOST = 'localhost' # assumed host
HTML_FILE = "index.html" # assumed http file/webpage
IMAGE_FILE = "image.jpg" # assumed image file
BUF_SIZE = 1024 # max size for the request
# func: requestHandler
# parameters: c_socket : Any. This is a socket object, more specifically the client socket
# This function receives the request from the passed socket
# Then splits the request message to get the request method and what is requested
# if the method is 'GET', composes a response depending on the requested file (http file or image or neither)
def requestHandler(c_socket):
    req_sentence = c_socket.recv(BUF_SIZE).decode() # receive the request message
print(req_sentence)
req_method = req_sentence.split(' ')[0] # method
req_file = req_sentence.split(' ')[1] # file
if req_method == "GET":
if req_file == "/" + HTML_FILE:
# if the client's request is GET /index.html(HTML_FILE)
with open(HTML_FILE, "r") as f:
data = f.read()
# HTTP response header
response = "HTTP/1.0 200 OK\r\n" # status code
response += "Content-Type: text/html\r\n" # content type
response += "Content-Length: " + str(len(data)) + "\r\n" # content length
response += "\r\n" # indicating the end of the response header
response += data # this is the html, added to response after the header
c_socket.sendall(response.encode()) # send the response back
elif req_file == "/" + IMAGE_FILE:
# if the client's request is GET /image.jpg(IMAGE_FILE)
            with open(IMAGE_FILE, "rb") as f: # the mode specifier is 'rb' instead of 'r', because the file should be treated as binary
data = f.read() # otherwise, we get the error "UnicodeDecodeError: 'utf-8' codec can't decode"
# HTTP response header
response = "HTTP/1.0 200 OK\r\n" # status code
response += "Content-Type: image/jpeg\r\n" # content type
response += "Content-Length: " + str(len(data)) + "\r\n" # content length
response += "\r\n" # indicating the end of the response header
c_socket.sendall(response.encode() + data) # send the response back
        elif req_file == "/page1.html":
            # if the client's request is GET /page1.html
            response = "HTTP/1.0 301 Moved Permanently\r\n" # status code
            response += "Content-Type: text/plain\r\n" # content type
            response += "Location: /page2.html\r\n" # Location header specifies where the client should be redirected to
            response += "\r\n" # indicating the end of the response header
            c_socket.sendall(response.encode()) # send the response back (bugfix: the original appended an undefined 'data' variable)
else:
data = "404 Not Found"
response = "HTTP/1.0 404 Not Found\r\n" # status code
response += "Content-Type: text/plain\r\n" # content type
response += "Content-Length: {}\r\n".format(len(data)) # content length
response += "\r\n" # indicating the end of the response header
response += data
c_socket.sendall(response.encode()) # send the response back
else:
c_socket.close()
# main function
# creates a socket object and binds it to localhost:8080
# the main thread listens on the port with a connection backlog of 1
# for each request, a new thread is created and started
def main():
soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # create a socket object
soc.bind((HOST, PORT)) # bind it to localhost:8080
soc.listen(1)
print('The server is ready to receive\n')
while True:
connectionSocket = soc.accept()[0] # accept() returns a tuple[socket, address]. we only need the socket
        thread = threading.Thread(target = requestHandler, args = (connectionSocket,)) # the requestHandler function is called on a new thread
        # "(connectionSocket,)" the reason there is a comma after connectionSocket here is to make it
        # interpreted as a tuple with a single element instead of a variable, which is what args= accepts
thread.start()
if __name__ == '__main__':
main()
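# --- Added usage note (not part of the original file) ------------------------
# With index.html and image.jpg placed next to this script, the server can be
# exercised from another terminal, for example:
#   curl -i http://localhost:8080/index.html     # 200 with the HTML body
#   curl -i http://localhost:8080/image.jpg      # 200 with the image bytes
#   curl -i http://localhost:8080/page1.html     # 301 redirect to /page2.html
#   curl -i http://localhost:8080/missing.txt    # 404 Not Found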
|
ysfndr/Multi-thred-Web-Server
|
webServer.py
|
webServer.py
|
py
| 4,527 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71077185467
|
import Gmail_API_Lib
import Track_API_Lib
import Slack_API_Lib
import importlib
import json
import csv
import lovely_logger as log
import datetime
import time
late_checkin_alert_hour = 21
unclean_property_alert_hour = 14
regular_check_interval_minutes = 15
check_checkin_interval_minutes = 15
reload = 1#dummy variable to make the library re-save
#All times are in local time (EST)
late_checkins_time = datetime.datetime.now() - datetime.timedelta(days = 1) #Init
alert_checkins_time = datetime.datetime.now() - datetime.timedelta(days = 1) #Init
check_for_cleans_time = datetime.datetime.now() - datetime.timedelta(days = 1) #Init
set_cleans_time = datetime.datetime.now() - datetime.timedelta(days = 1) #Init
regular_interval_check_time = datetime.datetime.now() - datetime.timedelta(days = 1) #Init
check_checkin_time = datetime.datetime.now() - datetime.timedelta(days = 1) #Init
last_email_subject_read_file_cleaner = 'C:\\Users\\Bailey\\Documents\\Cozi\\Automations\\Track Automations\\email subject logs\\Last_Email_Read_Cleaner.txt'
last_email_subject_read_file_UMC = 'C:\\Users\\Bailey\\Documents\\Cozi\\Automations\\Track Automations\\email subject logs\\Last_Email_Read_UMC.txt' #universal Master Code
log.init('C:\\Users\\Bailey\\Documents\\Cozi\\Automations\\Track Automations\\Daily_Checks_Log')
try:
while (1):
today = datetime.datetime.now()
current_hour = today.hour
if (check_checkin_time + datetime.timedelta(minutes = check_checkin_interval_minutes) < today): #Updates todays reservations every 15 minutes.
log.info("Updating todays checkins")
todays_checkins = Track_API_Lib.get_todays_arrivals_units_and_names()
check_checkin_time = today
#General checks at regular intervals
if (regular_interval_check_time + datetime.timedelta(minutes = regular_check_interval_minutes)) < today: #Check every hour for Universal Master Code
log.info('Getting messages from Gmail')
msg_info = Gmail_API_Lib.get_gmail_subjects_and_dates() #E-mail subjects and dates
log.info('Got messages from Gmail')
#Alert for Master Code usage
log.info('Starting UMC Check')
UMC_check = Gmail_API_Lib.check_universal_master_code(msg_info) #Checks to see if the universal master code. Sends a Slack notification if so.
log.info('Completed UMC Check')
log.info('Starting New Checkins check')
new_checkins = Gmail_API_Lib.check_for_checkins(msg_info, todays_checkins) #Already strips non-PC properties and notifies CS team in Slack
if (len(new_checkins) > 0):
new_alerts = Gmail_API_Lib.alert_checkin()
Track_API_Lib.note_checkins(new_checkins)
regular_interval_check_time = today
if (current_hour == 13 or current_hour == late_checkin_alert_hour): #check for late checkins at 12pm and 8pm CST (Check twice to ensure there aren't more than 500 messages in inbox)
if ((late_checkins_time + datetime.timedelta(hours = 1)) <= today):
log.info('Checking for late checkins')
msg_info = Gmail_API_Lib.get_gmail_subjects_and_dates() #E-mail subjects and dates
log.info('Got messages from Gmail')
log.info('Getting todays checkins')
todays_checkins = Track_API_Lib.get_todays_arrivals_units_and_names()
log.info('Processing missing checkins')
missing_checkins = Gmail_API_Lib.check_for_checkins(msg_info, todays_checkins) #Already strips non-PC properties
log.info('Processing missing checkins')
late_checkins_time = today #subtract an hour to ensure the execution time doesn't keep creeping up over time.
if (current_hour == late_checkin_alert_hour and missing_checkins != None): #Alert for late checkins at 8pm CST. MUST BE SAME HOUR AS IF STATEMENT ABOVE OR THIS WONT TRIGGER
if ((alert_checkins_time + datetime.timedelta(hours = 1)) <= today):
log.info('Alerting for late checkins')
late_checkins = Gmail_API_Lib.report_late_checkins()
log.info('Sending Slack notifications')
Slack_API_Lib.send_guest_late_checkin_alert(late_checkins)
log.info('Posting notes to reservations in Track')
Track_API_Lib.note_late_checkins(late_checkins)
alert_checkins_time = today
log.info('Completed late checkins')
if ((current_hour >= 7 and current_hour <= 21) or current_hour == 3): #Check between 7am EST and 8pm EST and again at 3am EST
if ((check_for_cleans_time + datetime.timedelta(hours = 1)) <= today): #Checks every hour. Need to keep file updated with properties that have PC locks
log.info('Checking for cleaned properties')
#Set cleaned property statuses in Track
msg_info = Gmail_API_Lib.get_gmail_subjects_and_dates() #E-mail subjects and dates
log.info('Got messages from Gmail')
cleaned_units = Gmail_API_Lib.check_for_cleaners(msg_info) #Need to ensure Point Central has people properly labeled
inspected_units = Gmail_API_Lib.check_for_inspectors(msg_info) #Figure out what to do with Inspected Units
ready_units = Track_API_Lib.add_clean_and_inspected(cleaned_units, inspected_units)
log.info("Updating clean properties")
if (ready_units != None):
res = Track_API_Lib.set_unit_clean_status(ready_units, 1) #Sets units to clean. 1 sets status to clean
log.info("Updating clean combo properties")
res = Track_API_Lib.set_combo_properties_clean_status() #Sets combo properties to clean. Need to manually keep this list up to date. Is it necessary?
log.info('Set unit statuses')
check_for_cleans_time = today
if (current_hour == unclean_property_alert_hour): #Check at ~3pm EST (2pm CST) and alert
if ((set_cleans_time + datetime.timedelta(hours = 1)) < today):
#Alert for non-clean units
log.info('Checking for unclean properties to alert')
msg_info = Gmail_API_Lib.get_gmail_subjects_and_dates() #E-mail subjects and dates
log.info('Got messages from Gmail')
todays_checkins = Track_API_Lib.get_todays_arrivals_units_and_names()
check_for_clean = Gmail_API_Lib.remove_non_PC_properties(todays_checkins) #Removes non PC Properties from the clean check
unclean_units = Track_API_Lib.check_unclean_units(check_for_clean) #Need to cross reference the unit name as well
#Handle combo units based on what Track says
log.info('Sending Slack alerts if any')
for unit in unclean_units:
last_access = Gmail_API_Lib.last_cleaner(msg_info, unit['unit_name'])
res = Slack_API_Lib.send_slack_message('automated-alerts',"UNCLEAN CHECKIN POSSIBLE! " + last_access)
set_cleans_time = today
time.sleep(60)
except Exception as e:
Slack_API_Lib.send_slack_message("automation-errors", "Error with the Daily Checks code. Need to restart")
print(e)
#Check Track for unit clean status, and set to Clean if a cleaner has been there. (For combo's, both units must be Clean, then Combo can be Clean)
#Check email subjects for owners, then verify it is used during an owner stay. If not...? How about if the unit is blocked? Still notify?
|
mammalwithashell/scott-heyman-gcp-functions
|
Daily_Checks_v1.0.py
|
Daily_Checks_v1.0.py
|
py
| 7,843 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35032853414
|
import pdb
from models.merchant import Merchant
from models.transaction import Transaction
from models.user import User
from models.category import Category
import repositories.merchant_repository as merchant_repository
import repositories.transaction_repository as transaction_repository
import repositories.user_repository as user_repository
import repositories.category_repository as category_repository
user1 = User("John", 50.00)
user2 = User("Emma", 30.00)
user_repository.save(user1)
user_repository.save(user2)
merchant1 = Merchant("Tesco", "Glasgow")
merchant2 = Merchant("Oasis", "Edinburgh")
merchant3 = Merchant("Asda", "Glasgow")
merchant_repository.save(merchant1)
merchant_repository.save(merchant2)
merchant_repository.save(merchant3)
category1 = Category("Groceries")
category2 = Category("Clothing")
category3 = Category("Fuel")
category_repository.save(category1)
category_repository.save(category2)
category_repository.save(category3)
transaction1 = Transaction(25.00, category2, "2020-09-22", merchant1, user1)
transaction2 = Transaction(5.00, category1, "2020-01-12", merchant2, user1)
transaction3 = Transaction(10.00, category3, "2020-02-15", merchant3, user2)
transaction4 = Transaction(90.00, category2, "2020-05-01", merchant2, user2)
transaction_repository.save(transaction1)
transaction_repository.save(transaction2)
transaction_repository.save(transaction3)
transaction_repository.save(transaction4)
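# Quick illustrative summary of what was just seeded, kept as plain tuples so it does not
# assume any attribute names on the Transaction model:
seeded = [("John", 25.00), ("John", 5.00), ("Emma", 10.00), ("Emma", 90.00)]
totals = {}
for name, amount in seeded:
    totals[name] = totals.get(name, 0) + amount
print(totals)  # expected: {'John': 30.0, 'Emma': 100.0}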
repo_name: linseycurrie/Spending-Tracker | sub_path: spending_tracker/console.py | file_name: console.py | file_ext: py | file_size_in_byte: 1,447 | program_lang: python | lang: en | doc_type: code | stars: 2 | dataset: github-code | pt: 6

seq_id: 40892321700
import os
import discord
import re
import asyncio
from keepAlive import KeepAlive
from spotifySelfAPI import SpotifyAuthAccessToken, SpotifySearch, SpotifyPlaylistCreate, SpotifyPlaylistAdd
from replaceBadKeywords import ReplaceBadKeywords
from collections import OrderedDict
from youtubeSelfAPI import YoutubePlaylistCreate, YoutubeSearch, YoutubePlaylistAdd
import time
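
# SpotifyAuthAccessToken (imported above from spotifySelfAPI, which is not shown here) presumably
# wraps Spotify's standard refresh-token grant. A self-contained sketch of that flow, with an
# illustrative function name and assuming the requests library is available:
import requests

def refresh_spotify_access_token(client_id, client_secret, refresh_token):
    # exchange the long-lived refresh token for a short-lived access token
    resp = requests.post(
        "https://accounts.spotify.com/api/token",
        data={"grant_type": "refresh_token", "refresh_token": refresh_token},
        auth=(client_id, client_secret),  # HTTP Basic auth with the app's client credentials
        timeout=10,
    )
    resp.raise_for_status()
    return resp.json()["access_token"]
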
client = discord.Client()
@client.event
async def on_ready():
    print("we have logged in as {0.user}".format(client))


@client.event
async def on_message(message):
    if message.author == client.user:
        return
    if message.content.startswith('$ppls'):
        start = time.time()
        print("let's get started")
        #main code
        l = 10000
        req_limit = 50  # batch size used when adding items to a playlist
        s_client_id = os.environ['SPOTIFY_CLIENT_ID']
        s_client_secret = os.environ['SPOTIFY_CLIENT_SECRET']
        s_refresh_token = os.environ['SPOTIFY_REFRESH_TOKEN']
        text_scraper = []
        embedlist = []
        s_rawuri = []
        s_temprawuri = []
        name_id_pair = []
        tempembedlist = []
        async for msg in message.channel.history(limit=l):
            if msg.author.name == "Rythm":
                text_scraper.append([msg.content])
                embedlist.append(msg.embeds)
            if re.match(r"^:thumbsup:", msg.content):
                break
        try:
            n = len(text_scraper)
            new_embedlist = embedlist[:n+1]
        except UnboundLocalError:
            raise Exception("init message before l=10000")
        for i in range(n):
            if new_embedlist[i]:  # an empty list is falsy, so messages without embeds are skipped
                tempembedlist.append(new_embedlist[i])
        s_access_token = SpotifyAuthAccessToken(s_client_id, s_client_secret, s_refresh_token)
        pplatform_embed = discord.Embed(
            title="Do you want playlist on Spotify or Youtube Music?\nType y for youtube music or type s for spotify",
            description="This request will timeout after 1 min"
        )
        pplatform_embed_sent = await message.channel.send(embed=pplatform_embed)
        try:
            def check(m):
                return m.author == message.author and m.channel == message.channel

            pplatform_msg = await client.wait_for(
                'message',
                timeout=60,
                check=check)
            platform_name = pplatform_msg.content
            for i in range(len(tempembedlist)):
                temp = tempembedlist[i][0]
                tempdesc = temp.description
                if re.match(r"^\*", tempdesc):
                    tempurl = re.findall(r'(?:(?:https?|ftp):\/\/)?[\w/\-?=%.]+\.[\w/\-&?=%.]+', tempdesc)
                    try:
                        url = tempurl[0]
                        parsed = url.split("=")
                        y_videoId = parsed[1]
                    except:
                        y_videoId = None
                        print("could not parse a video ID; storing None")
                    tempname = re.findall(r'\[(.*?)\]', tempdesc)  # list of one item
                    try:
                        tempname = ReplaceBadKeywords(tempname[0])
                    except:
                        pass
                    tempkv = [tempname, y_videoId]
                    name_id_pair.append(tempkv)
            if pplatform_msg:
                pname_embed = discord.Embed(
                    title="What should be the name of your playlist",
                    description="This request will timeout after 1 min"
                )
                pname_embed_sent = await message.channel.send(embed=pname_embed)
                try:
                    pname_msg = await client.wait_for(
                        'message',
                        timeout=60,
                        check=check)
                    playlist_name = pname_msg.content
                    if pname_msg:
                        if (platform_name == "y") or (platform_name == "youtube"):
                            y_playlist_id = YoutubePlaylistCreate(playlist_name)
                            y_rawvideoIds = [k[1] for k in name_id_pair]
                            # split the video IDs into chunks of req_limit for the add requests
                            y_videoIds = [y_rawvideoIds[i:i + req_limit] for i in range(0, len(y_rawvideoIds), req_limit)]
                            await message.channel.send("Your Youtube Playlist is being generated")
                            for j in range(len(y_videoIds)):
                                YoutubePlaylistAdd(y_videoIds[j], y_playlist_id)
                            y_playlist_link = f"https://music.youtube.com/playlist?list={y_playlist_id}"
                            await message.channel.send(y_playlist_link)
                        elif (platform_name == "s") or (platform_name == "spotify"):
                            for i in range(len(name_id_pair)):
                                try:
                                    s_tempuri = SpotifySearch(name_id_pair[i][0], s_access_token)
                                    s_temprawuri.append(s_tempuri)
                                except IndexError:
                                    try:
                                        # if the scraped name finds nothing on Spotify, search YouTube for a title and retry
                                        song_name = YoutubeSearch(name_id_pair[i][0])
                                        s_tempuri = SpotifySearch(song_name, s_access_token)
                                        s_temprawuri.append(s_tempuri)
                                    except IndexError:
                                        print("no Spotify match found, skipping:", name_id_pair[i])
                            await message.channel.send("Your Spotify Playlist is being generated")
                            s_playlist_id = SpotifyPlaylistCreate(playlist_name, s_access_token)
                            s_rawuri = list(OrderedDict.fromkeys(s_temprawuri))  # de-duplicate while keeping order
                            s_uri = [s_rawuri[i:i + req_limit] for i in range(0, len(s_rawuri), req_limit)]
                            for j in range(len(s_uri)):
                                SpotifyPlaylistAdd(s_uri[j], s_playlist_id, s_access_token)
                            s_playlist_link = f"http://open.spotify.com/user/r4xa4j5m4mjpz14d0kz0v9gfz/playlist/{s_playlist_id}"
                            await message.channel.send(s_playlist_link)
                        else:
                            await message.channel.send("you didn't enter a valid response, kindly run the bot again")
                except asyncio.TimeoutError:
                    await pname_embed_sent.delete()
                    await message.channel.send("Cancelling due to timeout", delete_after=10)
        except asyncio.TimeoutError:
            await pplatform_embed_sent.delete()
            await message.channel.send("Cancelling due to timeout", delete_after=10)
        print("done")
        end = time.time()
        print(f"Runtime of the program is {end - start}")
KeepAlive()
client.run(os.environ['DISCORD_BOT_TOKEN'])
repo_name: sarvagya6/discord-playlist-bot | sub_path: main.py | file_name: main.py | file_ext: py | file_size_in_byte: 7,019 | program_lang: python | lang: en | doc_type: code | stars: 1 | dataset: github-code | pt: 6

seq_id: 72462557307
from ex1 import Person
class Student(Person):
    def __init__(self, name, height, age, clas, group, surname):
        super().__init__(name, height, age, surname)
        self.clas = clas
        if isinstance(age, int) and isinstance(height, int):
            self.group = group
        else:
            # reject non-integer height/age instead of silently skipping the group
            raise TypeError(f'{type(height).__name__} object cannot be interpreted')

    def __str__(self):
        return f'{super().__str__()}; Class = {self.clas}, Group = {self.group}'


inst1 = Student('Dan', 190, 17, '8A', '2P-19', 'Rim')
print(inst1)
repo_name: jurbx/python_pro | sub_path: day2/ex2.py | file_name: ex2.py | file_ext: py | file_size_in_byte: 547 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6