# -*- coding: utf-8 -*-
"""
Classifiers
Created on Fri Mar 2 05:18:46 2018
@author: Oliver
"""
import sys
import os
from sklearn import svm, metrics
#from sklearn.decomposition import PCA
#from sklearn.pipeline import Pipeline
#from sklearn.model_selection import GridSearchCV
#from sklearn.linear_model import LogisticRegression
#from sklearn.ensemble import BaggingClassifier, RandomForestClassifier
#from sklearn.multiclass import OneVsRestClassifier
import numpy as np
import tensorflow as tf
from attack import attacks
import scipy.misc
#import datetime
flags = tf.app.flags
flags.DEFINE_string("models", "linsvc,cnet", "the models to run")
flags.DEFINE_bool("adversarial", False, "run adversarial attacks")
flags.DEFINE_integer("examples", 20000, "number of examples")
flags.DEFINE_string("log", 'record.txt' , "log file")
flags.DEFINE_integer("epoch", -1, "number of epochs")
FLAGS = flags.FLAGS
def report(expected, predicted, message='', outfile = './results/record.txt') :
creport = metrics.classification_report(expected, predicted)
print(message)
print("Classification report:\n%s\n" % (creport))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))
with open(outfile, 'a') as log:
log.write('\n' + ('*'*50) +'\n'+message+'\n\n'+ creport)
return metrics.accuracy_score(expected, predicted)
def build(pre, modeler, post, name):
def train(x_train_raw, y_train_raw, train_descr):
print('raw X,Y shapes: ', x_train_raw.shape, y_train_raw.shape)
sys.stdout.flush()
x_train,y_train, params = pre(x_train_raw, y_train_raw)
print(name, ' -- train data ready. ', x_train.shape, y_train.shape)
sys.stdout.flush()
clf = modeler(x_train, y_train, **params)
print(name, ' -- train data fit.')
sys.stdout.flush()
def test(x_test_raw, y_test_raw, test_descr):
x_test, y_test, params2 = pre(x_test_raw, y_test_raw)
if( FLAGS.adversarial ) :
accuracy = attacks(clf, x_test, y_test)
# require params = params2
adv = "\n\nvulnerability: "+str(accuracy)
else:
adv = ''
predict = post(clf.predict(x_test))
expect = post(y_test)
print(name, ' -- test data predicted')
sys.stdout.flush()
report(expect, predict, " Model: "+name+"\nTraining: "+train_descr+
"\n Testing: "+test_descr + adv, './results/'+FLAGS.log+'.txt')
return test
test.test = test
return test
train.train = train
return name, train
def linsvc():
def pre(X, Y):
assert(X.shape[0] == Y.shape[0])
return X.reshape((Y.shape[0], -1)), Y, {}
def post(Y):
return Y
def modeler(X, Y):
classifier = svm.LinearSVC()
#classifier = Pipeline([('pca', PCA(n_components = dim_latent)), ('log', LogisticRegression())], verbose=True);
##### too long
#classifier = OneVsRestClassifier(BaggingClassifier(svm.SVC(kernel='linear'),
# max_samples=1.0 / n_estimators, n_estimators=n_estimators))
classifier.fit(X, Y)
return classifier
return build(pre, modeler, post, "linsvc")
def net():
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
#from keras import backend as K
def pre(X, Y):
num_classes = len(set(Y))
# input image dimensions
#img_rows, img_cols = 28, 28
#chan =
#input_shape = (chan, img_rows, img_cols) if K.image_data_format() == 'channels_first' else (img_rows, img_cols, chan)
# First dimension is the sample. If the next two are provided and nothing else, then we still need a channel.
if len(X.shape) == 3:
X = X.reshape(*X.shape, 1)
input_shape = X.shape[1:]
# X = X.reshape(X.shape[0], *input_shape)
X = X.astype('float32')
X /= 255
Y = keras.utils.to_categorical(Y, num_classes)
return X, Y, dict(num_classes=num_classes, input_shape=input_shape)
def modeler(X, Y, **params):
epochs = 12
batch_size = 128
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
activation='relu',
input_shape=params['input_shape']))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(params['num_classes'], activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
model.fit(X, Y,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_split=0.1)
return model
def post(Y):
return Y.argmax(axis=1)
return build(pre, modeler, post, "cnet")
def sample(dataset, number):
xs, ys, name = dataset
indices = np.array(np.random.randint(xs.shape[0], size=number))
return xs[indices], ys[indices], name+'-crop['+str(number)+']'
def ttsplit(X,Y, name, prc):
n = int(prc * X.shape[0])
return (X[:n], Y[:n], name+'-train'), (X[n:], Y[n:], name+'-test')
def both(data1, data2):
X1, Y1, n1 = data1
X2, Y2, n2 = data2
sfx = ['-train', '-test']
name = n1 +'&'+ n2
for a, b in (sfx, reversed(sfx)):
if n1.endswith(a):
begin = n1[:-len(a)]
if n2 == begin+b:
name = begin+'-all'
return np.concatenate((X1, X2), axis=0), np.concatenate((Y1, Y2), axis=0), name
def shuffle(data):
X, Y, n = data
idx = np.arange(X.shape[0])
np.random.shuffle(idx)
return X[idx], Y[idx], '§'+n
global VARS;
if __name__ == '__main__':
import scipy.misc
from keras import backend as K
K.set_learning_phase(1) #set learning phase
N_EXAMPLES = FLAGS.examples
# Questions
# How well does a model trained on this data test on the original?
# train on this, test on this
# train on original, test on this.
# trained on part this part original, test on original
# samples are all in
# /sampledir/fashion-n/split/...
datasetname = sys.argv[1]
gen_method = sys.argv[2]
# First load original dataset
def getoriginaldata():
# I suspect these might be faster, which is why I've left them in.
# if datasetname == 'fashion':
# from keras.datasets import fashion_mnist
# return fashion_mnist.load_data()
#
# elif datasetname == 'mnist':
# from keras.datasets import mnist
# return mnist.load_data()
# elif datasetname == 'cifar':
# from keras.datasets import cifar10
# return cifar10.load_data()
from scipy.misc import imread
Xs = {'train': [], 'test':[]}
Ys = {'train': [], 'test':[]}
base = './data/'+datasetname
for slabel in os.listdir(base):
for mode in 'train', 'test':
count = 0
base2 = base + '/' + slabel+ '/' + mode
for imgfn in os.listdir(base2):
x = imread(base2+"/"+imgfn)
Xs[mode].append(x)
Ys[mode].append(int(slabel))
count += 1
if count >= N_EXAMPLES:
break
print("original data loaded")
sys.stdout.flush()
return (np.array(Xs['train']), np.array(Ys['train'])), (np.array(Xs['test']), np.array(Ys['test']))
#raise ValueError('what is '+datasetname+'??')
(xtrain, ytrain), (xtest, ytest) = getoriginaldata()
nlabels = len(set(ytest))
stand_train = xtrain, ytrain, 'standard-'+datasetname+'-train'
stand_test = xtest, ytest, 'standard-'+datasetname+'-test'
X_stand = np.concatenate((xtrain, xtest), axis=0)
Y_stand = np.concatenate((ytrain, ytest), axis=0)
stand_all = X_stand, Y_stand, 'standard-'+datasetname+'-all'
stand_train_small = sample(stand_train, N_EXAMPLES) if N_EXAMPLES < stand_train[0].shape[0] else stand_train
stand_test_small = sample(stand_test, N_EXAMPLES) if N_EXAMPLES < stand_test[0].shape[0] else stand_test
Xs = []
Ys = []
#datapath = './samples'
for method in gen_method.split('&'):
datapath = './samples/'+method
folders = [f for f in os.listdir(datapath) if f.startswith(datasetname+'-')]
for f in folders:
label = int(f[len(datasetname)+1:])
extra = ''
if FLAGS.epoch > 0:
extra += '/epoch-%d' % FLAGS.epoch
if 'split' in os.listdir(datapath+'/'+f + extra) :
extra += '/split'
for idx, imagename in enumerate(os.listdir(datapath+'/'+f+extra)):
# silly test optimization, force smaller data.
if idx > N_EXAMPLES / nlabels:
break
# VERY IMPORTANT. This next line makes sure shitty training things from
# early on in the GAN process are not reused.
# if ('test' in imagename):
dataX = scipy.misc.imread(datapath+'/'+f+extra+'/'+imagename)
Xs.append(dataX)
Ys.append(label)
X_gen = np.array(Xs)
Y_gen = np.array(Ys)
gen_inorder = X_gen, Y_gen, datasetname + '-gen-' + gen_method
gen = shuffle(gen_inorder)
#gen_small = sample(gen, N_EXAMPLES)
gen_small = gen
gen_small_train, gen_small_test = ttsplit(*gen_small, 0.7)
print("ALL DATA LOADED\n" + '*'*50)
learners = dict([linsvc(), net()])
global VARS
VARS = locals()
for name in FLAGS.models.split(','):
if name not in learners:
print("NO such learner: "+name)
continue
learner = learners[name]
print("learner: ", name)
# train on gen, test on standard_all
learner.train(*gen_small_train) \
.test(*stand_train_small)(*stand_test_small)(*gen_small_test)
learner.train(*stand_train_small) \
.test(*stand_test_small)(*gen_small_test)(*gen_small)
learner.train(*gen_small) \
.test(*stand_test)(*stand_all)
|
import PyV8
ctxt = PyV8.JSContext()
ctxt.enter()
func = ctxt.eval("""
(function(){
function hello(){
return "Hello world.";
}
return hello();
})
""")
print func()
|
from django.apps import AppConfig
class PriceAnalysisConfig(AppConfig):
name = 'price_analysis'
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2019-07-10 15:28
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('trend', '0005_auto_20190710_1810'),
]
operations = [
migrations.AlterField(
model_name='product',
name='category',
field=models.CharField(choices=[(1, '1'), (2, '1'), (3, '3')], max_length=50),
),
]
|
# Copyright 2021 Pants project contributors.
# Licensed under the Apache License, Version 2.0 (see LICENSE).
DEV_PORTS = {
"helloworld.service.frontend": 8000,
"helloworld.service.admin": 8001,
"helloworld.service.user": 8010,
"helloworld.service.welcome": 8020,
}
def get_dev_port(service: str) -> int:
try:
return DEV_PORTS[service]
except KeyError:
raise ValueError(f"No dev port found for service {service}")
|
from random import randint, seed
import cv2 as cv
import sys
import os
import math
import numpy as np
import heapq
from timeit import default_timer as timer
# from numba import jit
# from numba.typed import List
seed(42069)
# functions for converting images to grids
def getListOfFiles(dirName, allFiles):
# create a list of file and sub directories
# names in the given directory
listOfFile = os.listdir(dirName)
# Iterate over all the entries
for entry in listOfFile:
# Create full path
fullPath = os.path.join(dirName, entry)
# If entry is a directory then get the list of files in this directory
if os.path.isdir(fullPath):
getListOfFiles(fullPath, allFiles)  # recurse, accumulating into allFiles
else:
allFiles.append(fullPath)
def imageToGrid(image, grid, dim):
print("----- Converting Image to Grid Object -----")
# load image
img = cv.imread(cv.samples.findFile(image))
if img is None:
sys.exit("Could not read the image.")
cv.imwrite("output/original.png", img)
# resize image
resized = cv.resize(img, dim, interpolation = cv.INTER_AREA)
cv.imwrite("output/resized.png", resized)
print('Resized Dimensions : ',resized.shape)
# convert to grayscale
grayImg = cv.cvtColor(resized, cv.COLOR_BGR2GRAY)
cv.imwrite("output/grayscale.png", grayImg)
# apply thresholding to easily separate walkable and unwalkable areas
ret, thresh1 = cv.threshold(grayImg,75,229,cv.THRESH_BINARY)
cv.imwrite("output/threshold.png", thresh1)
print('Threshold Dimensions : ', thresh1.shape)
for i in range(thresh1.shape[0]):
for j in range(thresh1.shape[1]):
# if the current value = 0 (meaning black) append to list of walls
if thresh1[i][j] == 0:
grid[i,j] = 0
else:
grid[i,j] = 1
def createGridFromDatasetImage(dataset, grid, dim):
print('----- Creating Grid Object from Dataset Image-----')
listOfImages = []
getListOfFiles(dataset, listOfImages)
image = listOfImages[randint(0, len(listOfImages)-1)]
print('Random Image: ', image)
imageToGrid(image, grid, dim)
def randomStartGoal(grid, start, goal):
print('----- Generating Random Start and Goal -----')
dist = 0
width, height = grid.shape
hypotenuse = math.sqrt(math.pow(width, 2) + math.pow(height,2))
while dist <= 0.50*hypotenuse:
random_x = randint(0, width-1)
random_y = randint(0, height-1)
start[0] = random_x
start[1] = random_y
while grid[random_x, random_y] == 0:
random_x = randint(0, width-1)
random_y = randint(0, height-1)
start[0] = random_x
start[1] = random_y
random_x = randint(0, width-1)
random_y = randint(0, height-1)
goal[0] = random_x
goal[1] = random_y
while grid[random_x, random_y] == 0:
random_x = randint(0, width-1)
random_y = randint(0, height-1)
goal[0] = random_x
goal[1] = random_y
a = np.array(start)
b = np.array(goal)
dist = np.linalg.norm(a-b)
# function for reconstructing found path
def reconstructPathV2(cameFrom, start, goal, path):
currentX, currentY = goal
while (currentX, currentY) != start:
path.append((currentX, currentY))
currentX, currentY = cameFrom[currentX, currentY]
path.append(start)
path.reverse()
# functions for pathfinding
# @jit
def passable(grid, tile):
x,y = tile
return grid[tile] == 1
# @jit
def inBounds(grid, tile):
(x, y) = tile
return 0 <= x < grid.shape[0] and 0 <= y < grid.shape[1]
# @jit
def getNeighbors(grid, tile):
(x, y) = tile
results = []
possibleNeighbors = [(x+1,y), (x,y-1), (x-1,y), (x,y+1)]
for tile in possibleNeighbors:
if inBounds(grid, tile):
if passable(grid, tile):
results.append(tile)
if (x + y)%2 == 0: results.reverse()
return results
# @jit
def heuristic(a, b):
(x1, y1) = a
(x2, y2) = b
return abs(x1-x2) + abs(y1-y2)
# @jit(nopython=True)
def search(grid, start, goal, parentHash, FValue):
width, height = grid.shape
openList = []
# openList.append([9999, (-1, -1)])
openListEntryFinder = {}
closedList = []
# openList.append([9999, (-1, -1)])
closedListEntryFinder = {}
GValue = np.zeros((width, height), dtype=np.int32)
HValue = np.zeros((width, height), dtype=np.int32)
parentHash[:] = np.array([-1,-1])
addToPQ(openList, openListEntryFinder, start, 0)
startX, startY = start
GValue[startX, startY] = 0
HValue[startX, startY] = heuristic(start, goal)
FValue[startX, startY] = GValue[startX, startY] + HValue[startX, startY]
while not len(openList) == 0:
# print(openListEntryFinder)
current = popFromPQ(openList, openListEntryFinder)
# print(grid[current])
currentX, currentY = current
if current == goal:
# print('Found goal %s' %(str(current)))
break
for next in getNeighbors(grid, current):
# print(next)
nextX, nextY = next
newG = GValue[currentX, currentY] + 1 # constant 1 since grid
if next in openListEntryFinder:
if newG < GValue[nextX, nextY]:
removeFromPQ(openList, openListEntryFinder, next)
if next in closedListEntryFinder:
if newG < GValue[nextX, nextY]:
removeFromPQ(closedList, closedListEntryFinder, next)
if (next not in openListEntryFinder) and (next not in closedListEntryFinder):
parentHash[nextX, nextY] = np.array([currentX, currentY])
GValue[nextX, nextY] = newG
HValue[nextX, nextY] = heuristic(next, goal)
FValue[nextX, nextY] = GValue[nextX, nextY] + HValue[nextX, nextY]
addToPQ(openList, openListEntryFinder, next, FValue[nextX, nextY])
addToPQ(closedList, closedListEntryFinder, current, FValue[currentX, currentY])
# functions for priority queue
# @jit
def addToPQ(elements, entryFinder, item, priority=0):
if item in entryFinder:
removeFromPQ(elements, entryFinder, item)
entry = (priority, item)
entryFinder[item] = entry
heapq.heappush(elements, entry)
# print(elements)
# print(entryFinder)
# @jit
def removeFromPQ(elements, entryFinder, item):
entry = entryFinder.pop(item)
elements.remove(entry)
heapq.heapify(elements)
# @jit
def popFromPQ(elements, entryFinder):
priority, item = heapq.heappop(elements)
return item
def main():
# create grid from image dataset
scale_factor = 7 # scales to a power of 2
dim = (int(math.pow(2, scale_factor)), int(math.pow(2, scale_factor)))
grid = np.zeros(dim, dtype=np.int32)
createGridFromDatasetImage('dataset/da2-png', grid, dim)
print(grid)
# generate random start and goal
start = [-1, -1]
goal = [-1, -1]
randomStartGoal(grid, start, goal)
start = tuple(start)
goal = tuple(goal)
# search for path
width, height = grid.shape
parents = np.empty((width, height, 2), dtype=np.int32)
cost = np.zeros((width, height), dtype=np.int32)
s = timer()
search(grid, start, goal, parents, cost)
# print(parents)
# print(cost)
# reconstruct path
path = []
reconstructPathV2(parents, tuple(start), tuple(goal), path)
e = timer()
print('Before compilation: ', e-s)
s = timer()
search(grid, start, goal, parents, cost)
# print(parents)
# print(cost)
# reconstruct path
path = []
reconstructPathV2(parents, tuple(start), tuple(goal), path)
e = timer()
print('After compilation: ', e-s)
print(path)
if __name__ == "__main__":
main()
|
from datetime import datetime
from pysnmp.smi import rfc1902
from piat.utils.logger import get_logger
LOGGER = get_logger(__name__)
class TrapMsg:
""" Trap msg Object """
def __init__(self, var_binds, viewer):
"""
Trap Msg Object.
:param var_binds: pysnmp var_binds.
:param viewer: pysnmp MibViewController
"""
self._var_binds = var_binds
self._viewer = viewer
self.timestamp = datetime.now()
self._parsed_data = {}
self._parse()
self.result = {'ip': self._parsed_data['snmpTrapAddress'],
'timestamp': self.timestamp, **self._parsed_data}
def _parse(self):
""" translate the trap msg with the mib viewer """
for name, val in self._var_binds:
var_bind = rfc1902.ObjectType(rfc1902.ObjectIdentity(name), val)
var_bind.resolveWithMib(self._viewer)
obj_name = var_bind[0].getMibSymbol()[1]
value = var_bind[1].prettyPrint()
self._parsed_data[obj_name] = value
def get_dictionary(self):
return self.result
|
K = int(input())
h = K // 2
print((K - h) * h)
|
# Copyright (c) 2013, Lin To and contributors
# For license information, please see license.txt
# PLEASE NOTE : THIS CODE IS TERRIBLE, I GENERALLY DO BETTER THAN THIS
from __future__ import unicode_literals
import frappe
from frappe import _
def execute(filters=None, filter_by_date=True):
records = get_sql_records(filters, filter_by_date)
if len(records) == 0:
frappe.throw(_("No values available for set filters"))
accounts = get_sql_records_accounts(filters)
tree = build_sheet_tree(records, accounts)
columns = get_columns()
data = get_data(tree)
chart = get_chart_data(data)
return columns, data, "", chart
def get_chart_data(data):
labels = ["Assets", "Liabilities", "Equity"]
c_dict = {}
for d in data:
acc = d["account"]
amt = d["amount"]
for l in labels:
if l in acc:
c_dict[l] = amt
chart = dict(
type="bar",
data=dict(
labels=labels, datasets=[dict(name=_(l), values=[f(c_dict[l])] ) for l in labels]
),
)
return chart
def f(v):
return float(v.replace(",",""))
def get_data(tree):
tk = [(k, i[3]) for k, i in tree.items() if i[1] is None]
tk.sort(key=lambda i: i[1])
data = []
totals = {0: 0, 1: 0}
default = int(tk[0][1])
has_switched = False
for i, (an, i_c) in enumerate(tk):
assert isinstance(i_c, int), "Apply cast."
totals[i_c] += tree[an][0]
if i_c != default and not has_switched:
append_total_row(default, totals, data)
has_switched = True
data.append(get_data_row("", "", 0, 0))
add_data_row(an, tree, data, 0)
if i == len(tk) - 1:
append_total_row(i_c, totals, data)
return data
def append_total_row(i_c, totals, data):
label = frappe.bold(f'Total ({"Credit" if i_c else "Debit"})')
amount = frappe.format(totals[i_c], "Currency")
row = get_data_row(label, amount, 0, 0)
data.append(row)
def add_data_row(an, tree, data, i):
balance, _, children, _ = tree[an]
is_group = 0 if not children else 1
account = an
if i == 0:
account = frappe.bold(account)
amount = frappe.format(balance, "Currency")
data.append(get_data_row(account, amount, i, is_group))
if is_group:
for child in children:
add_data_row(child, tree, data, i + 1)
def get_data_row(account, amount, indent, is_group):
return frappe._dict(
{"account": account, "amount": amount, "indent": indent, "is_group": is_group}
)
def get_columns():
return ["Account:Data:300", "Amount:Currency:200"]
def build_sheet_tree(records, accounts):
statement = get_balance_statement(records)
accounts = {n: (n, an, pn, i_g, i_c, c) for n, an, pn, i_g, i_c, c in accounts}
accs = {}
for an in statement:
balance, pn, _ = statement[an]
if an not in accs:
pan = None if pn is None else accounts[pn][1]
accs[an] = [balance, pan, None, None]
manage_parents(an, pn, accs, accounts, balance)
return accs
def manage_parents(an, pn, accs, accounts, balance):
insert_parents(pn, accs, accounts)
update_parents(an, pn, accs, accounts, balance)
def insert_parents(pn, accs, accounts):
pan = accounts[pn][1]
if pan in accs:
return
ppn = accounts[pn][2]
i_c = accounts[pn][4]
if ppn is None:
accs[pan] = [0, None, [], i_c]
else:
ppan = accounts[ppn][1]
accs[pan] = [0, ppan, [], None]
insert_parents(ppn, accs, accounts)
def update_parents(an, pn, accs, accounts, balance):
pan = accounts[pn][1]
accs[pan][0] += balance
accs[pan][2].append(an)
ppn = accounts[pn][2]
if ppn is not None:
update_parents(pan, ppn, accs, accounts, balance)
def get_balance_statement(records):
statement = {}
for _, cr, dr, is_credit, account_name, parent_account in records:
if account_name not in statement:
statement[account_name] = [0, parent_account, None]
if is_credit:
statement[account_name][0] += cr
statement[account_name][0] -= dr
else:
statement[account_name][0] -= cr
statement[account_name][0] += dr
return statement
def get_sql_records_accounts(filters):
query = f"""
SELECT
name,
account_name,
parent_account,
is_group,
is_credit,
currency
FROM
tabAccount
WHERE
company="{filters['company']}"
ORDER BY
is_group DESC
"""
return frappe.db.sql(query)
def get_sql_records(filters, filter_by_date=True):
query = f"""
SELECT
`tabGL Entry`.account,
`tabGL Entry`.credit,
`tabGL Entry`.debit,
`tabAccount`.is_credit,
`tabAccount`.account_name,
`tabAccount`.parent_account
FROM
`tabGL Entry` JOIN `tabAccount`
ON `tabGL Entry`.account =`tabAccount`.name
WHERE
`tabAccount`.company="{filters['company']}"
AND
`tabAccount`.report_type="Balance Sheet"
AND
`tabGL Entry`.docstatus=1
"""
if filter_by_date:
query += f"""
AND
`tabGL Entry`.posting_date BETWEEN
"{filters['from_date']}"
AND
"{filters['to_date']}"
"""
return frappe.db.sql(query)
# PLEASE NOTE : THIS CODE IS TERRIBLE, I GENERALLY DO BETTER THAN THIS
|
#!/usr/bin/python
import os
import sys
import argparse
import numpy as np
import pylab as pl
import scipy.io
from copy import deepcopy
from scai_mne.viz import circular_layout, plot_connectivity_circle
from scai_utils import *
from aparc12 import get_aparc12_cort_rois
lobes = ["Prefrontal", "Premotor", "Insular", "Precentral", \
"Postcentral", "PPC", "Temporal", "Cingulate"]
# lobeClrs = [(0, 0, 1), (0, 1, 1), (1, 0, 1), (1, 0, 0), \
# (1, 1, 0), (0, 0.5, 0), (1, 0.5, 0), (0.5, 0, 0.5)]
lobeClrs = [(0.5, 0.5, 0.5)] * len(lobes)
COORD_FILE = "/users/cais/STUT/FSDATA/fsaverage2/mri/aparc12_roi_coords.txt"
hemis=["lh", "rh"]
FIG_DIR = "/users/cais/STUT/figures"
if __name__ == "__main__":
ap = argparse.ArgumentParser(description="Draw connectivity circle plot")
ap.add_argument("inMatFN", help="Input mat file with the a_cmat")
ap.add_argument("hemi", type=str, choices=hemis, help="Hemisphere")
ap.add_argument("grp", type=str, help="Group (e.g., PWS, PFS: must exist as a_cmat[grp] in inMatFN")
ap.add_argument("--vmax", type=float, default=np.nan,
help="Maximum value (e.g., 331.8")
if len(sys.argv) == 1:
ap.print_help()
sys.exit(0)
# === Parse input arguments === #
args = ap.parse_args()
inMatFN = args.inMatFN
hemi = args.hemi
grp = args.grp
vmax = args.vmax
# === ROIs by lobe ===
rois_bl = {}
for (i0, t_lobe) in enumerate(lobes):
rois_bl[t_lobe] = get_aparc12_cort_rois(lobe=t_lobe, bSpeech=True)
rois_bl[t_lobe] = np.array(rois_bl[t_lobe])
# === Read the ROI centers of gravity from text file === #
# check_file(COORD_FILE)
cf = open(COORD_FILE, "rt")
ct = cf.read().split('\n')
ct = remove_empty_strings(ct)
cf.close()
roi_names = []
roi_nums = []
roi_coords = []
for (i0, tline) in enumerate(ct):
t_items = tline.split(' ')
if len(t_items) != 5:
raise Exception, "Unrecognized formant in a line of %s: %s" \
% (COORD_FILE, tline)
roi_names.append(t_items[0])
roi_nums.append(t_items[1])
t_coord = [float(t_items[2]), float(t_items[3]), float(t_items[4])]
roi_coords.append(t_coord)
cogy_bl = {}
for (i0, t_lobe) in enumerate(lobes):
cogy_bl[t_lobe] = np.zeros(len(rois_bl[t_lobe]))
for (i1, t_roi) in enumerate(rois_bl[t_lobe]):
assert(roi_names.count("lh_" + t_roi) == 1)
t_coord = roi_coords[roi_names.index("lh_" + t_roi)]
cogy_bl[t_lobe][i1] = t_coord[1]
# print("%s - %f" % (t_roi, t_coord[1])) # DEBUG
sortidx = sorted(range(len(cogy_bl[t_lobe])), \
key=lambda k: cogy_bl[t_lobe][k], \
reverse=True)
rois_bl[t_lobe] = rois_bl[t_lobe][sortidx]
# === Combine into a single list of ROIs === #
rois = []
for (i0, t_lobe) in enumerate(lobes):
rois += rois_bl[t_lobe]
for (i0, t_roi) in enumerate(rois):
rois[i0] = hemi[0].upper() + " " + t_roi
rois = np.array(rois)
nrois = len(rois)
roi_clrs = [()] * nrois
ccnt = 0
for (i0, t_lobe) in enumerate(lobes):
for i1 in range(len(rois_bl[t_lobe])):
roi_clrs[ccnt] = lobeClrs[i0]
ccnt += 1
print("nrois = %d" % (nrois))
# === Load the matrix from the mat file === #
check_file(inMatFN)
condat = scipy.io.loadmat(inMatFN)
assert(condat.keys().count("mn_cmat") == 1)
assert(condat.keys().count("sprois") == 1)
trois = deepcopy(condat["sprois"])
trois = trois[0]
assert(len(trois) == nrois)
for (i0, t_roi) in enumerate(trois):
t_str_roi = str(trois[i0])
trois[i0] = t_str_roi.replace("[u'", "").replace("']", "")\
.replace("lh_", "L ").replace("rh_", "R ")
trois = list(trois)
idxr = []
for (i0, t_roi) in enumerate(rois):
idxr.append(trois.index(t_roi))
trois = np.array(trois)
tcon = deepcopy(condat["mn_cmat"][grp])
mn_con = tcon[0][0]
# mn_con = np.mean(tcon, axis=2)
mn_con = mn_con[idxr, :]
mn_con = mn_con[:, idxr]
# == Set the self-connections to zero == #
for i0 in range(nrois):
mn_con[i0][i0] = 0.0
# === === #
node_order = list(rois)
node_angles = circular_layout(rois, node_order, start_pos=0)
if np.isnan(vmax):
vmax = np.max(mn_con)
print("vmax = %.1f" % vmax)
# con = np.random.rand(nrois, nrois) # DEBUG
plot_connectivity_circle(mn_con, rois, node_angles=node_angles,
facecolor="w", textcolor="k",
node_colors=roi_clrs,
colormap="binary",
vmax=vmax,
fontsize=12,
title="Connectivity matrix: %s - %s" % (grp, hemi))
# === Save to tif file === #
figFN = os.path.join(FIG_DIR, "conn_mat_circle_%s_%s.png" % (grp, hemi))
pl.savefig(figFN, facecolor="w", format="png", dpi=200)
check_file(figFN)
print("INFO: Saved to image file: %s" % (figFN))
pl.show()
|
import os.path
from validator.contextgenerator import ContextGenerator
class ChromeManifest(object):
"""This class enables convenient parsing and iteration of
chrome.manifest files."""
def __init__(self, data, path):
self.context = ContextGenerator(data)
self.lines = data.split('\n')
# Extract the data from the triples in the manifest
triples = []
counter = 0
for line in self.lines:
line = line.strip()
counter += 1
# Skip weird lines.
if line.startswith('#'):
continue
triple = line.split(None, 2)
if not triple:
continue
elif len(triple) == 2:
triple.append('')
if len(triple) < 3:
continue
triples.append({'subject': triple[0],
'predicate': triple[1],
'object': triple[2],
'line': counter,
'filename': path,
'context': self.context})
self.triples = triples
def get_value(self, subject=None, predicate=None, object_=None):
"""Returns the first triple value matching the given subject,
predicate, and/or object"""
for triple in self.triples:
# Filter out non-matches
if (subject and triple['subject'] != subject) or \
(predicate and triple['predicate'] != predicate) or \
(object_ and triple['object'] != object_): # pragma: no cover
continue
# Return the first found.
return triple
return None
def get_objects(self, subject=None, predicate=None):
"""Returns a generator of objects that correspond to the
specified subjects and predicates."""
for triple in self.triples:
# Filter out non-matches
if ((subject and triple['subject'] != subject) or
(predicate and triple['predicate'] != predicate)):
continue
yield triple['object']
def get_triples(self, subject=None, predicate=None, object_=None):
"""Returns triples that correspond to the specified subject,
predicates, and objects."""
for triple in self.triples:
# Filter out non-matches
if subject is not None and triple['subject'] != subject:
continue
if predicate is not None and triple['predicate'] != predicate:
continue
if object_ is not None and triple['object'] != object_:
continue
yield triple
def get_applicable_overlays(self, error_bundle):
"""
Given an error bundle, a list of overlays that are present in the
current package or subpackage are returned.
"""
content_paths = self.get_triples(subject='content')
if not content_paths:
return set()
# Create some variables that will store where the applicable content
# instruction path references and where it links to.
chrome_path = ''
content_root_path = '/'
# Look through each of the listed packages and paths.
for path in content_paths:
chrome_name = path['predicate']
if not path['object']:
continue
path_location = path['object'].strip().split()[0]
# Handle jarred paths differently.
if path_location.startswith('jar:'):
if not error_bundle.is_nested_package:
continue
# Parse out the JAR and its location within the chrome.
split_jar_url = path_location[4:].split('!', 2)
# Ignore invalid/unsupported JAR URLs.
if len(split_jar_url) != 2:
continue
# Unpack the JAR URL.
jar_path, package_path = split_jar_url
# Ignore the instruction if the JAR it points to doesn't match
# up with the current subpackage tree.
if jar_path != error_bundle.package_stack[0]:
continue
chrome_path = self._url_chunk_join(chrome_name, package_path)
# content_root_path stays at the default: /
break
else:
# If we're in a subpackage, a content instruction referring to
# the root of the package obviously doesn't apply.
if error_bundle.is_nested_package:
continue
chrome_path = self._url_chunk_join(chrome_name, 'content')
content_root_path = '/%s/' % path_location.strip('/')
break
if not chrome_path:
return set()
applicable_overlays = set()
chrome_path = 'chrome://%s' % self._url_chunk_join(chrome_path + '/')
for overlay in self.get_triples(subject='overlay'):
if not overlay['object']:
error_bundle.error(
err_id=('chromemanifest', 'get_applicable_overalys',
'object'),
error='Overlay instruction missing a property.',
description='When overlays are registered in a chrome '
'manifest file, they require a namespace and '
'a chrome URL at minimum.',
filename=overlay['filename'],
line=overlay['line'],
context=self.context) #TODO(basta): Update this!
continue
overlay_url = overlay['object'].split()[0]
if overlay_url.startswith(chrome_path):
overlay_relative_path = overlay_url[len(chrome_path):]
applicable_overlays.add('/%s' %
self._url_chunk_join(content_root_path,
overlay_relative_path))
return applicable_overlays
def reverse_lookup(self, state, path):
"""
Returns a chrome URL for a given path, given the current package depth
in an error bundle.
State may either be an error bundle or the actual package stack.
"""
# Make sure the path starts with a forward slash.
if not path.startswith('/'):
path = '/%s' % path
# If the state is an error bundle, extract the package stack.
if not isinstance(state, list):
state = state.package_stack
content_paths = self.get_triples(subject='content')
for content_path in content_paths:
chrome_name = content_path['predicate']
if not content_path['object']:
continue
path_location = content_path['object'].split()[0]
if path_location.startswith('jar:'):
if not state:
continue
# Parse out the JAR and its location within the chrome.
split_jar_url = path_location[4:].split('!', 2)
# Ignore invalid/unsupported JAR URLs.
if len(split_jar_url) != 2:
continue
# Unpack the JAR URL.
jar_path, package_path = split_jar_url
if jar_path != state[0]:
continue
return 'chrome://%s' % self._url_chunk_join(chrome_name,
package_path,
path)
else:
if state:
continue
path_location = '/%s/' % path_location.strip('/')
rel_path = os.path.relpath(path, path_location)
if rel_path.startswith('../') or rel_path == '..':
continue
return 'chrome://%s' % self._url_chunk_join(chrome_name,
rel_path)
return None
def _url_chunk_join(self, *args):
"""Join the arguments together to form a predictable URL chunk."""
# Strip slashes from either side of each path piece.
pathlets = map(lambda s: s.strip('/'), args)
# Remove empty pieces.
pathlets = filter(None, pathlets)
url = '/'.join(pathlets)
# If this is a directory, add a trailing slash.
if args[-1].endswith('/'):
url = '%s/' % url
return url
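# --- Usage sketch (illustrative only) ---
# A hypothetical manifest string exercising the parser above; real manifests
# and chrome URLs will differ.
if __name__ == '__main__':
    sample = ('content myaddon chrome/content/\n'
              'overlay chrome://browser/content/browser.xul '
              'chrome://myaddon/content/overlay.xul\n')
    manifest = ChromeManifest(sample, 'chrome.manifest')
    print(manifest.get_value(subject='content'))
    print(list(manifest.get_objects(subject='overlay')))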
|
import torch
from torch.autograd import Variable
import torch.utils.data as Data
import torch.nn as nn
import matplotlib.pyplot as plt
import torch.optim as optim
from matplotlib import cm
import numpy as np
import copy
torch.manual_seed(1) # reproducible
def franke(X, Y):
term1 = .75*torch.exp(-((9*X - 2).pow(2) + (9*Y - 2).pow(2))/4)
term2 = .75*torch.exp(-((9*X + 1).pow(2))/49 - (9*Y + 1)/10)
term3 = .5*torch.exp(-((9*X - 7).pow(2) + (9*Y - 3).pow(2))/4)
term4 = .2*torch.exp(-(9*X - 4).pow(2) - (9*Y - 7).pow(2))
f = term1 + term2 + term3 - term4
dfx = -2*(9*X - 2)*9/4 * term1 - 2*(9*X + 1)*9/49 * term2 + \
-2*(9*X - 7)*9/4 * term3 + 2*(9*X - 4)*9 * term4
dfy = -2*(9*Y - 2)*9/4 * term1 - 9/10 * term2 + \
-2*(9*Y - 3)*9/4 * term3 + 2*(9*Y - 7)*9 * term4
return f, dfx, dfy
class Net(nn.Module):
def __init__(self,inp,out, activation, num_hidden_units=100, num_layers=1):
super(Net, self).__init__()
self.fc1 = nn.Linear(inp, num_hidden_units, bias=True)
self.fc2 = nn.ModuleList()
for i in range(num_layers):
self.fc2.append(nn.Linear(num_hidden_units, num_hidden_units, bias=True))
self.fc3 = nn.Linear(num_hidden_units, out, bias=True)
self.activation = activation
def forward(self, x):
x = self.fc1(x)
x = self.activation(x)
for fc in self.fc2:
x = fc(x)
x = self.activation(x)
x = self.fc3(x)
return x
def predict(self, x):
self.eval()
y = self(x)
x = x.cpu().numpy().flatten()
y = y.cpu().detach().numpy().flatten()
return [x, y]
def init_weights(m):
classname = m.__class__.__name__
# for every Linear layer in a model..
if classname.find('Linear') != -1:
# apply a uniform distribution to the weights and a bias=0
n = m.in_features
y = 1.0 / np.sqrt(n)
m.weight.data.uniform_(-y, y)
m.bias.data.fill_(0)
def train(lam1, loader, EPOCH, BATCH_SIZE):
state = copy.deepcopy(net.state_dict())
best_loss = np.inf
lossTotal = np.zeros((EPOCH, 1))
lossRegular = np.zeros((EPOCH, 1))
lossDerivatives = np.zeros((EPOCH, 1))
# start training
for epoch in range(EPOCH):
scheduler.step()
epoch_mse0 = 0.0
epoch_mse1 = 0.0
for step, (batch_x, batch_y) in enumerate(loader): # for each training step
b_x = Variable(batch_x)
b_y = Variable(batch_y)
net.eval()
b_x.requires_grad = True
output0 = net(b_x)
output0.sum().backward(retain_graph=True, create_graph=True)
output1 = b_x.grad
b_x.requires_grad = False
net.train()
mse0 = loss_func(output0, b_y[:,0:1])
mse1 = loss_func(output1, b_y[:,1:3])
epoch_mse0 += mse0.item() * BATCH_SIZE
epoch_mse1 += mse1.item() * BATCH_SIZE
loss = mse0 + lam1 * mse1
optimizer.zero_grad() # clear gradients for next train
loss.backward() # backpropagation, compute gradients
optimizer.step() # apply gradients
epoch_mse0 /= num_data
epoch_mse1 /= num_data
epoch_loss = epoch_mse0+lam1*epoch_mse1
lossTotal[epoch] = epoch_loss
lossRegular[epoch] = epoch_mse0
lossDerivatives[epoch] = epoch_mse1
if epoch%50==0:
print('epoch', epoch,
'lr', '{:.7f}'.format(optimizer.param_groups[0]['lr']),
'mse0', '{:.5f}'.format(epoch_mse0),
'mse1', '{:.5f}'.format(epoch_mse1),
'loss', '{:.5f}'.format(epoch_loss))
if epoch_loss < best_loss:
best_loss = epoch_loss
state = copy.deepcopy(net.state_dict())
#state = copy.deepcopy(net.state_dict())
print('Best score:', best_loss)
return state, lossTotal, lossRegular, lossDerivatives
def getDerivatives(x):
x1 = x.requires_grad_(True)
output = net.eval()(x1)
n_points = output.shape[0]  # avoid shadowing the torch.nn alias
gradx = np.zeros((n_points, 2))
for ii in range(n_points):
output[ii].backward(retain_graph=True)
gradx[ii, :] = x1.grad[ii]
return gradx
def plotLoss(lossTotal, lossRegular, lossDerivatives):
fig, ax = plt.subplots(1, 1, dpi=120)
plt.semilogy(lossTotal / lossTotal[0], label='Total loss')
plt.semilogy(lossRegular[:, 0] / lossRegular[0], label='Regular loss')
plt.semilogy(lossDerivatives[:, 0] / lossDerivatives[0], label='Derivatives loss')
ax.set_xlabel("epochs")
ax.set_ylabel("L/L0")
ax.legend()
fig.subplots_adjust(left=0.1, right=0.9, bottom=0.15, top=0.9, wspace=0.3, hspace=0.2)
plt.savefig("figures/Loss.png")
plt.show()
def plotPredictions(prediction,gradx, f,dfx,dfy, extent):
# Initialize plots
fig, ax = plt.subplots(2, 3, figsize=(14, 10))
ax[0, 0].imshow(f, extent=extent)
ax[0, 0].set_title('True values')
psm_f = ax[0, 0].pcolormesh(f, cmap=cm.jet, vmin=np.amin(f.detach().numpy()), vmax=np.amax(f.detach().numpy()))
fig.colorbar(psm_f, ax=ax[0, 0])
ax[0, 0].set_aspect('auto')
ax[0, 1].imshow(dfx, extent=extent, cmap=cm.jet)
ax[0, 1].set_title('True x-derivatives')
psm_dfx = ax[0, 1].pcolormesh(dfx, cmap=cm.jet, vmin=np.amin(dfx.detach().numpy()), vmax=np.amax(dfx.detach().numpy()))
fig.colorbar(psm_dfx, ax=ax[0, 1])
ax[0, 1].set_aspect('auto')
ax[0, 2].imshow(dfy, extent=extent, cmap=cm.jet)
ax[0, 2].set_title('True y-derivatives')
psm_dfy = ax[0, 2].pcolormesh(dfy, cmap=cm.jet, vmin=np.amin(dfy.detach().numpy()), vmax=np.amax(dfy.detach().numpy()))
fig.colorbar(psm_dfy, ax=ax[0, 2])
ax[0, 2].set_aspect('auto')
ax[1, 0].imshow(prediction[:, 0].detach().numpy().reshape(nx_test, ny_test), extent=extent, cmap=cm.jet)
ax[1, 0].set_title('Predicted values')
fig.colorbar(psm_f, ax=ax[1, 0])
ax[1, 0].set_aspect('auto')
ax[1, 1].imshow(gradx[:, 0].reshape(nx_test, ny_test), extent=extent, cmap=cm.jet)
ax[1, 1].set_title('Predicted x-derivatives')
fig.colorbar(psm_dfx, ax=ax[1, 1])
ax[1, 1].set_aspect('auto')
ax[1, 2].imshow(gradx[:, 1].reshape(nx_test, ny_test), extent=extent, cmap=cm.jet)
ax[1, 2].set_title('Predicted y-derivatives')
fig.colorbar(psm_dfy, ax=ax[1, 2])
ax[1, 2].set_aspect('auto')
plt.savefig("figures/PredictionOverTestPoints.png")
plt.show()
if __name__ == "__main__":
nx_train = 10
ny_train = 10
xv, yv = torch.meshgrid([torch.linspace(0, 1, nx_train), torch.linspace(0, 1, ny_train)])
train_x = torch.cat((
xv.contiguous().view(xv.numel(), 1),
yv.contiguous().view(yv.numel(), 1)),
dim=1
)
f, dfx, dfy = franke(train_x[:, 0], train_x[:, 1])
train_y = torch.stack([f, dfx, dfy], -1).squeeze(1)
x, y = Variable(train_x), Variable(train_y)
net = Net(inp=2, out=1, activation=nn.Tanh(), num_hidden_units=256, num_layers=2)
optimizer = torch.optim.Adam(net.parameters(), lr=3e-4, weight_decay=1e-6)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1000, gamma=0.5)
loss_func = torch.nn.MSELoss() # this is for regression mean squared loss
import torch.multiprocessing
torch.multiprocessing.set_sharing_strategy('file_system')
BATCH_SIZE = 100
EPOCH = 10000
num_data = train_x.shape[0]
torch_dataset = Data.TensorDataset(x, y)
loader = Data.DataLoader(
dataset=torch_dataset,
batch_size=BATCH_SIZE,
shuffle=False, num_workers=2, )
# Define derivative loss component to total loss
lam1 = .5
state, lossTotal, lossRegular, lossDerivatives = train(lam1, loader, EPOCH, BATCH_SIZE)
net.load_state_dict(state)
# Test points
nx_test, ny_test = 40, 40
xv, yv = torch.meshgrid([torch.linspace(0, 1, nx_test), torch.linspace(0, 1, ny_test)])
f, dfx, dfy = franke(xv, yv)
test_x = torch.stack([xv.reshape(nx_test * ny_test, 1), yv.reshape(nx_test * ny_test, 1)], -1).squeeze(1)
gradx = getDerivatives(test_x)
plotLoss(lossTotal, lossRegular, lossDerivatives)
prediction = net(test_x)
extent = (xv.min(), xv.max(), yv.max(), yv.min())
plotPredictions(prediction, gradx, f, dfx, dfy, extent)
|
from struct import unpack
from LnkParse3.text_processor import TextProcessor
"""
An ItemID is an element in an IDList structure (section 2.2.1). The data stored
in a given ItemID is defined by the source that corresponds to the location in
the target namespace of the preceding ItemIDs. This data uniquely identifies
the items in that part of the namespace.
------------------------------------------------------------------
| 0-7b | 8-15b | 16-23b | 24-31b |
------------------------------------------------------------------
| ItemIDSize | Data (variable) |
------------------------------------------------------------------
| ... |
------------------------------------------------------------------
"""
class LnkTargetBase:
SHELL_ITEM_SHEL_FS_FOLDER = {
0x01: "Is directory",
0x02: "Is file",
0x04: "Has Unicode strings",
0x08: "Unknown",
0x80: "Has CLSID",
}
SIZE_OF_TARGET_SIZE = 2
def __init__(self, indata=None, cp=None):
self._target = {}
self.cp = cp
self._raw = indata
self.text_processor = TextProcessor(cp=self.cp)
start = self.SIZE_OF_TARGET_SIZE
end = start + self.size()
self._raw_target = self._raw[start:end]
def as_item(self):
return {
"class": self.name,
}
def size(self):
"""ItemIDSize (2 bytes):
A 16-bit, unsigned integer that specifies the size, in bytes, of the
ItemID structure, including the ItemIDSize field.
"""
start, end = 0, 2
size = unpack("<H", self._raw[start:end])[0]
return size
def class_type_indicator(self):
start, end = 0, 1
flags = unpack("<B", self._raw_target[start:end])[0]
return flags
def has_unicode_strings(self):
inv = {v: k for k, v in self.SHELL_ITEM_SHEL_FS_FOLDER.items()}
mask = inv["Has Unicode strings"]
return bool(self.class_type_indicator() & mask)
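# --- Illustration (hypothetical bytes) ---
# A small sketch of the ItemID layout documented above: a 2-byte little-endian
# ItemIDSize followed by the variable data block. The byte values are made up.
if __name__ == "__main__":
    from struct import pack

    raw = pack("<H", 5) + bytes([0x01, 0x00, 0x00])  # ItemIDSize = 5, then 3 data bytes
    item = LnkTargetBase(indata=raw, cp="utf-8")
    print(item.size())                   # 5
    print(item.class_type_indicator())   # 1 -> "Is directory" flag
    print(item.has_unicode_strings())    # False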
|
import sys
from pathlib import Path
from tools import human_readable, progress_bar, get_func_exec_time
from datetime import datetime
class FileCrawler:
_directories: int = 0
_files: int = 0
_hidden_files: int = 0
_total_size: int = 0
_stats_link = "\x1b]8;;file://./crawls-stats.txt/\acrawls-stats.txt\x1b]8;;\a"
_path = Path(sys.argv[1])
@classmethod
def _save_path_info(cls) -> None:
lines = [
f'\n----- In your {cls._path} path, you have:\n',
f'* {human_readable(cls._directories)} directories\n',
f'* {human_readable(cls._files)} files\n',
f'* {human_readable(cls._hidden_files)} hidden files\n',
f'Total size: {human_readable(cls._total_size, is_file_sys=True)}\n',
f'Information generated on {datetime.today()}\n'
]
with open('crawls-stats.txt', 'a+', encoding='utf8') as file:
file.writelines(lines)
@classmethod
def _update_hidden_files(cls, path: Path) -> None:
if path.is_file() and path.name.startswith('.'):
cls._hidden_files += 1
@classmethod
def _update_files(cls, path: Path) -> None:
if path.is_file():
cls._files += 1
@classmethod
def _update_directories(cls, path: Path) -> None:
if path.is_dir():
cls._directories += 1
@classmethod
def _update_path_info(cls, path: Path) -> None:
if path.exists():
cls._total_size += path.stat().st_size
cls._update_directories(path)
cls._update_files(path)
cls._update_hidden_files(path)
@classmethod
@get_func_exec_time
def fetch_info(cls) -> None:
if not cls._path.exists() or not cls._path.is_absolute() or not cls._path.is_dir():
return print('The path must exist, must be absolute and must be a directory.')
print('\nLoading files...\n')
try:
for path in progress_bar(list(cls._path.glob('**/*'))):
cls._update_path_info(path)
except (PermissionError, OSError):
print("\n\n🤕️ Due to system errors, the process was interrupted.")
cls._save_path_info()
print('\n🤗️ Ctrl+click on ->', cls._stats_link, 'to display info.', '\n')
if __name__ == '__main__':
try:
FileCrawler.fetch_info()
except IndexError:
print('[command-error]: $ python file_crawler.py your_path')
|
from understat.constants import BASE_URL, LEAGUE_URL, PLAYER_URL, TEAM_URL
from understat.utils import (filter_by_positions, filter_data, get_data,
to_league_name)
class Understat():
def __init__(self, session):
self.session = session
async def get_stats(self, options=None, **kwargs):
"""Returns a list containing stats of every league, grouped by month.
:param options: Options to filter the data by, defaults to None.
:type options: dict, optional
:return: List of dictionaries.
:rtype: list
"""
stats = await get_data(self.session, BASE_URL, "statData")
if options:
kwargs = options
filtered_data = filter_data(stats, kwargs)
return filtered_data
async def get_teams(self, league_name, season, options=None, **kwargs):
"""Returns a list containing information about all the teams in
the given league in the given season.
:param league_name: The league's name.
:type league_name: str
:param season: The season.
:param options: Options to filter the data by, defaults to None.
:type options: dict, optional
:type season: str or int
:return: A list of the league's table as seen on Understat's
league overview.
:rtype: list
"""
url = LEAGUE_URL.format(to_league_name(league_name), season)
teams_data = await get_data(self.session, url, "teamsData")
if options:
kwargs = options
filtered_data = filter_data(list(teams_data.values()), kwargs)
return filtered_data
async def get_league_players(
self, league_name, season, options=None, **kwargs):
"""Returns a list containing information about all the players in
the given league in the given season.
:param league_name: The league's name.
:type league_name: str
:param season: The season.
:param options: Options to filter the data by, defaults to None.
:type options: dict, optional
:type season: str or int
:return: A list of the players as seen on Understat's league overview.
:rtype: list
"""
url = LEAGUE_URL.format(to_league_name(league_name), season)
players_data = await get_data(self.session, url, "playersData")
if options:
kwargs = options
filtered_data = filter_data(players_data, kwargs)
return filtered_data
async def get_league_results(
self, league_name, season, options=None, **kwargs):
"""Returns a list containing information about all the results
(matches) played by the teams in the given league in the given season.
:param league_name: The league's name.
:type league_name: str
:param season: The season.
:type season: str or int
:param options: Options to filter the data by, defaults to None.
:type options: dict, optional
:return: A list of the results as seen on Understat's league overview.
:rtype: list
"""
url = LEAGUE_URL.format(to_league_name(league_name), season)
dates_data = await get_data(self.session, url, "datesData")
results = [r for r in dates_data if r["isResult"]]
if options:
kwargs = options
filtered_data = filter_data(results, kwargs)
return filtered_data
async def get_league_fixtures(
self, league_name, season, options=None, **kwargs):
"""Returns a list containing information about all the upcoming
fixtures of the given league in the given season.
:param league_name: The league's name.
:type league_name: str
:param season: The season.
:type season: str or int
:param options: Options to filter the data by, defaults to None.
:type options: dict, optional
:return: A list of the fixtures as seen on Understat's league overview.
:rtype: list
"""
url = LEAGUE_URL.format(to_league_name(league_name), season)
dates_data = await get_data(self.session, url, "datesData")
fixtures = [f for f in dates_data if not f["isResult"]]
if options:
kwargs = options
filtered_data = filter_data(fixtures, kwargs)
return filtered_data
async def get_player_shots(self, player_id, options=None, **kwargs):
"""Returns the player with the given ID's shot data.
:param player_id: The player's Understat ID.
:type player_id: int or str
:param options: Options to filter the data by, defaults to None.
:type options: dict, optional
:return: List of the player's shot data.
:rtype: list
"""
url = PLAYER_URL.format(player_id)
shots_data = await get_data(self.session, url, "shotsData")
if options:
kwargs = options
filtered_data = filter_data(shots_data, kwargs)
return filtered_data
async def get_player_matches(self, player_id, options=None, **kwargs):
"""Returns the player with the given ID's matches data.
:param player_id: The player's Understat ID.
:type player_id: int or str
:param options: Options to filter the data by, defaults to None.
:type options: dict, optional
:return: List of the player's matches data.
:rtype: list
"""
url = PLAYER_URL.format(player_id)
matches_data = await get_data(self.session, url, "matchesData")
if options:
kwargs = options
filtered_data = filter_data(matches_data, kwargs)
return filtered_data
async def get_player_stats(self, player_id, positions=None):
"""Returns the player with the given ID's min / max stats, per
position(s).
:param player_id: The player's Understat ID.
:type player_id: int or str
:param positions: Positions to filter the data by, defaults to None.
:type positions: list, optional
:return: List of the player's stats per position.
:rtype: list
"""
url = PLAYER_URL.format(player_id)
player_stats = await get_data(self.session, url, "minMaxPlayerStats")
player_stats = filter_by_positions(player_stats, positions)
return player_stats
async def get_player_grouped_stats(self, player_id):
"""Returns the player with the given ID's grouped stats (as seen at
the top of a player's page).
:param player_id: The player's Understat ID.
:type player_id: int or str
:return: Dictionary of the player's grouped stats.
:rtype: dict
"""
url = PLAYER_URL.format(player_id)
player_stats = await get_data(self.session, url, "groupsData")
return player_stats
async def get_team_stats(self, team_name, season):
"""Returns a team's stats, as seen on their page on Understat, in the
given season.
:param team_name: A team's name, e.g. Manchester United.
:type team_name: str
:param season: A season / year, e.g. 2018.
:type season: int or str
:return: A dictionary containing a team's stats.
:rtype: dict
"""
url = TEAM_URL.format(team_name.replace(" ", "_"), season)
team_stats = await get_data(self.session, url, "statisticsData")
return team_stats
async def get_team_results(
self, team_name, season, options=None, **kwargs):
"""Returns a team's results in the given season.
:param team_name: A team's name.
:type team_name: str
:param season: The season.
:type season: int or str
:param options: Options to filter the data by, defaults to None.
:type options: dict, optional
:return: List of the team's results in the given season.
:rtype: list
"""
url = TEAM_URL.format(team_name.replace(" ", "_"), season)
dates_data = await get_data(self.session, url, "datesData")
results = [r for r in dates_data if r["isResult"]]
if options:
kwargs = options
filtered_data = filter_data(results, kwargs)
return filtered_data
async def get_team_fixtures(
self, team_name, season, options=None, **kwargs):
"""Returns a team's upcoming fixtures in the given season.
:param team_name: A team's name.
:type team_name: str
:param season: The season.
:type season: int or str
:param options: Options to filter the data by, defaults to None.
:type options: dict, optional
:return: List of the team's upcoming fixtures in the given season.
:rtype: list
"""
url = TEAM_URL.format(team_name.replace(" ", "_"), season)
dates_data = await get_data(self.session, url, "datesData")
fixtures = [f for f in dates_data if not f["isResult"]]
if options:
kwargs = options
filtered_data = filter_data(fixtures, kwargs)
return filtered_data
async def get_team_players(
self, team_name, season, options=None, **kwargs):
"""Returns a team's player statistics in the given season.
:param team_name: A team's name.
:type team_name: str
:param season: The season.
:type season: int or str
:param options: Options to filter the data by, defaults to None.
:type options: dict, optional
:return: List of the team's players' statistics in the given season.
:rtype: list
"""
url = TEAM_URL.format(team_name.replace(" ", "_"), season)
players_data = await get_data(self.session, url, "playersData")
if options:
kwargs = options
filtered_data = filter_data(players_data, kwargs)
return filtered_data
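# --- Usage sketch (illustrative only) ---
# Assumes aiohttp is installed and understat.com is reachable; the league name
# and season below are arbitrary examples.
if __name__ == "__main__":
    import asyncio
    import aiohttp

    async def _demo():
        async with aiohttp.ClientSession() as session:
            understat = Understat(session)
            teams = await understat.get_teams("epl", 2018)
            print(teams[:1])

    asyncio.run(_demo())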
|
from django.urls import path
from .views import SharedImageListView,FriendsListView
urlpatterns = [
path('get-shared-img/', SharedImageListView.as_view(), name="get-share"),
path('get-friends/', FriendsListView.as_view(), name="get-friends"),
]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
from smorest_sfs.utils.text import camel_to_snake
@pytest.mark.parametrize(
"camel, snake",
[
("AdminAvator", "admin_avator"),
("getHTTPResponseCode", "get_http_response_code"),
("HTTPResponseCodeXYZ", "http_response_code_xyz"),
],
)
def test_check_ext_success(camel: str, snake: str) -> None:
assert camel_to_snake(camel) == snake
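# A regex-based sketch of one common camel_to_snake approach (not necessarily
# the implementation in smorest_sfs.utils.text); it passes the cases above.
import re


def _camel_to_snake_sketch(name: str) -> str:
    # Split an acronym from the word that follows it: "HTTPResponse" -> "HTTP_Response".
    partial = re.sub(r"(.)([A-Z][a-z]+)", r"\1_\2", name)
    # Split a lowercase letter or digit from the capital that follows it.
    return re.sub(r"([a-z0-9])([A-Z])", r"\1_\2", partial).lower()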
|
from Views import stok_View as stokView
from Controllers import stok_Controller as stokController
stokView(stokController())
|
from selenium import webdriver
import os
result=os.system("ls -l ")
print(result)
driver=webdriver.Firefox()
driver.get("http:/www.126.com")
driver.implicitly_wait(10)
driver.find_element_by_id("lbNormal").click()
try:
driver.find_element_by_css_selector(".j-inputtext dlemail").send_keys("jslzsy")
driver.find_element_by_name("password").send_keys("jiangsulzsy")
driver.find_element_by_id("dologin").click()
except BaseException as msg:
print(msg)
driver.quit()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import sys
import torch
import numpy as np
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
import torchvision.transforms as transforms
parser = argparse.ArgumentParser(description='PyTorch GMVAE')
# TRAINING PARAMS
parser.add_argument('--epochs', type=int, default=1500, metavar='',
help='Number of epochs for training (default: 1500)')
parser.add_argument('--batch_size', type=int, default=100, metavar='',
help='Batch size for SGD (default: 100)')
parser.add_argument('--lrate', type=float, default=0.01, metavar="",
help="Learning rate (default: 0.01")
parser.add_argument('--use_cuda', action='store_true', dest='use_cuda',
help="Shall cuda be used (default: False)")
argv = parser.parse_args()
sys.argv = [sys.argv[0]]
from ummon import *
import ummon.utils as uu
from ummon.unsupervised import UnsupervisedTrainer
from ummon.logger import Logger
from ummon.trainingstate import Trainingstate
from gmvae import GMVAE
from gmvae import NegVariationalLowerBound
torch.manual_seed(4)
if __name__ == '__main__':
def binarize(x, double = False):
'''
Binarize Image
'''
m = torch.distributions.Uniform(0, 1)
xb = m.sample(x.size())
bin_image = (x > xb).float() * 1
if double == True:
return bin_image.double()
else:
return bin_image
# MNIST
transform = transforms.Compose([transforms.ToTensor(), binarize])
mnist_data = MNIST("/ext/data/mnist", train=True, transform=transform, target_transform=None, download=True)
mnist_data_test = MNIST("/ext/data/mnist", train=False, transform=transform, target_transform=None, download=True)
trainloader = DataLoader(mnist_data, batch_size=argv.batch_size, shuffle=True, sampler=None, batch_sampler=None, num_workers=2)
# MODEL
gmvae = GMVAE()
# INSTANTIATE LOSS
criterion = NegVariationalLowerBound(gmvae, size_average=False)
# INSTANTIATE OPTIMIZER
optimizer = torch.optim.Adam(gmvae.parameters())
# LOAD TRAINING STATE
try:
ts = Trainingstate("MNIST_GMVAE.pth.tar")
except FileNotFoundError:
ts = Trainingstate()
with Logger(loglevel=10, logdir='.', log_batch_interval=20) as lg:
# EARLY STOPPING
earlystop = StepLR_earlystop(optimizer, ts, gmvae, step_size=100, nsteps=5, patience=20, logger=lg)
# CREATE A TRAINER
my_trainer = UnsupervisedTrainer(lg,
gmvae,
criterion,
optimizer,
scheduler = earlystop,
trainingstate = ts,
model_filename="MNIST_GMVAE",
use_cuda= argv.use_cuda,
after_eval_hook = criterion.compute_special_losses)
# START TRAINING
trainingsstate = my_trainer.fit(dataloader_training=trainloader,
epochs=argv.epochs,
validation_set=mnist_data_test)
# def register_nan_checks_(model):
# def check_grad(module, input, output):
# if not hasattr(module, "weight"):
# return
# if module.weight is None or module.weight.grad is None:
# return
# # if (module.weight.grad.abs() == 0).any():
# # print('Gradient in ' + type(module).__name__)
# # print(module.weight.grad)
# # print(module.extra_repr)
# #if (module.weight.grad.abs() > 1.).any():
# # print('Gradient in ' + type(module).__name__)
# # print(module.weight.grad)
# # print(module.extra_repr)
# if (module.weight.grad != module.weight.grad).any():
# print('NaN Gradients in ' + type(module).__name__)
# print(module.weight.grad)
# print(module.extra_repr)
# if module.weight.grad.abs().max() > 10000.:
# print('Exploding Gradients in ' + type(module).__name__)
# print(module.weight.grad)
# print(module.extra_repr)
# handles = []
# for module in model.modules():
# handles.append(module.register_forward_hook(check_grad))
# return handles
#
# register_nan_checks_(gmvae)
|
__author__ = 'stephen'
import os,sys
import numpy as np
HK_DataMiner_Path = os.path.relpath(os.pardir)
#HK_DataMiner_Path = os.path.abspath("/home/stephen/Dropbox/projects/work-2015.5/HK_DataMiner/")
sys.path.append(HK_DataMiner_Path)
from utils import plot_cluster, plot_each_cluster, XTCReader, plot_landscape
import argparse
cli = argparse.ArgumentParser()
cli.add_argument('-t', '--trajListFns', default = 'trajlist',
help='List of trajectory files to read in, separated by spaces.')
cli.add_argument('-a', '--atomListFns', default='atom_indices',
help='List of atom index files to read in, separated by spaces.')
cli.add_argument('-g', '--topology', default='native.pdb', help='topology file.')
cli.add_argument('-o', '--homedir', help='Home dir.', default=".", type=str)
cli.add_argument('-e', '--iext', help='''The file extension of input trajectory
files. Must be a filetype that mdtraj.load() can recognize.''',
default="xtc", type=str)
cli.add_argument('-c', '--assignments', type=str)
cli.add_argument('-s', '--centers', type=str)
cli.add_argument('-l', '--traj_len', type=str, default='traj_len.txt')
args = cli.parse_args()
trajlistname = args.trajListFns
atom_indicesname = args.atomListFns
trajext = args.iext
File_TOP = args.topology
homedir = args.homedir
# ===========================================================================
# Reading phi angles and psi angles data from XTC files
if os.path.isfile("./phi_angles.txt") and os.path.isfile("./psi_angles.txt"):
phi_angles = np.loadtxt("./phi_angles.txt", dtype=np.float32)
psi_angles = np.loadtxt("./psi_angles.txt", dtype=np.float32)
else:
trajreader = XTCReader(trajlistname, atom_indicesname, homedir, trajext, File_TOP)
trajs = trajreader.trajs
traj_len = trajreader.traj_len
np.savetxt("./traj_len.txt", traj_len, fmt="%d")
phi_angles, psi_angles = trajreader.get_phipsi(trajs, psi=[6, 8, 14, 16], phi=[4, 6, 8, 14])
np.savetxt("./phi_angles.txt", phi_angles, fmt="%f")
np.savetxt("./psi_angles.txt", psi_angles, fmt="%f")
# ===========================================================================
# Reading split assignments and the length of each traj
assignments_dir = args.assignments
labels = np.loadtxt(assignments_dir, dtype=np.int32)
#centers_dir = args.centers
#centers = np.loadtxt(centers_dir, dtype=np.int32)
#plot_cluster(labels=labels, phi_angles=phi_angles, psi_angles=psi_angles, name=dir+'Dihedrals'+assignments_dir)
#phi_ctr = phi_angles[centers]
#psi_ctr = psi_angles[centers]
#labels_ctr = labels[centers]
#plot_cluster(labels=labels_lf, phi_angles=phi_new, psi_angles=psi_new, name=clustering_name)
name = assignments_dir[:-4] + '_Energy_Landscape'
#print "name:", name
#plot_cluster_size_distribution(populations=populations, name=name)
plot_landscape(labels=None, phi_angles=phi_angles, psi_angles=psi_angles, phi_ctr=None, psi_ctr=None, name=name)
#plot_landscape(labels=None, phi_angles=phi_angles, psi_angles=psi_angles)
|
from selenium.webdriver.remote.webelement import WebElement
import conftest
from locators.course_page_locators import CoursePageLocators
from locators.course_page_locators import ManageCoursePageLocators
from locators.login_page_locators import LoginPageLocators
from models.course import CourseData
from pages.base_page import BasePage
class CoursePage(BasePage):
def sidebar_button(self) -> WebElement:
return self.find_element(LoginPageLocators.SIDEBAR_BUTTON)
def is_sidebar_open(self) -> bool:
element = self.find_element(LoginPageLocators.SIDEBAR_MENU)
return self.element_is_visible(element)
def administration_button(self) -> WebElement:
return self.get_clickable_element(LoginPageLocators.ADMINISTRATION_BUTTON)
def courses_button(self) -> WebElement:
return self.find_element(LoginPageLocators.COURSES_HEADER)
def manage_courses_button(self) -> WebElement:
return self.find_element(LoginPageLocators.MANAGE_COURSES_BUTTON)
def create_course_button(self) -> WebElement:
return self.find_element(ManageCoursePageLocators.CREATE_COURSE_BUTTON)
def is_course_exist(self, data: CourseData) -> bool:
elements = self.find_elements(ManageCoursePageLocators.COURSES_LIST_DATA)
courses_list = [c.text for c in elements]
return data.full_course_name in courses_list
def go_to_design_courses_frame(self) -> None:
if not self.is_sidebar_open():
self.click_element(self.sidebar_button())
self.click_element(self.administration_button())
self.click_element(self.courses_button())
self.click_element(self.manage_courses_button())
self.click_element(self.create_course_button())
def full_course_name_input(self) -> WebElement:
return self.find_element(CoursePageLocators.FULL_COURSE_NAME)
def short_course_name_input(self) -> WebElement:
return self.find_element(CoursePageLocators.SHORT_COURSE_NAME)
def course_end_day_select(self) -> WebElement:
return self.find_element(CoursePageLocators.END_DAY)
def course_end_month_select(self) -> WebElement:
return self.find_element(CoursePageLocators.END_MONTH)
def course_end_year_select(self) -> WebElement:
return self.find_element(CoursePageLocators.END_YEAR)
def course_end_hour_select(self) -> WebElement:
return self.find_element(CoursePageLocators.END_HOUR)
def course_end_minute_select(self) -> WebElement:
return self.find_element(CoursePageLocators.END_MINUTE)
def course_id_input(self) -> WebElement:
return self.find_element(CoursePageLocators.COURSE_ID)
def course_description_input(self) -> WebElement:
return self.find_element(CoursePageLocators.COURSE_DESCRIPTION)
def course_format_button(self) -> WebElement:
return self.find_element(CoursePageLocators.COURSE_FORMAT)
def course_section_number_select(self) -> WebElement:
return self.find_element(CoursePageLocators.SECTION_NUMBER)
def course_appearance_button(self) -> WebElement:
return self.find_element(CoursePageLocators.COURSE_APPEARANCE)
def course_language_select(self) -> WebElement:
return self.find_element(CoursePageLocators.COURSE_LANGUAGE)
def grade_display_select(self) -> WebElement:
return self.find_element(CoursePageLocators.GRADE_DISPLAY_OPTION)
def files_and_downloads_button(self) -> WebElement:
return self.find_element(CoursePageLocators.FILES_AND_DOWNLOADS)
def max_file_size_select(self) -> WebElement:
return self.find_element(CoursePageLocators.MAX_FILE_SIZE)
def role_rename_button(self) -> WebElement:
return self.find_element(CoursePageLocators.ROLE_RENAME_MENU)
def course_creator_name_input(self) -> WebElement:
return self.find_element(CoursePageLocators.COURSE_CREATOR_NAME)
def course_teacher_name_input(self) -> WebElement:
return self.find_element(CoursePageLocators.COURSE_TEACHER_NAME)
def course_student_name_input(self) -> WebElement:
return self.find_element(CoursePageLocators.COURSE_STUDENT_NAME)
def save_return_button(self) -> WebElement:
return self.find_element(CoursePageLocators.SAVE_AND_RETURN_BUTTON)
def choose_created_course_button(self, course_name):
return self.find_element(
ManageCoursePageLocators.get_course_locator(course_name)
)
def delete_course_button(self) -> WebElement:
return self.find_element(ManageCoursePageLocators.DELETE_COURSE_BUTTON)
def confirm_delete_button(self) -> WebElement:
return self.find_element(ManageCoursePageLocators.CONFIRM_DELETE_BUTTON)
def course_delete_confirmation(self) -> WebElement:
return self.find_element(ManageCoursePageLocators.COURSE_DELETE_CONFIRMATION)
def continue_button(self) -> WebElement:
return self.find_element(ManageCoursePageLocators.CONTINUE_BUTTON)
def full_name_error_message(self) -> WebElement:
return self.find_element(CoursePageLocators.EMPTY_FULL_NAME_ERROR)
def short_name_error_message(self) -> WebElement:
return self.find_element(CoursePageLocators.EMPTY_SHORT_NAME_ERROR)
def create_course(self, data: CourseData) -> None:
self.go_to_design_courses_frame()
self.fill_element(self.full_course_name_input(), data.full_course_name)
self.fill_element(self.short_course_name_input(), data.short_course_name)
self.select_element(self.course_end_day_select(), value=data.end_day)
self.select_element(self.course_end_month_select(), value=data.end_month)
self.select_element(self.course_end_year_select(), value=data.end_year)
self.select_element(self.course_end_hour_select(), value=data.end_hour)
self.select_element(self.course_end_minute_select(), value=data.end_minute)
self.fill_element(self.course_id_input(), data.course_id)
self.fill_element(self.course_description_input(), data.course_description)
self.click_element(self.course_format_button())
self.select_element(
self.course_section_number_select(), value=data.section_number
)
self.click_element(self.course_appearance_button())
self.select_element(self.course_language_select(), value=data.course_language)
self.select_element(self.grade_display_select(), text=data.grade_display_option)
self.click_element(self.files_and_downloads_button())
self.select_element(self.max_file_size_select(), value=data.max_file_size)
self.click_element(self.role_rename_button())
self.fill_element(self.course_creator_name_input(), data.creator_name)
self.fill_element(self.course_teacher_name_input(), data.teacher_name)
self.fill_element(self.course_student_name_input(), data.student_name)
self.click_element(self.save_return_button())
conftest.logger.info(f"Course '{data.full_course_name}' created")
def remove_course(self, data: CourseData) -> None:
self.click_element(self.choose_created_course_button(data.full_course_name))
self.click_element(self.delete_course_button())
self.click_element(self.confirm_delete_button())
self.click_element(self.continue_button())
conftest.logger.info(f"Course '{data.full_course_name}' deleted!")
def all_required_fields_filled(self) -> bool:
if self.full_name_error_message() or self.short_name_error_message():
return False
return True
|
# imports
import argparse
import numpy as np
from scipy.special import gammainc
########################################################################################################################
# Game value as a function of m,n,k
########################################################################################################################
def game_value_mnk(m, n, d, k):
"""Game value for m leaked, n test and k registration observations of dimension d; returns 0.5 when n <= m."""
if n > m:
log_val = np.log((n * (m + k)) / (m * (n + k)))
denominator = 2 * k * (n - m)
x1 = (n * d * (m + k) * log_val) / denominator
x2 = (m * d * (n + k) * log_val) / denominator
v = 0.5 + 0.5 * (gammainc(d/2, x1) - gammainc(d/2, x2))
else:
v = 0.5
return v
def game_value_as_func_of_n(m, n_max, d, k):
"""Evaluate game_value_mnk for n = 1, ..., n_max; returns (n_array, v)."""
v = np.zeros((n_max,))
n_array = np.arange(1, n_max + 1)
for n in n_array:
v[n - 1] = game_value_mnk(m, n, d, k)
return n_array, v
########################################################################################################################
# Game value as a function of delta, rho
########################################################################################################################
def game_value_rho_delta(d, rho, delta):
"""Game value as a function of (rho, delta) in dimension d; returns 0.5 when delta >= 1."""
if delta < 1:
log_val = np.log((1. + rho) / (delta + rho))
denominator = 2 * (1 - delta)
x1 = d * (1 + rho) * log_val / denominator
x2 = d * (delta + rho) * log_val / denominator
v = 0.5 + 0.5 * (gammainc(d/2, x1) - gammainc(d/2, x2))
else:
v = 0.5
return v
def ml_attacker_game_value_rho_delta(d, rho, delta):
"""Game value achieved by the ML attacker as a function of (rho, delta) in dimension d."""
log_val = np.log((1. + rho + delta) / (delta + rho))
denominator = 2.
x1 = d * (1 + rho + delta) * log_val / denominator
x2 = d * (delta + rho) * log_val / denominator
v = 0.5 + 0.5 * (gammainc(d/2, x1) - gammainc(d/2, x2))
return v
def game_value_diff_ml_vs_opt_rho_delta(d, rho, delta):
return ml_attacker_game_value_rho_delta(d, rho, delta) - game_value_rho_delta(d, rho, delta)
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('-m', type=int, default=1, help='m: the number of leaked observations')
parser.add_argument('-n', type=int, default=5, help='n: the number of test observations')
parser.add_argument('-k', type=int, default=10, help='k: the number of registration observations')
parser.add_argument('-d', type=int, default=100, help='d: the dimension of observations')
return parser.parse_args()
########################################################################################################################
# Unit Test
########################################################################################################################
if __name__ == '__main__':
args = get_args()
v = game_value_mnk(m=args.m, n=args.n, k=args.k, d=args.d)
print(v) |
# django imports
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.core.mail import EmailMultiAlternatives
from django.template import RequestContext
from django.template.base import TemplateDoesNotExist
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
def send_order_sent_mail(order):
try:
_send_order_sent_mail.delay(order)
except AttributeError:
_send_order_sent_mail(order)
def _send_order_sent_mail(order):
"""Sends an 'order has been sent' mail to the shop customer.
"""
import lfs.core.utils
shop = lfs.core.utils.get_default_shop()
try:
subject = render_to_string("lfs/mail/order_sent_subject.txt", {"order": order})
except TemplateDoesNotExist:
subject = _(u"Your order has been sent")
from_email = shop.from_email
to = [order.customer_email]
bcc = shop.get_notification_emails()
# text
text = render_to_string("lfs/mail/order_sent_mail.txt", {"order": order})
mail = EmailMultiAlternatives(
subject=subject, body=text, from_email=from_email, to=to, bcc=bcc)
# html
html = render_to_string("lfs/mail/order_sent_mail.html", {
"order": order
})
mail.attach_alternative(html, "text/html")
mail.send(fail_silently=True)
def send_order_paid_mail(order):
try:
_send_order_paid_mail.delay(order)
except AttributeError:
_send_order_paid_mail(order)
def _send_order_paid_mail(order):
"""Sends an 'order has been paid' mail to the shop customer.
"""
import lfs.core.utils
shop = lfs.core.utils.get_default_shop()
try:
subject = render_to_string("lfs/mail/order_paid_subject.txt", {"order": order})
except TemplateDoesNotExist:
subject = _(u"Your order has been paid")
from_email = shop.from_email
to = [order.customer_email]
bcc = shop.get_notification_emails()
# text
text = render_to_string("lfs/mail/order_paid_mail.txt", {"order": order})
mail = EmailMultiAlternatives(
subject=subject, body=text, from_email=from_email, to=to, bcc=bcc)
# html
html = render_to_string("lfs/mail/order_paid_mail.html", {
"order": order
})
mail.attach_alternative(html, "text/html")
mail.send(fail_silently=True)
def send_order_received_mail(request, order):
try:
_send_order_received_mail.delay(request, order)
except AttributeError:
_send_order_received_mail(request, order)
def _send_order_received_mail(request, order):
"""Sends an order received mail to the shop customer.
Customer information is taken from the provided order.
"""
import lfs.core.utils
shop = lfs.core.utils.get_default_shop()
try:
subject = render_to_string("lfs/mail/order_received_subject.txt", {"order": order})
except TemplateDoesNotExist:
subject = _(u"Your order has been received")
from_email = shop.from_email
to = [order.customer_email]
bcc = shop.get_notification_emails()
# text
text = render_to_string("lfs/mail/order_received_mail.txt", RequestContext(request, {"order": order}))
mail = EmailMultiAlternatives(
subject=subject, body=text, from_email=from_email, to=to, bcc=bcc)
# html
html = render_to_string("lfs/mail/order_received_mail.html", RequestContext(request, {
"order": order
}))
mail.attach_alternative(html, "text/html")
mail.send(fail_silently=True)
def send_customer_added(user):
try:
_send_customer_added.delay(user)
except AttributeError:
_send_customer_added(user)
def _send_customer_added(user):
"""Sends a mail to a newly registered user.
"""
import lfs.core.utils
shop = lfs.core.utils.get_default_shop()
from_email = shop.from_email
to = [user.email]
bcc = shop.get_notification_emails()
# text
text = render_to_string("lfs/mail/new_user_mail.txt", {
"user": user, "shop": shop})
# subject
subject = render_to_string("lfs/mail/new_user_mail_subject.txt", {
"user": user, "shop": shop})
mail = EmailMultiAlternatives(
subject=subject, body=text, from_email=from_email, to=to, bcc=bcc)
# html
html = render_to_string("lfs/mail/new_user_mail.html", {
"user": user, "shop": shop,
})
mail.attach_alternative(html, "text/html")
mail.send(fail_silently=True)
def send_review_added(review):
try:
_send_review_added.delay(review)
except AttributeError:
_send_review_added(review)
def _send_review_added(review):
"""Sends a mail to shop admins that a new review has been added
"""
import lfs.core.utils
shop = lfs.core.utils.get_default_shop()
subject = _(u"New review has been added")
from_email = shop.from_email
to = shop.get_notification_emails()
ctype = ContentType.objects.get_for_id(review.content_type_id)
product = ctype.get_object_for_this_type(pk=review.content_id)
# text
text = render_to_string("lfs/mail/review_added_mail.txt", {
"review": review,
"product": product,
})
mail = EmailMultiAlternatives(
subject=subject, body=text, from_email=from_email, to=to)
# html
html = render_to_string("lfs/mail/review_added_mail.html", {
"site": "http://%s" % Site.objects.get(id=settings.SITE_ID),
"review": review,
"product": product,
})
mail.attach_alternative(html, "text/html")
mail.send(fail_silently=True)
# celery
try:
from celery.task import task
except ImportError:
pass
else:
_send_customer_added = task(_send_customer_added)
_send_order_paid_mail = task(_send_order_paid_mail)
_send_order_received_mail = task(_send_order_received_mail)
_send_order_sent_mail = task(_send_order_sent_mail)
_send_review_added = task(_send_review_added)
|
#!/usr/bin/env python3
class Example:
"""This is an example class"""
bela = 'alma'
def gizi(self):
print('I love you.')
jozsi = gizi
def __init__(self):
self.vali = 24
ex = Example()
ey = Example()
print('Kakukk')
ex.bruno = ['répa']
print(ex.bruno[0])
del(Example.gizi)
print(type(ex))
ex.bela = 'barack'
print(ex.bela)
print(ey.bela)
ex.jozsi()
#ex.gizi()
# AttributeError
message1 = 'Nice '
message2 = 'day'
message3 = '!'
message = message1 + message2 + message3
print(message)
print('Test')
|
import requests
'''
launch simulation [POST]
curl -X POST -H "Content-Type: application/json" -d "@input_data.json" http://localhost:5000/launch_simulation
'''
response = requests.post('http://127.0.0.1:5000/launch_simulation', json={'key': 'value'})
print(response.headers)
print(response.json())
'''
as_vulnerability_ranking [GET]
curl -X GET -H "Content-Type: application/json" -d "@as_vuln_rank_data.json" http://localhost:5000/as_vulnerability_ranking
'''
'''
get all simulation events from db [GET]
curl -X GET -H "Content-Type: application/json" http://localhost:5000/simulation_events
'''
'''
get all info for a specific simulation [GET]
curl -X GET "http://localhost:5000/simulation_details?simulation_uuid=a5d3c351-bfd8-40be-ae25-e0c1912c5b7e"
'''
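# Illustrative sketch (not in the original script): the documented GET endpoints can also
# be exercised with requests, mirroring the curl examples above. The simulation_uuid below
# is the sample value from the comment, not a real run.
events_response = requests.get('http://127.0.0.1:5000/simulation_events')
print(events_response.json())
details_response = requests.get('http://127.0.0.1:5000/simulation_details', params={'simulation_uuid': 'a5d3c351-bfd8-40be-ae25-e0c1912c5b7e'})
print(details_response.json())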
|
from flyingpigeon import eodata
from datetime import datetime as dt
from os import listdir
from os.path import join, basename
from flyingpigeon.utils import archiveextract
# DIR = '/home/nils/birdhouse/var/lib/pywps/cache/flyingpigeon/EO_data/PSScene4Band/analytic'
# # DIR = '/home/nils/data/planet/PSScene3Band/'
# tiles = [join(DIR, pic) for pic in listdir(DIR) if '.tif' in pic]
DIR = '/home/nils/birdhouse/var/lib/pywps/cache/flyingpigeon/EO_data/PSScene4Band/analytic'
# DIR = '/home/nils/data/planet/ndvi/'
# DIR = '/home/nils/birdhouse/var/lib/pywps/cache/flyingpigeon/EO_data/PSScene3Band/visual'
# DIR = '/home/nils/data/planet/PSScene3Band/'
tiles = [join(DIR, pic) for pic in listdir(DIR) if '.tif' in pic]
# tiles = archiveextract('/home/nils/data/planet/tmpQyYDEX.tar')
dates = set()
for tile in tiles:
dates = dates.union([eodata.get_timestamp(tile).date()])
dl = list(dates)
mosaics = []
for date in dl:
print("calculating date %s " % date)
tiles_day = [tile for tile in tiles if eodata.get_timestamp(tile).date() == date]
# print(tiles_day)
archive = eodata.merge(tiles_day)
print(archive)
mosaics.extend([archive])
print(mosaics)
for mosaic in mosaics:
img = eodata.plot_truecolorcomposite(mosaic)
print(img)
#
# from flyingpigeon import gdal_merge as gm
# from os.path import join, basename
# import sys
#
# # merged_tiles = []
# # dates = set()
# # # dates = dates.union([basename(pic).split('_')[0] for pic in tiles])
# # dates = dates.union(get_timestamp(tile).date() for tile in tiles)
# #
# # for date in dates:
#
#
# LOGGER.debug('start merging')
# # prefix = dt.strftime(date, "%Y%m%d")
# _, filename = mkstemp(dir='.', prefix=prefix, suffix='.tif')
# call = ['-o', filename]
# #
# # tiles_day = [tile for tile in tiles if date.date() == get_timestamp(tile).date()]
#
# for tile in tiles:
# call.extend([tile])
# sys.argv[1:] = call
# gm.main()
|
# Import necessary libraries
import streamlit as st
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from my_utils import getKmers, get_metrics, predict_sequence_class
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score
from sklearn.naive_bayes import MultinomialNB
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import cross_val_score, StratifiedKFold
import pickle
def app():
st.markdown("### Machine Learning for DNA Classification")
st.markdown("[Get Jupyter Notebook](https://raw.githubusercontent.com/CyrilleMesue/dna-seq-classifier/main/DNA%20Sequencing%20and%20applying%20Classifier.ipynb)")
st.markdown("""
In this notebook, we will apply a classification model that can predict a gene's function based on the DNA sequence of the coding sequence alone. The model can be used to determine whether the gene product is any of the following classes of proteins.
| Protein Class | Name | Description
| ----------- | ----------- | ----------
| 0 | G protein coupled receptors | [Description](https://en.wikipedia.org/wiki/G_protein-coupled_receptor)
| 1 | Tyrosine kinase | [Description](https://en.wikipedia.org/wiki/Tyrosine_kinase)
| 2 | Tyrosine phosphatase | [Description](https://en.wikipedia.org/wiki/Protein_tyrosine_phosphatase)
| 3 | Synthetase | [Description](https://en.wikipedia.org/w/index.php?title=Synthetase&redirect=no)
| 4 | Synthase | [Description](https://en.wikipedia.org/wiki/Synthase)
| 5 | Ion channel | [Description](https://en.wikipedia.org/wiki/Ion_channel)
| 6 | Transcription factor | [Description](https://en.wikipedia.org/wiki/Transcription_factor#:~:text=In%20molecular%20biology%2C%20a%20transcription,to%20a%20specific%20DNA%20sequence.)
The dataset for this study was obtained from Kaggle and can be accessed through the link below:
https://www.kaggle.com/nageshsingh/dna-sequence-dataset
And this code was adapted from : https://github.com/krishnaik06/DNA-Sequencing-Classifier
"""
)
st.markdown("### Upload Data")
st.markdown("You can get a sample dataset [here](https://raw.githubusercontent.com/CyrilleMesue/dna-seq-classifier/main/datasets/chimp_data.txt).")
input_file = st.file_uploader("Please upload a comma separated text file!!! First column should contain dna strings and second column, the numeric labels", type=["txt"])
if st.button("Upload file"):
# set the on_click
if input_file == None:
st.markdown("**Please upload a comma separated text file!!! First column should contain dna strings and second column, the numeric labels**")
else:
uploaded_data = pd.read_table(input_file)
st.markdown("""
***Data uploaded. It might take up to an hour for everything to display***
""")
st.dataframe(uploaded_data)
st.write("There are {} data samples\n".format(len(uploaded_data)))
st.markdown("""### Methodology
If all DNA molecules were of equal length, perhaps the best solution would be to convert each DNA sequence into an image and train convolutional neural networks on such images. Most other machine learning approaches are not effective here because they require fixed-length inputs. A solution is to split the DNA molecules into fixed-length shorter DNA strings called k-mers. Each DNA molecule then yields a number of k-length sub-strings, each encoding particular information. The k-mers of a given DNA string are treated as words and the DNA itself as a sentence or paragraph, and a natural language processing algorithm is applied.
""")
st.markdown("### Import Utilities")
st.code("""
import streamlit as st
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from my_utils import getKmers, get_metrics, predict_sequence_class
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score
from sklearn.naive_bayes import MultinomialNB
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import cross_val_score, StratifiedKFold
import pickle
""", language='python')
st.markdown("### Helper functions")
st.code("""
# function to convert sequence strings into k-mer words, default size = 6 (hexamer words)
def getKmers(sequence, size=6):
return [sequence[x:x+size].lower() for x in range(len(sequence) - size + 1)]
# This function computes the accuracy, precision, recall and f1-score of the model
def get_metrics(y_test, y_predicted):
accuracy = accuracy_score(y_test, y_predicted)
precision = precision_score(y_test, y_predicted, average='weighted')
recall = recall_score(y_test, y_predicted, average='weighted')
f1 = f1_score(y_test, y_predicted, average='weighted')
return accuracy, precision, recall, f1
# This function predicts the class of any raw DNA string
def predict_sequence_class(sequence, vectorizer_model, classifier_model, class_labels):
'''
inputs: DNA sequence, model and class labels (dictionary)
Function takes in a DNA sequence returns its class amongst:
['G protein coupled receptors', 'Tyrosine kinase', 'Tyrosine phosphatase', 'Synthetase', 'Synthase', 'Ion channel', 'Transcription factor']
'''
seq_o_kmers = getKmers(sequence)
text_data = ' '.join(seq_o_kmers)
input_x = vectorizer_model.transform([text_data])
predicted_value = classifier_model.predict(input_x)[0]
predicted_label = class_labels[str(predicted_value)]
return predicted_label
""", language='python')
st.markdown("### Convert DNA strings into k-mers")
st.code("""
data_preprocessed = uploaded_data
data_preprocessed['words'] = data_preprocessed.apply(lambda x: getKmers(x['sequence']), axis=1)
data_preprocessed = data_preprocessed.drop('sequence', axis=1)
st.dataframe(data_preprocessed) """, language='python'
)
data_preprocessed = uploaded_data
data_preprocessed['words'] = data_preprocessed.apply(lambda x: getKmers(x['sequence']), axis=1)
data_preprocessed = data_preprocessed.drop('sequence', axis=1)
st.dataframe(data_preprocessed)
st.markdown("""
### Convert list of k_mers to a string of words
This is necessary for applying natural language processing algorithms. The labels also need to be extracted from the pandas table into an array. """)
st.code("""
# Convert to text
texts = list(data_preprocessed['words'])
for item in range(len(texts)):
texts[item] = ' '.join(texts[item])
# extract labels
y_data = data_preprocessed.iloc[:, 0].values
texts[0]
""", language='python'
)
# Convert to text
texts = list(data_preprocessed['words'])
for item in range(len(texts)):
texts[item] = ' '.join(texts[item])
# extract labels
y_data = data_preprocessed.iloc[:, 0].values
st.markdown(texts[0])
st.markdown("### Applying the Bag of Words Model with CountVectorizer (NLP)")
st.code("""
# Creating the Bag of Words model using CountVectorizer()
# This is equivalent to k-mer counting
cv = CountVectorizer(ngram_range=(4,4))
# This will create an n by m compressed matrix where n is the number of samples and m is the number of unique k-mers over
# all the samples
X = cv.fit_transform(texts)
X.shape""", language='python'
)
# Creating the Bag of Words model using CountVectorizer()
# This is equivalent to k-mer counting
cv = CountVectorizer(ngram_range=(4,4))
# This will create an n by m compressed matrix where n is the number of samples and m is the number of unique k-mers over
# all the samples
X = cv.fit_transform(texts)
st.write(X.shape)
st.markdown("### Checking for Class Imbalance in the Dataset")
st.markdown("""```
data_preprocessed['class'].value_counts().sort_index().plot.bar()
```""")
st.bar_chart(data=data_preprocessed['class'].value_counts().sort_index(), width=800, height=500, use_container_width=False)
st.markdown("### Data Splitting into Training and Testing")
st.markdown("By default we use 80 percent of the data for training and 20 percent for testing by setting test_size to 0.2. You can change the test_size value and try different split ratios.")
# Splitting the human dataset into the training set and test set
st.code("""
X_train, X_test, y_train, y_test = train_test_split(X, y_data, test_size = 0.20, random_state=42)
print("Training set : {}".format(X_train.shape))
print("Test set : {}".format(X_test.shape))""", language='python'
)
X_train, X_test, y_train, y_test = train_test_split(X,y_data,test_size = 0.20,random_state=42)
st.write("Training set : {}".format(X_train.shape))
st.write("Test set : {}".format(X_test.shape))
st.markdown("### Training a Multinomial Naive Bayes Classifier")
st.code("""
### Multinomial Naive Bayes Classifier ###
# The alpha parameter was determined by grid search previously
classifier = MultinomialNB(alpha=0.1)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)""", language='python'
)
### Multinomial Naive Bayes Classifier ###
# The alpha parameter was determined by grid search previously
classifier = MultinomialNB(alpha=0.1)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
st.markdown("### Testing the model on the Test Data")
st.code("""
print("Confusion matrix\n")
print(pd.crosstab(pd.Series(y_test, name='Actual'), pd.Series(y_pred, name='Predicted')))
accuracy, precision, recall, f1 = get_metrics(y_test, y_pred)
print("accuracy = {:.3f}\n precision = {:.3f} \n recall = {:.3f} \n f1 = {:.3f}".format(accuracy, precision, recall, f1))""", language='python'
)
st.write("Confusion matrix\n")
st.write(pd.crosstab(pd.Series(y_test, name='Actual'), pd.Series(y_pred, name='Predicted')))
accuracy, precision, recall, f1 = get_metrics(y_test, y_pred)
st.write("accuracy = {:.3f}".format(accuracy))
st.write("precision = {:.3f}".format(precision))
st.write("recall = {:.3f}".format(recall))
st.write("f1 = {:.3f}".format(f1))
st.markdown("""
### Hyperparameter Tuning and Cross Validation
Cross validation ensures that the model is trained and tested on all of the data. First, the dataset is split into k equal groups. The first k-1 groups are used for training while the k-th group is used for testing. This is repeated so that each group becomes the test set once, and the average test accuracy over all groups is taken. This validation approach gives a more reliable accuracy estimate than a single random train/test split.
Scikit-learn's grid search tunes hyperparameters and reports the n-fold (n can be changed) cross validation accuracy for each parameter combination. Our parameter of interest is alpha."""
)
st.code("""
clf = GridSearchCV(MultinomialNB(), {'alpha': [1,0.1,0.01,0.001,0.0001,0.00001]}, cv=10, return_train_score=True)
clf.fit(X, y_data)
df = pd.DataFrame(clf.cv_results_)
df[['param_alpha','mean_test_score', 'mean_train_score']]
print("The best alpha value is {} ".format(clf.best_params_["alpha"]))""", language='python'
)
clf = GridSearchCV(MultinomialNB(), {
'alpha': [1,0.1,0.01,0.001,0.0001,0.00001]
}, cv=10, return_train_score=True)
clf.fit(X, y_data)
df = pd.DataFrame(clf.cv_results_)
st.dataframe(df[['param_alpha','mean_test_score', 'mean_train_score']])
st.write("The best alpha value is {} ".format(clf.best_params_["alpha"]))
st.markdown("### Train Model on Optimal Parameter")
st.code("""
a = clf.best_params_["alpha"]
classifier = MultinomialNB(alpha=a)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
print("Confusion matrix\n")
print(pd.crosstab(pd.Series(y_test, name='Actual'), pd.Series(y_pred, name='Predicted')))
accuracy, precision, recall, f1 = get_metrics(y_test, y_pred)
print("accuracy = {:.3f}\n precision = {:.3f} \n recall = {:.3f} \n f1 = {:.3f}".format(accuracy, precision, recall, f1))""", language='python'
)
a = clf.best_params_["alpha"]
classifier = MultinomialNB(alpha=a)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
st.write("Confusion matrix\n")
st.write(pd.crosstab(pd.Series(y_test, name='Actual'), pd.Series(y_pred, name='Predicted')))
accuracy, precision, recall, f1 = get_metrics(y_test, y_pred)
st.write("accuracy = {:.3f}".format(accuracy))
st.write("precision = {:.3f}".format(precision))
st.write("recall = {:.3f}".format(recall))
st.write("f1 = {:.3f}".format(f1))
st.markdown("### Stratified k-fold Cross Validation")
st.code("""
# 10-fold cross validation Can play with n_splits parameter
kf = StratifiedKFold(n_splits=10, shuffle=True, random_state=43)
# Print output
scores = cross_val_score(MultinomialNB(alpha=a), X, y_data, cv=kf, n_jobs=None, scoring='f1_micro')
print(f'K-Fold test: {scores}')
print(f'Mean: {scores.mean().round(3)}')
print(f'Std: {scores.std().round(3)}')""", language='python'
)
# 10-fold cross validation Can play with n_splits parameter
kf = StratifiedKFold(n_splits=10, shuffle=True, random_state=43)
# Print output
scores = cross_val_score(MultinomialNB(alpha=a), X, y_data, cv=kf, n_jobs=None, scoring='f1_micro')
st.write(f'K-Fold test: {scores}')
st.write(f'Mean: {scores.mean().round(3)}')
st.write(f'Std: {scores.std().round(3)}')
st.markdown("### Save Model")
st.markdown("""```
# file name
filename = 'tmp/dna_classifier.sav'
pickle.dump(classifier, open(filename, 'wb'))
```"""
)
# file name
filename = 'tmp/dna_classifier.sav'
pickle.dump(classifier, open(filename, 'wb'))
st.markdown("### Load and Test Model")
st.code("""
# load the model from disk
dna_classifier = 'tmp/dna_classifier.sav'
loaded_model = pickle.load(open(dna_classifier, 'rb'))
dna_vectorizer = 'tmp/dna_vectorizer.sav'
loaded_vectorizer = pickle.load(open(dna_vectorizer, 'rb'))
result = loaded_model.score(X_test, y_test)
print(result)
loaded_model.predict(X_test[24])
#### This is the Label Map
class_labels = {"0" : "G protein coupled receptors", "1" : "Tyrosine kinase", "2" :"Tyrosine phosphatase", "3" : "Synthetase", "4" : "Synthase", "5" : "Ion channel", "6" : "Transcription factor"}
sequence = uploaded_data.iloc[70].sequence
print(sequence)
# Predict class
predict_sequence_class(sequence, cv, loaded_model, class_labels)
""", language='python'
)
# load the model from disk
dna_classifier = 'tmp/dna_classifier.sav'
loaded_model = pickle.load(open(dna_classifier, 'rb'))
dna_vectorizer = 'tmp/dna_vectorizer.sav'
loaded_vectorizer = pickle.load(open(dna_vectorizer, 'rb'))
result = loaded_model.score(X_test, y_test)
st.write(result)
loaded_model.predict(X_test[24])
#### This is the Label Map
class_labels = {"0" : "G protein coupled receptors", "1" : "Tyrosine kinase", "2" :"Tyrosine phosphatase", "3" : "Synthetase", "4" : "Synthase", "5" : "Ion channel", "6" : "Transcription factor"}
sequence = uploaded_data.iloc[70].sequence
#sequence
# Predict class
st.write(predict_sequence_class(sequence, cv, loaded_model, class_labels))
st.markdown("### Hyperparameter Tuning for both CountVectorizer and the Multinomial Naive Bayes Classifier")
st.code("""
# ngram_range values to test
ngram_range_params = [(1,1), (1,2), (1,3), (1,4), (2,2),(2,3),(2,4),(3,3),(3,4),(4,4)]
# alpha values to test
alpha_params = [1,0.1,0.01,0.001,0.0001,0.00001]
# initialize dictionary of best parameters
best_parameters = {"best_alpha" : None, "best_ngram_range" : None}
best_score = 0
for i in range(len(ngram_range_params)):
ngram_range = ngram_range_params[i]
cv = CountVectorizer(ngram_range=ngram_range)
x = cv.fit_transform(texts)
clf = GridSearchCV(MultinomialNB(), {'alpha': alpha_params}, cv = 5, return_train_score=False)
clf.fit(x, y_data)
if clf.best_score_ > best_score:
best_score = clf.best_score_
best_parameters["best_alpha"] = clf.best_params_["alpha"]
best_parameters["best_ngram_range"] = ngram_range_params[i]
pd.DataFrame(best_parameters)""", language='python'
)
# ngram_range values to test
ngram_range_params = [(1,1), (1,2), (1,3), (1,4), (2,2),(2,3),(2,4),(3,3),(3,4),(4,4)]
# alpha values to test
alpha_params = [1,0.1,0.01,0.001,0.0001,0.00001]
# initialize dictionary of best parameters
best_parameters = {"best_alpha" : None, "best_ngram_range" : None}
best_score = 0
for i in range(len(ngram_range_params)):
ngram_range = ngram_range_params[i]
cv = CountVectorizer(ngram_range=ngram_range)
x = cv.fit_transform(texts)
clf = GridSearchCV(MultinomialNB(), {'alpha': alpha_params}, cv = 5, return_train_score=False)
clf.fit(x, y_data)
if clf.best_score_ > best_score:
best_score = clf.best_score_
best_parameters["best_alpha"] = clf.best_params_["alpha"]
best_parameters["best_ngram_range"] = ngram_range_params[i]
st.dataframe(pd.DataFrame(best_parameters))
st.markdown("***GET JUPYTER NOTEBOOK HERE FROM GITHUB*** ")
st.markdown("[jupyter notebook](https://raw.githubusercontent.com/CyrilleMesue/dna-seq-classifier/main/DNA%20Sequencing%20and%20applying%20Classifier.ipynb)")
st.markdown("***DONE!!!***")
# st.markdown("### Save Models")
# vec name
# v_filepath = 'tmp/dna_vectorizer.sav'
# pickle.dump(cv, open(v_filepath, 'wb'))
# st.download_button("Download Vectorizer Model", open(v_filepath, 'rb'), file_name="dna_vectorizer.sav")
|
import os
from multiprocessing import Pool
from torch.cuda import is_available
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
import logging
import anomalytransfer as at
from glob import glob
from utils import run_time
from typing import Sequence, Tuple, Dict, Optional
import pandas as pd
import numpy as np
import torch
from sklearn.metrics import precision_recall_curve
from tqdm import tqdm
logging.basicConfig(level=logging.INFO, format='[%(asctime)s [%(levelname)s]] %(message)s')
config = at.utils.config()
CLUSTER_OUTPUT = config.get("CLUSTERING", "output")
EPOCHS = config.getint("CLUSTERING", "epochs")
BASE_EPOCHS = config.getint('TRANSFER_LEARNING', 'base_epochs')
DATA_EPOCHS = config.getint('TRANSFER_LEARNING', 'data_epochs')
INPUT = config.get('TRANSFER_LEARNING', 'input')
OUTPUT = config.get('TRANSFER_LEARNING', 'output')
MODEL_PATH = config.get('TRANSFER_LEARNING', 'model_path')
RATIO = config.getfloat('TRANSFER_LEARNING', 'ratio')
RAW_INPUT = config.get("CLUSTERING_PREPROCESSING", "input")
def _get_latent_vectors(x: np.ndarray) -> np.ndarray:
x = torch.as_tensor(x)
seq_length = x.shape[1]
input_dim = x.shape[2]
model = at.clustering.LatentTransformer(
seq_length=seq_length, input_dim=input_dim)
model.fit(x, epochs=EPOCHS, verbose=0)
model.save(os.path.join(OUTPUT, 'model.pt'))
return model.transform(x)
def cluster_data(path: str) -> Tuple[str, str]:
base = None
data = None
for item in os.listdir(path):
item_path = os.path.join(path, item)
if os.path.isdir(item_path):
data = item_path
else:
base = item_path
if base is None or data is None:
raise ValueError('Base path or data path not found')
return base, data
def make_base_model(kpi: at.transfer.data.KPI, epochs: int):
kpi.complete_timestamp()
kpi, _, _ = kpi.standardize()
model = at.transfer.models.AnomalyDetector()
model.fit(kpi=kpi.no_labels(), epochs=epochs, verbose=0)
return model
def train_test(train_kpi: at.transfer.data.KPI,
epochs: int,
test_kpi: at.transfer.data.KPI = None,
mask: Optional[Sequence] = None,
**kwargs) -> float:
model = at.transfer.models.AnomalyDetector()
if mask is not None:
model.load_partial(path=kwargs.get('model_path'), name=kwargs.get('base_kpi').name, mask=mask)
model.freeze(mask)
model.fit(kpi=train_kpi.no_labels(), epochs=epochs, verbose=0)
model.unfreeze(mask)
model.fit(kpi=train_kpi.no_labels(), epochs=epochs, verbose=0)
if test_kpi is not None and test_kpi.labels is not None:
anomaly_scores = model.predict(test_kpi, verbose=0)
results = at.utils.get_test_results(labels=test_kpi.labels,
scores=anomaly_scores,
missing=test_kpi.missing,
use_spot=False)
at.utils.log_test_results(name=test_kpi.name, results=results)
return results['f1score']
else:
return None
def _ignore_missing(series_list: Sequence, missing: np.ndarray) -> Tuple[np.ndarray, ...]:
ret = []
for series in series_list:
series = np.copy(series)
ret.append(series[missing != 1])
return tuple(ret)
def get_test_results(
timestamps: np.ndarray,
labels: np.ndarray,
scores: np.ndarray,
missing: np.ndarray,
values: np.ndarray,
window_size: int = 120,
**kwargs) -> float:
timestamps = timestamps[window_size - 1:]
labels = labels[window_size - 1:]
scores = scores[window_size - 1:]
missing = missing[window_size - 1:]
values = values[window_size - 1:]
adjusted_timestamps, adjusted_labels, adjusted_scores, adjusted_values = _ignore_missing(
[timestamps, labels, scores, values], missing=missing
)
adjusted_scores = at.utils.adjust_scores(
labels=adjusted_labels, scores=adjusted_scores)
precision, recall, th = precision_recall_curve(adjusted_labels, adjusted_scores, pos_label=1)
f1_score = 2 * precision * recall / (precision + recall + 1e-6)
arg_max = np.argmax(f1_score)
best_precision, best_recall, best_f1_score = precision[arg_max], recall[arg_max], f1_score[arg_max]
threshold = th[arg_max]
return best_f1_score
def main(finetune_num=200):
print(finetune_num)
# with torch.cuda.device(torch.device(f"cuda:{finetune_num//200%2}")):
clusters = os.listdir(INPUT)
base_values = []
base_models = []
for cluster in tqdm(clusters, total=len(clusters)):
base, data = cluster_data(os.path.join(INPUT, cluster))
base_kpi = at.utils.load_kpi(base)
base_kpi.complete_timestamp()
base_kpi, _, _ = base_kpi.standardize()
base_model = make_base_model(base_kpi, BASE_EPOCHS)
base_models.append(base_model)
dt = pd.read_csv(base)
base_values.append(dt["value"])
file_list = at.utils.file_list(RAW_INPUT)
cluster_values = []
finetune_values = []
test_kpis = []
names = []
for file in file_list:
data_kpi = at.utils.load_kpi(file)
data_kpi.complete_timestamp()
data_kpi, _, _ = data_kpi.standardize()
filename = at.utils.filename(file)
names.append(filename)
# split idx
ts = data_kpi.timestamps
ts = ts % (60 * 60 * 24)
split_idx = np.where(ts <= 60)[0]
_, data_kpi = data_kpi.split_by_idx(split_idx[0], window_size=1)
# split to [for cluster] and [for finetune]
ts = data_kpi.timestamps
ts = ts % (60 * 60 * 24)
split_idx = np.where(ts <= 60)[0]
cluster_value, finetune_value = data_kpi.split_by_idx(split_idx[1], window_size=1)
finetune_value, test_value = finetune_value.split_by_idx(finetune_num, window_size=1)
cluster_values.append(cluster_value.values)
finetune_values.append(finetune_value)
test_kpis.append(test_value)
# get latent var
base_values = np.asarray(base_values, dtype=np.float32)[..., None]
base_feature = _get_latent_vectors(base_values)
cluster_values = np.asarray(cluster_values, dtype=np.float32)[..., None]
cluster_feature = _get_latent_vectors(cluster_values)
tmp_result = {name: 0 for name in names}
tmp_result["num_of_points"] = finetune_num
for i, (ft, finetune, test_kpi, name) in enumerate(zip(cluster_feature, finetune_values, test_kpis, names)):
cluster_idx = np.argmin(np.sum((ft - base_feature)**2, axis=1))
base_model = base_models[cluster_idx]
# base_model.fit(kpi=finetune.no_labels(), epochs=DATA_EPOCHS, verbose=0)
anomaly_scores = base_model.predict(test_kpi, verbose=1)
f1_score = get_test_results(
timestamps=test_kpi.timestamps,
labels=test_kpi.labels,
scores=anomaly_scores,
missing=test_kpi.missing,
values=test_kpi.values
)
tmp_result[name] = f1_score
print(f"{i} - {name}")
return tmp_result
if __name__ == '__main__':
# for num in range(200, 5000, 200):
# main(num)
with Pool(1) as pool:
results = pool.map(main, range(200, 201, 200))
# results = pool.map(main, range(200, 201, 200))
final_result = pd.DataFrame(columns=list(results[0].keys()))
for res in results:
final_result = final_result.append(res, ignore_index=True)
final_result = final_result.sort_values("num_of_points")
final_result.to_csv("result.csv", index=False)
|
class ConfigException(Exception):
pass
class ConfigLoadException(ConfigException):
pass
class NoConfigOptionError(ConfigException):
pass
|
from flask import Flask
import sys
sys.path.append('../chordpro2html')
from chordpro2html import Parser
from songs import TWINKLE, LEAVES, TENSHI
p = Parser()
song = TWINKLE
app = Flask(__name__)
@app.route('/')
def hello():
return p.to_html(LEAVES) + p.to_html(TWINKLE) + p.to_html(TENSHI)
if __name__ == '__main__':
app.run(debug=True) |
"""
A set of classes that let you create interactive menus in the console
"""
from dataclasses import dataclass, field
from console.inputs import SelectionInput, SelectionInputOptions, InputResult
@dataclass
class MenuOptions(SelectionInputOptions):
recurring: bool = True
cancel_codes: tuple = field(default_factory=tuple)
class Menu(SelectionInput):
options_class = MenuOptions
options: MenuOptions = None
def __call__(self, prompt, commands, context=None, *args, **kwargs) -> InputResult:
return self._invoke(prompt, commands, context=context, *args, **kwargs)
def _invoke(self, prompt, commands, context=None, *args, **kwargs) -> InputResult:
if context is None:
context = {}
names = list(commands.keys())
result, selected_command_index = super()._invoke(prompt, names, *args, **kwargs)
if result == InputResult.SUCCESS:
commands.get(names[selected_command_index])(context)
return result
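# Usage sketch (illustrative, assuming the inherited SelectionInput/Menu can be constructed
# without arguments): each command is a callable that receives the shared context dict,
# keyed by the menu entry shown to the user.
#
#     def greet(context):
#         print("Hello,", context.get("name", "world"))
#
#     menu = Menu()
#     menu("Pick an action", {"Greet": greet}, context={"name": "console user"})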
|
from enum import Enum
from typing import Optional
from semantic_version import SimpleSpec, Version
class ContractFeature(Enum):
SERVICES = "services"
MAX_TOKEN_NETWORKS = "max_token_networks"
INITIAL_SERVICE_DEPOSIT = "initial_service_deposit"
MS_NEEDS_TOKENNETWORK_REGISTRY = "ms_needs_tokennetwork_registry"
CONTRACT_FEATURE_VERSIONS = {
ContractFeature.SERVICES: SimpleSpec(">=0.8.0"),
ContractFeature.MAX_TOKEN_NETWORKS: SimpleSpec(">=0.9.0"),
ContractFeature.INITIAL_SERVICE_DEPOSIT: SimpleSpec(">=0.18.0"),
ContractFeature.MS_NEEDS_TOKENNETWORK_REGISTRY: SimpleSpec(">=0.22.0"),
}
def _matches_feature(feature: ContractFeature, version: Optional[str]) -> bool:
"""Returns a bool indicating whether the passed version matches the minimum required
version for the given feature."""
if version is None:
# contracts_version == None means the stock version in development.
return True
return CONTRACT_FEATURE_VERSIONS[feature].match(Version(version))
def contracts_version_with_max_token_networks(version: Optional[str]) -> bool:
return _matches_feature(ContractFeature.MAX_TOKEN_NETWORKS, version)
def contracts_version_provides_services(version: Optional[str]) -> bool:
return _matches_feature(ContractFeature.SERVICES, version)
def contracts_version_has_initial_service_deposit(version: Optional[str]) -> bool:
return _matches_feature(ContractFeature.INITIAL_SERVICE_DEPOSIT, version)
def contracts_version_monitoring_service_takes_token_network_registry(
version: Optional[str],
) -> bool:
return _matches_feature(ContractFeature.MS_NEEDS_TOKENNETWORK_REGISTRY, version)
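# Usage sketch (illustrative): the helpers above reduce to simple version checks, e.g.
#   contracts_version_provides_services("0.9.0")        -> True   (requires >= 0.8.0)
#   contracts_version_with_max_token_networks("0.8.0")  -> False  (requires >= 0.9.0)
#   contracts_version_has_initial_service_deposit(None) -> True   (None = development version)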
|
import io
import re
import sys
from numba import njit
_INPUT_ = """\
hellospaceRhellospace
"""
#sys.stdin = io.StringIO(_INPUT_)
@njit
def solve(S):
# Build T one character at a time: 'R' reverses T; any other character cancels the
# previous character if it matches the end of T, otherwise it is appended.
T = ""
for i in range(len(S)):
s = S[i]
if s == 'R':
T = T[::-1]
else:
if len(T) > 0 and T[-1] == s:
T = T[:-1]
else:
T += s
return T
S = input()
print(solve(S))
|
from .._pixsfm._residuals import * # noqa F403
|
import pandas as pd
def field_variety(data):
"""
Computes the number of different fields that each paper is about by taking the number of fields in 'fields_of_study'
Input:
- df['fields_of_study']: dataframe (dataset); 'fields_of_study' column [pandas dataframe]
Output:
- Field variety: vector of field_variety for each paper of the given dataset [pandas series]
with field_variety [int]
"""
# Calculates how many fields a paper has
Field_variety = pd.Series([len(i) for i in data['fields_of_study']])
return Field_variety
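# Example (illustrative): for a dataframe whose 'fields_of_study' column holds lists of
# field names,
#   field_variety(pd.DataFrame({'fields_of_study': [['Biology'], ['CS', 'Math'], []]}))
# returns a pandas Series with values [1, 2, 0].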
|
import gzip
import io
import lz4.frame
import struct
import sys
from .event import Event
import proio.proto as proto
from .writer import magic_bytes
class Reader(object):
"""
Reader for proio files
This class can be used with the `with` statement, and it also may be used
as an iterator that sequentially iterates all events. A filename may be
omitted in favor of specifying `fileobj`.
:param string filename: name of input file to read
:param fileobj: file object to read from
:example:
.. code-block:: python
with proio.Reader('input.proio') as reader:
for event in reader:
...
"""
def __init__(self, filename = None, fileobj = None):
if filename is None:
if fileobj is not None:
self._stream_reader = fileobj
else:
self._stream_reader = io.BytesIO(b'')
else:
self._stream_reader = open(filename, 'rb')
self._close_file = True
self._bucket_reader = io.BytesIO(b'')
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
self.close()
def __iter__(self):
return self
def __next__(self):
"""
:return: the next event
:rtype: Event
"""
event = self._read_from_bucket(True)
if event is None:
raise StopIteration
return event
if sys.version_info[0] == 2:
def next(self):
return self.__next__()
def close(self):
"""
closes the underlying input file object.
"""
try:
if self._close_file:
self._stream_reader.close()
except Exception:
pass
def next_header(self):
"""
returns the next event header. This is useful for scanning the
stream/file.
:return: the next event header
:rtype: Event
"""
self._read_bucket(sys.maxsize)
return self._bucket_header
def skip(self, n_events):
"""
skips the next `n_events` events.
:param int n_events: number of events to skip
:return: number of events skipped
:rtype: int
"""
try:
bucket_evts_left = self._bucket_header.nEvents - self._bucket_evts_read
except AttributeError:
bucket_evts_left = 0
n_skipped = 0
if n_events > bucket_evts_left:
n_skipped += bucket_evts_left
while True:
n = self._read_bucket(n_events - n_skipped)
if n == 0:
break
n_skipped += n
while n_skipped < n_events:
if self._read_from_bucket(False) == True:
n_skipped += 1
else:
break
return n_skipped
def seek_to_start(self):
"""
seeks, if possible, to the start of the input file object. This can be
used along with :func:`skip` to directly access events.
"""
if self._stream_reader.seekable():
self._stream_reader.seek(0, 0)
self._bucket_reader = io.BytesIO(b'')
self._bucket_header = None
self._bucket_evts_read = 0
def _read_from_bucket(self, do_unmarshal = True):
proto_size_buf = self._bucket_reader.read(4)
if len(proto_size_buf) != 4:
self._read_bucket()
proto_size_buf = self._bucket_reader.read(4)
if len(proto_size_buf) != 4:
return
proto_size = struct.unpack("I", proto_size_buf)[0]
proto_buf = self._bucket_reader.read(proto_size)
if len(proto_buf) != proto_size:
return
self._bucket_evts_read += 1
if do_unmarshal:
event_proto = proto.Event.FromString(proto_buf)
return Event(proto_obj = event_proto)
return True
def _read_bucket(self, max_skip_events = 0):
self._bucket_evts_read = 0
events_skipped = 0
self._bucket_header = None
n = self._sync_to_magic()
if n < len(magic_bytes):
return events_skipped
header_size = struct.unpack("I", self._stream_reader.read(4))[0]
header_string = self._stream_reader.read(header_size)
if len(header_string) != header_size:
return events_skipped
self._bucket_header = proto.BucketHeader.FromString(header_string)
if self._bucket_header.nEvents > max_skip_events:
bucket = self._stream_reader.read(self._bucket_header.bucketSize)
else:
self._bucket_reader = io.BytesIO(b'')
events_skipped = self._bucket_header.nEvents
try:
self._stream_reader.seek(self._bucket_header.bucketSize, 1)
except OSError:
self._stream_reader.read(self._bucket_header.bucketSize)
return events_skipped
if len(bucket) != self._bucket_header.bucketSize:
return events_skipped
if self._bucket_header.compression == proto.BucketHeader.GZIP:
self._bucket_reader = gzip.GzipFile(fileobj = io.BytesIO(bucket), mode = 'rb')
elif self._bucket_header.compression == proto.BucketHeader.LZ4:
try:
uncomp_bytes, _ = lz4.frame.decompress(bucket)
except ValueError:
uncomp_bytes = lz4.frame.decompress(bucket)
self._bucket_reader = io.BytesIO(uncomp_bytes)
else:
self._bucket_reader = io.BytesIO(bucket)
return events_skipped
def _sync_to_magic(self):
n_read = 0
while True:
magic_byte = self._stream_reader.read(1)
if len(magic_byte) != 1:
return -1
n_read += 1
if magic_byte == magic_bytes[0]:
goodSeq = True
for i in range(1, len(magic_bytes)):
magic_byte = self._stream_reader.read(1)
if len(magic_byte) != 1:
return -1
n_read += 1
if magic_byte != magic_bytes[i]:
goodSeq = False
break
if goodSeq:
break
return n_read
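# Usage sketch (illustrative file name): seek_to_start() and skip() together give
# quasi-random access to events in a seekable file:
#
#     with Reader('input.proio') as reader:
#         reader.seek_to_start()
#         reader.skip(10)       # jump over the first 10 events
#         event = next(reader)  # the 11th event, if the file has that many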
|
version = "3.38"
|
from fairseq.models.transformer_lm import TransformerLanguageModel
from fairseq.models.transformer_autoencoders import TransformerAutoencoders
import torch
import numpy as np
import torch.nn.functional as F
from random import randint
from midi_preprocess import encode_midi, decode_midi
import utils
def temperature_sampling(x, temperature, topk):
logits = x.cpu().detach().numpy()[0]
probs = np.exp(logits / temperature) / np.sum(np.exp(logits / temperature))
if topk == 1:
prediction = np.argmax(probs)
else:
sorted_index = np.argsort(probs)[::-1]
candi_index = sorted_index[:topk]
candi_probs = [probs[i] for i in candi_index]
# normalize probs
candi_probs /= sum(candi_probs)
# choose by predicted probs
prediction = np.random.choice(candi_index, size=1, p=candi_probs)[0]
return x.new([prediction]).int()[None]
def topk_sampling(x, topk):
logits = x.cpu().detach().numpy()[0]
probs = logits
if topk == 1:
prediction = np.argmax(probs)
else:
sorted_index = np.argsort(probs)[::-1]
candi_index = sorted_index[:topk]
candi_probs = [probs[i] for i in candi_index]
# normalize probs
candi_probs /= sum(candi_probs)
# choose by predicted probs
prediction = np.random.choice(candi_index, size=1, p=candi_probs)[0]
return x.new([prediction]).int()[None]
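# Both samplers above expect a (1, vocab_size) tensor of scores for the next token and
# return a 1x1 integer tensor holding the chosen token id, e.g. (illustrative):
#   next_token = temperature_sampling(x, temperature=1.2, topk=5)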
custom_lm = (
TransformerLanguageModel.from_pretrained(
"/mnt/zhangyi/checkpoints/transformer_music_lm_remi_cov", "checkpoint_best.pt",
)
.cuda()
.half()
.eval()
)
model = custom_lm.models[0]
for i in range(0, 100):
l = 2048
a = []
s = 1
ss = "<time_shift,0>"
if len(ss) == 0:
input_sequence = custom_lm.encode(" ".join(encode_midi("primer.mid")))[:-1]
with open("data/mae.test.tokens", "r") as fl:
ss = fl.readline().strip().split()[3 : 3 + 2048]
prev_input_seq = custom_lm.encode(" ".join(ss))[:-1]
else:
input_sequence = custom_lm.encode(ss)[:-1]
print(len(input_sequence))
input_tensor = torch.LongTensor(input_sequence).cuda().unsqueeze(0)
# prev_input_tensor = torch.LongTensor(prev_input_seq).cuda().unsqueeze(0)
print("ok")
a = custom_lm.decode(torch.LongTensor(input_sequence).cuda()).split()
print(input_sequence.shape)
try:
flg = 0
for ind in range(len(input_sequence), l):
x = model(input_tensor[-2000:, :])[0]
y = x.clone().detach()
x = F.softmax(x, dim=2)[:, -1, :]
if flg:
xx = F.softmax(y, dim=2)
xx = xx.topk(1, dim=2)[1]
flg -= 1
decode_midi(
custom_lm.decode(xx[0, :, 0]).split(), file_path="final2.mid"
)
if False:
distrib = torch.distributions.categorical.Categorical(probs=x[None])
next_token = distrib.sample()
elif False:
next_token = x.topk(1)[1]
elif False:
next_token = temperature_sampling(x, 1.2, 5)
else:
next_token = topk_sampling(x, 5)
input_tensor = torch.cat([input_tensor[:, :], next_token], dim=1)
a.append(custom_lm.decode(next_token))
if ind % 100 == 0:
print("saving {}".format(ind))
with open("fl.txt", "w") as fl:
print(" ".join(a), file=fl)
except Exception as e:
print(e)
print("Abort length {}".format(len(a)))
try:
decode_midi(a, file_path="remi_midi/{}.mid".format(i))
except Exception:
utils.write_midi(a, None, "remi_cov_top1/{}.mid".format(i), None)
|
import sys
from xhtml2pdf import pisa
def convert_html_to_pdf(source_html, output_filename):
with open(output_filename, "w+b") as result_file:
pisa_status = pisa.CreatePDF(source_html, dest=result_file)
return pisa_status.err
if __name__ == "__main__":
if len(sys.argv) != 3:
print("re-render.py <html> <pdf>")
sys.exit(1)
pisa.showLogging()
with open(sys.argv[1]) as source_html:
convert_html_to_pdf(source_html.read(), sys.argv[2])
|
# coding: utf-8
"""
Qc API
Qc API # noqa: E501
The version of the OpenAPI document: 3.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from telestream_cloud_qc.configuration import Configuration
class Vidchecker8(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'audio_tests': 'AudioConfigs',
'file_tests': 'FileConfig',
'mxf_test': 'MxfTest',
'video_tests': 'VideoConfigs'
}
attribute_map = {
'audio_tests': 'audio_tests',
'file_tests': 'file_tests',
'mxf_test': 'mxf_test',
'video_tests': 'video_tests'
}
def __init__(self, audio_tests=None, file_tests=None, mxf_test=None, video_tests=None, local_vars_configuration=None): # noqa: E501
"""Vidchecker8 - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._audio_tests = None
self._file_tests = None
self._mxf_test = None
self._video_tests = None
self.discriminator = None
if audio_tests is not None:
self.audio_tests = audio_tests
if file_tests is not None:
self.file_tests = file_tests
if mxf_test is not None:
self.mxf_test = mxf_test
if video_tests is not None:
self.video_tests = video_tests
@property
def audio_tests(self):
"""Gets the audio_tests of this Vidchecker8. # noqa: E501
:return: The audio_tests of this Vidchecker8. # noqa: E501
:rtype: AudioConfigs
"""
return self._audio_tests
@audio_tests.setter
def audio_tests(self, audio_tests):
"""Sets the audio_tests of this Vidchecker8.
:param audio_tests: The audio_tests of this Vidchecker8. # noqa: E501
:type: AudioConfigs
"""
self._audio_tests = audio_tests
@property
def file_tests(self):
"""Gets the file_tests of this Vidchecker8. # noqa: E501
:return: The file_tests of this Vidchecker8. # noqa: E501
:rtype: FileConfig
"""
return self._file_tests
@file_tests.setter
def file_tests(self, file_tests):
"""Sets the file_tests of this Vidchecker8.
:param file_tests: The file_tests of this Vidchecker8. # noqa: E501
:type: FileConfig
"""
self._file_tests = file_tests
@property
def mxf_test(self):
"""Gets the mxf_test of this Vidchecker8. # noqa: E501
:return: The mxf_test of this Vidchecker8. # noqa: E501
:rtype: MxfTest
"""
return self._mxf_test
@mxf_test.setter
def mxf_test(self, mxf_test):
"""Sets the mxf_test of this Vidchecker8.
:param mxf_test: The mxf_test of this Vidchecker8. # noqa: E501
:type: MxfTest
"""
self._mxf_test = mxf_test
@property
def video_tests(self):
"""Gets the video_tests of this Vidchecker8. # noqa: E501
:return: The video_tests of this Vidchecker8. # noqa: E501
:rtype: VideoConfigs
"""
return self._video_tests
@video_tests.setter
def video_tests(self, video_tests):
"""Sets the video_tests of this Vidchecker8.
:param video_tests: The video_tests of this Vidchecker8. # noqa: E501
:type: VideoConfigs
"""
self._video_tests = video_tests
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Vidchecker8):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, Vidchecker8):
return True
return self.to_dict() != other.to_dict()
|
import logging
import time
from collections import deque
from itertools import count
import numpy as np
from qulab import BaseDriver
from .AlazarTechWrapper import (AlazarTechDigitizer, AutoDMA, DMABufferArray,
configure, initialize)
from .exception import AlazarTechError
log = logging.getLogger(__name__)
def getSamplesPerRecode(numOfPoints):
samplesPerRecord = (numOfPoints // 64) * 64
if samplesPerRecord < numOfPoints:
samplesPerRecord += 64
return samplesPerRecord
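# Illustrative examples (added for clarity): the helper rounds the requested number of points up to
# the next multiple of 64, presumably the record-length granularity required by the digitizer, e.g.
#   getSamplesPerRecode(1000) -> 1024
#   getSamplesPerRecode(1024) -> 1024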
def getExpArray(f_list, numOfPoints, weight=None, sampleRate=1e9):
e = []
t = np.arange(0, numOfPoints, 1) / sampleRate
if weight is None:
weight = np.ones(numOfPoints)
for f in f_list:
e.append(weight * np.exp(-1j * 2 * np.pi * f * t))
return np.asarray(e).T / numOfPoints
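# Illustrative note (added for clarity): the returned array has shape (numOfPoints, len(f_list));
# getData() below uses it as record[:, :n].dot(e) to demodulate each record at the listed
# frequencies, e.g. getExpArray([50e6], 1024).shape == (1024, 1).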
class Driver(BaseDriver):
def __init__(self, systemID=1, boardID=1, config=None, **kw):
super().__init__(**kw)
self.dig = AlazarTechDigitizer(systemID, boardID)
self.config = dict(n=1024,
sampleRate=1e9,
f_list=[50e6],
weight=None,
repeats=512,
maxlen=512,
ARange=1.0,
BRange=1.0,
trigLevel=0.0,
triggerDelay=0,
triggerTimeout=0,
recordsPerBuffer=64,
bufferCount=512)
self.config['e'] = getExpArray(self.config['f_list'], self.config['n'],
self.config['weight'],
self.config['sampleRate'])
self.config['samplesPerRecord'] = getSamplesPerRecode(self.config['n'])
if config is not None:
self.set(**config)
initialize(self.dig)
configure(self.dig, **self.config)
def set(self, **cmd):
if 'n' in cmd:
cmd['samplesPerRecord'] = getSamplesPerRecode(cmd['n'])
self.config.update(cmd)
if self.config['repeats'] % self.config['recordsPerBuffer'] != 0:
self.config['repeats'] = (
self.config['repeats'] // self.config['recordsPerBuffer'] +
1) * self.config['recordsPerBuffer']
if any(key in ['f_list', 'n', 'weight', 'sampleRate'] for key in cmd):
self.config['e'] = getExpArray(self.config['f_list'],
self.config['n'],
self.config['weight'],
self.config['sampleRate'])
if any(key in [
'ARange', 'BRange', 'trigLevel', 'triggerDelay',
'triggerTimeout'
] for key in cmd):
configure(self.dig, **self.config)
def setValue(self, name, value):
self.set(**{name: value})
def getValue(self, name):
return self.config.get(name, None)
def _aquireData(self, samplesPerRecord, repeats, buffers, recordsPerBuffer,
timeout):
with AutoDMA(self.dig,
samplesPerRecord,
repeats=repeats,
buffers=buffers,
recordsPerBuffer=recordsPerBuffer,
timeout=timeout) as h:
yield from h.read()
def getData(self, fft=False, avg=False, timeout=None):
samplesPerRecord = self.config['samplesPerRecord']
recordsPerBuffer = self.config['recordsPerBuffer']
repeats = self.config['repeats']
e = self.config['e']
n = e.shape[0]
maxlen = self.config['maxlen']
if timeout is None:
timeout = self.timeout
A, B = [], []
retry = 0
while retry < 2:
try:
                for index, (chA, chB) in zip(count(), self._aquireData(
samplesPerRecord,
repeats=repeats,
buffers=None,
recordsPerBuffer=recordsPerBuffer,
timeout=timeout)):
A_lst = chA.reshape((recordsPerBuffer, samplesPerRecord))
B_lst = chB.reshape((recordsPerBuffer, samplesPerRecord))
if fft:
A_lst = (A_lst[:, :n]).dot(e)
B_lst = (B_lst[:, :n]).dot(e)
A.append(A_lst)
B.append(B_lst)
if repeats == 0 and index*recordsPerBuffer >= maxlen:
break
A = np.asarray(A)
B = np.asarray(B)
A = A.flatten().reshape(A.shape[0]*A.shape[1], A.shape[2])
B = B.flatten().reshape(B.shape[0]*B.shape[1], B.shape[2])
if avg:
return A.mean(axis=0), B.mean(axis=0)
else:
return A, B
except AlazarTechError as err:
log.exception(err.msg)
if err.code == 518:
raise SystemExit(2)
else:
pass
time.sleep(0.1)
retry += 1
else:
raise SystemExit(1)
def getIQ(self, avg=False, timeout=None):
return self.getData(True, avg, timeout=timeout)
def getTraces(self, avg=True, timeout=None):
return self.getData(False, avg, timeout=timeout)
|
# Write a program that reads a person's sex, accepting only the values 'M' and 'F'.
# If the value is invalid, keep asking for input until a valid value is entered.
sexo = str(input('Enter sex [M/F]: ')).strip().upper()[0]
while sexo not in 'FfMm':
    sexo = str(input('Invalid input. Please enter your sex [M/F]: ')).strip().upper()[0]
if sexo == 'M':
    print('Sex is Male')
else:
    print('Sex is Female') |
"""satellite-3d setup."""
from setuptools import setup, find_packages
with open("satellite_3d/__init__.py") as f:
for line in f:
if line.find("__version__") >= 0:
version = line.split("=")[1].strip()
version = version.strip('"')
version = version.strip("'")
break
# Runtime requirements.
inst_reqs = [
"rasterio[s3]",
"rio-tiler",
"rio_tiler_mvt",
"lambda_proxy"
]
extra_reqs = {}
setup(
name="satellite-3d",
version=version,
description=u"Skeleton of a python AWS Lambda function",
python_requires=">=3",
classifiers=[
"Intended Audience :: Information Technology",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
keywords="AWS-Lambda Python",
author=u"",
author_email="",
url="",
license="BSD",
packages=find_packages(exclude=["ez_setup", "examples", "tests"]),
include_package_data=True,
zip_safe=False,
install_requires=inst_reqs,
extras_require=extra_reqs,
)
|
from setuptools import setup, find_packages
setup(
name='PYLin',
version='0.1',
packages=find_packages(exclude=['tests*']),
license='MIT',
    description='YLin-Python-Package',
long_description=open('README.txt').read(),
install_requires=['numpy'],
url='https://github.com/BillMills/python-package-example',
author='Yao Lin',
author_email='[email protected]'
)
|
def Boxing(box_number, box_size, object_volume_array, base_index):
a_index = base_index
object_counter = 0
for b_n in range(box_number): # filling each box
total_objects_volume = 0
while total_objects_volume <= box_size and a_index < len(object_volume_array):
# until box is full or no object left in array
total_objects_volume += object_volume_array[a_index]
a_index += 1
object_counter += 1
if total_objects_volume > box_size: # in case counter counts one extra object
a_index -= 1
object_counter -= 1
return a_index, object_counter # returns index of last object in boxes and numbers of objects
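# Worked example (illustrative): Boxing(2, 10, [3, 4, 5, 6], 0)
#   box 1 packs objects 3 and 4 (total volume 7), box 2 packs object 5 (total volume 5),
#   so the call returns (3, 3): three objects were packed and a_index points just past the last packed object.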
if __name__ == '__main__':
while 1: # get and check inputs : they should be positive integers
try:
n, m, k = map(int, input().split())
if n < 0 or m < 0 or k < 0:
print("wrong input! input is not a positive integer! please try again:")
continue
a = input().split()
try:
a = [int(x) for x in a]
negative = 0
for x in a:
if x < 0:
negative += 1
if negative > 0:
print("wrong input! input is not a positive integer! please try again:")
continue
except ValueError:
print("wrong input! input is not a integer! please try again:")
continue
except ValueError:
print("wrong input! input is not a integer! please try again:")
continue
break
if len(a) != n: # in case input array size is wrong
print("wrong input! array size is wrong!")
else:
Base_index = 0
last_index_in_box = 0
object_count = 0
while last_index_in_box < len(a):
# until last object in boxes is the last object in array
last_index_in_box, object_count = Boxing(m, k, a, Base_index)
Base_index += 1
print(object_count)
|
from pathlib import Path
import pathlib
import cv2
import numpy as np
import math
from util import _get_furniture_info
def place_multi_furniture(furniture_obj_dir="./data/basic_furniture/", wall_objs_dir="./data/mid/panel_384478_洋室/", room_scale_factor=1):
"""compute each wall's smoothness"""
if not isinstance(wall_objs_dir, pathlib.PosixPath):
wall_objs_dir = Path(wall_objs_dir)
image_files = list(wall_objs_dir.glob("*.jpg"))
wall2smoothness = {}
for image_file in image_files:
if "wall" in str(image_file):
ima = cv2.imread(str(image_file))
gray_img = cv2.cvtColor(ima, cv2.COLOR_BGR2GRAY)
wall2smoothness[image_file.stem] = cv2.Laplacian(gray_img, cv2.CV_64F).var()
wall2smoothness = sorted(wall2smoothness.items(), key=lambda x: x[1])
walls = []
for wall_name, smoothness in wall2smoothness:
current_wall_obj = wall_objs_dir / (wall_name+".obj")
wall_coords, wall_width, vn, vn_axis, vn_direnction = _cal_wall_width(current_wall_obj, room_scale_factor)
walls.append({"wall_name":wall_name, "smoothness":smoothness, "wall_coords":wall_coords, "wall_width":wall_width, "vn":vn, "vn_axis":vn_axis, "vn_direnction":vn_direnction})
for wall in walls:
print(wall)
if not isinstance(furniture_obj_dir, pathlib.PosixPath):
furniture_obj_dir = Path(furniture_obj_dir)
furniture_objs = list(furniture_obj_dir.glob("*.obj"))
"""sort the furniture objs by its size"""
furniture_obj_volume = [[furniture_obj] + list(_get_furniture_info(furniture_obj)) for furniture_obj in furniture_objs]
furniture_obj_volume.sort(key=lambda x:x[-1], reverse=True)
furniture_obj_file2transform_info = {}
for furniture_obj, furniture_axis2width, volume in furniture_obj_volume:
print()
print(furniture_obj)
print(furniture_axis2width)
if furniture_axis2width["y"] < 0.05:
location_slide = np.zeros(3)
location_slide[0] = -furniture_axis2width["x"]/2
location_slide[1] = -furniture_axis2width["z"]/2
furniture_obj_file2transform_info[furniture_obj] = {"location":location_slide, "rotation":0}
continue
for wall in walls:
# check if the wall is wider than the width of the furniture
if wall["wall_width"] > furniture_axis2width["x"]:
wall_width_margin = wall["wall_width"] - furniture_axis2width["x"]
rotation_angle = np.arctan2(wall["vn"][1], wall["vn"][0]) - np.arctan2(1, 0)
# print((int(vn[0]+math.copysign(0.5,vn[0])), int(vn[1]+math.copysign(0.5,vn[1]))))
wall_vn_rounded_X = int(wall["vn"][0]+math.copysign(0.5,wall["vn"][0])) # round the wall's normal vector along X-axis
wall_vn_rounded_Y = int(wall["vn"][1]+math.copysign(0.5,wall["vn"][1])) # round the wall's normal vector along Y-axis
# corner = nv2corner_location_func[(int(wall["vn"][0]+math.copysign(0.5,wall["vn"][0])), int(wall["vn"][1]+math.copysign(0.5,wall["vn"][1])))](wall["wall_coords"])
corner = nv2corner_location_func[(wall_vn_rounded_X, wall_vn_rounded_Y)](wall["wall_coords"])
location_slide = np.zeros(3)
location_slide[0] = corner[0]
location_slide[1] = corner[1]
print(wall["wall_width"])
wall["wall_width"] -= (furniture_axis2width["x"] + 0.1)
print(wall["wall_coords"])
if wall_vn_rounded_X==0 and wall_vn_rounded_Y==1: wall["wall_coords"][0,0] += (furniture_axis2width["x"] + 0.1)
elif wall_vn_rounded_X==0 and wall_vn_rounded_Y==-1: wall["wall_coords"][0,0] -= (furniture_axis2width["x"] + 0.1)
elif wall_vn_rounded_X==1 and wall_vn_rounded_Y==0: wall["wall_coords"][0,1] -= (furniture_axis2width["x"] + 0.1)
elif wall_vn_rounded_X==-1 and wall_vn_rounded_Y==0: wall["wall_coords"][0,1] += (furniture_axis2width["x"] + 0.1)
print(wall["wall_width"])
print(wall["wall_coords"])
# print(wall_coords)
# print(rotation_angle / 3.14 * 180)
# print(corner)
# print(current_wall_obj)
# return location_slide, rotation_angle
furniture_obj_file2transform_info[furniture_obj] = {"location":location_slide, "rotation":rotation_angle}
break
return furniture_obj_file2transform_info
def _cal_wall_width(obj_filepath, room_scale_factor):
fin = open(str(obj_filepath), "r", encoding="utf-8")
lines = fin.readlines()
coords = np.zeros((2,2))
i = 0
for line in lines:
if len(line.split()) == 0:
continue
if i <= 1 and line.split()[0] == "v" and float(line.split()[3]) == 0: # refer only coordinates on the floor
coords[i,:] = np.array([float(line.split()[1]), float(line.split()[2])])
i += 1
if line.split()[0] == "vn":
vn = np.array([float(vn) for vn in line.split()[1:]])
vn_axis = np.argmin(1.0 - np.abs(vn))
vn_direnction = 1.0 if vn[vn_axis] > 0 else -1.0
wall_width = np.max([np.abs(coords[0,0] - coords[1,0]), np.abs(coords[0,1] - coords[1,1])])
new_coords = np.zeros((2,2))
if vn_axis == 0 and vn_direnction == 1: new_coords[0],new_coords[1] = coords[np.argmax(coords[:,1])], coords[np.argmin(coords[:,1])] # wall facing +x
elif vn_axis == 0 and vn_direnction == -1: new_coords[0],new_coords[1] = coords[np.argmin(coords[:,1])], coords[np.argmax(coords[:,1])] # wall facing -x
elif vn_axis != 0 and vn_direnction == 1: new_coords[0],new_coords[1] = coords[np.argmin(coords[:,0])], coords[np.argmax(coords[:,0])] # wall facing +y
elif vn_axis != 0 and vn_direnction == -1: new_coords[0],new_coords[1] = coords[np.argmax(coords[:,0])], coords[np.argmin(coords[:,0])] # wall facing -y
return new_coords*room_scale_factor, wall_width*room_scale_factor, vn, "xyz"[vn_axis], vn_direnction
# def _get_furniture_info(furniture_obj_filepath):
# """obj file parser
# input: path to a furniture_obj file (furniture size is written before hand during the preprocess)
# output: axis2width: dict ex) {"x": 0.5 , "y": 0.5, "z":0.5}, volume: float
# """
# with open(str(furniture_obj_filepath), "r", encoding="utf-8") as f:
# lines = f.readlines()
# axis2width = {}
# volume = 1
# for line in lines:
# if line.split()[0] == "###":
# axis2width[line.split()[2]] = float(line.split()[3])
# volume *= float(line.split()[3])
# return axis2width, volume
nv2corner_location_func = {
(0,1): lambda wall_coords: [min(wall_coords[:, 0])+0.1, wall_coords[np.argmin(wall_coords[:, 0]), 1]+0.1], # the wall is facing +y direction, return left bottom corner
(0,-1): lambda wall_coords: [max(wall_coords[:, 0])-0.1, wall_coords[np.argmax(wall_coords[:, 0]), 1]-0.1], # the wall is facing -y direction, return right top corner
(1,0): lambda wall_coords: [wall_coords[np.argmax(wall_coords[:, 1]), 0]+0.1, max(wall_coords[:, 1])-0.1], # the wall is facing +x direction, return right top corner
(-1,0): lambda wall_coords: [wall_coords[np.argmin(wall_coords[:, 1]), 0]-0.1, min(wall_coords[:, 1])+0.1], # the wall is facing -x direction, return left bottom corner
}
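# Illustrative example: for a wall facing the +y direction with floor coordinates [[0, 0], [4, 0]],
# nv2corner_location_func[(0, 1)] returns [0.1, 0.1] -- the left-bottom corner nudged 0.1 units
# inward, matching the 0.1 margin used when shrinking wall_width above.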
def place_multi_furniture_scoring(furniture_obj_dir="./data/basic_furniture/", wall_objs_dir="./data/mid/panel_384478_洋室/", room_scale_factor=1):
    """Placeholder for a scoring-based placement variant; no implementation is provided in this file."""
    pass
def place_one_furniture(furniture_obj="./data/Nitori_obj/デスク 6200227_edit.obj", wall_objs_dir="./data/mid/panel_384478_洋室/", room_scale_factor=1.3):
furniture_axis2width, volume = _get_furniture_info(furniture_obj)
if not isinstance(wall_objs_dir, pathlib.PosixPath):
wall_objs_dir = Path(wall_objs_dir)
image_files = list(wall_objs_dir.glob("*.jpg"))
# print(image_files)
wall2smoothness = {}
for image_file in image_files:
if "wall" in str(image_file):
ima = cv2.imread(str(image_file))
gray_img = cv2.cvtColor(ima, cv2.COLOR_BGR2GRAY)
wall2smoothness[image_file.stem] = cv2.Laplacian(gray_img, cv2.CV_64F).var()
wall2smoothness = sorted(wall2smoothness.items(), key=lambda x: x[1])
for wall_name, smoothness in wall2smoothness:
current_wall_obj = wall_objs_dir / (wall_name+".obj")
wall_coords, wall_width, vn, vn_axis, vn_direnction = _cal_wall_width(current_wall_obj, room_scale_factor)
# check if the wall is wider than the width of the furniture
if wall_width > furniture_axis2width["x"]:
wall_width_margin = wall_width - furniture_axis2width["x"]
rotation_angle = np.arctan2(vn[1], vn[0]) - np.arctan2(1, 0)
# print((int(vn[0]+math.copysign(0.5,vn[0])), int(vn[1]+math.copysign(0.5,vn[1]))))
corner = nv2corner_location_func[(int(vn[0]+math.copysign(0.5,vn[0])), int(vn[1]+math.copysign(0.5,vn[1])))](wall_coords)
location_slide = np.zeros(3)
location_slide[0] = corner[0]
location_slide[1] = corner[1]
print(wall_coords)
print(rotation_angle / 3.14 * 180)
print(corner)
print(current_wall_obj)
return location_slide, rotation_angle
return None
if __name__ == '__main__':
res = place_multi_furniture()
for k,v in res.items():
print(k,v)
|
"""
WSGI config for django_practice project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
import sys
import logging
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_practice.settings")
#sys.path.append('/home/ubuntu/projects/django_practice')
#sys.path.append('/home/ubuntu/projects/django_practice/django_practice')
import pymysql
pymysql.install_as_MySQLdb()
application = get_wsgi_application()
def applicationxx(environ, start_response):
status = '200 OK'
output = u'mod_wsgi.application_group = %s' % sys.path
#output = u'mod_wsgi.application_group = %s' % repr(environ['mod_wsgi.application_group'])
response_headers = [('Content-type', 'text/plain'),
('Content-Length', str(len(output)))]
start_response(status, response_headers)
return [output.encode('UTF-8')]
|
#!/usr/bin/env python3
from __future__ import annotations
import queue
import time
import pandas as pd
import mecademicpy.mx_robot_def as mx_def
from .robot_trajectory_files import RobotTrajectories
from pathlib import PurePath
# 2nd values of this dict are taken from controller.cpp HandleSetRealTimeMonitoring() -> ParseStatusCodeString()
# dict and put in UpperCamelCase for convenience (all column names in logged dataframe will be in the format of these
# values)
robot_rt_data_to_real_time_monit = {
'rt_target_joint_pos': (mx_def.MX_ST_RT_TARGET_JOINT_POS, 'TargetJointPos'),
'rt_target_cart_pos': (mx_def.MX_ST_RT_TARGET_CART_POS, 'TargetCartPos'),
'rt_target_joint_vel': (mx_def.MX_ST_RT_TARGET_JOINT_VEL, 'TargetJointVel'),
'rt_target_joint_torq': (mx_def.MX_ST_RT_TARGET_JOINT_TORQ, 'TargetJointTorq'), # Unused in RobotState right now
'rt_target_cart_vel': (mx_def.MX_ST_RT_TARGET_CART_VEL, 'TargetCartVel'),
'rt_target_conf': (mx_def.MX_ST_RT_TARGET_CONF, 'TargetConf'),
'rt_target_conf_turn': (mx_def.MX_ST_RT_TARGET_CONF_TURN, 'TargetConfTurn'),
'rt_joint_pos': (mx_def.MX_ST_RT_JOINT_POS, 'JointPos'),
'rt_cart_pos': (mx_def.MX_ST_RT_CART_POS, 'CartPos'),
'rt_joint_vel': (mx_def.MX_ST_RT_JOINT_VEL, 'JointVel'),
'rt_joint_torq': (mx_def.MX_ST_RT_JOINT_TORQ, 'JointTorq'),
'rt_cart_vel': (mx_def.MX_ST_RT_CART_VEL, 'CartVel'),
'rt_conf': (mx_def.MX_ST_RT_CONF, 'Conf'),
'rt_conf_turn': (mx_def.MX_ST_RT_CONF_TURN, 'ConfTurn'),
'rt_accelerometer': (mx_def.MX_ST_RT_ACCELEROMETER, 'Accel'),
'rt_gripper_force': (mx_def.MX_ST_RT_GRIPPER_FORCE, 'GripperForce'), # Unused in RobotState right now
'rt_wrf': (mx_def.MX_ST_RT_WRF, 'Wrf'),
'rt_trf': (mx_def.MX_ST_RT_TRF, 'Trf'),
'rt_checkpoint': (mx_def.MX_ST_RT_CHECKPOINT, 'Checkpoint'),
'rt_external_tool_status': (mx_def.MX_ST_RT_EXTTOOL_STATUS, 'ExtToolStatus'),
'rt_valve_state': (mx_def.MX_ST_RT_VALVE_STATE, 'ValveState'),
'rt_gripper_state': (mx_def.MX_ST_RT_GRIPPER_STATE, 'GripperState'),
'': (mx_def.MX_ST_RT_CYCLE_END, 'CycleEnd') # Should not be used, handled by Robot class when it uses the logger
}
class _RobotTrajectoryLogger:
"""Class to handle logging robot state to file.
Attributes
----------
file_name : str
Name of file produced by logger
fields : dict of strings
Fields to be logged. Key: attribute name in 'RobotState'. Value: Equivalent UpperCamelCase string or enum value
used in 'SetRealTimeMonitoring'
command_queue : queue
Queue to store sent commands.
element_width : int
Each numerical element will have this width.
timestamp_element_width: int
Each timestamp will have this width
done_logging: bool
        'write_fields' won't log more robot states when this is True. Set to True by 'end_log'
logging_commands: bool
Indicate if sent commands are being logged
expanded_fields:
Elements of 'fields', but expanded to have a name for each sub-element of corresponding robot states
data_dict:
Keys: timestamps. Values: robot state stored at moment corresponding to timestamp
robot_trajectories: RobotTrajectories object
Contains robot states logged data and information about the robot used during logging
"""
def __init__(self,
robot_info,
robot_rt_data,
fields: list[str] = None,
file_name: str = None,
file_path: str = None,
record_time: bool = True,
monitoring_interval: float = None):
"""Initialize class.
Parameters
----------
robot_info : RobotInfo
Contains robot information.
fields : list of strings
List of fields to be logged.
robot_rt_data : RobotRtData object
Contains state of robot.
file_name: string or None
Log file name
If None, file name will be built with date/time and robot information (robot type, serial, version).
file_path : string or None
Path to save the zipped file that contains logged data + robot info in, respectively, csv and json file.
If not provided, file will be saved in working directory.
record_time : bool
If true, current time will also be recorded in the text file. (Time is also available in filename.)
monitoring_interval: float
Indicates rate at which state from robot is received on monitor port. Unit: seconds
"""
current_date_time = time.strftime('%Y-%m-%d-%H-%M-%S')
serial_number_or_blank = ('_serial_' + robot_info.serial) if robot_info.serial else ""
# Add unique name to file path.
if file_name:
self.file_name = file_name
else:
self.file_name = (f'{robot_info.model}_R{robot_info.revision}_'
f'v{robot_info.version.short_version}_'
f'log_{current_date_time}{serial_number_or_blank}')
self.file_path = file_path
# If fields argument is None, log all compatible fields.
self.fields = dict()
if fields is None:
if robot_info.rt_message_capable:
for attr in vars(robot_rt_data):
if attr.startswith('rt_'):
self.fields[attr] = robot_rt_data_to_real_time_monit[attr][1]
else:
# Only the following fields are available if platform is not rt monitoring capable.
self.fields = {
'rt_target_joint_pos': robot_rt_data_to_real_time_monit['rt_target_joint_pos'][1],
'rt_target_cart_pos': robot_rt_data_to_real_time_monit['rt_target_cart_pos'][1]
}
else:
for field in fields:
for key, val in robot_rt_data_to_real_time_monit.items():
if (isinstance(field, str) and field.lower() == val[1].lower()) or field == val[0]:
self.fields[key] = val[1]
break
# Set attributes.
self.command_queue = queue.Queue()
self.element_width = 10
self.timestamp_element_width = 15
self.done_logging = False
self.logging_commands = True
self.expanded_fields = []
self.data_dict = dict() # Key: timestamp, Value: List of all corresponding robot_rt_data values
self.robot_trajectories = RobotTrajectories()
# Write robot information.
# Maybe robot information could be stored as a RobotInfo object in robot_trajectories?
self.robot_trajectories.robot_context.robot_information.append(dict())
for attr in ['model', 'revision', 'version']:
self.robot_trajectories.robot_context.robot_information[0][attr] = f'{getattr(robot_info, attr)}'
if robot_info.serial is not None:
self.robot_trajectories.robot_context.robot_information[0]['serial_number'] = f'{robot_info.serial}'
if record_time:
self.robot_trajectories.robot_context.robot_information[0]['time_recorded'] = f'{current_date_time}'
if monitoring_interval:
self.robot_trajectories.robot_context.robot_information[0]['monitoring_interval'] = f'{monitoring_interval}'
# Write headers for logged data
self.write_field_and_element_headers(robot_info)
def get_timestamp_data(self, robot_rt_data, field):
""" Return timestamp data object associated with the specific field (or None).
Parameters
----------
robot_rt_data : RobotRtData object
Current state of robot to get timestamp_data from
field : String
Name of the field to get timestamp_data for.
"""
if field == 'rt_accelerometer':
index = 5 # For now, only index 5 supported (joint 5's accelerometer)
accel_dict = getattr(robot_rt_data, field)
if index not in accel_dict:
return None
field_attr = accel_dict[index]
else:
field_attr = getattr(robot_rt_data, field)
return field_attr
def write_field_and_element_headers(self, robot_info):
"""Write the full field name and element name in each column.
Parameters
----------
robot_info : RobotInfo
Information about the robot, such as model name and number of joints.
"""
def assemble_with_prefix(field, names):
return [field + '_' + str(x) for x in names]
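        # Illustrative example: assemble_with_prefix('TargetJointPos', range(2))
        # returns ['TargetJointPos_0', 'TargetJointPos_1'].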
# Write full name for each field.
for key, value in self.fields.items():
if (key.endswith('joint_pos') or key.endswith('joint_vel') or key.endswith('joint_torq')):
# Write field name followed by joint number. For example: "TargetJointPos_1".
self.expanded_fields.extend(assemble_with_prefix(value, range(robot_info.num_joints)))
elif key.endswith('cart_pos') or key.endswith('wrf') or key.endswith('trf'):
self.expanded_fields.extend(assemble_with_prefix(value, ['X', 'Y', 'Z', 'Alpha', 'Beta', 'Gamma']))
elif key.endswith('cart_vel'):
self.expanded_fields.extend(
assemble_with_prefix(value, ['X_Dot', 'Y_Dot', 'Z_Dot', 'Omega_X', 'Omega_Y', 'Omega_Z']))
elif key.endswith('rt_accelerometer'):
self.expanded_fields.extend(assemble_with_prefix(value, ['X', 'Y', 'Z']))
elif key.endswith('conf_turn'):
self.expanded_fields.append(value)
elif key.endswith('conf'):
self.expanded_fields.extend(assemble_with_prefix(value, ['Shoulder', 'Elbow', 'Wrist']))
elif key.endswith('checkpoint'):
self.expanded_fields.append(value)
elif key.endswith('rt_external_tool_status'):
self.expanded_fields.extend(assemble_with_prefix(value, ['model', 'present', 'homed', 'error']))
elif key.endswith('rt_valve_state'):
self.expanded_fields.extend(assemble_with_prefix(value, ['holding', 'limits']))
elif key.endswith('rt_gripper_state'):
self.expanded_fields.extend(assemble_with_prefix(value, ['valve1', 'valve2']))
else:
raise ValueError(f'Missing formatting for field: {key}')
def write_fields(self, timestamp, robot_rt_data):
"""Write fields to file.
Parameters
----------
timestamp : numeric
The timestamp of the current data.
robot_rt_data : RobotRtData object
This object contains the current robot state.
"""
if self.done_logging:
return
# First write the timestamp
formatted_tim = f'{timestamp:{self.timestamp_element_width}}'
self.data_dict[formatted_tim] = []
for field in self.fields:
# For each field, write each value with appropriate spacing.
ts_data = self.get_timestamp_data(robot_rt_data, field)
if ts_data is None:
continue
self.data_dict[formatted_tim].extend([f'{x:{self.element_width}}' for x in ts_data.data])
def stop_logging_commands(self):
"""Stops saving sent commands to log"""
self.logging_commands = False
def end_log(self, ignore_checkpoints=True):
""" Write all accumulated sent commands and close file.
Return
--------
string
Filename where logged info can be found
"""
self.done_logging = True
self.robot_trajectories.robot_df_hist.output_dfs.append(
pd.DataFrame.from_dict(self.data_dict, orient='index', columns=self.expanded_fields))
# Write all sent commands.
while not self.command_queue.empty():
command = self.command_queue.get()
if ignore_checkpoints and command.startswith('SetCheckpoint'):
continue
self.robot_trajectories.robot_context.sent_commands.append(command)
self.robot_trajectories.to_file(self.file_name, file_path=self.file_path)
if self.file_path:
return PurePath.joinpath(PurePath(self.file_path), self.file_name)
else:
return PurePath(self.file_name)
|
# For reference, see <https://github.com/quinlan-lab/hts-python/blob/master/hts/htsffi.py>, even though it uses deprecated .verify()
import cffi
import os.path
cxx_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'x.cpp')
with open(cxx_path) as f:
src = f.read()
ffibuilder = cffi.FFI()
ffibuilder.set_source('pheweb.load.cffi._x',
src,
source_extension='.cpp',
extra_compile_args=['--std=c++11'],
libraries=['z'], # needed on Linux but not macOS
)
ffibuilder.cdef('''
int cffi_make_matrix(char *sites_filepath, char *augmented_pheno_glob, char *matrix_filepath);
int cffi_bgzip_file(char *in_filepath, char *out_filepath, char *prepend_bytes);
''')
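# Note (assumption): this module only declares the builder. In a typical out-of-line cffi setup the
# extension is compiled either through setuptools (a cffi_modules entry pointing at this ffibuilder)
# or by running the builder directly, e.g.:
#
#   if __name__ == '__main__':
#       ffibuilder.compile(verbose=True)
#
# How this project actually invokes the build is not shown in this file.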
|
import textakel
from flask import Flask
from flask import jsonify
from flask import render_template
app = Flask(__name__)
@app.route("/")
def index():
return render_template("index.html")
@app.route("/api/v1/<string:function_name>/<path:s>")
def get_text(function_name, s):
s = textakel.takel(function_name, s)
response = {"text": s}
return jsonify(response)
@app.route("/api")
def get_functions():
functions_names = textakel.get_functions()
response = {"functions": functions_names}
return jsonify(response)
if __name__ == "__main__":
app.run(debug=True)
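# Illustrative requests (the available function names come from textakel.get_functions()):
#   GET /api                          -> {"functions": [...]}
#   GET /api/v1/<function_name>/<s>   -> {"text": <transformed s>}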
|
import math
from server.federation import alignment
from server.eAd import paillier
import pandas as pd
import numpy as np
import time
# import rsa
global theta_a,ra,alpha
# initial value of the weight parameters
theta_a = None
rsa_len = 1112
ppk_a, psk_a = paillier.gen_key()
scal = 1000
alpha = 0.1
def cal_ua(x,theta):
temp1 = np.dot(theta.T, x)
return int(temp1)
def bytes2int(raw_bytes: bytes) -> int:
r"""Converts a list of bytes or an 8-bit string to an integer.
When using unicode strings, encode it to some encoding like UTF8 first.
>>> (((128 * 256) + 64) * 256) + 15
8405007
>>> bytes2int(b'\x80@\x0f')
8405007
"""
return int.from_bytes(raw_bytes, 'big', signed=False)
def int2bytes(number: int, fill_size: int = 0) -> bytes:
"""
Convert an unsigned integer to bytes (big-endian)::
Does not preserve leading zeros if you don't specify a fill size.
:param number:
Integer value
:param fill_size:
If the optional fill size is given the length of the resulting
byte string is expected to be the fill size and will be padded
with prefix zero bytes to satisfy that length.
:returns:
Raw bytes (base-256 representation).
:raises:
``OverflowError`` when fill_size is given and the number takes up more
bytes than fit into the block. This requires the ``overflow``
argument to this function to be set to ``False`` otherwise, no
error will be raised.
"""
if number < 0:
raise ValueError("Number must be an unsigned integer: %d" % number)
bytes_required = max(1, math.ceil(number.bit_length() / 8))
if fill_size > 0:
return number.to_bytes(fill_size, 'big')
return number.to_bytes(bytes_required, 'big')
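# Illustrative examples, mirroring the bytes2int doctest above:
#   int2bytes(8405007)              -> b'\x80@\x0f'
#   int2bytes(8405007, fill_size=4) -> b'\x00\x80@\x0f'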
def generate_random(n):
return np.trunc(np.random.rand(n)*(scal**3)*10+(scal**3))# todo
def lr1(ubb_list,ppk_b):
lamb = 0.5
x_a = alignment.x
    n = alignment.x.shape[1]  # number of features
global theta_a
if theta_a is None:
theta_a = pd.Series(np.ones(n))
theta = theta_a.apply(lambda x: int(x * scal))
uaa_list = []
# gradA_pb = theta.apply(lambda x : int(paillier.encipher((-lamb * 2 * (scal ** 2) * x),ppk_b)))
gradA_pb = pd.Series(np.zeros(n)).apply(lambda x: int(paillier.encipher((-lamb * 2 * (scal ** 2) * x), ppk_b)))
print(x_a.shape[0])
time_start = time.time()
for i in range(x_a.shape[0]):
        # compute Uaa (ua encrypted with A's public key)
xa = x_a.iloc[i].apply(lambda x: int(x*scal))
ua = cal_ua(xa,theta)
uaa = int(paillier.encipher(ua,ppk_a))
uaa_list.append(uaa)
        # compute Uab (ua encrypted with B's public key)
uab = int(paillier.encipher(ua, ppk_b))
        # accumulate the encrypted gradient (Garb)
ubb = ubb_list[i]
u_pb = paillier.plus(uab,ubb,ppk_b)
u_pb_i = [paillier.multiply(u_pb,x,ppk_b) for x in xa]
for num,ux in enumerate(u_pb_i):
gradA_pb[num] = paillier.plus(ux,gradA_pb[num],ppk_b)
if i % 1000 == 0:
print('%.f%%' % (i/x_a.shape[0]*100))
global ra
ra = generate_random(n)
ra_pb = [int(paillier.encipher(int(r),ppk_b)) for r in ra]
for num,r in enumerate(ra_pb):
gradA_pb[num] = paillier.plus(r, gradA_pb[num], ppk_b)
gradA_pb = list(gradA_pb)
    print('Time to compute uaa and Garb:', time.time()-time_start)
return [gradA_pb, uaa_list,ppk_a]
def lr2(gradB_pa, gradA_r):
    # decrypt for B
time_start = time.time()
gradB_r = []
for grad in gradB_pa:
gradB_r.append(paillier.decipher(grad, ppk_a, psk_a))
    print('Time to decrypt Gar:', time.time() - time_start)
    x_a = alignment.x
    # remove the random mask from gradA
    gradA = gradA_r - ra
    grad = gradA / 4 / (scal ** 3) / x_a.shape[0]
    print('current gradient:', grad)
    global theta_a,alpha
    theta_a = theta_a - alpha * grad
    # alpha *= 0.98
    print('learning rate:', alpha)
print('theta_a',theta_a)
return gradB_r
def sigmoid(x):
return 1/(1 + np.exp(-x))
def save(id):
model = theta_a
model.to_csv(r'./server/data_storage/model_%s.csv' % id)
def pre(ub_list):
x_a = alignment.x
global theta_a
ua_list = []
for i in range(x_a.shape[0]):
        # compute ua
xa = x_a.iloc[i]
ua = np.dot(theta_a.T, xa)
ua_list.append(ua)
u_list = np.array(ua_list) + np.array(ub_list)
pred = pd.Series(u_list).apply(lambda x: sigmoid(x))
return pred.tolist()
|
from wrangle.wrangle import wrangle_function
t_test, p_value, df = wrangle_function('sleepdata')
print(f'T-test: {t_test} \nP-value: {p_value} \nDF-head: {df.head(3)}')
|
# =============================================================================
# PROJECT CHRONO - http://projectchrono.org
#
# Copyright (c) 2014 projectchrono.org
# All rights reserved.
#
# Use of this source code is governed by a BSD-style license that can be found
# in the LICENSE file at the top level of the distribution and at
# http://projectchrono.org/license-chrono.txt.
#
# =============================================================================
# Authors: Radu Serban
# =============================================================================
#
# Demonstration of using a RigidTerrain constructed from different patches.
#
# The vehicle reference frame has Z up, X towards the front of the vehicle, and
# Y pointing to the left.
#
# =============================================================================
import pychrono as chrono
import pychrono.vehicle as veh
import pychrono.irrlicht as irr
# =============================================================================
def main():
#print("Copyright (c) 2017 projectchrono.org\nChrono version: ", CHRONO_VERSION , "\n\n")
# Create the HMMWV vehicle, set parameters, and initialize
my_hmmwv = veh.HMMWV_Full()
my_hmmwv.SetContactMethod(chrono.ChContactMethod_NSC)
my_hmmwv.SetChassisFixed(False);
my_hmmwv.SetInitPosition(chrono.ChCoordsysD(chrono.ChVectorD(-10, -2, 0.6), chrono.ChQuaternionD(1, 0, 0, 0)))
my_hmmwv.SetPowertrainType(veh.PowertrainModelType_SIMPLE)
my_hmmwv.SetDriveType(veh.DrivelineTypeWV_AWD)
my_hmmwv.SetTireType(veh.TireModelType_TMEASY)
my_hmmwv.SetTireStepSize(tire_step_size)
my_hmmwv.Initialize()
my_hmmwv.SetChassisVisualizationType(veh.VisualizationType_NONE)
my_hmmwv.SetSuspensionVisualizationType(veh.VisualizationType_PRIMITIVES)
my_hmmwv.SetSteeringVisualizationType(veh.VisualizationType_PRIMITIVES)
my_hmmwv.SetWheelVisualizationType(veh.VisualizationType_MESH)
my_hmmwv.SetTireVisualizationType(veh.VisualizationType_MESH)
# Create the terrain with multiple patches
terrain = veh.RigidTerrain(my_hmmwv.GetSystem())
patch1_mat = chrono.ChMaterialSurfaceNSC()
patch1_mat.SetFriction(0.9)
patch1_mat.SetRestitution(0.01)
patch1 = terrain.AddPatch(patch1_mat, chrono.ChVectorD(-16, 0, 0), chrono.ChVectorD(0, 0, 1), 32, 20)
patch1.SetColor(chrono.ChColor(0.8, 0.8, 0.5))
patch1.SetTexture(veh.GetDataFile("terrain/textures/tile4.jpg"), 20, 20)
patch2_mat = chrono.ChMaterialSurfaceNSC()
patch2_mat.SetFriction(0.9)
patch2_mat.SetRestitution(0.01)
    patch2 = terrain.AddPatch(patch2_mat, chrono.ChVectorD(16, 0, 0.15), chrono.ChVectorD(0, 0, 1), 32, 30)
patch2.SetColor(chrono.ChColor(1.0, 0.5, 0.5))
patch2.SetTexture(veh.GetDataFile("terrain/textures/concrete.jpg"), 20, 20)
patch3_mat = chrono.ChMaterialSurfaceNSC()
patch3_mat.SetFriction(0.9)
patch3_mat.SetRestitution(0.01)
patch3 = terrain.AddPatch(patch3_mat, chrono.ChCoordsysD(chrono.ChVectorD(0, -42, 0), chrono.QUNIT),
veh.GetDataFile("terrain/meshes/bump.obj"), "hills_mesh")
patch3.SetColor(chrono.ChColor(0.5, 0.5, 0.8))
patch3.SetTexture(veh.GetDataFile("terrain/textures/dirt.jpg"), 6.0, 6.0)
patch4_mat = chrono.ChMaterialSurfaceNSC()
patch4_mat.SetFriction(0.9)
patch4_mat.SetRestitution(0.01)
patch4 = terrain.AddPatch(patch4_mat, chrono.ChCoordsysD(chrono.ChVectorD(0, 42, 0), chrono.QUNIT),
veh.GetDataFile("terrain/height_maps/bump64.bmp"), "field_mesh", 64.0, 64.0, 0.0, 3.0)
patch4.SetTexture(veh.GetDataFile("terrain/textures/grass.jpg"), 6.0, 6.0)
# Create the vehicle Irrlicht interface
app = veh.ChWheeledVehicleIrrApp(my_hmmwv.GetVehicle(), 'HMMWV Rigid Terrain Demo', irr.dimension2du(1000,800))
app.SetSkyBox()
app.AddTypicalLights(irr.vector3df(30, -30, 100), irr.vector3df(30, 50, 100), 250, 130)
app.AddTypicalLogo(chrono.GetChronoDataFile('logo_pychrono_alpha.png'))
app.SetChaseCamera(chrono.ChVectorD(0.0, 0.0, 0.75), 6.0, 0.5)
app.SetTimestep(step_size)
app.AssetBindAll()
app.AssetUpdateAll()
# Create the interactive driver system
driver = veh.ChIrrGuiDriver(app)
driver.SetSteeringDelta(0.02)
driver.SetThrottleDelta(0.02)
driver.SetBrakingDelta(0.06)
driver.Initialize()
realtime_timer = chrono.ChRealtimeStepTimer()
while (app.GetDevice().run()):
time = my_hmmwv.GetSystem().GetChTime()
# Draw scene
app.BeginScene()
app.DrawAll()
app.EndScene()
# Get driver inputs
driver_inputs = driver.GetInputs()
# Update modules (process inputs from other modules)
driver.Synchronize(time)
terrain.Synchronize(time)
my_hmmwv.Synchronize(time, driver_inputs, terrain)
app.Synchronize(driver.GetInputModeAsString(), driver_inputs)
# Advance simulation for one timestep for all modules
driver.Advance(step_size)
terrain.Advance(step_size)
my_hmmwv.Advance(step_size)
app.Advance(step_size)
# Spin in place for real time to catch up
realtime_timer.Spin(step_size)
return 0
# The path to the Chrono data directory containing various assets (meshes, textures, data files)
# is automatically set, relative to the default location of this demo.
# If running from a different directory, you must change the path to the data directory with:
#chrono.SetChronoDataPath('path/to/data')
veh.SetDataPath(chrono.GetChronoDataPath() + 'vehicle/')
# Simulation step sizes
step_size = 2e-3
tire_step_size = 1e-3
main() |
import requests
import pandas as pd
import numpy as np
def fetch_url(url):
res = requests.get(url)
return res.json()
def fetch_comments(comment_url):
d = fetch_url(comment_url)
    try:
        nextPageToken = d["nextPageToken"]
    except KeyError:
        nextPageToken = None
    comments = [d["items"][index]['snippet']['topLevelComment']['snippet']['textDisplay'] for index in
                range(len(d["items"]))]
    base_url = comment_url  # keep the original URL so page tokens are not appended repeatedly
    while nextPageToken:
        comment_url = base_url + '&pageToken=' + nextPageToken
        d = fetch_url(comment_url)
try:
nextPageToken = d["nextPageToken"]
        except KeyError:
nextPageToken = None
page_comments = [d["items"][index]['snippet']['topLevelComment']['snippet']['textDisplay'] for index in
range(len(d["items"]))]
comments.extend(page_comments)
return comments
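# Illustrative note (assumption): comment_url is expected to be a YouTube Data API v3 commentThreads
# request, e.g. https://www.googleapis.com/youtube/v3/commentThreads?part=snippet&videoId=VIDEO_ID&key=API_KEY;
# fetch_comments then follows nextPageToken until every page of top-level comments has been collected.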
def show_clusters(assign, corpus):
clusters_dictionary = {}
for i, c in enumerate(assign):
if c in clusters_dictionary:
clusters_dictionary[c].append(corpus[i])
else:
clusters_dictionary[c] = [corpus[i]]
return clusters_dictionary
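# Illustrative example: show_clusters([0, 1, 0], ['a', 'b', 'c'])
# returns {0: ['a', 'c'], 1: ['b']}, i.e. the comments grouped by their assigned cluster id.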
|
from literal_distribution_thing import LiteralDistribution, exclude_subsets, PID_sets
from cana.boolean_node import BooleanNode
from binarize import to_binary
# set up variables
n_inputs = 2**3
rule = 131
output = to_binary(rule, n_inputs)
bn = BooleanNode.from_output_list(outputs=output)
# print(bn.input_symmetry())
# print(bn._two_symbols)
ld = LiteralDistribution(bn)
print('Output of ts_transitions:')
print(ld._get_ts_transitions())
# print('\n')
# print('Output of Literal Distribution')
dist = ld._literal_distribution()
# for t in dist:
# print('Transitions to', t)
# for key in dist[t]:
# print(key, dist[t][key])
# print('information assignment:')
ld._distributed = dist
info = ld._assign_information()
# for t in info:
# print(t, info[t])
# print(len(info)) |
import numpy as np, tensorflow as tf, aolib.util as ut, aolib.img as ig, os
import tensorflow.contrib.slim as slim
import tensorflow.contrib.slim.nets as nets
import vgg, h5py
import sklearn.metrics
pj = ut.pjoin
full_dim = 256
crop_dim = 224
train_iters = 10000
batch_size = 32
#batch_size = 32
#base_lr = 1e-4
base_lr = 1e-4
gamma = 0.5
step_size = 1000
sample_dur_secs = 0.2
sample_fps = 60
gpu = '/gpu:0'
init_path = '../results/vgg_16.ckpt'
checkpoint_iters = 100
update_top_only = False
ed = tf.expand_dims
im_names = 'gel0_pre gel1_pre gel0_post gel1_post'.split()
def download_pretrained():
# https://github.com/tensorflow/models/tree/master/slim
ut.mkdir('../results')
ut.sys_check('wget http://download.tensorflow.org/models/vgg_16_2016_08_28.tar.gz '
' -O ../results/vgg_16_2016_08_28.tar.gz')
ut.sys_check('cd ../results; tar -xzf vgg_16_2016_08_28.tar.gz')
def moving_avg(name, x, vals = {}, avg_win_size = 100):
ut.add_dict_list(vals, name, x)
return np.mean(vals[name][-avg_win_size:])
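# Illustrative note: repeated calls such as moving_avg('loss', 0.9), moving_avg('loss', 0.7) share the
# default `vals` dict and return the mean of the most recent `avg_win_size` values; the training loop
# below uses this to smooth the printed loss and accuracy.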
def read_example(rec_queue):
reader = tf.TFRecordReader()
k, s = reader.read(rec_queue)
feats = {
'gel0_pre' : tf.FixedLenFeature([], dtype=tf.string),
'gel1_pre' : tf.FixedLenFeature([], dtype=tf.string),
'gel0_post' : tf.FixedLenFeature([], dtype=tf.string),
'gel1_post' : tf.FixedLenFeature([], dtype=tf.string),
'is_gripping' : tf.FixedLenFeature([], tf.int64),
}
example = tf.parse_single_example(s, features = feats)
gel0_pre = tf.image.decode_png(example['gel0_pre'])
gel1_pre = tf.image.decode_png(example['gel1_pre'])
gel0_post = tf.image.decode_png(example['gel0_post'])
gel1_post = tf.image.decode_png(example['gel1_post'])
ims = [gel0_pre, gel1_pre, gel0_post, gel1_post]
for x in ims:
x.set_shape((full_dim, full_dim, 3))
combo = tf.concat([ed(x, 0) for x in ims], 0)
combo = tf.random_crop(combo, (shape(combo, 0), crop_dim,
crop_dim, shape(combo, 3)))
gel0_pre = combo[0]
gel1_pre = combo[1]
gel0_post = combo[2]
gel1_post = combo[3]
label = example['is_gripping']
return gel0_pre, gel1_pre, gel0_post, gel1_post, label
def read_data(path):
tf_files = [pj(path, 'train.tf')]
queue = tf.train.string_input_producer(tf_files)
gel0_pre, gel1_pre, gel0_post, gel1_post, labels = \
tf.train.shuffle_batch(read_example(queue),
batch_size = batch_size,
capacity = 2000, min_after_dequeue = 500)
return dict(gel0_pre = gel0_pre,
gel1_pre = gel1_pre,
gel0_post = gel0_post,
gel1_post = gel1_post), labels
def normalize_ims(im):
if type(im) == type(np.array([])):
im = im.astype('float32')
else:
im = tf.cast(im, tf.float32)
return -1. + (2./255) * im
def shape(x, d = None):
s = x.get_shape().as_list()
return s if d is None else s[d]
def write_data(out_dir, train_frac = 0.75, val_frac = 0.05):
ut.mkdir(out_dir)
base_data = '../data/grasp/'
ut.sys_check('find %s -name "*.hdf5" > %s/db_files.txt' % (base_data, out_dir))
all_db_files = ut.read_lines(pj(out_dir, 'db_files.txt'))
all_db_files = ut.shuffled_with_seed(all_db_files)
name_from_file = lambda x : '_'.join(x.split('/')[-1].split('_')[2:])
by_name = ut.accum_dict((name_from_file(x), x) for x in all_db_files)
names = ut.shuffled_with_seed(sorted(by_name.keys()))
num_names = len(all_db_files)
num_train = int(train_frac * num_names)
num_val = int(val_frac * num_names)
i = 0
train_names = names[i : num_train]
i += num_train
val_names = names[i : i + num_val]
i += num_val
test_names = names[i:]
for dset_name, names in [('train', train_names),
('val', val_names),
('test', test_names)]:
ut.write_lines(pj(out_dir, '%s_objects.txt' % dset_name), names)
tf_file = pj(out_dir, '%s.tf' % dset_name)
pk_file = pj(out_dir, '%s.pk' % dset_name)
if os.path.exists(tf_file):
os.remove(tf_file)
writer = tf.python_io.TFRecordWriter(tf_file)
data = []
for name in names:
for db_file in by_name[name]:
with h5py.File(db_file, 'r') as db:
def im(x):
x = np.array(x)
x = ig.scale(x, (256, 256), 1)
return ig.compress(x)
if 'is_gripping' in db:
label = int(np.array(db['is_gripping']))
elif 'Is gripping?' in db:
label = int(np.array(db['Is gripping?']))
else:
print 'Skipping: %s. Missing is_gripping' % db_file
print 'Keys:', ' '.join(db.keys())
continue
data.append({
'gel0_pre': im(db['GelSightA_image_pre_gripping']),
'gel1_pre': im(db['GelSightB_image_pre_gripping']),
'gel0_post': im(db['GelSightA_image_post_gripping']),
'gel1_post': im(db['GelSightB_image_post_gripping']),
'is_gripping' : label})
fbl = lambda x :tf.train.Feature(bytes_list = tf.train.BytesList(value = [x]))
feat = {
'gel0_pre': fbl(im(db['GelSightA_image_pre_gripping'])),
'gel1_pre': fbl(im(db['GelSightB_image_pre_gripping'])),
'gel0_post': fbl(im(db['GelSightA_image_post_gripping'])),
'gel1_post': fbl(im(db['GelSightB_image_post_gripping'])),
'is_gripping' : tf.train.Feature(int64_list = tf.train.Int64List(value = [label]))}
ex = tf.train.Example(features = tf.train.Features(feature = feat))
writer.write(ex.SerializeToString())
writer.close()
ut.save(pk_file, data)
print dset_name, '->', len(data), 'examples'
def make_model(inputs, train):
n = normalize_ims
logits = vgg.vgg_gel2(
n(inputs['gel0_pre']), n(inputs['gel0_post']),
n(inputs['gel1_pre']), n(inputs['gel1_post']),
is_training = train,
update_top_only = update_top_only,
num_classes = 2)
return logits
# def train(path, restore = False):
# config = tf.ConfigProto(allow_soft_placement = True)
# with tf.Graph().as_default(), tf.device(gpu), tf.Session(config = config) as sess:
# global_step = tf.get_variable(
# 'global_step', [], initializer =
# tf.constant_initializer(0), trainable = False)
# inputs, labels = read_data(path)
# logits = make_model(inputs, train = True)
# loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
# logits = logits, labels = labels)
# loss = tf.reduce_mean(loss)
# tf.summary.scalar('loss', loss)
# eq = tf.equal(tf.argmax(logits, 1), labels)
# acc = tf.reduce_mean(tf.cast(eq, tf.float32))
# tf.summary.scalar('acc', acc)
# lr = base_lr * gamma**(global_step // step_size)
# opt = tf.train.MomentumOptimizer(lr, 0.9)
# train_op = opt.minimize(loss, global_step = global_step)
# bn_ups = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
# print 'Batch norm updates:', len(bn_ups)
# train_op = tf.group(train_op, *bn_ups)
# sess.run(tf.global_variables_initializer())
# var_list = slim.get_variables_to_restore()
# exclude = ['Adam', 'beta1_power', 'beta2_power', 'Momentum', 'global_step', 'logits', 'fc8']
# var_list = [x for x in var_list if \
# not any(name in x.name for name in exclude)]
# train_dir = pj(path, 'training')
# if restore:
# tf.train.Saver(var_list).restore(sess, tf.train.latest_checkpoint(train_dir))
# else:
# tf.train.Saver(var_list).restore(sess, init_path)
# #saver = tf.train.Saver()
# tf.train.start_queue_runners(sess = sess)
# summary_dir = ut.mkdir('../results/summary')
# print 'tensorboard --logdir=%s' % summary_dir
# sum_writer = tf.summary.FileWriter(summary_dir, sess.graph)
# while True:
# step = int(sess.run(global_step))
# if (step == 10 or step % checkpoint_iters == 0) or step == train_iters - 1:
# check_path = pj(ut.mkdir(train_dir), 'net.tf')
# print 'Saving:', check_path
# vs = slim.get_model_variables()
# tf.train.Saver(vs).save(sess, check_path,
# global_step = global_step)
# if step > train_iters:
# break
# merged = tf.summary.merge_all()
# if step % 1 == 0:
# [summary] = sess.run([merged])
# sum_writer.add_summary(summary, step)
# _, lr_val, loss_val, acc_val = sess.run([train_op, lr, loss, acc])
# if step % 10 == 0:
# print 'Iteration', step, 'lr = ', lr_val, \
# 'loss:', loss_val, 'acc:', acc_val
def train(path, restore = False):
config = tf.ConfigProto(allow_soft_placement = True)
with tf.Graph().as_default(), tf.device(gpu), tf.Session(config = config) as sess:
global_step = tf.get_variable('global_step', [], initializer =
tf.constant_initializer(0), trainable = False)
inputs, labels = read_data(path)
logits = make_model(inputs, train = True)
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits = logits, labels = labels)
loss = tf.reduce_mean(loss)
eq = tf.equal(tf.argmax(logits, 1), labels)
acc = tf.reduce_mean(tf.cast(eq, tf.float32))
lr = base_lr * gamma**(global_step // step_size)
opt = tf.train.MomentumOptimizer(lr, 0.9)
train_op = opt.minimize(loss, global_step = global_step)
bn_ups = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
print 'Batch norm updates:', len(bn_ups)
train_op = tf.group(train_op, *bn_ups)
sess.run(tf.global_variables_initializer())
var_list = slim.get_variables_to_restore()
exclude = ['Adam', 'beta1_power', 'beta2_power', 'Momentum', 'global_step', 'logits', 'fc8', 'fc6_', 'fc7_', 'conv6']
var_list = [x for x in var_list if \
not any(name in x.name for name in exclude)]
train_dir = pj(path, 'training')
if restore:
tf.train.Saver(var_list).restore(sess, tf.train.latest_checkpoint(train_dir))
else:
tf.train.Saver(var_list).restore(sess, init_path)
#saver = tf.train.Saver()
tf.train.start_queue_runners(sess = sess)
summary_dir = ut.mkdir('../results/summary')
print 'tensorboard --logdir=%s' % summary_dir
sum_writer = tf.summary.FileWriter(summary_dir, sess.graph)
while True:
step = int(sess.run(global_step))
if (step == 10 or step % checkpoint_iters == 0) or step == train_iters - 1:
check_path = pj(ut.mkdir(train_dir), 'net.tf')
print 'Saving:', check_path
#saver.save(sess, check_path, global_step = global_step)
vs = slim.get_model_variables()
# print 'Variables:'
# for x in vs:
# print x.name
tf.train.Saver(vs).save(sess, check_path, global_step = global_step)
if step > train_iters:
break
merged = tf.summary.merge_all()
if step % 1 == 0:
[summary] = sess.run([merged])
sum_writer.add_summary(summary, step)
_, lr_val, loss_val, acc_val = sess.run([train_op, lr, loss, acc])
if step % 10 == 0:
print 'Iteration', step, 'lr = ', lr_val, 'loss:', moving_avg('loss', loss_val), 'acc:', moving_avg('acc', acc_val)
class NetClf:
def __init__(self, model_file, gpu = '/cpu:0'):
self.sess = None
self.model_file = model_file
self.gpu = gpu
def __del__(self):
self.deinit()
def init(self):
if self.sess is None:
print 'Restoring:',self.model_file
with tf.device(self.gpu):
tf.reset_default_graph()
print self.gpu
tf.Graph().as_default()
self.sess = tf.Session()
s = (crop_dim, crop_dim, 3)
self.gel0_pre = tf.placeholder(tf.uint8, s, name = 'gel0_pre')
self.gel1_pre = tf.placeholder(tf.uint8, s, name = 'gel1_pre')
self.gel0_post = tf.placeholder(tf.uint8, s, name = 'gel0_post')
self.gel1_post = tf.placeholder(tf.uint8, s, name = 'gel1_post')
self.logits = make_model({k : ed(getattr(self, k), 0) for k in im_names}, train = False)
tf.train.Saver().restore(self.sess, self.model_file)
tf.get_default_graph().finalize()
def deinit(self):
if self.sess is not None:
self.sess.close()
self.sess = None
def format_im(self, im):
return ig.scale(im, (crop_dim, crop_dim), 1)#.astype('float32')
def predict(self, **kwargs):
self.init()
inputs = {}
for k in im_names:
inputs[getattr(self, k)] = self.format_im(kwargs[k])
[logits] = self.sess.run([self.logits], inputs)
return ut.softmax(logits[0])[1]
def test(path):
train_dir = pj(path, 'training')
check_path = tf.train.latest_checkpoint(train_dir)
print 'Restoring from:', check_path
net = NetClf(check_path, gpu)
data = ut.load(pj(path, 'test.pk'))
labels = []
probs = []
accs = []
for i in xrange(len(data)):
ex = data[i]
label = ex['is_gripping']
ex = {k : ig.uncompress(ex[k]) for k in im_names}
prob = net.predict(**ex)
print prob, label
pred = int(prob >= 0.5)
labels.append(label)
probs.append(prob)
accs.append(pred == label)
labels = np.array(labels, 'bool')
probs = np.array(probs, 'float32')
accs = np.array(accs)
print 'Accuracy:', np.mean(accs)
print 'mAP:', sklearn.metrics.average_precision_score(labels, probs)
def run(todo = 'all',
out_dir = '../results/grasp/grasp-v1',
restore = 0):
todo = ut.make_todo(todo, 'im train test')
if 'im' in todo:
write_data(out_dir)
if 'train' in todo:
train(out_dir, restore = restore)
if 'test' in todo:
test(out_dir)
|
import logging
from typing import Dict
from typing import List
import neo4j
from cartography.intel.jamf.util import call_jamf_api
from cartography.util import run_cleanup_job
from cartography.util import timeit
logger = logging.getLogger(__name__)
@timeit
def get_computer_groups(jamf_base_uri: str, jamf_user: str, jamf_password: str) -> List[Dict]:
return call_jamf_api("/computergroups", jamf_base_uri, jamf_user, jamf_password)
@timeit
def load_computer_groups(data: Dict, neo4j_session: neo4j.Session, update_tag: int) -> None:
ingest_groups = """
UNWIND {JsonData} as group
MERGE (g:JamfComputerGroup{id: group.id})
ON CREATE SET g.name = group.name,
g.firstseen = timestamp()
SET g.is_smart = group.is_smart,
g.lastupdated = {UpdateTag}
"""
groups = data.get("computer_groups")
neo4j_session.run(ingest_groups, JsonData=groups, UpdateTag=update_tag)
@timeit
def cleanup(neo4j_session: neo4j.Session, common_job_parameters: Dict) -> None:
run_cleanup_job('jamf_import_computers_cleanup.json', neo4j_session, common_job_parameters)
@timeit
def sync_computer_groups(
neo4j_session: neo4j.Session, update_tag: int, jamf_base_uri: str, jamf_user: str,
jamf_password: str,
) -> None:
groups = get_computer_groups(jamf_base_uri, jamf_user, jamf_password)
load_computer_groups(groups, neo4j_session, update_tag)
@timeit
def sync(
neo4j_session: neo4j.Session, jamf_base_uri: str, jamf_user: str, jamf_password: str,
common_job_parameters: Dict,
) -> None:
sync_computer_groups(neo4j_session, common_job_parameters['UPDATE_TAG'], jamf_base_uri, jamf_user, jamf_password)
|
from __future__ import absolute_import
from . import train_set
# Globally-importable utils.
from .train_set import train_A_x
from .train_set import train_B_x |
#!/usr/bin/env python
from yaml import load, dump
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
from connection import Connection
from logging import Logging
import importlib
import datetime
import argparse
parser = argparse.ArgumentParser(description='Start workload.')
parser.add_argument('--test', action='store_true',
help="run without executing commands")
parser.add_argument('--verbose', action='store_true',
help="more verbose output")
args = parser.parse_args()
# Logging
logging = Logging(verbose=args.verbose)
# Open yaml file
f = open("deploy.yml", "r")
data = load(f, Loader=Loader)
# Connection
logging.message("# Init connection")
connectionConfig = data['connection']
connection = Connection(config=connectionConfig, logservice=logging, test=args.test)
connection.test()
# Create release name
today = datetime.datetime.today()
release_name = today.strftime('%Y-%m-%d_%H%M%S')
# TODO: Find nicer solution for this
data['deployment']['release_name'] = release_name
# Run tasks one by one
logging.message("# Init tasks")
tasklist = data['tasks']
for taskconfig in tasklist:
logging.message("! Task %s" % taskconfig['task'])
TaskClass = getattr(importlib.import_module("tasks"), taskconfig['task'])
task = TaskClass(connection, {
"config": taskconfig,
"deployment": data['deployment']
}, logging)
task.run()
|
from unittest import TestCase
from src.utils import order_diagnosis
from src.consequents import COMMON_CONSEQUENT_NAME
class ReferenceDiagnosisTest(TestCase):
    FOR_SURE_LEVEL = 60
    common_consequent_name = COMMON_CONSEQUENT_NAME
    medical_record = None
    output = None
    def _get_diagnosis(self):
        if not self.medical_record:
            self.skipTest('Attribute "medical_record" should be defined')
        if not self.output:
            self.skipTest('Attribute "output" should be defined')
self.medical_record.compute()
return order_diagnosis(self.output, self.medical_record.output[COMMON_CONSEQUENT_NAME])
def _get_best_diagnosis(self):
return self._get_diagnosis()[0]
|
import matplotlib
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
import random  # stdlib random: numpy.random.sample() cannot draw K items from a population
# define initial population and cooperation
def Init_pop_one():
POP_ONE = []
# define chromosome -- random.shuffle(ChromosomeX)[K]
for i in range(POP_SIZE):
POP_ONE.append(random.sample(ChromosomeX, K))
return POP_ONE
def Init_pop_two():
POP_TWO = []
# define chromosome -- random.shuffle(ChromosomeX)[K]
for i in range(POP_SIZE):
POP_TWO.append(random.sample(ChromosomeX, K))
return POP_TWO
# define fitness function
def get_fitness(x):
    x = np.asarray(x, dtype=float)  # accept plain Python lists as well as numpy arrays
    sqrt = np.sqrt(K)
Eta = []
Upsilon = []
Sigma = []
Mu = []
maximum = max(x)
standard = x.std()
average = x.mean()
Eta.append(maximum)
Sigma.append(standard)
Mu.append(average)
for i in range(len(Eta)):
if Eta[i] < 0.5:
value = Sigma[i] / (2 * sqrt * Mu[i])
Upsilon.append(value)
else:
value = Sigma[i] / Mu[i]
Upsilon.append(value)
Coverage = sum(Eta)
Mutual_exclusivity = sum(Upsilon)
Weight = Coverage + Mutual_exclusivity
return Weight
# calculate the fitness of every individual in population
def pop_fitness_one(POP_ONE):
return [get_fitness(k) for k in POP_ONE]
def pop_fitness_two(POP_TWO):
return [get_fitness(k) for k in POP_TWO]
# define population crossover operator
def pop_one_crossover(pop_one_next):
    new_pop_one = []
    for father in list(pop_one_next):  # iterate over a copy, the pool is shrunk below
        if father not in pop_one_next:
            continue
        mother = pop_one_next[np.random.randint(len(pop_one_next))]
        same_pop = [x for x in father if x in mother]
        rest_pop = [y for y in (father + mother) if y not in same_pop]
        half = len(rest_pop) // 2  # integer split of the non-shared genes
        new_pop_one.append(same_pop + rest_pop[:half])
        new_pop_one.append(same_pop + rest_pop[half:])
        pop_one_next.remove(father)
        if mother in pop_one_next:
            pop_one_next.remove(mother)
    return new_pop_one
def pop_two_crossover(pop_two_next):
    new_pop_two = []
    for father in list(pop_two_next):  # iterate over a copy, the pool is shrunk below
        if father not in pop_two_next:
            continue
        mother = pop_two_next[np.random.randint(len(pop_two_next))]
        same_pop = [x for x in father if x in mother]
        rest_pop = [y for y in (father + mother) if y not in same_pop]
        half = len(rest_pop) // 2  # integer split of the non-shared genes
        new_pop_two.append(same_pop + rest_pop[:half])
        new_pop_two.append(same_pop + rest_pop[half:])
        pop_two_next.remove(father)
        if mother in pop_two_next:
            pop_two_next.remove(mother)
    return new_pop_two
# define cooperation crossover operator
def cooperation_crossover_one(cp_next, pop_two_new, pop_one_new):
    for father in list(cp_next):  # iterate over a copy, the pool is shrunk below
        if father not in cp_next:
            continue
        mother = cp_next[np.random.randint(len(cp_next))]
        same_pop = [x for x in father if x in mother]
        rest_pop = [y for y in (father + mother) if y not in same_pop]
        half = len(rest_pop) // 2
        pop_one_new.append(same_pop + rest_pop[:half])
        pop_two_new.append(same_pop + rest_pop[half:])
        cp_next.remove(father)
        if mother in cp_next:
            cp_next.remove(mother)
    return pop_one_new, pop_two_new
# define an elitism selector (keeps the fitter half of each population)
def selection_elitism(POP_ONE, POP_TWO, cp_init):
    pop_parents_one = sorted(POP_ONE, key=lambda x: get_fitness(x), reverse=True)
    pop_parents_two = sorted(POP_TWO, key=lambda x: get_fitness(x), reverse=True)
    cp_parents = sorted(cp_init, key=lambda x: get_fitness(x), reverse=True)
    pop_one_next = pop_parents_one[0: int(POP_SIZE * 0.5)]
    pop_two_next = pop_parents_two[0: int(POP_SIZE * 0.5)]
    cp_next = cp_parents[0: int(POP_SIZE * 0.5)]
    return pop_one_next, pop_two_next, cp_next
def pop_one_mutation(pop_new_one):
    # Minimal repair of the original (garbled) mutation step. Assumed intent: with
    # probability Pm, swap one gene of an individual for a gene it does not yet carry
    # and keep the change only if it improves the fitness.
    for i in range(len(pop_new_one)):
        if random.random() < Pm:
            candidates = [x for x in ChromosomeX if x not in pop_new_one[i]]
            if not candidates:
                continue
            mutated = pop_new_one[i][:]
            mutated.remove(random.choice(mutated))
            mutated.append(random.choice(candidates))
            if get_fitness(mutated) > get_fitness(pop_new_one[i]):
                pop_new_one[i] = mutated
    return pop_new_one
def pop_two_mutation(pop_new_two):
    # Same repaired mutation step as pop_one_mutation, applied to the second population.
    for i in range(len(pop_new_two)):
        if random.random() < Pm:
            candidates = [x for x in ChromosomeX if x not in pop_new_two[i]]
            if not candidates:
                continue
            mutated = pop_new_two[i][:]
            mutated.remove(random.choice(mutated))
            mutated.append(random.choice(candidates))
            if get_fitness(mutated) > get_fitness(pop_new_two[i]):
                pop_new_two[i] = mutated
    return pop_new_two
def CGA_MWS():
    pop_one_init = Init_pop_one()
    pop_two_init = Init_pop_two()
    cp_init = pop_one_init + pop_two_init  # lists cannot be OR-ed; the cooperation pool is their concatenation
    best = None
    gen = 0
    t = 0
    for g in MAXG_G:
        while gen < g and t < MAXT:
            # step 4: selection and crossover in pop
            pop_one_next, pop_two_next, cp_next = selection_elitism(pop_one_init, pop_two_init, cp_init)
            pop_one_new = pop_one_crossover(pop_one_next)
            pop_two_new = pop_two_crossover(pop_two_next)
            # step 5: selection and crossover in cooperation
            pop_one_new, pop_two_new = cooperation_crossover_one(cp_next, pop_two_new, pop_one_new)
            # step 6: mutation
            pop_one_new = pop_one_mutation(pop_one_new)
            pop_two_new = pop_two_mutation(pop_two_new)
            # step 7: sort the pops
            pop_one_new = sorted(pop_one_new, key=lambda x: get_fitness(x), reverse=True)[: POP_SIZE]
            pop_two_new = sorted(pop_two_new, key=lambda x: get_fitness(x), reverse=True)[: POP_SIZE]
            # step 8: sort the union set and feed it back into the next selection round
            cp_init = sorted(cp_next + pop_one_new + pop_two_new,
                             key=lambda x: get_fitness(x), reverse=True)[: 2 * POP_SIZE]
            union_pop = sorted(pop_one_new + pop_two_new, key=lambda x: get_fitness(x), reverse=True)
            # exchange the best individual between the two populations
            if get_fitness(pop_one_new[0]) > get_fitness(pop_two_new[-1]):
                pop_two_new[-1] = pop_one_new[0]
            elif get_fitness(pop_two_new[0]) > get_fitness(pop_one_new[-1]):
                pop_one_new[-1] = pop_two_new[0]
            # track the best individual seen so far; reset the stall counter when it improves
            if best is None or get_fitness(union_pop[0]) > get_fitness(best):
                best = union_pop[0]
                t = -1
            t += 1
            gen += 1
    return best
if __name__ == "__main__":
K = int(input())
# set the parameters
MAXG_G = [1000, 5000, 10000]
MAXT = 10
POP_SIZE = MAXG_G / 4
Pm = 0.3
ChromosomeX = [c for c in range(MAXG_G)]
CGA_MWS()
|
"""
Copyright 2012 Ali Ok (aliokATapacheDOTorg)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import math
from trnltk.morphology.contextful.likelihoodmetrics.wordformcollocation.contextparsingcalculator import BaseContextParsingLikelihoodCalculator
from trnltk.morphology.model import formatter
logger = logging.getLogger('interpolatingCollocationLikelihoodCalculator')
class InterpolatingLikelihoodCalculator(BaseContextParsingLikelihoodCalculator):
ALPHA = 10
def __init__(self, wrapped_calculator):
"""
@type wrapped_calculator: BaseContextParsingLikelihoodCalculator
"""
self._wrapped_calculator = wrapped_calculator
def build_indexes(self):
self._wrapped_calculator.build_indexes()
def calculate_oneway_likelihood(self, target, context, target_comes_after, calculation_context=None):
if logger.isEnabledFor(logging.DEBUG):
if target_comes_after:
logger.debug(u" Calculating oneway likelihood of {1}, {0}".format(formatter.format_morpheme_container_for_simple_parseset(target),
[t[0].get_surface() if t else "<Unparsable>" for t in context]))
else:
logger.debug(u" Calculating oneway likelihood of {0}, {1}".format(formatter.format_morpheme_container_for_simple_parseset(target),
[t[0].get_surface() if t else "<Unparsable>" for t in context]))
context_len = len(context)
if calculation_context is not None:
calculation_context['interpolation'] = {'context_length': context_len, 'likelihood': {}, 'weight': {}, 'item': {}, 'part_weight': {}}
interpolation_weights = self._calculate_interpolation_weights(context_len)
total_likelihood = 0
for i in range(0, len(context)):
calculation_context_item = {} if calculation_context else None
context_part = context[context_len - i - 1:] if target_comes_after else context[0: i + 1]
part_likelihood = self._wrapped_calculator.calculate_oneway_likelihood(target, context_part, target_comes_after, calculation_context_item)
part_weight = part_likelihood * interpolation_weights[i]
total_likelihood += part_weight
if calculation_context is not None:
calculation_context['interpolation']['item'][i] = calculation_context_item
calculation_context['interpolation']['likelihood'][i] = part_likelihood
calculation_context['interpolation']['weight'][i] = interpolation_weights[i]
calculation_context['interpolation']['part_weight'][i] = part_weight
if calculation_context is not None:
calculation_context['sum_likelihood'] = total_likelihood
return total_likelihood
def _calculate_interpolation_weights(self, context_len):
denominator = 0
for i in range(0, context_len):
denominator += math.pow(self.ALPHA, i)
weights = []
for i in range(0, context_len):
nominator = math.pow(self.ALPHA, i)
weights.append(nominator / denominator)
return weights
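    # Illustrative example (not part of the trnltk API): with ALPHA = 10 and context_len = 3
    # the unnormalized weights are 10**0, 10**1 and 10**2, the denominator is 111, and the
    # returned weights are [1/111, 10/111, 100/111] ~ [0.009, 0.090, 0.901]; the widest
    # context part therefore dominates the interpolated likelihood.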
|
# Copyright (c) 2015 Red Hat Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneauth1 import exceptions as ks_exc
from neutron_lib.api.definitions import port as port_def
from neutron_lib.api.definitions import port_resource_request
from neutron_lib.api.definitions import portbindings
from neutron_lib.api.definitions import qos as qos_apidef
from neutron_lib.api.definitions import qos_bw_limit_direction
from neutron_lib.api.definitions import qos_bw_minimum_ingress
from neutron_lib.api.definitions import qos_default
from neutron_lib.api.definitions import qos_port_network_policy
from neutron_lib.api.definitions import qos_pps_rule
from neutron_lib.api.definitions import qos_rule_type_details
from neutron_lib.api.definitions import qos_rules_alias
from neutron_lib.callbacks import events as callbacks_events
from neutron_lib.callbacks import registry as callbacks_registry
from neutron_lib.callbacks import resources as callbacks_resources
from neutron_lib import constants as nl_constants
from neutron_lib import context
from neutron_lib.db import api as db_api
from neutron_lib.db import resource_extend
from neutron_lib import exceptions as lib_exc
from neutron_lib.exceptions import qos as qos_exc
from neutron_lib.placement import client as pl_client
from neutron_lib.placement import utils as pl_utils
from neutron_lib.services.qos import constants as qos_consts
import os_resource_classes as orc
from oslo_config import cfg
from oslo_log import log as logging
from neutron._i18n import _
from neutron.db import db_base_plugin_common
from neutron.extensions import qos
from neutron.objects import base as base_obj
from neutron.objects import network as network_object
from neutron.objects import ports as ports_object
from neutron.objects.qos import policy as policy_object
from neutron.objects.qos import qos_policy_validator as checker
from neutron.objects.qos import rule as rule_object
from neutron.objects.qos import rule_type as rule_type_object
from neutron.services.qos.drivers import manager
LOG = logging.getLogger(__name__)
@resource_extend.has_resource_extenders
class QoSPlugin(qos.QoSPluginBase):
"""Implementation of the Neutron QoS Service Plugin.
This class implements a Quality of Service plugin that provides quality of
service parameters over ports and networks.
"""
supported_extension_aliases = [
qos_apidef.ALIAS,
qos_bw_limit_direction.ALIAS,
qos_default.ALIAS,
qos_rule_type_details.ALIAS,
port_resource_request.ALIAS,
qos_bw_minimum_ingress.ALIAS,
qos_rules_alias.ALIAS,
qos_port_network_policy.ALIAS,
qos_pps_rule.ALIAS,
]
__native_pagination_support = True
__native_sorting_support = True
__filter_validation_support = True
def __init__(self):
super(QoSPlugin, self).__init__()
self.driver_manager = manager.QosServiceDriverManager()
self._placement_client = pl_client.PlacementAPIClient(cfg.CONF)
callbacks_registry.subscribe(
self._validate_create_port_callback,
callbacks_resources.PORT,
callbacks_events.PRECOMMIT_CREATE)
callbacks_registry.subscribe(
self._check_port_for_placement_allocation_change,
callbacks_resources.PORT,
callbacks_events.BEFORE_UPDATE)
callbacks_registry.subscribe(
self._validate_update_port_callback,
callbacks_resources.PORT,
callbacks_events.PRECOMMIT_UPDATE)
callbacks_registry.subscribe(
self._validate_update_network_callback,
callbacks_resources.NETWORK,
callbacks_events.PRECOMMIT_UPDATE)
callbacks_registry.subscribe(
self._validate_create_network_callback,
callbacks_resources.NETWORK,
callbacks_events.PRECOMMIT_CREATE)
@staticmethod
@resource_extend.extends([port_def.COLLECTION_NAME])
def _extend_port_resource_request(port_res, port_db):
"""Add resource request to a port."""
if isinstance(port_db, ports_object.Port):
qos_id = port_db.qos_policy_id or port_db.qos_network_policy_id
else:
qos_id = None
if port_db.get('qos_policy_binding'):
qos_id = port_db.qos_policy_binding.policy_id
elif port_db.get('qos_network_policy_binding'):
qos_id = port_db.qos_network_policy_binding.policy_id
port_res['resource_request'] = None
if not qos_id:
return port_res
if port_res.get('bulk'):
port_res['resource_request'] = {
'qos_id': qos_id,
'network_id': port_db.network_id,
'vnic_type': port_res[portbindings.VNIC_TYPE]}
return port_res
min_bw_rules = rule_object.QosMinimumBandwidthRule.get_objects(
context.get_admin_context(), qos_policy_id=qos_id)
resources = QoSPlugin._get_resources(min_bw_rules)
if not resources:
return port_res
segments = network_object.NetworkSegment.get_objects(
context.get_admin_context(), network_id=port_db.network_id)
traits = QoSPlugin._get_traits(port_res[portbindings.VNIC_TYPE],
segments)
if not traits:
return port_res
port_res['resource_request'] = {
'required': traits,
'resources': resources}
return port_res
@staticmethod
def _get_resources(min_bw_rules):
resources = {}
# NOTE(ralonsoh): we should move this translation dict to n-lib.
rule_direction_class = {
nl_constants.INGRESS_DIRECTION:
orc.NET_BW_IGR_KILOBIT_PER_SEC,
nl_constants.EGRESS_DIRECTION:
orc.NET_BW_EGR_KILOBIT_PER_SEC
}
for rule in min_bw_rules:
resources[rule_direction_class[rule.direction]] = rule.min_kbps
return resources
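    # Illustrative example (not from the upstream code): a policy with two minimum-bandwidth
    # rules, ingress min_kbps=1000 and egress min_kbps=2000, yields
    # {orc.NET_BW_IGR_KILOBIT_PER_SEC: 1000, orc.NET_BW_EGR_KILOBIT_PER_SEC: 2000}.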
@staticmethod
def _get_traits(vnic_type, segments):
# TODO(lajoskatona): Change to handle all segments when any traits
# support will be available. See Placement spec:
# https://review.opendev.org/565730
first_segment = segments[0]
if not first_segment or not first_segment.physical_network:
return []
physnet_trait = pl_utils.physnet_trait(
first_segment.physical_network)
# NOTE(ralonsoh): we should not rely on the current execution order of
# the port extending functions. Although here we have
# port_res[VNIC_TYPE], we should retrieve this value from the port DB
# object instead.
vnic_trait = pl_utils.vnic_type_trait(vnic_type)
return [physnet_trait, vnic_trait]
@staticmethod
@resource_extend.extends([port_def.COLLECTION_NAME_BULK])
def _extend_port_resource_request_bulk(ports_res, noop):
"""Add resource request to a list of ports."""
min_bw_rules = dict()
net_segments = dict()
for port_res in ports_res:
if port_res.get('resource_request') is None:
continue
qos_id = port_res['resource_request'].pop('qos_id', None)
if not qos_id:
port_res['resource_request'] = None
continue
net_id = port_res['resource_request'].pop('network_id')
vnic_type = port_res['resource_request'].pop('vnic_type')
if qos_id not in min_bw_rules:
rules = rule_object.QosMinimumBandwidthRule.get_objects(
context.get_admin_context(), qos_policy_id=qos_id)
min_bw_rules[qos_id] = rules
resources = QoSPlugin._get_resources(min_bw_rules[qos_id])
if not resources:
continue
if net_id not in net_segments:
segments = network_object.NetworkSegment.get_objects(
context.get_admin_context(),
network_id=net_id)
net_segments[net_id] = segments
traits = QoSPlugin._get_traits(vnic_type, net_segments[net_id])
if not traits:
continue
port_res['resource_request'] = {
'required': traits,
'resources': resources}
return ports_res
def _get_ports_with_policy(self, context, policy):
networks_ids = policy.get_bound_networks()
ports_with_net_policy = ports_object.Port.get_objects(
context, network_id=networks_ids)
        # Filter only those ports which don't have an overridden policy
ports_with_net_policy = [
port for port in ports_with_net_policy if
port.qos_policy_id is None
]
ports_ids = policy.get_bound_ports()
ports_with_policy = ports_object.Port.get_objects(
context, id=ports_ids)
return list(set(ports_with_policy + ports_with_net_policy))
def _validate_create_port_callback(self, resource, event, trigger,
payload=None):
context = payload.context
port_id = payload.resource_id
port = ports_object.Port.get_object(context, id=port_id)
policy_id = port.qos_policy_id or port.qos_network_policy_id
if policy_id is None:
return
policy = policy_object.QosPolicy.get_object(
context.elevated(), id=policy_id)
self.validate_policy_for_port(context, policy, port)
def _check_port_for_placement_allocation_change(self, resource, event,
trigger, payload):
context = payload.context
orig_port = payload.states[0]
port = payload.latest_state
original_policy_id = orig_port.get(qos_consts.QOS_POLICY_ID)
if qos_consts.QOS_POLICY_ID not in port:
return
policy_id = port.get(qos_consts.QOS_POLICY_ID)
if policy_id == original_policy_id:
return
# Do this only for compute bound ports
if (nl_constants.DEVICE_OWNER_COMPUTE_PREFIX in
orig_port['device_owner']):
original_policy = policy_object.QosPolicy.get_object(
context.elevated(), id=original_policy_id)
policy = policy_object.QosPolicy.get_object(
context.elevated(), id=policy_id)
self._change_placement_allocation(original_policy, policy,
orig_port)
def _prepare_allocation_needs(self, original_rule, desired_rule):
if (isinstance(desired_rule, rule_object.QosMinimumBandwidthRule) or
isinstance(desired_rule, dict)):
o_dir = original_rule.get('direction')
o_minkbps = original_rule.get('min_kbps')
d_minkbps = desired_rule.get('min_kbps')
d_dir = desired_rule.get('direction')
if o_dir == d_dir and o_minkbps != d_minkbps:
diff = d_minkbps - o_minkbps
# TODO(lajoskatona): move this to neutron-lib, see similar
# dict @l125.
if d_dir == 'egress':
drctn = orc.NET_BW_EGR_KILOBIT_PER_SEC
else:
drctn = orc.NET_BW_IGR_KILOBIT_PER_SEC
return {drctn: diff}
return {}
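    # Illustrative example (not from the upstream code): if the original egress rule has
    # min_kbps=1000 and the desired egress rule has min_kbps=3000, the returned diff is
    # {orc.NET_BW_EGR_KILOBIT_PER_SEC: 2000}, i.e. the amount the placement allocation
    # must grow by.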
def _change_placement_allocation(self, original_policy, desired_policy,
orig_port):
alloc_diff = {}
original_rules = []
rp_uuid = orig_port['binding:profile'].get('allocation')
device_id = orig_port['device_id']
if original_policy:
original_rules = original_policy.get('rules')
if desired_policy:
desired_rules = desired_policy.get('rules')
else:
desired_rules = [{'direction': 'egress', 'min_kbps': 0},
{'direction': 'ingress', 'min_kbps': 0}]
any_rules_minbw = any(
[isinstance(r, rule_object.QosMinimumBandwidthRule)
for r in desired_rules])
if (not original_rules and any_rules_minbw) or not rp_uuid:
LOG.warning("There was no QoS policy with minimum_bandwidth rule "
"attached to the port %s, there is no allocation "
"record in placement for it, only the dataplane "
"enforcement will happen!", orig_port['id'])
return
for o_rule in original_rules:
if isinstance(o_rule, rule_object.QosMinimumBandwidthRule):
for d_rule in desired_rules:
alloc_diff.update(
self._prepare_allocation_needs(o_rule, d_rule))
if alloc_diff:
try:
self._placement_client.update_qos_minbw_allocation(
consumer_uuid=device_id, minbw_alloc_diff=alloc_diff,
rp_uuid=rp_uuid)
except ks_exc.Conflict:
raise qos_exc.QosPlacementAllocationConflict(
consumer=device_id, rp=rp_uuid)
def _validate_update_port_callback(self, resource, event, trigger,
payload=None):
context = payload.context
original_policy_id = payload.states[0].get(
qos_consts.QOS_POLICY_ID)
policy_id = payload.desired_state.get(qos_consts.QOS_POLICY_ID)
if policy_id is None or policy_id == original_policy_id:
return
updated_port = ports_object.Port.get_object(
context, id=payload.desired_state['id'])
policy = policy_object.QosPolicy.get_object(
context.elevated(), id=policy_id)
self.validate_policy_for_port(context, policy, updated_port)
def _validate_create_network_callback(self, resource, event, trigger,
payload=None):
context = payload.context
network_id = payload.resource_id
network = network_object.Network.get_object(context, id=network_id)
policy_id = network.qos_policy_id
if policy_id is None:
return
policy = policy_object.QosPolicy.get_object(
context.elevated(), id=policy_id)
self.validate_policy_for_network(context, policy, network_id)
def _validate_update_network_callback(self, resource, event, trigger,
payload=None):
context = payload.context
original_network = payload.states[0]
updated_network = payload.desired_state
original_policy_id = original_network.get(qos_consts.QOS_POLICY_ID)
policy_id = updated_network.get(qos_consts.QOS_POLICY_ID)
if policy_id is None or policy_id == original_policy_id:
return
policy = policy_object.QosPolicy.get_object(
context.elevated(), id=policy_id)
self.validate_policy_for_network(
context, policy, network_id=updated_network['id'])
ports = ports_object.Port.get_objects(
context, network_id=updated_network['id'])
        # Filter only those ports which don't have an overridden policy
ports = [
port for port in ports if port.qos_policy_id is None
]
self.validate_policy_for_ports(context, policy, ports)
def validate_policy(self, context, policy):
ports = self._get_ports_with_policy(context, policy)
self.validate_policy_for_ports(context, policy, ports)
def validate_policy_for_ports(self, context, policy, ports):
for port in ports:
self.validate_policy_for_port(context, policy, port)
def validate_policy_for_port(self, context, policy, port):
for rule in policy.rules:
if not self.driver_manager.validate_rule_for_port(
context, rule, port):
raise qos_exc.QosRuleNotSupported(rule_type=rule.rule_type,
port_id=port['id'])
def validate_policy_for_network(self, context, policy, network_id):
for rule in policy.rules:
if not self.driver_manager.validate_rule_for_network(
context, rule, network_id):
raise qos_exc.QosRuleNotSupportedByNetwork(
rule_type=rule.rule_type, network_id=network_id)
def reject_min_bw_rule_updates(self, context, policy):
ports = self._get_ports_with_policy(context, policy)
for port in ports:
# NOTE(bence romsics): In some cases the presence of
# 'binding:profile.allocation' is a more precise marker than
# 'device_owner' about when we have to reject min-bw related
# policy/rule updates. However 'binding:profile.allocation' cannot
# be used in a generic way here. Consider the case when the first
# min-bw rule is added to a policy having ports in-use. Those ports
# will not have 'binding:profile.allocation', but this policy
# update must be rejected.
if (port.device_owner is not None and
port.device_owner.startswith(
nl_constants.DEVICE_OWNER_COMPUTE_PREFIX)):
raise NotImplementedError(_(
'Cannot update QoS policies/rules backed by resources '
'tracked in Placement'))
@db_base_plugin_common.convert_result_to_dict
def create_policy(self, context, policy):
"""Create a QoS policy.
:param context: neutron api request context
:type context: neutron_lib.context.Context
:param policy: policy data to be applied
:type policy: dict
:returns: a QosPolicy object
"""
# NOTE(dasm): body 'policy' contains both tenant_id and project_id
# but only latter needs to be used to create QosPolicy object.
# We need to remove redundant keyword.
# This cannot be done in other place of stacktrace, because neutron
# needs to be backward compatible.
tenant_id = policy['policy'].pop('tenant_id', None)
if not policy['policy'].get('project_id'):
policy['policy']['project_id'] = tenant_id
policy_obj = policy_object.QosPolicy(context, **policy['policy'])
with db_api.CONTEXT_WRITER.using(context):
policy_obj.create()
self.driver_manager.call(qos_consts.CREATE_POLICY_PRECOMMIT,
context, policy_obj)
self.driver_manager.call(qos_consts.CREATE_POLICY, context, policy_obj)
return policy_obj
@db_base_plugin_common.convert_result_to_dict
def update_policy(self, context, policy_id, policy):
"""Update a QoS policy.
:param context: neutron api request context
:type context: neutron.context.Context
:param policy_id: the id of the QosPolicy to update
        :type policy_id: str uuid
:param policy: new policy data to be applied
:type policy: dict
:returns: a QosPolicy object
"""
policy_data = policy['policy']
with db_api.CONTEXT_WRITER.using(context):
policy_obj = policy_object.QosPolicy.get_policy_obj(
context, policy_id)
policy_obj.update_fields(policy_data, reset_changes=True)
policy_obj.update()
self.driver_manager.call(qos_consts.UPDATE_POLICY_PRECOMMIT,
context, policy_obj)
self.driver_manager.call(qos_consts.UPDATE_POLICY,
context, policy_obj)
return policy_obj
def delete_policy(self, context, policy_id):
"""Delete a QoS policy.
:param context: neutron api request context
:type context: neutron.context.Context
:param policy_id: the id of the QosPolicy to delete
:type policy_id: str uuid
:returns: None
"""
with db_api.CONTEXT_WRITER.using(context):
policy = policy_object.QosPolicy(context)
policy.id = policy_id
policy.delete()
self.driver_manager.call(qos_consts.DELETE_POLICY_PRECOMMIT,
context, policy)
self.driver_manager.call(qos_consts.DELETE_POLICY,
context, policy)
@db_base_plugin_common.filter_fields
@db_base_plugin_common.convert_result_to_dict
def get_policy(self, context, policy_id, fields=None):
"""Get a QoS policy.
:param context: neutron api request context
:type context: neutron.context.Context
        :param policy_id: the id of the QosPolicy to get
:type policy_id: str uuid
:returns: a QosPolicy object
"""
return policy_object.QosPolicy.get_policy_obj(context, policy_id)
@db_base_plugin_common.filter_fields
@db_base_plugin_common.convert_result_to_dict
def get_policies(self, context, filters=None, fields=None, sorts=None,
limit=None, marker=None, page_reverse=False):
"""Get QoS policies.
:param context: neutron api request context
:type context: neutron.context.Context
:param filters: search criteria
:type filters: dict
:returns: QosPolicy objects meeting the search criteria
"""
filters = filters or dict()
pager = base_obj.Pager(sorts, limit, page_reverse, marker)
return policy_object.QosPolicy.get_objects(context, _pager=pager,
**filters)
@db_base_plugin_common.filter_fields
@db_base_plugin_common.convert_result_to_dict
def get_rule_type(self, context, rule_type_name, fields=None):
if not context.is_admin:
raise lib_exc.NotAuthorized()
return rule_type_object.QosRuleType.get_object(rule_type_name)
@db_base_plugin_common.filter_fields
@db_base_plugin_common.convert_result_to_dict
def get_rule_types(self, context, filters=None, fields=None,
sorts=None, limit=None,
marker=None, page_reverse=False):
if not filters:
filters = {}
return rule_type_object.QosRuleType.get_objects(**filters)
def supported_rule_type_details(self, rule_type_name):
return self.driver_manager.supported_rule_type_details(rule_type_name)
@property
def supported_rule_types(self):
return self.driver_manager.supported_rule_types
@db_base_plugin_common.convert_result_to_dict
def create_policy_rule(self, context, rule_cls, policy_id, rule_data):
"""Create a QoS policy rule.
:param context: neutron api request context
:type context: neutron.context.Context
:param rule_cls: the rule object class
:type rule_cls: a class from the rule_object (qos.objects.rule) module
:param policy_id: the id of the QosPolicy for which to create the rule
:type policy_id: str uuid
:param rule_data: the rule data to be applied
:type rule_data: dict
:returns: a QoS policy rule object
"""
rule_type = rule_cls.rule_type
rule_data = rule_data[rule_type + '_rule']
with db_api.CONTEXT_WRITER.using(context):
# Ensure that we have access to the policy.
policy = policy_object.QosPolicy.get_policy_obj(context, policy_id)
checker.check_bandwidth_rule_conflict(policy, rule_data)
rule = rule_cls(context, qos_policy_id=policy_id, **rule_data)
checker.check_rules_conflict(policy, rule)
rule.create()
policy.obj_load_attr('rules')
self.validate_policy(context, policy)
if rule.rule_type == qos_consts.RULE_TYPE_MINIMUM_BANDWIDTH:
self.reject_min_bw_rule_updates(context, policy)
self.driver_manager.call(qos_consts.UPDATE_POLICY_PRECOMMIT,
context, policy)
self.driver_manager.call(qos_consts.UPDATE_POLICY, context, policy)
return rule
@db_base_plugin_common.convert_result_to_dict
def update_policy_rule(self, context, rule_cls, rule_id, policy_id,
rule_data):
"""Update a QoS policy rule.
:param context: neutron api request context
:type context: neutron.context.Context
:param rule_cls: the rule object class
:type rule_cls: a class from the rule_object (qos.objects.rule) module
:param rule_id: the id of the QoS policy rule to update
:type rule_id: str uuid
:param policy_id: the id of the rule's policy
:type policy_id: str uuid
:param rule_data: the new rule data to update
:type rule_data: dict
:returns: a QoS policy rule object
"""
rule_type = rule_cls.rule_type
rule_data = rule_data[rule_type + '_rule']
with db_api.CONTEXT_WRITER.using(context):
# Ensure we have access to the policy.
policy = policy_object.QosPolicy.get_policy_obj(context, policy_id)
# Ensure the rule belongs to the policy.
checker.check_bandwidth_rule_conflict(policy, rule_data)
rule = policy.get_rule_by_id(rule_id)
rule.update_fields(rule_data, reset_changes=True)
checker.check_rules_conflict(policy, rule)
rule.update()
policy.obj_load_attr('rules')
self.validate_policy(context, policy)
if rule.rule_type == qos_consts.RULE_TYPE_MINIMUM_BANDWIDTH:
self.reject_min_bw_rule_updates(context, policy)
self.driver_manager.call(qos_consts.UPDATE_POLICY_PRECOMMIT,
context, policy)
self.driver_manager.call(qos_consts.UPDATE_POLICY, context, policy)
return rule
def _get_policy_id(self, context, rule_cls, rule_id):
with db_api.CONTEXT_READER.using(context):
rule_object = rule_cls.get_object(context, id=rule_id)
if not rule_object:
raise qos_exc.QosRuleNotFound(policy_id="", rule_id=rule_id)
return rule_object.qos_policy_id
def update_rule(self, context, rule_cls, rule_id, rule_data):
"""Update a QoS policy rule alias. This method processes a QoS policy
rule update, where the rule is an API first level resource instead of a
subresource of a policy.
:param context: neutron api request context
:type context: neutron.context.Context
:param rule_cls: the rule object class
:type rule_cls: a class from the rule_object (qos.objects.rule) module
:param rule_id: the id of the QoS policy rule to update
:type rule_id: str uuid
:param rule_data: the new rule data to update
:type rule_data: dict
:returns: a QoS policy rule object
:raises: qos_exc.QosRuleNotFound
"""
policy_id = self._get_policy_id(context, rule_cls, rule_id)
rule_data_name = rule_cls.rule_type + '_rule'
alias_rule_data_name = 'alias_' + rule_data_name
rule_data[rule_data_name] = rule_data.pop(alias_rule_data_name)
return self.update_policy_rule(context, rule_cls, rule_id, policy_id,
rule_data)
def delete_policy_rule(self, context, rule_cls, rule_id, policy_id):
"""Delete a QoS policy rule.
:param context: neutron api request context
:type context: neutron.context.Context
:param rule_cls: the rule object class
:type rule_cls: a class from the rule_object (qos.objects.rule) module
:param rule_id: the id of the QosPolicy Rule to delete
:type rule_id: str uuid
:param policy_id: the id of the rule's policy
:type policy_id: str uuid
:returns: None
"""
with db_api.CONTEXT_WRITER.using(context):
# Ensure we have access to the policy.
policy = policy_object.QosPolicy.get_policy_obj(context, policy_id)
rule = policy.get_rule_by_id(rule_id)
rule.delete()
policy.obj_load_attr('rules')
if rule.rule_type == qos_consts.RULE_TYPE_MINIMUM_BANDWIDTH:
self.reject_min_bw_rule_updates(context, policy)
self.driver_manager.call(qos_consts.UPDATE_POLICY_PRECOMMIT,
context, policy)
self.driver_manager.call(qos_consts.UPDATE_POLICY, context, policy)
def delete_rule(self, context, rule_cls, rule_id):
"""Delete a QoS policy rule alias. This method processes a QoS policy
rule delete, where the rule is an API first level resource instead of a
subresource of a policy.
:param context: neutron api request context
:type context: neutron.context.Context
:param rule_cls: the rule object class
:type rule_cls: a class from the rule_object (qos.objects.rule) module
:param rule_id: the id of the QosPolicy Rule to delete
:type rule_id: str uuid
:returns: None
:raises: qos_exc.QosRuleNotFound
"""
policy_id = self._get_policy_id(context, rule_cls, rule_id)
return self.delete_policy_rule(context, rule_cls, rule_id, policy_id)
@db_base_plugin_common.filter_fields
@db_base_plugin_common.convert_result_to_dict
def get_policy_rule(self, context, rule_cls, rule_id, policy_id,
fields=None):
"""Get a QoS policy rule.
:param context: neutron api request context
:type context: neutron.context.Context
:param rule_cls: the rule object class
:type rule_cls: a class from the rule_object (qos.objects.rule) module
:param rule_id: the id of the QoS policy rule to get
:type rule_id: str uuid
:param policy_id: the id of the rule's policy
:type policy_id: str uuid
:returns: a QoS policy rule object
:raises: qos_exc.QosRuleNotFound
"""
with db_api.CONTEXT_READER.using(context):
# Ensure we have access to the policy.
policy_object.QosPolicy.get_policy_obj(context, policy_id)
rule = rule_cls.get_object(context, id=rule_id)
if not rule:
raise qos_exc.QosRuleNotFound(policy_id=policy_id, rule_id=rule_id)
return rule
def get_rule(self, context, rule_cls, rule_id, fields=None):
"""Get a QoS policy rule alias. This method processes a QoS policy
rule get, where the rule is an API first level resource instead of a
subresource of a policy
:param context: neutron api request context
:type context: neutron.context.Context
:param rule_cls: the rule object class
:type rule_cls: a class from the rule_object (qos.objects.rule) module
:param rule_id: the id of the QoS policy rule to get
:type rule_id: str uuid
:returns: a QoS policy rule object
:raises: qos_exc.QosRuleNotFound
"""
policy_id = self._get_policy_id(context, rule_cls, rule_id)
return self.get_policy_rule(context, rule_cls, rule_id, policy_id)
# TODO(QoS): enforce rule types when accessing rule objects
@db_base_plugin_common.filter_fields
@db_base_plugin_common.convert_result_to_dict
def get_policy_rules(self, context, rule_cls, policy_id, filters=None,
fields=None, sorts=None, limit=None, marker=None,
page_reverse=False):
"""Get QoS policy rules.
:param context: neutron api request context
:type context: neutron.context.Context
:param rule_cls: the rule object class
:type rule_cls: a class from the rule_object (qos.objects.rule) module
:param policy_id: the id of the QosPolicy for which to get rules
:type policy_id: str uuid
:returns: QoS policy rule objects meeting the search criteria
"""
with db_api.CONTEXT_READER.using(context):
# Ensure we have access to the policy.
policy_object.QosPolicy.get_policy_obj(context, policy_id)
filters = filters or dict()
filters[qos_consts.QOS_POLICY_ID] = policy_id
pager = base_obj.Pager(sorts, limit, page_reverse, marker)
return rule_cls.get_objects(context, _pager=pager, **filters)
|
#!/usr/bin/env python
# license removed for brevity
import sys
import os
current_folder = os.path.dirname(os.path.realpath(__file__))
sys.path.append(current_folder)
modules_folder = os.path.join(current_folder, "..")
sys.path.append(modules_folder)
main_folder = os.path.join(modules_folder, "..")
sys.path.append(main_folder)
import time
import json
from config.console_formatter import Console_Formatter
from bs4 import BeautifulSoup
from Web_Browser_Driver import Web_Browser_Driver
from Web_Browser_Driver_Keys import Web_Browser_Driver_Keys
from Auto_Browser import Auto_Browser
from Login_Breaker import Login_Breaker
from Friend_Link_Parser import Friend_Link_Parser
from Post_Data_Parser import Post_Data_Parser
from Search_Person_Parser import Search_Person_Parser
from User_Information_Database import User_Information_Database
class Facebook_Finder:
##PUBLIC
version = "1.0"
agent_header = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36"
user_infomation_database = User_Information_Database()
post_data_parser = Post_Data_Parser()
friend_link_parser = Friend_Link_Parser()
search_person_parser = Search_Person_Parser()
cookies_path = os.path.join(current_folder, "cookies.json")
is_info = True
banner_personal_profile_locate = '個人檔案'
banner_personal_profile_main_page_locate = '首頁'
headline_post_locate = '動態時報'
headline_about_locate = '關於'
headline_friends_locate = '朋友'
headline_photos_locate = '相片'
search_headline_posts = '貼文'
search_headline_persons = '人物'
search_headline_photos = '相片'
search_headline_videos = '影片'
##PRIVATE
program_name_ = __name__
console_formatter_ = Console_Formatter()
parser_ = None
page_links = {}
browser = None
auto_browser = None
cookies = None
is_login = False
def link(self, address):
self.browser.access_website(address)
def login(self, account, passwd, wait_time=10):
if self.is_info :
msg = "Starting login as {} ...".format(account)
print(self.console_formatter_.NOTIFY(self.program_name_, msg))
self.is_login = Login_Breaker(self.browser, self.is_info)(account, passwd, seperate=False, wait_time=wait_time)
if self.is_login == False:
if self.is_info :
msg = "Returning back ..."
print(self.console_formatter_.FATAL(self.program_name_, msg))
self.browser.backward()
else:
if self.is_info :
msg = "Login successfully !"
print(self.console_formatter_.INFO(self.program_name_, msg))
return self.is_login
'''
def person_parser(self):
if self.is_info :
msg = "Personal infomation parsing ..."
print(self.console_formatter_.INFO(self.program_name_, msg))
links = BeautifulSoup(self.browser.get_source(), "lxml").find_all('a')
person_name = None
is_name_find = False
for link in links:
if 'title' in link.attrs:
if link.get('title') == "{}".format(self.banner_personal_profile_locate):
self.person_profile.add_person(link.text)
self.person_profile.add_info(link.text, 'href', link.get('href'))
person_name = link.text
is_name_find = True
break
if not is_name_find:
if self.is_info :
msg = "Personal infomation parsing fail ! Cannot find personal infomation link !"
print(self.console_formatter_.FATAL(self.program_name_, msg))
return False
if self.is_info :
msg = "Entering personal infomation page : {} ...".format(person_name)
print(self.console_formatter_.INFO(self.program_name_, msg))
self.link(self.person_profile.profile[person_name]['href'])
self.parse_personal_page()
return True
'''
def parse_personal_page(self):
if self.is_info :
msg = "Parsing page ..."
print(self.console_formatter_.INFO(self.program_name_, msg))
bs_ = BeautifulSoup(self.browser.get_source(), "lxml")
user_name = bs_.find("a", {'class' : '_2nlw _2nlv'}).text
user_link = bs_.find("a", {'class' : '_2nlw _2nlv'})['href']
self.user_infomation_database.add_user(user_link, user_name)
headline = bs_.find("div", id='fbTimelineHeadline')
headline_post = headline.find(text=self.headline_post_locate).find_parent("a")
headline_about = headline.find(text=self.headline_about_locate).find_parent("a")
headline_friends = headline.find(text=self.headline_friends_locate).find_parent("a")
headline_photos = headline.find(text=self.headline_photos_locate).find_parent("a")
self.page_links['headline'] = [headline]
if headline_post != None:
self.page_links['headline'].append([self.headline_post_locate, headline_post])
self.page_links[self.headline_post_locate] = headline_post
if headline_about != None:
self.page_links['headline'].append([self.headline_about_locate, headline_about])
self.page_links[self.headline_about_locate] = headline_about
if headline_friends != None:
self.page_links['headline'].append([self.headline_friends_locate, headline_friends])
self.page_links[self.headline_friends_locate] = headline_friends
if headline_photos != None:
self.page_links['headline'].append([self.headline_photos_locate, headline_photos])
self.page_links[self.headline_photos_locate] = headline_photos
if headline_friends != None:
friends_lists = self.friend_parser()
self.user_infomation_database.add_info(user_link, 'friends', friends_lists)
if headline_post != None:
posts_lists = self.post_parser()
self.user_infomation_database.add_info(user_link, 'posts', posts_lists)
return self.page_links
def post_parser(self, browse_counts=64):
if self.is_info :
msg = "Parsing posts ..."
print(self.console_formatter_.INFO(self.program_name_, msg))
if not self.headline_post_locate in self.page_links:
if self.is_info :
msg = "Cannot find posts link !"
print(self.console_formatter_.WARN(self.program_name_, msg))
return None
self.link(self.page_links[self.headline_post_locate]['href'])
self.auto_browser.auto_end(browse_counts=browse_counts)
post_lists = self.post_data_parser.add_source(self.browser.get_source())
for post_list in post_lists:
self.page_links[self.headline_post_locate].append(post_list)
if self.is_info :
msg = "Posts found : {} : {} ".format(post_list, post_lists[post_list]['article'] if 'article' in post_lists[post_list] else "")
print(self.console_formatter_.NOTIFY(self.program_name_, msg))
for reply in post_lists[post_list]['replys']:
msg = "---- {}".format(reply)
print(self.console_formatter_.NOTIFY(self.program_name_, msg))
if self.is_info :
msg = "Parsing posts done !"
print(self.console_formatter_.INFO(self.program_name_, msg))
return post_lists
def friend_parser(self, browse_counts=64):
if self.is_info :
msg = "Parsing friends ..."
print(self.console_formatter_.INFO(self.program_name_, msg))
if not self.headline_friends_locate in self.page_links:
if self.is_info :
msg = "Cannot find friends link !"
print(self.console_formatter_.WARN(self.program_name_, msg))
return None
self.link(self.page_links[self.headline_friends_locate]['href'])
self.auto_browser.auto_end(browse_counts=browse_counts)
friends_lists = self.friend_link_parser.add_source(self.browser.get_source())
for href_link in friends_lists:
#self.page_links[self.headline_friends_locate].append([friends_lists[href_link], href_link])
if self.is_info :
msg = "Friends : {} {}".format(friends_lists[href_link], href_link)
print(self.console_formatter_.NOTIFY(self.program_name_, msg))
if self.is_info :
msg = "Parsing friends done !"
print(self.console_formatter_.INFO(self.program_name_, msg))
return friends_lists
def enter_personal_information(self, link_name=None):
link_name = self.banner_personal_profile_locate if link_name == None else link_name
if self.is_info :
msg = "Entering personal profile information ..."
print(self.console_formatter_.INFO(self.program_name_, msg))
if self.browser.find_xpath("//a[@title='{}']".format(link_name)):
self.browser.click()
return True
else:
if self.is_info :
msg = "Cannot find personal profile information link !"
print(self.console_formatter_.WARN(self.program_name_, msg))
return False
def enter_personal_main_page(self, link_name=None):
link_name = self.banner_personal_profile_main_page_locate if link_name == None else link_name
if self.is_info :
msg = "Entering personal profile main page links : {} ...".format(link_name)
print(self.console_formatter_.INFO(self.program_name_, msg))
if self.browser.find_link_text("{}".format(link_name)):
self.browser.click()
return True
else:
if self.is_info :
msg = "Cannot find personal profile main page links !".format(link_name)
print(self.console_formatter_.WARN(self.program_name_, msg))
return False
def user_search(self, user_name, browse_counts=64):
if self.is_info :
msg = "Searching users ..."
print(self.console_formatter_.INFO(self.program_name_, msg))
if not self.search(user_name):
if self.is_info :
msg = "Searching fail !"
print(self.console_formatter_.WARN(self.program_name_, msg))
return None
self.browser.click_link(self.search_headline_persons)
self.auto_browser.auto_page_down(browse_counts)
search_person_lists = self.search_person_parser.add_source(self.browser.get_source())
for search_person in search_person_lists:
if self.is_info :
msg = "User : {} link={}".format(search_person_lists[search_person][0], search_person)
print(self.console_formatter_.NOTIFY(self.program_name_, msg))
if self.is_info :
msg = "Searching users done !"
print(self.console_formatter_.INFO(self.program_name_, msg))
return search_person_lists
def search(self, text):
if self.is_info :
msg = "Searching {} ...".format(text)
print(self.console_formatter_.INFO(self.program_name_, msg))
if not self.browser.search(text):
if self.is_info :
msg = "Searching {} fail !".format(text)
print(self.console_formatter_.WARN(self.program_name_, msg))
return False
else:
return True
def screenshot(self, file_name):
self.browser.screenshot(file_name)
def wait(self, tm):
self.browser.wait(tm)
def get_source(self):
return self.browser.get_source()
def get_cookies(self):
self.cookies = self.browser.get_cookies()
return self.cookies
def save_cookies(self, path):
if self.is_info :
msg = "Saving cookies : {} ...".format(path)
print(self.console_formatter_.INFO(self.program_name_, msg))
with open(path, "w") as f:
json.dump(json.dumps(self.cookies), f)
return
def load_cookies(self, path):
if self.is_info :
msg = "Checking cookies : {} ...".format(path)
print(self.console_formatter_.INFO(self.program_name_, msg))
if not self.check_path(path):
if self.is_info:
msg = "Cookies : {} not found !".format(self.cookies_path)
print(self.console_formatter_.WARN(self.program_name_, msg))
return False
else:
if self.is_info :
msg = "Loading cookies : {} ...".format(path)
print(self.console_formatter_.INFO(self.program_name_, msg))
with open(path,"r") as f:
self.cookies = json.loads(json.load(f))
for cookie in self.cookies:
self.browser.add_cookie(cookie)
return True
def set_browser(self, browser):
self.browser = browser
self.auto_browser = Auto_Browser(self.browser)
def get_browser(self):
return self.browser
def check_path(self, path):
return os.path.exists(path)
def __init__(self, browser=None, is_cookies_clear=True, is_notifications=False, is_window=True, is_info=True, is_debug=True, **kwargs):
self.is_info = is_info
if self.is_info :
msg = "Initializing ..."
print(self.console_formatter_.INFO(self.program_name_, msg))
self.browser = Web_Browser_Driver(is_cookies_clear=is_cookies_clear, is_notifications=is_notifications,
is_window=is_window, is_info=is_debug,
**kwargs) if browser == None else browser
self.auto_browser = Auto_Browser(self.browser, is_info=is_debug)
self.user_infomation_database.load_database()
if not is_cookies_clear:
self.load_cookies(self.cookies_path)
self.link("http://www.facebook.com")
def __del__(self):
self.get_cookies()
self.save_cookies(self.cookies_path)
self.user_infomation_database.save_database()
if self.is_info :
msg = "Closing ..."
print(self.console_formatter_.WARN(self.program_name_, msg))
def main(**kwargs):
ff = Facebook_Finder(is_cookies_clear=True, is_debug=True, **kwargs)
#ff.login("[email protected]", "123")
ff.login("[email protected]", "f2mPqDDG")
search_user_list = ff.user_search("吳音寧")
#ff.link("https://www.facebook.com/profile.php?id=100001277912128&__tn__=%2Cd-]-h-R&eid=ARBo_xeaJ8T0r8X6IQFxWM99sqIXjOpxCdTxL9g5s1dVhTKT1kJj44yQKvXMy1QNnx7pNQ6mK57MzBdk")
#ff.link("https://www.facebook.com/profile.php?id=100022934512189")
##ff.link("https://www.facebook.com/chen0817")
#ff.link("https://www.facebook.com/groups/451357468329757/?jazoest=2651001208210110412052665652120821001147665108731081051021078111868715776110715210810852651197711411010566768910065586510012079120113814597119578010410472116896948114861065253116104979811212210612210649121104102881201047611210511111065")
#ff.parse_personal_page()
#ff.enter_personal_main_page()
#ff.enter_personal_main_page_links("關於")
#ff.search("Kelly")
#if ff.get_browser().find_tag_name("a"):
#ff.get_browser().click()
#ff.find_link_text("天氣預報", is_contact=False)
#wb.access_website("https://world.taobao.com/product/%E6%B7%98%E5%AF%B6%E5%A4%A7%E9%99%B8.htm")
#wb.find_link_text("淘寶網首頁", is_contact=False)
#ff.click()
#print(wb.get_source())
time.sleep(23)
if __name__ == "__main__":
args = sys.argv[1:]
kwargs = {}
for i in range(0, len(args),2):
kwargs[args[i]] = args[i+1]
main(**kwargs)
|
from rest_framework.serializers import ModelSerializer
from garments.models import BaseGarment
from garments.models import Garment
from garments.models import GarmentType
from garments.models import GarmentFabric
from garments.models import Colorway
from garments.models import GarmentSize
from garments.models import GarmentGender
from garments.models import GarmentImage
from seasons.serializers import DropDateSerializer
class GarmentTypeSerializer(ModelSerializer):
class Meta:
model = GarmentType
fields = ('pk', 'name', 'description',)
class GarmentFabricSerializer(ModelSerializer):
class Meta:
model = GarmentFabric
fields = ('name', 'description',)
class ColorwaySerializer(ModelSerializer):
class Meta:
model = Colorway
fields = ('name', 'description',)
class GarmentSizeSerializer(ModelSerializer):
class Meta:
model = GarmentSize
fields = ('size', 'length', 'description',)
class GarmentGenderSerializer(ModelSerializer):
class Meta:
model = GarmentGender
fields = ('gender',)
class BaseGarmentSerializer(ModelSerializer):
type = GarmentTypeSerializer(many=False, read_only=True)
fabric = GarmentFabricSerializer(many=False, read_only=True)
drop_date = DropDateSerializer(many=False, read_only=True)
class Meta:
model = BaseGarment
fields = ('description','name','type', 'fabric', 'drop_date')
class GarmentSerializer(ModelSerializer):
base_garment = BaseGarmentSerializer(many=False, read_only=True)
color = ColorwaySerializer(many=False, read_only=True)
size = GarmentSizeSerializer(many=False, read_only=True)
class Meta:
model = Garment
fields = ('uuid', 'base_garment', 'color', 'size', 'quantity', 'price',)
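# Illustrative usage sketch (assumes a saved Garment instance named `garment`; the field
# values shown are placeholders):
#     GarmentSerializer(garment).data
# returns nested read-only data such as
#     {'uuid': '...', 'base_garment': {'name': '...', 'type': {...}, 'fabric': {...},
#      'drop_date': {...}}, 'color': {...}, 'size': {...}, 'quantity': 1, 'price': '...'}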
|
warning2 = "Your email has not been verified, so if you forget your password, you will not be able to reset it."
warning = "Your email has not been linked, so if you forget your password, you will not be able to reset it."
|
import warnings
from collections.abc import MutableMapping
MSG_ERROR_OPER = "Unsupported operand type(s) for {} and 'Polynomial'"
MSG_ERROR_ARG = "Polynomial() takes at least one argument (0 given)"
MSG_ERROR_VAL = "{} must be 'int' or 'float' not '{}'"
MSG_ERROR_KW = "Unexpected keyword argument: '{}'"
MSG_WARNING = "Repeated degree, replaced first assigned value"
class DictProxy(MutableMapping, dict):
"""Copy of the ``dict`` with the ``__setitem__`` method modified
"""
def __getitem__(self, key):
return dict.__getitem__(self, key)
def __setitem__(self, key, value):
"""Set items key:value with check
"""
self._check_keys(key)
self._check_values(value)
dict.__setitem__(self, key, value)
def __delitem__(self, key):
dict.__delitem__(self, key)
def __iter__(self):
return dict.__iter__(self)
def __len__(self):
return dict.__len__(self)
def __contains__(self, x):
return dict.__contains__(self, x)
def _check_keys(self, key):
"""Check the key have the form "x0", "x1", ..., "xn" when n is a
positive integer. Raise an error otherwise.
"""
if key[0] == "x" and key[1:].isdecimal():
return key
else:
raise TypeError(MSG_ERROR_KW.format(key))
def _check_values(self, value):
"""Check that the value is 'int' or 'float'. Raise an error otherwise.
"""
if not isinstance(value, int) and not isinstance(value, float):
s = value.__class__.__name__
raise TypeError(MSG_ERROR_VAL.format("Coefficients", s))
class Polynomial():
"""Create a polynomial object.
Polynomial provides numerical methods '+', '-', '*' and ()
.. versionadded:: 1.0
Instance
--------
    It is possible to initialize in these three ways:
    Polynomial(*args) --> new polynomial initialized from args,
    values must be real numbers. Example: p = Polynomial(1, 2, 3)
    Polynomial(**kwargs) --> new polynomial initialized with the xn=value pairs
    in the keyword argument list, x is the symbol, n is the degree and
    values must be real numbers. Example: p = Polynomial(x7 = 1, x3 = 2)
    Polynomial(*args, **kwargs) --> new polynomial initialized following the
    above rules. Example: p = Polynomial(1, 2, 3, x9 = 4)
    Parameters
    ----------
    coef : *args and/or **kwargs
        Series coefficients in order of increasing degree (positional
        arguments start at degree 0); the coefficients for the missing
        degrees are considered zero, i.e.:
        ``(1, 2, 3)`` gives ``1*x^0 + 2*x^1 + 3*x^2``
        ``(x7 = 1, x3 = 2)`` gives ``2*x^3 + 1*x^7``
        ``(1, 2, 3, x9 = 4)`` gives ``1*x^0 + 2*x^1 + 3*x^2 + 4*x^9``
Attributes
----------
    coef : dict
        Dictionary with keys ``xi`` for degree ``i`` and its corresponding
        coefficient as value. This attribute cannot be reassigned, but it can
        be updated with an `xi=value` pair; anything else will raise an error.
    degree : int
        Degree of the polynomial (the highest of the degrees of the individual
        terms). This attribute cannot be reassigned, but it is updated when
        the coefficients are updated.
"""
_superscript_map = {"0": "⁰", "1": "¹", "2": "²", "3": "³", "4": "⁴",
"5": "⁵", "6": "⁶", "7": "⁷", "8": "⁸", "9": "⁹"}
def __init__(self, *args, **kwargs):
"""
Parameters
----------
*args : iterable, list
Series coefficients in order of increasing degree
*kwargs : iterable, dict
Series coefficients with degree in keys
Raises
------
TypeError
If no parameters are passed or `xi=value` pair is not
the required form. Check Instance section.
"""
there_coef_in_args = len(args) != 0
there_coef_in_kwargs = len(kwargs) != 0
if not there_coef_in_args and not there_coef_in_kwargs:
raise TypeError(MSG_ERROR_ARG)
self._coef = DictProxy()
if there_coef_in_args:
self._coef.update({"x%d" % i: v for i, v in enumerate(args)})
if there_coef_in_kwargs:
self._parse_kwargs(kwargs)
@property
def degree(self):
return self._get_max_degree()
@property
def coef(self):
return self._coef
def _parse_kwargs(self, kwargs):
"""Update coef attribute with kwargs.
If coef already has a value assigned to a grade, it is
replaced and a warning is displayed.
"""
for k, v in kwargs.items():
if self._coef.get(k, None) is not None:
warnings.warn(MSG_WARNING, Warning, stacklevel=1)
self._coef[k] = v
def _get_max_degree(self):
_max_degree_expr = max(self.coef.keys(), key=self._get_degree)
return self._get_degree(_max_degree_expr)
def _get_degree(self, expr):
"""Get degree of expr 'str' as 'int'.
"""
return int(expr[1:])
def _all_degrees(self):
return [self._get_degree(k) for k in self.coef.keys()]
def _result_degrees(self, other):
"""Information of the coefficients and degrees for polynomial add/sub
Add/Sub between polynomials can be done by add/sub like terms.
        If P(x) = `3x^9 + 5x^1 + 2x^0`, its degrees are P_d = (9,1,0); in the same way,
        for R(x) = `x^1 + x^0`, R_d = (1,0). The resultant degrees are P_d U R_d, i.e.
        (9,1,0)
Parameters
----------
other : Polynomial
Object of the same class as `self` for add/sub
Returns
-------
coef1 : dict
The coefficients of `self`
coef2 : dict
The coefficients of `other`
        result_keys : list
Degrees of the resulting polynomial according to the degrees of
`self` and `other`
"""
coef1, degree1 = self.coef, self._all_degrees()
coef2, degree2 = other.coef, other._all_degrees()
result_degrees = [d for d in degree1 if d not in degree2] + degree2
result_keys = ["x%d" % i for i in result_degrees]
return coef1, coef2, result_keys
def _result_degrees_mul(self, other):
"""Information of the coefficients and degrees for polynomial mul
Mul between polynomials can be done by mul each terms and
sum like terms.
        If P(x) = `3x^9 + 5x^1 + 2x^0`, its degrees are P_d = (9,1,0); in the same way,
        for R(x) = `x^1 + x^0`, R_d = (1,0), so the resultant oper is (P_d_i, R_d_j) for
        i,j in P_d, R_d: ((9,1),(9,0),(1,1),(1,0),(0,1),(0,0)).
        The resultant degrees are the unique values of the sums over these pairs, i.e.
        (10,9,2,1,0)
Parameters
----------
other : Polynomial
Object of the same class as `self` for mul
Returns
-------
coef1 : dict
The coefficients of `self`
coef2 : dict
The coefficients of `other`
        result_keys : list
Degrees of the resulting polynomial according to the degrees of
`self` and `other`
result_oper: list
Combination of degrees in mul according to the degrees of
`self` and `other`
"""
coef1, degree1 = self.coef, self._all_degrees()
coef2, degree2 = other.coef, other._all_degrees()
result_degrees = []
result_oper = []
for d in degree1:
result_degrees += [d + d_ for d_ in degree2
if d + d_ not in result_degrees]
result_oper.extend([(d, d_) for d_ in degree2])
result_keys = ["x%d" % i for i in result_degrees]
return coef1, coef2, result_keys, result_oper
def _mul_pol(self, value):
"""Polynomial Multiplication
Multiplication between polynomials can be summarized, where P_i and R_i
are the coefficients of P(x), R(x):
`x^0` = `P_0*R_0`
`x^1` = `P_1*R_0 + P_0*R_1`
        `x^2` = `P_2*R_0 + P_1*R_1 + P_0*R_2`
        ....
        `x^n` = `P_n*R_0 + P_(n-1)*R_1 + ... + P_0*R_n`
        With result_keys we select only the `x^n` terms that actually need to
        be calculated, and with result_oper only the nonzero coefficient pairs,
        as an optimization.
        Parameters
        ----------
        value : Polynomial
            Object of the same class as `self` to multiply by
        Returns
        -------
        Polynomial
            The product `self` * `value`
"""
coef1, coef2, result_keys, comb = self._result_degrees_mul(value)
result_coef = {}
for k in result_keys:
deg = self._get_degree(k)
comb_deg = [c for c in comb if sum(c) == deg]
result_coef[k] = 0
for i, j in comb_deg:
getc1 = coef1.get("x%d" % i, 0)
getc2 = coef2.get("x%d" % j, 0)
result_coef[k] += getc1*getc2
result_coef = {k: v for k, v in result_coef.items()
if v != 0}
return Polynomial(**result_coef)
def _mul_scalar(self, value):
"""Polynomial scalar multiplication
"""
coef1, degree1 = self.coef, self._all_degrees()
result_keys = ["x%d" % i for i in degree1]
result_coef = {k: value * coef1.get(k, 0) for k in result_keys}
result_coef = {k: v for k, v in result_coef.items()
if v != 0}
return Polynomial(**result_coef)
def __repr__(self):
"""Nice print of the polynomial
"""
nice_str = ""
keys = list(self.coef.keys())
keys.sort(key=self._get_degree)
keys = keys[::-1]
for k in keys:
v = self.coef[k]
deg = str(self._get_degree(k))
nice_deg = "".join([self._superscript_map[s] for s in deg])
nice_deg = k[0] + nice_deg
nice_val = "%.2f%s" % (abs(v), nice_deg)
if k == "x"+str(self.degree):
nice_str += nice_val
else:
nice_str += " + " + nice_val if v > 0 else " - " + nice_val
        if nice_str.endswith("x⁰"):
            nice_str = nice_str[:-2]
        return nice_str
def __call__(self, value):
"""Polynomial evaluation
Parameters
----------
value : int/float
Object to be checked, must be 'float'/'int'
Returns
-------
float
Result P(value)
"""
if not isinstance(value, int) and not isinstance(value, float):
s = value.__class__.__name__
raise TypeError(MSG_ERROR_VAL.format("Value", s))
result = 0
for k, v in self.coef.items():
degree = self._get_degree(k)
result += v * value ** degree
return result
def __add__(self, value):
"""Numerical method '+'
Parameters
----------
other : anything
            Object to be checked; must be the same class as `self`
Returns
-------
Polynomial
`self` + `other`
"""
if not isinstance(value, self.__class__):
s = value.__class__.__name__
raise TypeError(MSG_ERROR_OPER.format(" +: '%s' " % s))
coef1, coef2, result_keys = self._result_degrees(value)
result_coef = {k: coef1.get(k, 0) + coef2.get(k, 0)
for k in result_keys}
result_coef = {k: v for k, v in result_coef.items()
if v != 0}
return Polynomial(**result_coef)
def __sub__(self, value):
"""Numerical method '-'
Parameters
----------
other : anything
            Object to be checked; must be the same class as `self`
Returns
-------
Polynomial
`self` - `other`
"""
if not isinstance(value, self.__class__):
s = value.__class__.__name__
raise TypeError(MSG_ERROR_OPER.format(" -: '%s' " % s))
coef1, coef2, result_keys = self._result_degrees(value)
result_coef = {k: coef1.get(k, 0) - coef2.get(k, 0)
for k in result_keys}
result_coef = {k: v for k, v in result_coef.items()
if v != 0}
return Polynomial(**result_coef)
def __mul__(self, value):
"""Numerical method '*'
Parameters
----------
other : anything
            Object to be checked; must be the same class as `self`
            or 'float'/'int'
Returns
-------
Polynomial
`self` * `other`
"""
if isinstance(value, int) or isinstance(value, float):
return self._mul_scalar(value)
elif isinstance(value, self.__class__):
return self._mul_pol(value)
else:
s = value.__class__.__name__
raise TypeError(MSG_ERROR_OPER.format(" *: '%s' " % s))
def __rmul__(self, value):
"""Numerical method '*'
Parameters
----------
other : anything
            Object to be checked; must be the same class as `self`
            or 'float'/'int'
Returns
-------
Polynomial
`other` * `self`
"""
if isinstance(value, int) or isinstance(value, float):
return self._mul_scalar(value)
else:
s = value.__class__.__name__
raise TypeError(MSG_ERROR_OPER.format(" *: '%s' " % s))
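# A minimal usage sketch (not part of the original module; assumes the class
# above and its DictProxy/MSG_* helpers are importable as-is):
if __name__ == "__main__":
    p = Polynomial(2, 5, x9=3)   # 2 + 5x + 3x^9
    r = Polynomial(1, 1)         # 1 + x
    print(p)                     # pretty-printed, highest degree first
    print(p + r)                 # like terms are added degree-wise
    print(p * r)                 # degrees combine pairwise, e.g. 9 + 1 -> x^10
    print(p(2))                  # evaluate P(2) = 2 + 5*2 + 3*2**9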
|
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import legacy_page_test
from telemetry.page import shared_page_state
from page_sets.rendering import rendering_story
from page_sets.rendering import story_tags
from page_sets.system_health import platforms
class RepaintDesktopPage(rendering_story.RenderingStory):
ABSTRACT_STORY = True
TAGS = [story_tags.REPAINT_DESKTOP]
SUPPORTED_PLATFORMS = platforms.DESKTOP_ONLY
def __init__(self,
page_set,
name_suffix='',
extra_browser_args=None,
shared_page_state_class=shared_page_state.SharedMobilePageState):
super(RepaintDesktopPage, self).__init__(
page_set=page_set,
name_suffix=name_suffix,
extra_browser_args=extra_browser_args,
shared_page_state_class=shared_page_state_class)
def RunPageInteractions(self, action_runner):
action_runner.WaitForJavaScriptCondition(
'document.readyState == "complete"', timeout=30)
action_runner.ExecuteJavaScript(
'chrome.gpuBenchmarking.setRasterizeOnlyVisibleContent();')
mode = 'viewport'
width = None
height = None
args = {}
args['mode'] = mode
if width:
args['width'] = width
if height:
args['height'] = height
# Enqueue benchmark
action_runner.ExecuteJavaScript("""
window.benchmark_results = {};
window.benchmark_results.id =
chrome.gpuBenchmarking.runMicroBenchmark(
"invalidation_benchmark",
function(value) {},
{{ args }}
);
""",
args=args)
micro_benchmark_id = action_runner.EvaluateJavaScript(
'window.benchmark_results.id')
if not micro_benchmark_id:
raise legacy_page_test.MeasurementFailure(
'Failed to schedule invalidation_benchmark.')
with action_runner.CreateInteraction('Repaint'):
action_runner.RepaintContinuously(seconds=5)
action_runner.ExecuteJavaScript("""
window.benchmark_results.message_handled =
chrome.gpuBenchmarking.sendMessageToMicroBenchmark(
{{ micro_benchmark_id }}, {
"notify_done": true
});
""",
micro_benchmark_id=micro_benchmark_id)
class RepaintAmazon2018Page(RepaintDesktopPage):
"""
Why: #1 world commerce website by visits; #3 commerce in the US by time spent
"""
BASE_NAME = 'repaint_amazon'
YEAR = '2018'
URL = 'http://vmiura.github.io/snapit-pages/snapit_amazon.html'
class RepaintCNN2018Page(RepaintDesktopPage):
"""# Why: #2 news worldwide"""
BASE_NAME = 'repaint_cnn'
YEAR = '2018'
URL = 'http://vmiura.github.io/snapit-pages/snapit_cnn.html'
class RepaintFacebook2018Page(RepaintDesktopPage):
"""Why: #3 (Alexa global)"""
BASE_NAME = 'repaint_facebook'
YEAR = '2018'
URL = 'http://vmiura.github.io/snapit-pages/snapit_facebook.html'
class RepaintGoogleSearch2018Page(RepaintDesktopPage):
"""Why: Top Google property; a Google tab is often open"""
BASE_NAME = 'repaint_google_search'
YEAR = '2018'
URL = 'http://vmiura.github.io/snapit-pages/snapit_google_search.html'
class RepaintInstagram2018Page(RepaintDesktopPage):
"""Why: A top social site"""
BASE_NAME = 'repaint_instagram'
YEAR = '2018'
URL = 'http://vmiura.github.io/snapit-pages/snapit_instagram.html'
class RepaintReddit2018Page(RepaintDesktopPage):
"""Why: #1 news worldwide"""
BASE_NAME = 'repaint_reddit'
YEAR = '2018'
URL = 'http://vmiura.github.io/snapit-pages/snapit_reddit.html'
class RepaintTheVerge2018Page(RepaintDesktopPage):
"""Why: Top tech blog"""
BASE_NAME = 'repaint_theverge'
YEAR = '2018'
URL = 'http://vmiura.github.io/snapit-pages/snapit_theverge.html'
class RepaintTwitter2018Page(RepaintDesktopPage):
"""Why: A top social site"""
BASE_NAME = 'repaint_twitter'
YEAR = '2018'
URL = 'http://vmiura.github.io/snapit-pages/snapit_twitter.html'
class RepaintWikipedia2018Page(RepaintDesktopPage):
"""Why: #5 (Alexa global)"""
BASE_NAME = 'repaint_wikipedia'
YEAR = '2018'
URL = 'http://vmiura.github.io/snapit-pages/snapit_wikipedia.html'
class RepaintYahoo2018Page(RepaintDesktopPage):
"""Why: #9 (Alexa global)"""
BASE_NAME = 'repaint_yahoo_homepage'
YEAR = '2018'
URL = 'http://vmiura.github.io/snapit-pages/snapit_yahoo_homepage.html'
|
# -*- coding: UTF-8 -*-
from resources.lib.modules import control
from resources.lib.api import colorChoice
AddonID = control.AddonID
AddonTitle = control.AddonTitle
SelectDialog = control.SelectDialog
Execute = control.execute
Notify = control.Notify
CustomColor = control.setting('my_ColorChoice')
if CustomColor == '': CustomColor = 'none'
def mySpareMenu():
my_options = ['[B]Option1[/B]', '[B]Option2[/B]', '[B]Option3[/B]', '[B]Option4[/B]', '[B]Option5[/B]', '[ [B]Close[/B] ]']
selectList = []
for Item in my_options:
selectList.append(colorChoice.colorString(Item, CustomColor))
mychoice = SelectDialog(AddonTitle, selectList, key=False)
mychoice = mychoice.replace('[COLOR %s]' % (CustomColor),'').replace('[/COLOR]','')
if mychoice == '[B]Option1[/B]': Notify(AddonTitle, 'Testing Option1...')
elif mychoice == '[B]Option2[/B]': Notify(AddonTitle, 'Testing Option2...')
elif mychoice == '[B]Option3[/B]': Notify(AddonTitle, 'Testing Option3...')
elif mychoice == '[B]Option4[/B]': Notify(AddonTitle, 'Testing Option4...')
elif mychoice == '[B]Option5[/B]':
Notify(AddonTitle, 'Testing Option5...')
elif mychoice == '[ [B]Close[/B] ]' :
return
|
from typing import List
class Solution:
    def kWeakestRows(self, mat: List[List[int]], k: int) -> List[int]:
        # Pair each row index with its soldier count, sort by
        # (count, index), and keep only the row indices.
        ans = [(i, row.count(1)) for i, row in enumerate(mat)]
        ans = sorted(ans, key=lambda element: (element[1], element[0]))
        ans = [x[0] for x in ans]
return ans[:k] |
from abc import ABCMeta
from typing import Dict, List, Union
from urllib.parse import urlparse
ArgumentsType = Dict[str, Union[int, float, str]]
class BaseObject(metaclass=ABCMeta):
__slots__ = []
def __eq__(self, other):
if type(self) != type(other): # pylint: disable=unidiomatic-typecheck
return False
for name in self.__slots__:
if getattr(self, name) != getattr(other, name):
return False
return True
def __repr__(self):
return '<{} {}>'.format(
type(self).__name__,
' '.join('{}={}'.format(name, getattr(self, name)) for name in self.__slots__)
)
class Exchange(BaseObject):
__slots__ = ['name', 'type', 'durable', 'auto_delete', 'internal', 'arguments']
def __init__(self, name: str, type: str = 'topic', durable: bool = True, auto_delete: bool = False,
internal: bool = False, arguments: ArgumentsType = None):
self.name = name
self.type = type
self.durable = durable
self.auto_delete = auto_delete
self.internal = internal
self.arguments = arguments
class QueueBinding(BaseObject):
__slots__ = ['exchange', 'routing_key', 'arguments']
def __init__(self, exchange: Exchange, routing_key: str, arguments: ArgumentsType = None):
self.exchange = exchange
self.routing_key = routing_key
self.arguments = arguments
class Queue(BaseObject):
__slots__ = ['name', 'bindings', 'durable', 'exclusive', 'auto_delete', 'arguments']
def __init__(self, name: str, bindings: List[QueueBinding] = None, durable: bool = True, exclusive: bool = False,
auto_delete: bool = False, arguments: ArgumentsType = None):
self.name = name
self.bindings = bindings
self.durable = durable
self.exclusive = exclusive
self.auto_delete = auto_delete
self.arguments = arguments
class ConnectionParams(BaseObject):
__slots__ = ['host', 'port', 'username', 'password', 'virtual_host']
def __init__(self, host: str = 'localhost', port: int = 5672, username: str = 'guest', password: str = 'guest',
virtual_host: str = '/'):
self.host = host
self.port = port
self.username = username
self.password = password
self.virtual_host = virtual_host
@classmethod
def from_string(cls, connection_string: str) -> 'ConnectionParams':
parse_result = urlparse(connection_string)
assert parse_result.scheme == 'amqp', 'Scheme must be amqp'
kwargs = {
'virtual_host': parse_result.path[1:] if parse_result.path else None
}
if parse_result.hostname:
kwargs['host'] = parse_result.hostname
if parse_result.port:
kwargs['port'] = int(parse_result.port)
if parse_result.username:
kwargs['username'] = parse_result.username
if parse_result.password:
kwargs['password'] = parse_result.password
return cls(**kwargs)
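# A minimal usage sketch (illustrative names only, not part of the original module):
if __name__ == '__main__':
    params = ConnectionParams.from_string('amqp://guest:guest@localhost:5672/sandbox')
    logs = Exchange('logs', type='fanout')
    queue = Queue('log-consumer', bindings=[QueueBinding(logs, routing_key='#')])
    print(params)
    print(queue)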
|
import momaf_dataset
import transformers
import bert_regressor
import sys
import re
import torch
if __name__=="__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--load-from",default=None,help="Path to a model")
parser.add_argument("--field",default="content-noyearnopers",help="content-orig, content-noyear, content-noyear-nopers")
parser.add_argument("--sep",default=False, action="store_true",help="populate with SEP")
args = parser.parse_args()
dataset=momaf_dataset.load_dataset("momaf_nonames.jsonl") #this is a list of three datasets: train,dev,test
## Tokenizer loaded from AutoTokenizer
## Creating the model from the desired transformer model
if args.load_from:
model=bert_regressor.BertRegressor.from_pretrained(args.load_from)
tokenizer=transformers.AutoTokenizer.from_pretrained(args.load_from)
def encode_dataset(d):
txt=d[args.field] #WATCH OUT THIS GLOBAL VARIABLE
if args.sep:
txt=re.sub(r"([.?])\s+([A-ZÄÅÖ])",r"\1 [SEP] \2",txt)
return tokenizer(txt,truncation=True)
def make_year_target(d):
return {"target":(d["year"]-1970)/10.0}
for k in dataset:
dataset[k]=dataset[k].map(encode_dataset)
dataset[k]=dataset[k].map(make_year_target)
with torch.no_grad():
for e in dataset["test"]:
o=model(torch.tensor(e["input_ids"],device=model.device).unsqueeze(0),torch.tensor(e["attention_mask"],device=model.device).unsqueeze(0))
p=o.prediction[0][0].item()*10+1970
print(e["url"],e["year"],p,p-e["year"],sep="\t")
# train_args = transformers.TrainingArguments('out.ckpt',evaluation_strategy='steps',eval_steps=30, logging_strategy='steps',save_strategy='steps',save_steps=30,save_total_limit=3,
# learning_rate=args.lr,per_device_train_batch_size=args.bsize,gradient_accumulation_steps=args.grad_acc,max_steps=args.steps, logging_steps=5, label_names=["target"],load_best_model_at_end=True,warmup_steps=150)
# if args.pretrain_frozen:
# for param in model.bert.parameters():
# param.requires_grad=False
# trainer = transformers.Trainer(model,train_args,train_dataset=dataset['train'],eval_dataset=dataset['validation'],tokenizer=tokenizer,callbacks=[transformers.EarlyStoppingCallback(early_stopping_patience=3)])
# trainer.train()
# if args.save_to:
# trainer.save_model(args.save_to)
|
class BaseBuilding(object):
"""
    Boiler-plate class used to initialize the various buildings in the
    municipality.
"""
def __init__(self, location=None, land_rate=500):
if not isinstance(location, str) and location is not None:
raise TypeError("Location should be of type str")
if not isinstance(land_rate, (float, int, long)):
raise TypeError("Land rate should be of type numeric")
self.__location = location
self.__land_rate = land_rate
def set_location(self, location):
if not isinstance(location, str):
raise TypeError("Location should be of type str")
self.__location = location
def set_land_rate(self, land_rate):
if not isinstance(land_rate, (float, int, long)):
raise TypeError("Land rate should be of type numeric")
self.__land_rate = land_rate
def get_location(self):
return self.__location
def get_land_rate(self):
return self.__land_rate
class CommercialBuilding(BaseBuilding):
"""
This building provides space for offices and warehouses
Land rate is based on the available floor space
"""
def __init__(self, location=None, floor_space=0):
if not isinstance(floor_space, (float, int, long)):
raise TypeError("Floor Space should be of type numeric")
super(self.__class__, self).__init__(location)
self.__floor_space = floor_space
def set_floor_space(self, floor_space):
self.__floor_space = floor_space
def get_floor_space(self):
return self.__floor_space
def get_land_rate(self):
return self.__floor_space * 30
class ResidentialBuilding(BaseBuilding):
"""
    This building provides space for housing.
    Land rate depends on the number of available units.
"""
def __init__(self, location=None, num_units=0):
if not isinstance(num_units, (float, int, long)):
raise TypeError("Land rate should be of type numeric")
super(self.__class__, self).__init__(location)
self.__num_units = num_units
def set_num_units(self, num_units):
        self.__num_units = num_units
def get_num_units(self):
return self.__num_units
def get_land_rate(self):
"""land rate = num_unit * 20"""
return self.__num_units * 20
class Utilities(BaseBuilding):
"""
    These buildings are owned by the municipality and hence pay
    no land rate.
"""
def __init__(self, location=None, utility_name=None):
if not isinstance(utility_name, str) and utility_name is not None:
raise TypeError("Utlity hould be of type str")
super(self.__class__, self).__init__(location)
self.__utility_name = utility_name
def set_land_rate(self, land_rate):
raise NotImplementedError("Utility Building owned pay no land rate")
def set_utility(self, utility_name):
self.__utility_name = utility_name
def get_utility(self):
return self.__utility_name
def get_land_rate(self):
return 0
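# A minimal usage sketch, kept as comments because this module targets
# Python 2 (note the use of `long` above); names are illustrative only:
#   office = CommercialBuilding(location="CBD", floor_space=120)
#   office.get_land_rate()    # 120 * 30 -> 3600
#   flats = ResidentialBuilding(location="Suburb", num_units=10)
#   flats.get_land_rate()     # 10 * 20 -> 200
#   works = Utilities(location="Riverside", utility_name="Water Works")
#   works.get_land_rate()     # 0 -- utilities pay no land rate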
|
import gzip
import json
MAX_LUNATION = 102657
MIN_LUNATION = -21014
def generate(args, model):
print(
f'> Exporting {MAX_LUNATION - MIN_LUNATION + 1:d} lunations '
f'to {args.path_to_json_output:s}'
)
with gzip.open(args.path_to_json_output, 'wt') as f:
        f.write(json.dumps(list(map(model.predict, range(MIN_LUNATION, MAX_LUNATION + 1)))))
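# A read-back sketch (illustrative only, not part of the original module):
#   with gzip.open(args.path_to_json_output, 'rt') as f:
#       lunations = json.loads(f.read())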
|
import torch
from torch import nn as nn
from transformers import BertConfig
from transformers import BertModel
from transformers import BertPreTrainedModel
from spert import sampling
from spert import util
def get_token(h: torch.tensor, x: torch.tensor, token: int):
"""
    Get the embedding of a specific token (e.g. [CLS]).
    :param h: hidden states of the sentence after BERT, [batch_size, seq_length, emb_size], e.g. [2, 49, 768]
    :type h:
    :param x: token ids of the encodings, [batch_size, seq_length]
:type x: eg: tensor([[ 101, 1130, 2668, 117, 170, 7141, 1107, 5043, 1276, 2132,
11374, 5425, 1104, 23637, 2499, 7206, 18638, 117, 1103, 4806,
15467, 1104, 1697, 5107, 119, 102],
[ 101, 1130, 12439, 117, 1103, 2835, 2084, 1104, 1103, 1244,
1311, 117, 19936, 139, 119, 10011, 117, 1108, 1255, 1107,
8056, 117, 3197, 119, 102, 0]], device='cuda:0')
    :param token: id of the token to extract, e.g. 101 for [CLS]
:type token:int
:return:
:rtype:
"""
    # emb_size: 768
    emb_size = h.shape[-1]
    # merge the first two dimensions
    token_h = h.view(-1, emb_size)
    # flatten x
    flat = x.contiguous().view(-1)
    # keep the contextualized embeddings at positions whose token id equals the given token
    token_h = token_h[flat == token, :]
    # token_h: [batch_size, embedding_dim]
return token_h
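# A small shape check, kept as comments since this sits mid-module (values are
# illustrative): with h of shape [2, 4, 768] and
# x = torch.tensor([[101, 5, 6, 102], [101, 7, 102, 0]]),
# get_token(h, x, 101) keeps one row of h per occurrence of token id 101,
# i.e. shape [2, 768] -- one [CLS] embedding per sample.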
class SpERT(BertPreTrainedModel):
""" 基于跨度的模型来联合提取实体和关系"""
VERSION = '1.1'
def __init__(self, config: BertConfig, cls_token: int, relation_types: int, entity_types: int,
size_embedding: int, prop_drop: float, freeze_transformer: bool, max_pairs: int = 100):
super(SpERT, self).__init__(config)
        # load the underlying BERT model first
self.bert = BertModel(config)
        # new classification layers; relation_types is the number of relation labels (here 5)
self.rel_classifier = nn.Linear(config.hidden_size * 3 + size_embedding * 2, relation_types)
self.entity_classifier = nn.Linear(config.hidden_size * 2 + size_embedding, entity_types)
        # embedding of the entity (span) size
self.size_embeddings = nn.Embedding(100, size_embedding)
self.dropout = nn.Dropout(prop_drop)
self._cls_token = cls_token
self._relation_types = relation_types
self._entity_types = entity_types
        # maximum number of relation candidates considered per chunk
self._max_pairs = max_pairs
# weight initialization
self.init_weights()
if freeze_transformer:
print("设置冻结transformer权重")
# freeze all transformer weights
for param in self.bert.parameters():
param.requires_grad = False
def _forward_train(self, encodings: torch.tensor, context_masks: torch.tensor, entity_masks: torch.tensor,
entity_sizes: torch.tensor, relations: torch.tensor, rel_masks: torch.tensor):
"""
        # encodings: [batch_size, padded seq_length], tokens converted to ids
        # context_masks: [batch_size, padded seq_length], mask over each sample's actual length
        # entity_masks: [batch_size, num_entities, padded seq_length], mask of each entity's position in the sentence
        # entity_sizes: [batch_size, num_entities], length of each entity
        # 'entity_types', [batch_size, num_entities]
        # 'rels', [batch_size, num_relations, head/tail entity indices], e.g. [2, 6, 2]; the 6 includes positive and negative samples
        # 'rel_masks', [batch_size, num_relations, padded seq_length], position of the relation in the sample, i.e. the words between the two entities
        # 'rel_types', [batch_size, num_relations, num_relation_labels], one-hot encoded relations
        # 'entity_sample_masks', [batch_size, num_entities], after padding
        # 'rel_sample_masks', [batch_size, num_relations], after padding; if sample 1 has 10 relations and sample 2 has 3, sample 2 gets 7 False entries
        Obtain contextualized token embeddings from the last transformer layer. Training pass.
        :param encodings: [batch_size, seq_length]
        :type encodings:
        :param context_masks: [batch_size, seq_length]
        :type context_masks:
        :param entity_masks: [batch_size, num_entities, seq_length]
        :type entity_masks:
        :param entity_sizes: [batch_size, num_entities]
        :type entity_sizes:
        :param relations: [batch_size, num_relations, head/tail entity indices]
        :type relations:
        :param rel_masks: [batch_size, num_relations, seq_length], position of the relation in the sample, i.e. the words between the two entities
        :type rel_masks:
:return:
:rtype:
"""
        context_masks = context_masks.float()  # cast context_masks to float
        h = self.bert(input_ids=encodings, attention_mask=context_masks)['last_hidden_state']
        # h is the last hidden layer, of shape [batch_size, seq_length, hidden_size]
        batch_size = encodings.shape[0]
        # entity classification: embed the entity sizes, [batch_size, num_entities] -> [batch_size, num_entities, size_hidden_size], e.g. [2, 104, 25]
        size_embeddings = self.size_embeddings(entity_sizes)  # embed entity candidate sizes
        # entity_clf [batch_size, entity_num, entity_label_num], e.g. [2, 102, 5]: entity classification
        # entity_spans_pool [batch_size, entity_num, embedding_size], e.g. [2, 102, 768]: max-pooled entity spans; if an entity has 3 tokens, the element-wise maximum represents the entity
        entity_clf, entity_spans_pool = self._classify_entities(encodings, h, entity_masks, size_embeddings)
        # relation classification; self._max_pairs is the maximum number of relation candidates per chunk; h_large [batch_size, num_relations, seq_length, hidden_size]
        h_large = h.unsqueeze(1).repeat(1, max(min(relations.shape[1], self._max_pairs), 1), 1, 1)
        # initialize the classification tensor, [batch_size, num_relations, num_relation_labels], e.g. [2, 42, 5]
rel_clf = torch.zeros([batch_size, relations.shape[1], self._relation_types]).to(
self.rel_classifier.weight.device)
        # obtain the relation logits
        # process in chunks to reduce memory usage
        for i in range(0, relations.shape[1], self._max_pairs):
            # classify the relation candidates; entity_spans_pool [batch_size, num_entities, hidden_size], size_embeddings [batch_size, num_entities, size_hidden_size]
            # relations: [batch_size, num_relations, head/tail indices] -- indices of the head and tail entity within the entity tensor
            # rel_masks: [batch_size, num_relations, padded seq_length] -- position of the relation in the sample, i.e. the words between the two entities
            # h_large: [batch_size, num_relations, seq_length, hidden_size]
chunk_rel_logits = self._classify_relations(entity_spans_pool, size_embeddings,
relations, rel_masks, h_large, i)
            # predictions for this chunk of relation candidates
rel_clf[:, i:i + self._max_pairs, :] = chunk_rel_logits
        # return the entity and relation logits
return entity_clf, rel_clf
def _forward_inference(self, encodings: torch.tensor, context_masks: torch.tensor, entity_masks: torch.tensor,
entity_sizes: torch.tensor, entity_spans: torch.tensor, entity_sample_masks: torch.tensor):
"""
        Obtain contextualized token embeddings from the last transformer layer. Inference pass.
context_masks = {Tensor: (1, 26)} tensor([[True, True, True, True, True, True, True, True, True, True, True, True,\n True, True, True, True, True, True, True, True, True, True, True, True,\n True, True]])
encodings = {Tensor: (1, 26)} tensor([[ 101, 1130, 12439, 117, 1103, 4186, 2084, 1104, 1103, 1244,\n 1311, 117, 25427, 156, 119, 4468, 117, 1108, 1255, 1107,\n 4221, 16836, 117, 3197, 119, 102]])
entity_masks = {Tensor: (1, 185, 26)} tensor([[[False, True, False, ..., False, False, False],\n [False, False, True, ..., False, False, False],\n [False, False, False, ..., False, False, False],\n ...,\n [False, False, False, ..., False, False, False],\n
entity_sample_masks = {Tensor: (1, 185)} tensor([[True, True, True, True, True, True, True, True, True, True, True, True,\n True, True, True, True, True, True, True, True, True, True, True, True,\n True, True, True, True, True, True, True, True, True, True, True, True,\n True
entity_sizes = {Tensor: (1, 185)} tensor([[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,\n 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3,\n 3,
entity_spans = {Tensor: (1, 185, 2)} tensor([[[ 1, 2],\n [ 2, 3],\n [ 3, 4],\n [ 4, 5],\n [ 5, 6],\n [ 6, 7],\n [ 7, 8],\n [ 8, 9],\n [ 9, 10],\n [10, 11],\n [11, 12],\n [12, 13],\n [13, 15],\n
self = {SpERT} SpERT(\n (bert): BertModel(\n (embeddings): BertEmbeddings(\n (word_embeddings): Embedding(28996, 768, padding_idx=0)\n (position_embeddings): Embedding(512, 768)\n (token_type_embeddings): Embedding(2, 768)\n (LayerNorm): LayerNorm((768,
        :param encodings: token ids, [batch_size, seq_length]
        :type encodings:
        :param context_masks: mask over each sample's actual length, [batch_size, seq_length]
        :type context_masks:
        :param entity_masks: [batch_size, num_entities, padded seq_length], mask of each entity's position in the sentence; num_entities covers all enumerated candidates
        :type entity_masks: [batch_size, num_entities], entity spans enumerated from smallest to largest size
:param entity_sizes:
tensor([[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9,
9, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10]])
:type entity_sizes:
        :param entity_spans: [batch_size, num_entities, 2]; the 2 holds the span start and end positions
        :type entity_spans:
        :param entity_sample_masks: [batch_size, num_entities]; if sample 1 has 10 entities and sample 2 has 3, sample 2 gets 7 False entries
:type entity_sample_masks:
:return:
:rtype:
"""
context_masks = context_masks.float()
h = self.bert(input_ids=encodings, attention_mask=context_masks)['last_hidden_state']
batch_size = encodings.shape[0]
        # ctx_size: sequence length
        ctx_size = context_masks.shape[-1]
        # entity classification; size_embeddings: [batch_size, num_entities, 25]
        size_embeddings = self.size_embeddings(entity_sizes)  # embed entity candidate sizes
        # entity_clf: [batch_size, num_entities, num_entity_labels]
        # entity_spans_pool: [batch_size, num_entities, hidden_size]
        entity_clf, entity_spans_pool = self._classify_entities(encodings, h, entity_masks, size_embeddings)
        # ignore entity candidates that the classifier does not label as actual entities; entity_spans: [batch_size, num_entities, 2], span start/end positions
        # relations: [batch_size, num_relations, 2] -- head/tail entity indices
        # rel_masks: [batch_size, num_relations, seq_length] -- mask over the words between the two entities
        # rel_sample_masks: [batch_size, num_relations], after padding; if sample 1 has 10 relations and sample 2 has 3, sample 2 gets 7 False entries
relations, rel_masks, rel_sample_masks = self._filter_spans(entity_clf, entity_spans,
entity_sample_masks, ctx_size)
        # relation classification; self._max_pairs is the maximum number of relation candidates per chunk; h_large [batch_size, num_relations, seq_length, hidden_size]
rel_sample_masks = rel_sample_masks.float().unsqueeze(-1)
h_large = h.unsqueeze(1).repeat(1, max(min(relations.shape[1], self._max_pairs), 1), 1, 1)
        # initialize the classification tensor, [batch_size, num_relations, num_relation_labels], e.g. [1, 6, 5]
rel_clf = torch.zeros([batch_size, relations.shape[1], self._relation_types]).to(
self.rel_classifier.weight.device)
# obtain relation logits
        # predict the relations in chunks (chunked prediction)
for i in range(0, relations.shape[1], self._max_pairs):
            # predict relations
chunk_rel_logits = self._classify_relations(entity_spans_pool, size_embeddings,
relations, rel_masks, h_large, i)
            # apply a sigmoid to the relation logits (multi-label classification)
chunk_rel_clf = torch.sigmoid(chunk_rel_logits)
            # add this chunk's predictions
rel_clf[:, i:i + self._max_pairs, :] = chunk_rel_clf
        # keep only the masked (valid) relation entries
rel_clf = rel_clf * rel_sample_masks # mask
        # softmax over the entity classification logits
entity_clf = torch.softmax(entity_clf, dim=2)
        # entity classification logits, relation classification logits, and relations with each pair's entity indices
return entity_clf, rel_clf, relations
def _classify_entities(self, encodings, h, entity_masks, size_embeddings):
"""
        Max-pool the entity candidate spans, then classify the entities.
        :param encodings: [batch_size, seq_length], e.g. [2, 26]
        :type encodings:
        :param h: hidden states of the sentence after BERT, [batch_size, seq_length, embedding_size], e.g. [2, 26, 768]
        :type h:
        :param entity_masks: [batch_size, entity_num, seq_length], e.g. [2, 104, 26]; entity_num is the number of entity candidates, positives and negatives
        :type entity_masks:
        :param size_embeddings: embedding of the entity (span) lengths, [batch_size, entity_num, embedding_size], e.g. [2, 106, 25]
:type size_embeddings:
:return:
:rtype:
"""
        # add a dimension to entity_masks and check where it is 0; m has shape e.g. [2, 104, 26, 1]
        m = (entity_masks.unsqueeze(-1) == 0).float() * (-1e30)
        # get the entity vectors, [batch_size, entity_num, seq_length, hidden_size], e.g. [2, 104, 26, 768]
        entity_spans_pool = m + h.unsqueeze(1).repeat(1, entity_masks.shape[1], 1, 1)
        # max-pool over each entity span, entity_spans_pool: [batch_size, entity_num, hidden_size], e.g. [2, 104, 768]
        entity_spans_pool = entity_spans_pool.max(dim=2)[0]
        # get the cls token as the candidate context representation, entity_ctx: [batch_size, hidden_size]
        entity_ctx = get_token(h, encodings, self._cls_token)
        # create the candidate representations: context, max-pooled span and size embedding, concatenated
        # entity_ctx is the cls representation
        # entity_ctx.unsqueeze(1).repeat(1, entity_spans_pool.shape[1], 1) copies the cls vector once per entity
entity_repr = torch.cat([entity_ctx.unsqueeze(1).repeat(1, entity_spans_pool.shape[1], 1),
entity_spans_pool, size_embeddings], dim=2)
entity_repr = self.dropout(entity_repr)
        # classify the entity candidates; entity_repr: [batch_size, entity_num, embedding_size (concatenated vector)]
        # entity_clf, shape: [batch_size, entity_num, entity_labels_num]
entity_clf = self.entity_classifier(entity_repr)
        # entity_spans_pool: max-pooled entity spans, [batch_size, entity_num, hidden_size], e.g. [2, 104, 768]
return entity_clf, entity_spans_pool
def _classify_relations(self, entity_spans, size_embeddings, relations, rel_masks, h, chunk_start):
"""
        Classify the relation candidates.
        :param entity_spans: entity_spans_pool [batch_size, num_entities, hidden_size], the vector representing each entity
        :type entity_spans:
        :param size_embeddings: embedding of the entity lengths, [batch_size, num_entities, size_hidden_size]
        :type size_embeddings:
        :param relations: [batch_size, num_relations, head/tail entity indices]; indices of the head and tail entity within the entity tensor
        :type relations:
        :param rel_masks: [batch_size, num_relations, padded seq_length]; position of the relation in the sample, i.e. the words between the two entities
        :type rel_masks:
        :param h: [batch_size, num_relations, seq_length, hidden_size]
        :type h:
        :param chunk_start: start index of the current chunk of relation candidates, e.g. 0
:type chunk_start: int
:return:
:rtype:
"""
batch_size = relations.shape[0]
        # if the number of candidate relations exceeds the preset maximum number of pairs, split them into chunks
if relations.shape[1] > self._max_pairs:
relations = relations[:, chunk_start:chunk_start + self._max_pairs]
rel_masks = rel_masks[:, chunk_start:chunk_start + self._max_pairs]
h = h[:, :relations.shape[1], :]
        # get the entity candidate representation pairs; entity_spans: [batch_size, num_entities, hidden_size]
        # relations: [batch_size, num_relations, head/tail positions within the entity list]
        # entity_pairs: [batch_size, num_relations, head/tail position, hidden_size]
        # entity_pairs: the head/tail entity vectors for each relation
        entity_pairs = util.batch_index(entity_spans, relations)
        # merge the last two dimensions, [batch_size, num_relations, head/tail positions * hidden_size]
        entity_pairs = entity_pairs.view(batch_size, entity_pairs.shape[1], -1)
        # map each entity's size embedding onto the relation pairs
        # size_embeddings: [batch_size, num_entities, size_hidden_size]
        # relations: [batch_size, num_relations, head/tail positions within the entity list]
        # size_pair_embeddings: [batch_size, num_relations, head/tail positions, size_hidden_size], e.g. [2, 6, 2, 25]
        size_pair_embeddings = util.batch_index(size_embeddings, relations)
        # merge the last two dimensions, [batch_size, num_relations, head/tail positions * size_hidden_size]
        size_pair_embeddings = size_pair_embeddings.view(batch_size, size_pair_embeddings.shape[1], -1)
        # relation context (the context between the candidate entity pair)
        # mask out non-candidate tokens; rel_masks: [batch_size, num_relations, padded seq_length] -- the words between the two entities
        # m: [batch_size, num_relations, padded seq_length, 1]
        m = ((rel_masks == 0).float() * (-1e30)).unsqueeze(-1)
        # h and rel_ctx both have shape [batch_size, num_relations, seq_length, hidden_size]; m + h keeps that shape
        rel_ctx = m + h
        # max-pool; rel_ctx: [batch_size, num_relations, hidden_size]
        rel_ctx = rel_ctx.max(dim=2)[0]
        # set the context vector of adjacent/neighboring entity candidates to zero; rel_ctx: [batch_size, num_relations, hidden_size]
        rel_ctx[rel_masks.to(torch.uint8).any(-1) == 0] = 0
        # create the relation candidate representation: context, max-pooled entity candidate pair and the corresponding entity size embeddings
        # rel_ctx: e.g. [2, 20, 768], entity_pairs: [2, 20, 1536], size_pair_embeddings: [2, 20, 50]
        # rel_repr is the concatenation of the three, e.g. [2, 20, 2354]; 2354 = 3 * hidden_size + 2 * size embedding dims
        rel_repr = torch.cat([rel_ctx, entity_pairs, size_pair_embeddings], dim=2)
        rel_repr = self.dropout(rel_repr)
        # relation classification
chunk_rel_logits = self.rel_classifier(rel_repr)
return chunk_rel_logits
def _filter_spans(self, entity_clf, entity_spans, entity_sample_masks, ctx_size):
"""
        :param entity_clf: [batch_size, num_entities, num_entity_labels]
        :type entity_clf:
        :param entity_spans: [batch_size, num_entities, 2], span start and end positions
        :type entity_spans:
        :param entity_sample_masks: [batch_size, num_entities]
        :type entity_sample_masks:
        :param ctx_size: sequence length
:type ctx_size: int
:return:
:rtype:
"""
batch_size = entity_clf.shape[0]
        # entity_logits_max: [batch_size, num_entities]; pick the predicted entity type for each span
entity_logits_max = entity_clf.argmax(dim=-1) * entity_sample_masks.long() # get entity type (including none)
batch_relations = []
batch_rel_masks = []
batch_rel_sample_masks = []
for i in range(batch_size):
rels = []
rel_masks = []
sample_masks = []
            # spans classified as entities, e.g. tensor([31, 56, 84]); indices of the spans most likely to be entities
non_zero_indices = (entity_logits_max[i] != 0).nonzero().view(-1)
            # look up the spans of those entities, i.e. their start/end positions
non_zero_spans = entity_spans[i][non_zero_indices].tolist()
            # also convert the entity indices to a list
non_zero_indices = non_zero_indices.tolist()
            # create relations and masks: enumerate entity pairs, pairing every entity with every other entity
for i1, s1 in zip(non_zero_indices, non_zero_spans):
for i2, s2 in zip(non_zero_indices, non_zero_spans):
if i1 != i2:
rels.append((i1, i2))
rel_masks.append(sampling.create_rel_mask(s1, s2, ctx_size))
sample_masks.append(1)
if not rels:
                # case 1: fewer than two spans were classified as entities, so there are no relations; create a dummy one
batch_relations.append(torch.tensor([[0, 0]], dtype=torch.long))
batch_rel_masks.append(torch.tensor([[0] * ctx_size], dtype=torch.bool))
batch_rel_sample_masks.append(torch.tensor([0], dtype=torch.bool))
else:
                # case 2: two or more spans were classified as entities; build the relation tensors
batch_relations.append(torch.tensor(rels, dtype=torch.long))
batch_rel_masks.append(torch.stack(rel_masks))
batch_rel_sample_masks.append(torch.tensor(sample_masks, dtype=torch.bool))
        # get the device
device = self.rel_classifier.weight.device
        # pad the relations
batch_relations = util.padded_stack(batch_relations).to(device)
batch_rel_masks = util.padded_stack(batch_rel_masks).to(device)
batch_rel_sample_masks = util.padded_stack(batch_rel_sample_masks).to(device)
return batch_relations, batch_rel_masks, batch_rel_sample_masks
def forward(self, *args, inference=False, **kwargs):
"""
        Contents of args (dict):
        # encodings: [batch_size, padded seq_length], tokens converted to ids
        # context_masks: [batch_size, padded seq_length], mask over each sample's actual length
        # entity_masks: [batch_size, num_entities, padded seq_length], mask of each entity's position in the sentence
        # entity_sizes: [batch_size, num_entities], length of each entity
        # 'entity_types', [batch_size, num_entities]
        # 'rels', [batch_size, num_relations, head/tail entity indices], e.g. [2, 6, 2]; the 6 includes positive and negative samples
        # 'rel_masks', [batch_size, num_relations, padded seq_length], position of the relation in the sample, i.e. the words between the two entities
        # 'rel_types', [batch_size, num_relations, num_relation_labels], one-hot encoded relations
        # 'entity_sample_masks', [batch_size, num_entities], after padding
        # 'rel_sample_masks', [batch_size, num_relations], after padding; if sample 1 has 10 relations and sample 2 has 3, sample 2 gets 7 False entries
:param args:
:type args:
:param inference:
:type inference:
:param kwargs:
:type kwargs:
:return:
:rtype:
"""
if not inference:
            # training mode
return self._forward_train(*args, **kwargs)
else:
            # inference mode
return self._forward_inference(*args, **kwargs)
# Model access
_MODELS = {
'spert': SpERT,
}
def get_model(name):
return _MODELS[name]
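# A minimal access sketch (kept as comments; argument values are illustrative
# and would normally come from the training configuration):
#   model_cls = get_model('spert')   # -> SpERT
#   model = model_cls.from_pretrained('bert-base-cased', cls_token=101,
#                                     relation_types=5, entity_types=5,
#                                     size_embedding=25, prop_drop=0.1,
#                                     freeze_transformer=False)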
|
"""
config_utils.py
---------------
Utility functions for parsing manim config files.
"""
__all__ = [
"_run_config",
"_paths_config_file",
"_from_command_line",
"finalized_configs_dict",
]
import argparse
import configparser
import os
import sys
import colour
from .. import constants
from ..utils.tex import TexTemplate, TexTemplateFromFile
def _parse_file_writer_config(config_parser, args):
"""Parse config files and CLI arguments into a single dictionary."""
# By default, use the CLI section of the digested .cfg files
default = config_parser["CLI"]
# This will be the final file_writer_config dict exposed to the user
fw_config = {}
# Handle input files and scenes. Note these cannot be set from
# the .cfg files, only from CLI arguments.
# If a subcommand is given, manim will not render a video and
# thus these specific input/output files are not needed.
if not (hasattr(args, "subcommands")):
fw_config["input_file"] = args.file
fw_config["scene_names"] = (
args.scene_names if args.scene_names is not None else []
)
fw_config["output_file"] = args.output_file
# Handle all options that are directly overridden by CLI
# arguments. Note ConfigParser options are all strings and each
# needs to be converted to the appropriate type. Thus, we do this
# in batches, depending on their type: booleans and strings
for boolean_opt in [
"preview",
"show_in_file_browser",
"sound",
"leave_progress_bars",
"write_to_movie",
"save_last_frame",
"save_pngs",
"save_as_gif",
"write_all",
"disable_caching",
"flush_cache",
"log_to_file",
]:
attr = getattr(args, boolean_opt)
fw_config[boolean_opt] = (
default.getboolean(boolean_opt) if attr is None else attr
)
# for str_opt in ['media_dir', 'video_dir', 'tex_dir', 'text_dir']:
for str_opt in ["media_dir"]:
attr = getattr(args, str_opt)
fw_config[str_opt] = os.path.relpath(default[str_opt]) if attr is None else attr
attr = getattr(args, "log_dir")
fw_config["log_dir"] = (
os.path.join(fw_config["media_dir"], default["log_dir"])
if attr is None
else attr
)
dir_names = {
"video_dir": "videos",
"images_dir": "images",
"tex_dir": "Tex",
"text_dir": "texts",
}
for name in dir_names:
fw_config[name] = os.path.join(fw_config["media_dir"], dir_names[name])
# the --custom_folders flag overrides the default folder structure with the
# custom folders defined in the [custom_folders] section of the config file
fw_config["custom_folders"] = args.custom_folders
if fw_config["custom_folders"]:
fw_config["media_dir"] = config_parser["custom_folders"].get("media_dir")
for opt in ["video_dir", "images_dir", "tex_dir", "text_dir"]:
fw_config[opt] = config_parser["custom_folders"].get(opt)
# Handle the -s (--save_last_frame) flag: invalidate the -w flag
# At this point the save_last_frame option has already been set by
# both CLI and the cfg file, so read the config dict directly
if fw_config["save_last_frame"]:
fw_config["write_to_movie"] = False
# Handle the -t (--transparent) flag. This flag determines which
# section to use from the .cfg file.
section = config_parser["transparent"] if args.transparent else default
for opt in ["png_mode", "movie_file_extension", "background_opacity"]:
fw_config[opt] = section[opt]
# Handle the -n flag. Read first from the cfg and then override with CLI.
# These two are integers -- use getint()
for opt in ["from_animation_number", "upto_animation_number"]:
fw_config[opt] = default.getint(opt)
if fw_config["upto_animation_number"] == -1:
fw_config["upto_animation_number"] = float("inf")
nflag = args.from_animation_number
if nflag is not None:
if "," in nflag:
start, end = nflag.split(",")
fw_config["from_animation_number"] = int(start)
fw_config["upto_animation_number"] = int(end)
else:
fw_config["from_animation_number"] = int(nflag)
# Handle the --dry_run flag. This flag determines which section
# to use from the .cfg file. All options involved are boolean.
# Note this overrides the flags -w, -s, -a, -g, and -i.
if args.dry_run:
for opt in [
"write_to_movie",
"save_last_frame",
"save_pngs",
"save_as_gif",
"write_all",
]:
fw_config[opt] = config_parser["dry_run"].getboolean(opt)
if not fw_config["write_to_movie"]:
fw_config["disable_caching"] = True
# Read in the streaming section -- all values are strings
fw_config["streaming"] = {
opt: config_parser["streaming"][opt]
        for opt in [
            "live_stream_name",
            "twitch_stream_key",
            "streaming_protocol",
            "streaming_ip",
            "streaming_client",
            "streaming_port",
            "streaming_console_banner",
        ]
}
# For internal use (no CLI flag)
fw_config["skip_animations"] = fw_config["save_last_frame"]
fw_config["max_files_cached"] = default.getint("max_files_cached")
if fw_config["max_files_cached"] == -1:
fw_config["max_files_cached"] = float("inf")
# Parse the verbosity flag to read in the log level
verbosity = getattr(args, "verbosity")
verbosity = default["verbosity"] if verbosity is None else verbosity
fw_config["verbosity"] = verbosity
# Parse the ffmpeg log level in the config
ffmpeg_loglevel = config_parser["ffmpeg"].get("loglevel", None)
fw_config["ffmpeg_loglevel"] = (
constants.FFMPEG_VERBOSITY_MAP[verbosity]
if ffmpeg_loglevel is None
else ffmpeg_loglevel
)
# Parse the progress_bar flag
progress_bar = getattr(args, "progress_bar")
if progress_bar is None:
progress_bar = default.getboolean("progress_bar")
fw_config["progress_bar"] = progress_bar
return fw_config
def _parse_cli(arg_list, input=True):
parser = argparse.ArgumentParser(
description="Animation engine for explanatory math videos",
epilog="Made with <3 by the manim community devs",
)
if input:
# If the only command is `manim`, we want both subcommands like `cfg`
# and mandatory positional arguments like `file` to show up in the help section.
only_manim = len(sys.argv) == 1
if only_manim or _subcommand_name():
subparsers = parser.add_subparsers(dest="subcommands")
# More subcommands can be added here, with elif statements.
# If a help command is passed, we still want subcommands to show
# up, so we check for help commands as well before adding the
# subcommand's subparser.
if only_manim or _subcommand_name() in ["cfg", "--help", "-h"]:
cfg_related = _init_cfg_subcmd(subparsers)
if only_manim or not _subcommand_name(ignore=["--help", "-h"]):
parser.add_argument(
"file",
help="path to file holding the python code for the scene",
)
parser.add_argument(
"scene_names",
nargs="*",
help="Name of the Scene class you want to see",
default=[""],
)
parser.add_argument(
"-o",
"--output_file",
help="Specify the name of the output file, if "
"it should be different from the scene class name",
default="",
)
# The following use (action='store_const', const=True) instead of
# the built-in (action='store_true'). This is because the latter
# will default to False if not specified, while the former sets no
# default value. Since we want to set the default value in
# manim.cfg rather than here, we use the former.
parser.add_argument(
"-p",
"--preview",
action="store_const",
const=True,
help="Automatically open the saved file once its done",
)
parser.add_argument(
"-f",
"--show_in_file_browser",
action="store_const",
const=True,
help="Show the output file in the File Browser",
)
parser.add_argument(
"--sound",
action="store_const",
const=True,
help="Play a success/failure sound",
)
parser.add_argument(
"--leave_progress_bars",
action="store_const",
const=True,
help="Leave progress bars displayed in terminal",
)
parser.add_argument(
"-a",
"--write_all",
action="store_const",
const=True,
help="Write all the scenes from a file",
)
parser.add_argument(
"-w",
"--write_to_movie",
action="store_const",
const=True,
help="Render the scene as a movie file (this is on by default)",
)
parser.add_argument(
"-s",
"--save_last_frame",
action="store_const",
const=True,
help="Save the last frame only (no movie file is generated)",
)
parser.add_argument(
"-g",
"--save_pngs",
action="store_const",
const=True,
help="Save each frame as a png",
)
parser.add_argument(
"-i",
"--save_as_gif",
action="store_const",
const=True,
help="Save the video as gif",
)
parser.add_argument(
"--disable_caching",
action="store_const",
const=True,
help="Disable caching (will generate partial-movie-files anyway)",
)
parser.add_argument(
"--flush_cache",
action="store_const",
const=True,
help="Remove all cached partial-movie-files",
)
parser.add_argument(
"--log_to_file",
action="store_const",
const=True,
help="Log terminal output to file",
)
# The default value of the following is set in manim.cfg
parser.add_argument(
"-c",
"--background_color",
help="Specify background color",
)
parser.add_argument(
"--background_opacity",
help="Specify background opacity",
)
parser.add_argument(
"--media_dir",
help="Directory to store media (including video files)",
)
parser.add_argument(
"--log_dir",
help="Directory to store log files",
)
parser.add_argument(
"--tex_template",
help="Specify a custom TeX template file",
)
# All of the following use (action="store_true"). This means that
# they are by default False. In contrast to the previous ones that
# used (action="store_const", const=True), the following do not
# correspond to a single configuration option. Rather, they
# override several options at the same time.
# The following overrides -w, -a, -g, and -i
parser.add_argument(
"--dry_run",
action="store_true",
help="Do a dry run (render scenes but generate no output files)",
)
# The following overrides PNG_MODE, MOVIE_FILE_EXTENSION, and
# BACKGROUND_OPACITY
parser.add_argument(
"-t",
"--transparent",
action="store_true",
help="Render a scene with an alpha channel",
)
# The following are mutually exclusive and each overrides
# FRAME_RATE, PIXEL_HEIGHT, and PIXEL_WIDTH,
parser.add_argument(
"-l",
"--low_quality",
action="store_true",
help="Render at low quality",
)
parser.add_argument(
"-m",
"--medium_quality",
action="store_true",
help="Render at medium quality",
)
parser.add_argument(
"-e",
"--high_quality",
action="store_true",
help="Render at high quality",
)
parser.add_argument(
"-k",
"--fourk_quality",
action="store_true",
help="Render at 4K quality",
)
# This overrides any of the above
parser.add_argument(
"-r",
"--resolution",
help='Resolution, passed as "height,width". '
"Overrides the -l, -m, -e, and -k flags, if present",
)
# This sets FROM_ANIMATION_NUMBER and UPTO_ANIMATION_NUMBER
parser.add_argument(
"-n",
"--from_animation_number",
help="Start rendering at the specified animation index, "
"instead of the first animation. If you pass in two comma "
"separated values, e.g. '3,6', it will end "
"the rendering at the second value",
)
# Specify the manim.cfg file
parser.add_argument(
"--config_file",
help="Specify the configuration file",
)
# Specify whether to use the custom folders
parser.add_argument(
"--custom_folders",
action="store_true",
help="Use the folders defined in the [custom_folders] "
"section of the config file to define the output folder structure",
)
# Specify the verbosity
parser.add_argument(
"-v",
"--verbosity",
type=str,
help="Verbosity level. Also changes the ffmpeg log level unless the latter is specified in the config",
choices=constants.VERBOSITY_CHOICES,
)
# Specify if the progress bar should be displayed
def _str2bool(s):
if s == "True":
return True
elif s == "False":
return False
else:
raise argparse.ArgumentTypeError("True or False expected")
parser.add_argument(
"--progress_bar",
type=_str2bool,
help="Display the progress bar",
metavar="True/False",
)
parsed = parser.parse_args(arg_list)
if hasattr(parsed, "subcommands"):
if _subcommand_name() == "cfg":
setattr(
parsed,
"cfg_subcommand",
cfg_related.parse_args(sys.argv[2:]).cfg_subcommand,
)
return parsed
def _init_dirs(config):
# Make sure all folders exist
for folder in [
config["media_dir"],
config["video_dir"],
config["tex_dir"],
config["text_dir"],
config["log_dir"],
]:
if not os.path.exists(folder):
# If log_to_file is False, ignore log_dir
if folder is config["log_dir"] and (not config["log_to_file"]):
pass
else:
os.makedirs(folder)
def _from_command_line():
"""Determine if manim was called from the command line."""
# Manim can be called from the command line in three different
# ways. The first two involve using the manim or manimcm commands.
# Note that some Windows CLIs replace those commands with the path
# to their executables, so we must check for this as well
prog = os.path.split(sys.argv[0])[-1]
from_cli_command = prog in ["manim", "manim.exe", "manimcm", "manimcm.exe"]
# The third way involves using `python -m manim ...`. In this
# case, the CLI arguments passed to manim do not include 'manim',
# 'manimcm', or even 'python'. However, the -m flag will always
# be the first argument.
from_python_m = sys.argv[0] == "-m"
return from_cli_command or from_python_m
def _from_dunder_main():
dunder_main_path = os.path.join(
os.path.dirname(os.path.dirname(__file__)), "__main__.py"
)
return sys.argv[0] == dunder_main_path
def _paths_config_file():
library_wide = os.path.abspath(
os.path.join(os.path.dirname(__file__), "default.cfg")
)
if sys.platform.startswith("win32"):
user_wide = os.path.expanduser(
os.path.join("~", "AppData", "Roaming", "Manim", "manim.cfg")
)
else:
user_wide = os.path.expanduser(
os.path.join("~", ".config", "manim", "manim.cfg")
)
return [library_wide, user_wide]
def _run_config():
# Config files to be parsed, in ascending priority
config_files = _paths_config_file()
if _from_command_line() or _from_dunder_main():
args = _parse_cli(sys.argv[1:])
if not hasattr(args, "subcommands"):
if args.config_file is not None:
if os.path.exists(args.config_file):
config_files.append(args.config_file)
else:
raise FileNotFoundError(
f"Config file {args.config_file} doesn't exist"
)
else:
script_directory_file_config = os.path.join(
os.path.dirname(args.file), "manim.cfg"
)
if os.path.exists(script_directory_file_config):
config_files.append(script_directory_file_config)
else:
working_directory_file_config = os.path.join(os.getcwd(), "manim.cfg")
if os.path.exists(working_directory_file_config):
config_files.append(working_directory_file_config)
else:
# In this case, we still need an empty args object.
args = _parse_cli([], input=False)
# Need to populate the options left out
args.file, args.scene_names, args.output_file = "", "", ""
config_parser = configparser.ConfigParser()
successfully_read_files = config_parser.read(config_files)
# this is for internal use when writing output files
file_writer_config = _parse_file_writer_config(config_parser, args)
return args, config_parser, file_writer_config, successfully_read_files
def finalized_configs_dict():
config = _run_config()[1]
return {section: dict(config[section]) for section in config.sections()}
def _subcommand_name(ignore=()):
"""Goes through sys.argv to check if any subcommand has been passed,
and returns the first such subcommand's name, if found.
Parameters
----------
ignore : Iterable[:class:`str`], optional
List of NON_ANIM_UTILS to ignore when searching for subcommands, by default []
Returns
-------
Optional[:class:`str`]
If a subcommand is found, returns the string of its name. Returns None if no
subcommand is found.
"""
NON_ANIM_UTILS = ["cfg", "--help", "-h"]
NON_ANIM_UTILS = [util for util in NON_ANIM_UTILS if util not in ignore]
# If a subcommand is found, break out of the inner loop, and hit the break of the outer loop
# on the way out, effectively breaking out of both loops. The value of arg will be the
# subcommand to be taken.
# If no subcommand is found, none of the breaks are hit, and the else clause of the outer loop
# is run, setting arg to None.
for item in NON_ANIM_UTILS:
for arg in sys.argv:
if arg == item:
break
else:
continue
break
else:
arg = None
return arg
def _init_cfg_subcmd(subparsers):
"""Initialises the subparser for the `cfg` subcommand.
Parameters
----------
subparsers : :class:`argparse._SubParsersAction`
The subparser object for which to add the sub-subparser for the cfg subcommand.
Returns
-------
:class:`argparse.ArgumentParser`
        The parser that parses anything related to the cfg subcommand.
"""
cfg_related = subparsers.add_parser(
"cfg",
)
cfg_subparsers = cfg_related.add_subparsers(dest="cfg_subcommand")
cfg_write_parser = cfg_subparsers.add_parser("write")
cfg_write_parser.add_argument(
"--level",
choices=["user", "cwd"],
default=None,
help="Specify if this config is for user or just the working directory.",
)
cfg_write_parser.add_argument(
"--open", action="store_const", const=True, default=False
)
cfg_subparsers.add_parser("show")
cfg_export_parser = cfg_subparsers.add_parser("export")
cfg_export_parser.add_argument("--dir", default=os.getcwd())
return cfg_related
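# A minimal usage sketch (kept as comments; assumes manim's own CLI populated
# sys.argv, so none of this runs at import time):
#   args, config_parser, file_writer_config, read_files = _run_config()
#   print(file_writer_config["media_dir"])
#   print(finalized_configs_dict()["CLI"]["media_dir"])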
|
import tornado.escape
import tornado.gen
import tornado.httpclient
import tornado.web
import traceback
import logging
logger = logging.getLogger("tornado.application")
class BaseHandler(tornado.web.RequestHandler):
def write_error(self, status_code, **kwargs):
exc_type, exc_value, exc_traceback = kwargs["exc_info"]
logger.error("status_code %s: %s" % (status_code, exc_value))
logger.error("traceback: %s" % ''.join(traceback.format_tb(exc_traceback)))
msg = "error %s" % exc_value
if exc_type == tornado.web.HTTPError:
msg = "%s" % exc_value.log_message
if self.application.settings.get('debug', False):
self.write(msg) # return custom error message in the body
def get_current_user(self):
user_json = self.get_secure_cookie("gallim_user")
if not user_json: return None
return tornado.escape.json_decode(user_json)
@tornado.gen.coroutine
def _fetch_async(self, req):
http_client = tornado.httpclient.AsyncHTTPClient()
logger.debug("fetching %s" % req.url)
try:
ans = yield http_client.fetch(req)
except tornado.httpclient.HTTPError as e:
if e.response is None:
msg = "'%s'" % (req.url)
else:
msg = "'%s': %s" % (req.url, e.response.body.decode('utf-8'))
raise tornado.web.HTTPError(e.code, msg)
logger.debug("done")
return ans
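# A minimal usage sketch (hypothetical handler, not part of the original
# module) showing how a subclass would reuse _fetch_async:
class ExampleProxyHandler(BaseHandler):
    @tornado.gen.coroutine
    def get(self):
        req = tornado.httpclient.HTTPRequest("http://example.com/api")
        ans = yield self._fetch_async(req)
        self.write(ans.body)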
|
"""Test ndpoly baseclass functionality."""
import numpy
from pytest import raises
import numpoly
XY = numpoly.variable(2)
X, Y = numpoly.symbols("q0"), numpoly.symbols("q1")
EMPTY = numpoly.polynomial([])
def test_scalars():
"""Test scalar objects to catch edgecases."""
assert XY.shape == (2,)
assert XY.size == 2
assert X.shape == ()
assert X.size == 1
assert EMPTY.shape in [(), (0,)] # different behavior in py2/3
assert EMPTY.size == 0
assert numpy.all(numpy.array(XY.coefficients) == [[1, 0], [0, 1]])
assert X.coefficients == [1]
assert EMPTY.coefficients == []
assert numpy.all(XY.exponents == [[1, 0], [0, 1]])
assert XY.exponents.shape == (2, 2)
assert X.exponents == 1
assert X.exponents.shape == (1, 1)
assert numpy.all(EMPTY.exponents == 0)
assert EMPTY.exponents.shape == (1, 1)
assert numpy.all(XY.indeterminants == XY)
assert X.indeterminants == X
assert numpy.all(XY.values == numpy.array(
[(1, 0), (0, 1)], dtype=[("<;", int), (";<", int)]))
assert X.values == numpy.array((1,), dtype=[("<", int)])
assert EMPTY.values.dtype == numpy.dtype([(";", int)])
assert not EMPTY.values.size
assert not X.isconstant()
assert EMPTY.isconstant()
assert X.todict() == {(1,): 1}
assert EMPTY.todict() == {}
assert isinstance(EMPTY.tonumpy(), numpy.ndarray)
assert X.dtype == int
assert X.astype(float).dtype == float
assert EMPTY.dtype == int
assert EMPTY.astype(float).dtype == float
def test_dispatch_array_ufunc():
"""Test dispatch for ufuncs."""
assert numpoly.sum(XY) == XY.__array_ufunc__(numpy.sum, "__call__", XY)
with raises(numpoly.FeatureNotSupported):
XY.__array_ufunc__(numpy.sum, "not_a_method", XY)
with raises(numpoly.FeatureNotSupported):
XY.__array_ufunc__(object, "__call__", XY)
def test_dispatch_array_function():
"""Test dispatch for functions."""
assert numpoly.sum(XY) == XY.__array_function__(numpy.sum, (int,), (XY,), {})
with raises(numpoly.FeatureNotSupported):
XY.__array_function__(object, (int,), (XY,), {})
|
# Generated by Django 3.1.4 on 2021-04-22 11:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('booking', '0008_auto_20210320_1615'),
]
operations = [
migrations.AlterField(
model_name='cart',
name='status',
field=models.SmallIntegerField(default=0, verbose_name='Status'),
),
migrations.AlterField(
model_name='cartitem',
name='created',
field=models.DateTimeField(auto_now_add=True, verbose_name='erstellt'),
),
migrations.AlterField(
model_name='cartitem',
name='status',
field=models.SmallIntegerField(default=0, verbose_name='Status'),
),
migrations.AlterField(
model_name='coupon',
name='created',
field=models.DateTimeField(auto_now=True, verbose_name='Erstellt'),
),
migrations.AlterField(
model_name='invoice',
name='email',
field=models.EmailField(max_length=128, verbose_name='E-Mail-Adresse'),
),
migrations.AlterField(
model_name='paymentmethod',
name='name',
field=models.CharField(max_length=32, verbose_name='Anzeigename'),
),
migrations.AlterField(
model_name='paymentmethod',
name='name_de',
field=models.CharField(max_length=32, null=True, verbose_name='Anzeigename'),
),
migrations.AlterField(
model_name='paymentmethod',
name='name_en',
field=models.CharField(max_length=32, null=True, verbose_name='Anzeigename'),
),
migrations.AlterField(
model_name='product',
name='players',
field=models.SmallIntegerField(blank=True, null=True, verbose_name='Spieler'),
),
migrations.AlterField(
model_name='room',
name='is_active',
field=models.BooleanField(default=False, verbose_name='Aktiv'),
),
migrations.AlterField(
model_name='room',
name='photo',
field=models.ImageField(blank=True, null=True, upload_to='uploads/rooms', verbose_name='Bild'),
),
migrations.AlterField(
model_name='room',
name='photo_alt',
field=models.CharField(blank=True, max_length=128, null=True, verbose_name='Alternativtext'),
),
migrations.AlterField(
model_name='schedule',
name='duration',
field=models.PositiveSmallIntegerField(default=60, verbose_name='Zeitspanne'),
),
migrations.AlterField(
model_name='slot',
name='duration',
field=models.PositiveSmallIntegerField(default=60, verbose_name='Zeitspanne'),
),
]
|
"""
"""
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sympy import *
from sympy.matrices import Matrix,eye
from moro.abc import *
from moro.util import *
__all__ = [
"axa2rot",
"compose_rotations",
"dh",
"eul2htm",
"htm2eul",
"htmrot",
"htmtra",
"rot2axa",
"rotx",
"roty",
"rotz",
"skew"
]
# ~ ==========================================
# ~ Transformation operations
# ~ ==========================================
def rotz(theta, deg=False):
"""
Calculates the rotation matrix about the z-axis
Parameters
----------
:param theta: Rotation angle (given in radians by default)
:type theta: float, int or `symbolic`
    :param deg: Is theta given in degrees? Defaults to False.
:type deg: bool
Returns
-------
    :return: Rotation matrix (SO3)
    :rtype: `sympy.matrices.dense.MutableDenseMatrix`
Examples
--------
Using angle in radians,
>>> rotz(pi/2)
⎡0 -1 0⎤
⎢ ⎥
⎢1 0 0⎥
⎢ ⎥
⎣0 0 1⎦
Or symbolic variables,
>>> x = symbols("x")
>>> rotz(x)
⎡cos(x) -sin(x) 0⎤
⎢ ⎥
⎢sin(x) cos(x) 0⎥
⎢ ⎥
⎣ 0 0 1⎦
Using angles in degrees:
>>> rotz(45, deg=True)
⎡0.707106781186548 -0.707106781186547 0⎤
⎢ ⎥
⎢0.707106781186547 0.707106781186548 0⎥
⎢ ⎥
⎣ 0 0 1⎦
"""
if deg: # If theta is given in degrees -> convert to radians
theta = deg2rad(theta, False)
ct = cos(theta)
st = sin(theta)
R = Matrix([[ct, -st, 0],
[st, ct, 0],
[0, 0, 1]])
return R
def roty(theta, deg=False):
"""
Calculates the rotation matrix about the y-axis
Parameters
----------
:param theta: Rotation angle (given in radians by default)
:type theta: float, int or `symbolic`
    :param deg: Is theta given in degrees? Defaults to False.
:type deg: bool
Returns
-------
    :return: Rotation matrix (SO3)
    :rtype: `sympy.matrices.dense.MutableDenseMatrix`
Examples
--------
>>> roty(pi/3)
⎡ √3 ⎤
⎢1/2 0 ── ⎥
⎢ 2 ⎥
⎢ ⎥
⎢ 0 1 0 ⎥
⎢ ⎥
⎢-√3 ⎥
⎢──── 0 1/2⎥
⎣ 2 ⎦
>>> roty(30, deg=True)
⎡0.866025403784439 0 0.5 ⎤
⎢ ⎥
⎢ 0 1 0 ⎥
⎢ ⎥
⎣ -0.5 0 0.866025403784439⎦
"""
if deg: # If theta is given in degrees -> convert to radians
theta = deg2rad(theta, False)
ct = cos(theta)
st = sin(theta)
R = Matrix([[ct, 0, st],
[0, 1, 0],
[-st, 0, ct]])
return R
def rotx(theta, deg=False):
"""
Calculates the rotation matrix about the x-axis
Parameters
----------
:param theta: Rotation angle (given in radians by default)
:type theta: float, int or `symbolic`
    :param deg: Is theta given in degrees? Defaults to False.
:type deg: bool
Returns
-------
    :return: Rotation matrix (SO3)
    :rtype: `sympy.matrices.dense.MutableDenseMatrix`
Examples
--------
>>> rotx(pi)
⎡1 0 0 ⎤
⎢ ⎥
⎢0 -1 0 ⎥
⎢ ⎥
⎣0 0 -1⎦
>>> rotx(60, deg=True)
⎡1 0 0 ⎤
⎢ ⎥
⎢0 0.5 -0.866025403784439⎥
⎢ ⎥
⎣0 0.866025403784439 0.5 ⎦
"""
if deg: # If theta is given in degrees -> convert to radians
theta = deg2rad(theta, False)
ct = cos(theta)
st = sin(theta)
R = Matrix([[1, 0, 0],
[0, ct, -st],
[0, st, ct]])
return R
def _rot(theta, axis, deg=False):
if axis in ("X","x",1,"1"):
R = rotx(theta, deg)
elif axis in ("Y","y",2,"2"):
R = roty(theta, deg)
elif axis in ("Z","z",3,"3"):
R = rotz(theta, deg)
else:
R = eye(3)
return R
def compose_rotations(*rotations):
"""
Composes rotation matrices w.r.t. fixed or movable frames
Parameters
----------
:param rotations: A tuple that contains (angle, axis, frame, deg)
:type rotations: tuple
Returns
-------
:return: Rotation matrix
:rtype: :class:`sympy.matrices.dense.MutableDenseMatrix`
Examples
--------
>>> compose_rotations((45, "z", "fixed", True), (30, "x", "local", True))
⎡0.707106781186548 -0.612372435695794 0.353553390593274 ⎤
⎢ ⎥
⎢0.707106781186547 0.612372435695795 -0.353553390593274⎥
⎢ ⎥
⎣ 0 0.5 0.866025403784439 ⎦
"""
R = eye(3) # I3x3 matrix
for rot in rotations:
angle,axis,frame,*_ = rot
if len(rot)==4:
deg = rot[-1]
else:
deg = False # default value
crm = _rot(angle,axis,deg)
if frame in ("world","fixed","global","w","0",0):
R = crm*R
elif frame in ("current","movable","local","c","1",1):
R = R*crm
else:
pass # Nothing to do here -> raise except. (to impl.)
return R
def dh(a,alpha,d,theta):
"""
Calculates Denavit-Hartenberg matrix given the four parameters.
Parameters
----------
:param a: DH parameter
:type a: int, float or symbol
:param alpha: DH parameter
:type alpha: int, float or symbol
:param d: DH parameter
:type d: int, float or symbol
:param theta: DH parameter
:type theta: int, float or symbol
Returns
-------
:return: Denavit-Hartenberg matrix (4x4)
:rtype: :class:`sympy.matrices.dense.MutableDenseMatrix`
Examples
--------
With numerical values:
>>> dh(100,pi/2,50,pi/2)
⎡0 0 1 0 ⎤
⎢ ⎥
⎢1 0 0 100⎥
⎢ ⎥
⎢0 1 0 50 ⎥
⎢ ⎥
⎣0 0 0 1 ⎦
Using symbolic values:
>>> a = symbols("a")
>>> t = symbols("t")
>>> dh(a,0,0,t)
⎡cos(t) -sin(t) 0 a⋅cos(t)⎤
⎢ ⎥
⎢sin(t) cos(t) 0 a⋅sin(t)⎥
⎢ ⎥
⎢ 0 0 1 0 ⎥
⎢ ⎥
⎣ 0 0 0 1 ⎦
"""
H = Matrix([[cos(theta),-sin(theta)*cos(alpha),sin(theta)*sin(alpha),a*cos(theta)],
[sin(theta),cos(theta)*cos(alpha),-cos(theta)*sin(alpha),a*sin(theta)],
[0,sin(alpha),cos(alpha),d],
[0,0,0,1]])
return H
def eul2htm(phi,theta,psi,seq="zxz",deg=False):
"""
    Given a set of Euler angles (phi, theta, psi) for a specific
    sequence, this function returns the associated homogeneous
    transformation matrix. Default sequence is ZXZ.
Parameters
----------
:param phi: phi angle
:type phi: int,float,symbol
:param theta: theta angle
:type theta: int,float,symbol
:param psi: psi angle
:type psi: int,float,symbol
:param seq: Rotation sequence
:type seq: str
:param deg: True if (phi,theta,psi) are given in degrees
:type deg: bool
Returns
-------
:return: Homogeneous transformation matrix
:rtype: :class:`sympy.matrices.dense.MutableDenseMatrix`
Examples
--------
>>> eul2htm(90,90,90,"zxz",True)
⎡0 0 1 0⎤
⎢ ⎥
⎢0 -1 0 0⎥
⎢ ⎥
⎢1 0 0 0⎥
⎢ ⎥
⎣0 0 0 1⎦
>>> eul2htm(pi/2,pi/2,pi/2)
⎡0 0 1 0⎤
⎢ ⎥
⎢0 -1 0 0⎥
⎢ ⎥
⎢1 0 0 0⎥
⎢ ⎥
⎣0 0 0 1⎦
>>> eul2htm(0,pi/2,0,"zyz")
⎡0 0 1 0⎤
⎢ ⎥
⎢0 1 0 0⎥
⎢ ⎥
⎢-1 0 0 0⎥
⎢ ⎥
⎣0 0 0 1⎦
"""
if deg: # If angles are given in degrees -> convert to radians
phi,theta,psi = deg2rad(Matrix([phi,theta,psi]), evalf=False)
seq = seq.lower()
if not seq in ("zxz","zyz","xyx","xzx","yxy","yzy"):
raise ValueError(f"{seq} is not a valid sequence")
axis1 = seq[0]
axis2 = seq[1]
axis3 = seq[2]
H = htmrot(phi,axis1) * htmrot(theta,axis2) * htmrot(psi,axis3)
return H
def htm2eul(H, seq="zxz", deg=False):
"""
    Given a homogeneous transformation matrix, this function
    returns the equivalent set of Euler angles.
If "deg" is True then Euler Angles are converted to degrees.
>>> H = htmrot(pi/3,"y")*htmrot(pi/4,"x")
>>> H
⎡ √6 √6 ⎤
⎢1/2 ── ── 0⎥
⎢ 4 4 ⎥
⎢ ⎥
⎢ √2 -√2 ⎥
⎢ 0 ── ──── 0⎥
⎢ 2 2 ⎥
⎢ ⎥
⎢-√3 √2 √2 ⎥
⎢──── ── ── 0⎥
⎢ 2 4 4 ⎥
⎢ ⎥
⎣ 0 0 0 1⎦
>>> htm2eul(H)
⎛ ⎛√3⎞ ⎞
⎜atan⎜──⎟, atan(√7), -atan(√6)⎟
⎝ ⎝2 ⎠ ⎠
>>> htm2eul(H, deg=True)
(40.8933946491309, 69.2951889453646, -67.7923457014035)
"""
if seq in ("ZXZ","zxz"):
return _htm2zxz(H, deg)
# elif seq in ("ZYZ","zyz"):
# return _htm2zyz(H, deg)
else:
raise ValueError("Currently only ZXZ sequence is supported")
def _htm2zxz(H, deg=False):
"""
Calculates ZXZ Euler Angles from a homogeneous transformation matrix
"""
R = H[:3,:3] # rotation sub-matrix
r33,r13,r23,r31,r32,r11,r12,r21 = R[2,2],R[0,2],R[1,2],R[2,0],R[2,1],R[0,0],R[0,1],R[1,0]
if abs(r33) != 1:
theta = atan2(sqrt(1-r33**2), r33)
phi = atan2(r13, -r23)
psi = atan2(r31, r32)
elif r33==1:
theta = 0
phi = 0
psi = atan2(r21, r11)
elif r33==-1:
theta = pi
psi = 0
phi = atan2(r21, r11)
else:
theta = atan2(sqrt(1-r33**2), r33)
phi = atan2(r13,-r23)
psi = atan2(r31,r32)
if deg:
return rad2deg(phi), rad2deg(theta), rad2deg(psi)
return phi,theta,psi
def htmtra(*args,**kwargs):
"""
Calculate the homogeneous transformation matrix of a translation
Parameters
----------
*args : list, tuple, int, float
Translation vector or components
**kwargs : float, int
dx, dy and dz keyword arguments
Returns
-------
H : :class:`sympy.matrices.dense.MutableDenseMatrix`
Homogeneous transformation matrix
Examples
--------
>>> htmtra([50,-100,30])
⎡1 0 0 50 ⎤
⎢ ⎥
⎢0 1 0 -100⎥
⎢ ⎥
⎢0 0 1 30 ⎥
⎢ ⎥
⎣0 0 0 1 ⎦
>>> a,b,c = symbols("a,b,c")
>>> htmtra([a,b,c])
⎡1 0 0 a⎤
⎢ ⎥
⎢0 1 0 b⎥
⎢ ⎥
⎢0 0 1 c⎥
⎢ ⎥
⎣0 0 0 1⎦
Using float/integer arguments:
>>> htmtra(10,-40,50)
⎡1 0 0 10 ⎤
⎢ ⎥
⎢0 1 0 -40⎥
⎢ ⎥
⎢0 0 1 50 ⎥
⎢ ⎥
⎣0 0 0 1 ⎦
Using keyword arguments:
>>> htmtra(dz=100,dx=300,dy=-200)
⎡1 0 0 300 ⎤
⎢ ⎥
⎢0 1 0 -200⎥
⎢ ⎥
⎢0 0 1 100 ⎥
⎢ ⎥
⎣0 0 0 1 ⎦
"""
if args and not kwargs:
if isinstance(args[0], (list,tuple)):
d = args[0]
elif len(args)==3:
d = args
elif kwargs and not args:
d = [0,0,0]
if "dx" in kwargs:
d[0] = kwargs.get("dx")
if "dy" in kwargs:
d[1] = kwargs.get("dy")
if "dz" in kwargs:
d[2] = kwargs.get("dz")
else:
raise ValueError("Only pass *args or **kwargs, not both")
dx,dy,dz = d[0],d[1],d[2]
M = Matrix([[1,0,0,dx],
[0,1,0,dy],
[0,0,1,dz],
[0,0,0,1]])
return M
def htmrot(theta, axis="z", deg=False):
"""
Return a homogeneous transformation matrix that represents a
rotation "theta" about "axis".
Parameters
----------
theta : float, int or `symbolic`
Rotation angle (given in radians by default)
axis : str
Rotation axis
deg : bool
        Is theta given in degrees? Defaults to False.
Returns
-------
H : :class:`sympy.matrices.dense.MutableDenseMatrix`
Homogeneous transformation matrix
Examples
--------
>>> htmrot(pi/2)
⎡0 -1 0 0⎤
⎢ ⎥
⎢1 0 0 0⎥
⎢ ⎥
⎢0 0 1 0⎥
⎢ ⎥
⎣0 0 0 1⎦
>>> htmrot(pi/2, "x")
⎡1 0 0 0⎤
⎢ ⎥
⎢0 0 -1 0⎥
⎢ ⎥
⎢0 1 0 0⎥
⎢ ⎥
⎣0 0 0 1⎦
>>> htmrot(30, "y", True)
⎡0.866025403784439 0 0.5 0⎤
⎢ ⎥
⎢ 0 1 0 0⎥
⎢ ⎥
⎢ -0.5 0 0.866025403784439 0⎥
⎢ ⎥
⎣ 0 0 0 1⎦
>>> t = symbols("t")
>>> htmrot(t, "x")
⎡1 0 0 0⎤
⎢ ⎥
⎢0 cos(t) -sin(t) 0⎥
⎢ ⎥
⎢0 sin(t) cos(t) 0⎥
⎢ ⎥
⎣0 0 0 1⎦
"""
if deg: # Is theta given in degrees? -> then convert to radians
theta = deg2rad(theta)
if axis in ("z","Z",3,"3"):
R = rotz(theta)
elif axis in ("y","Y",2,"2"):
R = roty(theta)
elif axis in ("x","X",1,"1"):
R = rotx(theta)
else:
R = eye(3) # raise except (to impl)
H = _rot2htm(R)
return H
def _rot2htm(R):
"""
Given a SO(3) matrix return a SE(3) homogeneous
transformation matrix.
"""
_H = R.row_join(zeros(3,1))
H = _H.col_join(Matrix([0,0,0,1]).T)
return H
def rot2axa(R, deg=False):
"""
Given a SO(3) matrix return the axis-angle representation
"""
r32,r23 = R[2,1],R[1,2]
r13,r31 = R[0,2],R[2,0]
r21,r12 = R[1,0],R[0,1]
theta = acos((R.trace() - 1)/2)
k = ( (1/(2*sin(theta)))*Matrix([r32-r23, r13-r31, r21-r12]) ).evalf()
if deg:
theta = rad2deg(theta)
return k,theta
def axa2rot(k,theta):
"""
Given a R^3 vector (k) and an angle (theta), return
the SO(3) matrix associated.
"""
if isinstance(k,(list,tuple)):
k = Matrix(k)
ct = cos(theta)
st = sin(theta)
vt = 1 - cos(theta)
kx,ky,kz = k.normalized()
r11 = kx**2*vt + ct
r21 = kx*ky*vt + kz*st
r31 = kx*kz*vt - ky*st
r12 = kx*ky*vt - kz*st
r22 = ky**2*vt + ct
r32 = ky*kz*vt + kx*st
r13 = kx*kz*vt + ky*st
r23 = ky*kz*vt - kx*st
r33 = kz**2*vt + ct
R = Matrix([[r11,r12,r13],[r21,r22,r23],[r31,r32,r33]])
return R
def skew(u):
"""
Return skew-symmetric matrix associated to u vector
"""
ux,uy,uz = u
S = Matrix([[0, -uz, uy],
[uz, 0, -ux],
[-uy, ux, 0]])
return S
if __name__=="__main__":
print(eul2htm(0,0,pi,"xyy")) |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-07-09 02:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('projects', '0003_build_error'),
]
operations = [
migrations.AlterField(
model_name='build',
name='result',
field=models.IntegerField(choices=[(0, 'Pending'), (10, 'Fail'), (19, 'Non-critical Fail'), (20, 'Pass')], default=0),
),
]
|
# -*- coding: utf-8 -*-
#
# This file is part of AceQL Python Client SDK.
# AceQL Python Client SDK: Remote SQL access over HTTP with AceQL HTTP.
# Copyright (C) 2021, KawanSoft SAS
# (http://www.kawansoft.com). All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
import os
from os.path import sep
from aceql import Connection
from aceql._private.aceql_debug_parms import AceQLDebugParms
from tests.util.connection_builder import ConnectionBuilder
class TestExecuteServerQuery():
@staticmethod
def test_main(connection: Connection):
AceQLDebugParms.DEBUG_ON = False
print()
print("aceql version : " + Connection.get_client_version())
print("aceql version full : " + Connection.get_client_version_full())
print("Connection Options : " + str(connection.get_connections_options()))
print("Connection creation: " + str(connection.get_creation_datetime()))
print("Database Info : " + str(connection.get_database_info()))
cursor = connection.cursor()
server_query_executor_class_name = "com.mycompany.MyServerQueryExecutor"
my_parameters = [1]
cursor.execute_server_query(server_query_executor_class_name, my_parameters)
print("cursor.rowcount : " + str(cursor.rowcount))
rows = cursor.fetchall()
print("fetchall:")
for row in rows:
print(row)
cursor.close()
if __name__ == '__main__':
connection: Connection = ConnectionBuilder.get_connection()
try:
TestExecuteServerQuery.test_main(connection)
finally:
connection.close()
print("The End!")
exit() |
# Convert a binary number (entered as digits, e.g. 1011) to its decimal value (e.g. 11)
i = 1  # place value of the current binary digit (1, 2, 4, 8, ...)
s = 0  # accumulated decimal result
dec = int(input("Enter a binary number\n"))
while dec > 0:
    rem = dec % 10     # take the lowest binary digit
    s = s + (i * rem)  # add its contribution
    dec = dec // 10    # drop that digit
    i = i * 2          # next place value
print("\n")
print(s)
input()
|
'''
Kattis - pervasiveheartmonitor
Simple input parsing, just try to write as little code as possible.
'''
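# Illustrative line handling (values made up): an input line such as
#   "Ingrid 50.0 60.0"
# has its alphabetic tokens peeled off as the patient's name and the remaining
# readings averaged, printing "55.0 Ingrid".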
import statistics
from sys import stdin
for line in stdin:
line = line.strip()
line = line.split()
name = []
while line[0][0].isalpha():
name.append(line.pop(0))
while line[-1][0].isalpha():
name.insert(0, (line.pop()))
name = ' '.join(name)
line = [float(x) for x in line]
line = statistics.mean(line)
print(line, name) |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2019-08-02 13:59
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
replaces = [('analytics', '0001_initial'), ('analytics', '0002_node'), ('analytics', '0003_delete_node'), ('analytics', '0004_auto_20180117_1045'), ('analytics', '0005_importconfig'), ('analytics', '0006_analyticsimporttask'), ('analytics', '0007_query_api_mgmt_id'), ('analytics', '0008_auto_20180725_1114_squashed_0009_query_uri'), ('analytics', '0009_importconfig_last_cursor'), ('analytics', '0010_importconfig_time'), ('analytics', '0011_auto_20190123_1530'), ('analytics', '0012_auto_20190124_1117'), ('analytics', '0013_remove_importconfig_time'), ('analytics', '0014_auto_20190220_1127'), ('analytics', '0015_analyticsimporttask_node'), ('analytics', '0016_auto_20190613_1103')]
initial = True
dependencies = [
('django_datajsonar', '0006_synchronizer_node'),
]
operations = [
migrations.CreateModel(
name='Query',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ids', models.TextField()),
('args', models.TextField()),
('timestamp', models.DateTimeField()),
('ip_address', models.CharField(max_length=200, null=True)),
('params', models.TextField()),
('api_mgmt_id', models.IntegerField(blank=True, null=True, unique=True)),
('request_time', models.DecimalField(decimal_places=25, default=0, max_digits=30)),
('status_code', models.IntegerField(default=0)),
('user_agent', models.TextField(default='')),
('uri', models.TextField(default='')),
],
options={
'verbose_name_plural': 'Tabla consultas',
},
),
migrations.CreateModel(
name='ImportConfig',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('endpoint', models.URLField()),
('token', models.CharField(max_length=64)),
('kong_api_id', models.CharField(max_length=64)),
('last_cursor', models.CharField(blank=True, max_length=64)),
],
options={
'abstract': False,
'verbose_name': 'Configuración de importación de analytics',
},
),
migrations.CreateModel(
name='AnalyticsImportTask',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.CharField(choices=[('RUNNING', 'Procesando catálogos'), ('FINISHED', 'Finalizada')], max_length=20)),
('logs', models.TextField()),
('created', models.DateTimeField(auto_now_add=True)),
('finished', models.DateTimeField(null=True)),
('node', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='django_datajsonar.Node')),
],
options={
'verbose_name': 'Corrida de importación de analytics',
'verbose_name_plural': 'Corridas de importación de analytics',
},
),
migrations.CreateModel(
name='HitsIndicator',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('serie_id', models.CharField(max_length=64)),
('date', models.DateField()),
('hits', models.IntegerField()),
],
options={
'verbose_name': 'Consultas por día de serie',
'verbose_name_plural': 'Consultas por día de series',
},
),
migrations.AlterUniqueTogether(
name='hitsindicator',
unique_together=set([('serie_id', 'date')]),
),
]
|
from torch import nn
from video.ConvLstmCell import ConvLstmCell
import torch
class LSTM_PixelSnail(nn.Module):
def _to_one_hot(self, y, num_classes):
scatter_dim = len(y.size())
y_tensor = y.view(*y.size(), -1)
zeros = torch.zeros(*y.size(), num_classes, dtype=y.dtype).to('cuda')
return zeros.scatter(scatter_dim, y_tensor, 1)
def __init__(self, lstm_model, cnn_model, pixel_model):
super().__init__()
self.type = type
self.pixel_model = pixel_model
self.lstm_model = lstm_model
self.cnn_model = cnn_model
def forward(self, inputs_, cells_state):
input_ = inputs_[:, 0, :, :, :]
target = inputs_[:, 1, :, :, :]
lstm_out, cells_state = self.lstm_model(input_, cells_state)
cnn_out = self.cnn_model(lstm_out)
out, _ = self.pixel_model(target, condition=cnn_out)
return out, cells_state
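# Rough data-flow sketch, inferred from forward() above (shapes and frame roles
# are an assumption, not documented by the original code):
#
#   inputs_     : (B, 2, C, H, W) -- index 0 is the conditioning frame, index 1 the target
#   lstm_model  : conditioning frame + recurrent cell state -> features
#   cnn_model   : features -> conditioning map for the PixelSNAIL prior
#   pixel_model : autoregressive prediction of the target frame given that condition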
|
# :coding: utf-8
# :copyright: Copyright (c) 2021 strack
"""Describe the distribution to distutils."""
# Import third-party modules
import os
from setuptools import find_packages
from setuptools import setup
ROOT_PATH = os.path.dirname(os.path.realpath(__file__))
README_PATH = os.path.join(ROOT_PATH, 'README.md')
requires = [
'dayu_widgets >= 0.0.1',
'strack-api >=1.0.0',
'PyYAML >= 5.0'
]
setup(
name='strack-connect',
author='weijer',
url='https://github.com/cgpipline/strack-connect',
license='Apache License (2.0)',
version='0.0.1',
author_email='[email protected]',
description=('Connect for strack.'),
long_description=open(README_PATH, 'r', encoding='UTF-8').read(),
classifiers=[
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: Implementation'
],
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=requires,
setup_requires=['pytest-runner'],
tests_require=['pytest']
)
|
import unittest
import subprocess
import sys
import socket
import os
import time
import signal
import argparse
import across
from across.__main__ import _parse_tcp
from .utils import par, mktemp, localhost, localhost_ipv6, anyaddr, skip_if_no_unix_sockets, skip_if_no_ipv6
def spawn_main(args, **popen_extras):
return subprocess.Popen(
[sys.executable, '-m', 'across'] + args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
**popen_extras
)
def run_main(args):
process = spawn_main(args)
process.stdin.close()
stdout = process.stdout.read()
process.stdout.close()
rc = process.wait()
return rc, stdout.decode()
def serve(sock):
with across.Connection.from_socket(sock.accept()[0]) as conn:
conn.wait()
class ParserTest(unittest.TestCase):
def test_parse_tcp(self):
for text, address in [
('wonderland:1865', ('wonderland', 1865)),
('wonderland:0', ('wonderland', 0)),
('127.0.0.1:65535', ('127.0.0.1', 65535)),
('[::1]:20', ('::1', 20)),
('[::ffff:192.0.2.128]:20', ('::ffff:192.0.2.128', 20)),
]:
self.assertEqual(_parse_tcp(text), ('tcp',) + address)
for text in [
'bad',
'20',
'bad:-20',
'bad:100000',
'[bad]:20',
'[bad.bad]:20',
'::1:20',
'[[::1]:20',
'[]::1]:20',
'ba[d:20',
'ba]d:20',
]:
self.assertRaises(argparse.ArgumentTypeError, _parse_tcp, text)
# Note that tests using this function have a race condition :(
def _random_port():
sock = socket.socket()
sock.bind((anyaddr, 0))
port = sock.getsockname()[1]
sock.close()
return port
class MainTest(unittest.TestCase):
def test_version(self):
rc, out = run_main(['--version'])
self.assertEqual(rc, 0)
self.assertEqual(out.strip(), across.__version__)
def test_tcp_client(self):
sock = socket.socket()
sock.bind((localhost, 0))
sock.listen(1)
(rc, out), _ = par(
lambda: run_main([
'--tcp',
'{}:{}'.format(localhost, sock.getsockname()[1]),
'--execute',
'import os; os.getpid()',
]),
lambda: serve(sock),
)
sock.close()
self.assertEqual(rc, 0)
self.assertEqual(int(out.strip()), os.getpid())
def test_tcp_server(self):
self._run_tcp_server_test(socket.AF_INET)
@skip_if_no_ipv6
def test_tcp_server_ipv6(self):
self._run_tcp_server_test(socket.AF_INET6)
def _run_tcp_server_test(self, family):
if family == socket.AF_INET6:
host = localhost_ipv6
escaped_host = '[{}]'.format(host)
else:
host = escaped_host = localhost
port = _random_port()
process = spawn_main(['--server', '--tcp', '{}:{}'.format(escaped_host, port)])
sock = socket.socket(family)
while sock.connect_ex((host, port)) != 0:
time.sleep(0.01)
self.assertIsNone(process.poll())
os.kill(process.pid, signal.SIGINT)
process.wait()
sock.close()
process.stdin.close()
process.stdout.close()
def test_bad_usage(self):
args_list = [
# no action/address
[],
# no action
['--stdio'],
# no address
['--wait'],
# stdio in server mode
['--server', '--stdio'],
# action in server mode
['--server', '--tcp', '{}:0'.format(localhost), '--execute', 'pass'],
# bad TCP addresses
['--server', '--tcp', ''],
['--server', '--tcp', 'bad'],
['--server', '--tcp', '{}:bad'.format(localhost)],
]
for args in args_list:
process = spawn_main(args, stderr=subprocess.PIPE)
out, err = process.communicate()
self.assertEqual(process.wait(), 2)
self.assertEqual(out, b'')
@skip_if_no_unix_sockets
class UnixMainTest(unittest.TestCase):
def test_unix_client(self):
path = mktemp()
sock = socket.socket(socket.AF_UNIX)
sock.bind(path)
sock.listen(1)
(rc, out), _ = par(
lambda: run_main([
'--unix',
path,
'--execute',
'import os; os.getpid()',
]),
lambda: serve(sock),
)
sock.close()
self.assertEqual(rc, 0)
self.assertEqual(int(out.strip()), os.getpid())
def test_unix_server(self):
path = mktemp()
process = spawn_main(['--server', '--unix', path])
sock = socket.socket(socket.AF_UNIX)
while sock.connect_ex(path) != 0:
time.sleep(0.01)
self.assertIsNone(process.poll())
os.kill(process.pid, signal.SIGINT)
process.wait()
sock.close()
process.stdin.close()
process.stdout.close()
|
# python stdlib dependencies
import binascii
import os
import sys
import yaml
# local imports
from .command import Command
from .command import commandlet
from .db import Db
from .utils import TemporaryDirectory
from .utils import hash_pass
from .utils import check_pin
from .utils import rand_str
from .utils import getwrapper
from .utils import load_sobject
from .utils import load_sealobject
from .tpm2 import Tpm2
from .pkcs11t import *
class NewKeyCommandBase(Command):
'''
creates a key to a token within a tpm2-pkcs11 store.
'''
def generate_options(self, group_parser):
group_parser.add_argument(
'--id',
help='The key id. Defaults to a random 8 bytes of hex.\n',
default=binascii.hexlify(os.urandom(8)))
pinopts = group_parser.add_mutually_exclusive_group(required=True)
pinopts.add_argument('--sopin', help='The Administrator pin.\n'),
pinopts.add_argument('--userpin', help='The User pin.\n'),
# Implemented by derived class
def new_key_create(self, sobjctx, sobjauth, objauth, tpm2, path, alg,
privkey):
raise NotImplementedError('Implement: new_key')
@staticmethod
def new_key_init(label, sopin, userpin, db, tpm2):
token = db.gettoken(label)
# Get the primary object encrypted auth value and sokey information
# to decode it. Based on the incoming pin
is_so = sopin != None
pin = sopin if is_so else userpin
pobjauth = check_pin(token, pin, is_so)
# At this point we have recovered the ACTUAL auth value for the primary object, so now we
# can load up the seal objects
pobj, sealctx, sealauth = load_sealobject(token, tpm2, db, pobjauth,
pin, is_so)
# Now that the sealobject is loaded, we need to unseal the wrapping key
# object auth or the key when the TPM doesn't support encryptdecrypt
wrappingkeyauth = tpm2.unseal(sealctx, sealauth)
wrapper = getwrapper(token, db, tpm2, pobjauth, wrappingkeyauth)
sobjctx, sobjauth = load_sobject(token, db, tpm2, wrapper, pobj,
pobjauth)
#create an auth value for the tertiary object.
objauth = hash_pass(rand_str(32))['hash']
encobjauth = wrapper.wrap(objauth)
return (sobjctx, sobjauth, encobjauth, objauth)
@staticmethod
def new_key_save(alg, keylabel, tid, label, tertiarypriv, tertiarypub,
tertiarypubdata, encobjauth, objauth, db, tpm2):
token = db.gettoken(label)
#
# Cache the objects attributes from the public structure and other sources
# and populate the db with the data. This allows use of the public data
# without needed to load any objects which requires a pin to do.
#
        y = yaml.safe_load(tertiarypubdata)
if alg.startswith('rsa'):
attrs = [
{
CKA_KEY_TYPE: CKK_RSA
},
{
CKA_CLASS: CKO_PRIVATE_KEY
},
{
CKA_CLASS: CKO_PUBLIC_KEY
},
{
CKA_ID: tid
},
{
CKA_MODULUS: y['rsa']
},
{
CKA_PUBLIC_EXPONENT: 65537
},
]
mech = [{
CKM_RSA_X_509: ""
}, {
CKM_RSA_PKCS_OAEP: {
"hashalg": CKM_SHA256,
"mgf": CKG_MGF1_SHA256
}
}]
elif alg.startswith('ecc'):
attrs = [
{
CKA_KEY_TYPE: CKK_EC
},
{
CKA_CLASS: CKO_PRIVATE_KEY
},
{
CKA_CLASS: CKO_PUBLIC_KEY
},
{
CKA_ID: tid
},
]
mech = [{CKM_ECDSA: ""},]
elif alg.startswith('aes'):
attrs = [{
CKA_CLASS: CKO_SECRET_KEY
}, {
CKA_KEY_TYPE: CKK_AES
}, {
CKA_VALUE_BITS: y['sym-keybits']
}, {
CKA_VALUE_LEN: y['sym-keybits'] / 8
}]
mech = [{CKM_AES_CBC: ""},]
else:
sys.exit('Cannot handle algorithm: "{}"'.format(alg))
# Add keylabel for ALL objects if set
if keylabel is not None:
attrs.append({CKA_LABEL: keylabel})
# Now get the secondary object from db
sobj = db.getsecondary(token['id'])
# Store to database
rowid = db.addtertiary(sobj['id'], tertiarypriv, tertiarypub,
encobjauth, attrs, mech)
# if the keylabel is not set, use the tertiary object tid as the keylabel
# Normally we would use a transaction to make this atomic, but Pythons
# sqlite3 transaction handling is quite odd. So when the keylabel is None, just insert
# into the db without that attribute, retrieve the primary key, and then issue an
# update. A possible race exists if someone is looking for the key by label between
# these operations.
# See:
# - https://stackoverflow.com/questions/107005/predict-next-auto-inserted-row-tid-sqlite
if keylabel is None:
keylabel = str(rowid)
attrs.append({CKA_LABEL: keylabel})
db.updatetertiaryattrs(rowid, attrs)
db.commit()
return keylabel
def __call__(self, args):
path = args['path']
with Db(path) as db:
with TemporaryDirectory() as d:
tpm2 = Tpm2(d)
label = args['label']
sopin = args['sopin']
userpin = args['userpin']
alg = args['algorithm']
key_label = args['key_label']
tid = args['id']
privkey = None
                try:
                    privkey = args['privkey']
                except KeyError:
                    # --privkey is only defined for the import commandlet
                    privkey = None
sobjctx, sobjauth, encobjauth, objauth = NewKeyCommandBase.new_key_init(
label, sopin, userpin, db, tpm2)
tertiarypriv, tertiarypub, tertiarypubdata = self.new_key_create(
sobjctx, sobjauth, objauth, tpm2, path, alg, privkey)
final_key_label = NewKeyCommandBase.new_key_save(
alg, key_label, tid, label, tertiarypriv, tertiarypub,
tertiarypubdata, encobjauth, objauth, db, tpm2)
return final_key_label
@commandlet("import")
class ImportCommand(NewKeyCommandBase):
'''
Imports a rsa key to a token within a tpm2-pkcs11 store.
'''
# adhere to an interface
# pylint: disable=no-self-use
def generate_options(self, group_parser):
super(ImportCommand, self).generate_options(group_parser)
group_parser.add_argument(
'--privkey',
help='Full path of the private key to be imported.\n',
required=True)
group_parser.add_argument(
'--label',
help='The tokens label to import the key too.\n',
required=True)
group_parser.add_argument(
'--key-label',
help='The label of the key imported. Defaults to an integer value.\n'
)
group_parser.add_argument(
'--algorithm',
help='The type of the key.\n',
choices=['rsa'],
required=True)
# Imports a new key
def new_key_create(self, sobjctx, sobjauth, objauth, tpm2, path, alg,
privkey):
if alg != 'rsa':
sys.exit('Unknown algorithm or algorithm not supported, got "%s"' %
alg)
if privkey == None:
sys.exit("Invalid private key path")
tertiarypriv, tertiarypub, tertiarypubdata = tpm2.importkey(
sobjctx, sobjauth, objauth, privkey=privkey, alg=alg)
return (tertiarypriv, tertiarypub, tertiarypubdata)
def __call__(self, args):
keylabel = super(self.__class__, self).__call__(args)
print('Imported key as label: "{keylabel}"'.format(keylabel=keylabel))
@commandlet("addkey")
class AddKeyCommand(NewKeyCommandBase):
'''
Adds a key to a token within a tpm2-pkcs11 store.
'''
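    # Illustrative invocation, assuming the usual tpm2-pkcs11 command-line front
    # end that dispatches these commandlets (tool name, token label, pin and key
    # label below are made up):
    #
    #   tpm2_ptool.py addkey --label=mytoken --userpin=myuserpin \
    #       --algorithm=rsa2048 --key-label=mykey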
# adhere to an interface
# pylint: disable=no-self-use
def generate_options(self, group_parser):
super(self.__class__, self).generate_options(group_parser)
group_parser.add_argument(
'--label',
help='The tokens label to add a key too.\n',
required=True)
group_parser.add_argument(
'--algorithm',
help='The type of the key.\n',
choices=[
'rsa1024', 'rsa2048', 'aes128', 'aes256', 'ecc224', 'ecc256',
'ecc384', 'ecc521'
],
required=True)
group_parser.add_argument(
'--key-label',
help='The key label to identify the key. Defaults to an integer value.\n'
)
# Creates a new key
def new_key_create(self, sobjctx, sobjauth, objauth, tpm2, path, alg,
privkey):
tertiarypriv, tertiarypub, tertiarypubdata = tpm2.create(
sobjctx, sobjauth, objauth, alg=alg)
return (tertiarypriv, tertiarypub, tertiarypubdata)
def __call__(self, args):
keylabel = super(self.__class__, self).__call__(args)
print('Added key as label: "{keylabel}"'.format(keylabel=keylabel))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import glob
import time
from collections import deque
import os
import sys
# os.environ['CUDA_VISIBLE_DEVICES']='-1'
from tracker import tracker,detector,helpers
import cv2
import tqdm
import argparse
import config
from data_management import get_mapped_dir_lists,sort_frames
from data_utils import box_to_str
class single_tracker(object):
def __init__(self,max_age=65,frame_count=0,min_hits=1):
self.tracker_list=[]
self.track_id_list= deque([str(i) for i in range(100)])
self.max_age=max_age
self.frame_count=frame_count
self.min_hits=min_hits
self.detect_track_iou_thres=0.6
self.box_ios_thres=0.8
self.otsu_iou_thres=0.5
self.current_tracker=None
# Global variables used by the tracking functions
frame_count = 0   # frame counter
max_age = 20      # no. of consecutive unmatched detections before
                  # a track is deleted
min_hits = 1      # no. of consecutive matches needed to establish a track
tracker_list = [] # list of trackers
# list of track IDs
track_id_list = deque(['1', '2', '3', '4', '5', '6', '7', '8', '9', '10'])
debug = False
'''
Function to track single person
'''
def pipeline_single_tracker(det,img,otsu_box,track,draw=False):
track.frame_count+=1
org_im=img.copy()
img_dim = (img.shape[1], img.shape[0])
if debug:
print('\nFrame:', track.frame_count,' \n')
#Detect person in the image
detect_box = det.get_localization(img) # measurement
final_box=[]
improved=False
#check for small box
if len(detect_box)!=0:
detect_box=helpers.remove_small_box(detect_box[0],height_limit=150,width_limit=150)
#If Detected
if len(detect_box)!=0:
if debug:
print("Detection found")
# detect_box=detect_box[0]
if draw:
img1= helpers.draw_box_label('Org Det.',img, detect_box, box_color=(255, 0, 0))
#Tracker alive or not
if track.current_tracker!=None:
#If alive
if debug:
print("Tracker Alive")
track_box=track.current_tracker.box
#Track result matches,detection or not
#------------------------------------
#If matches
if helpers.box_iou2(track_box,detect_box)>track.detect_track_iou_thres:
#Check abnormal detect box
#Abnormal, use previous box NOTE can be improved
# detect_area=helpers.find_area(detect_box)
# track_area=helpers.find_area(track_box)
height_d,width_d=helpers.find_dim(detect_box)
height_t,width_t=helpers.find_dim(track_box)
delta=0.2
delta2=0.3
if height_d<(1-delta)*height_t or width_d<(1-delta)*width_t or height_d>(1+delta2)*height_t or width_d>(1+delta2)*width_t:
if debug:
print("Detection improved by tracker")
improved=True
detect_box=track.current_tracker.box
# detect_box=helpers.union_box(track_box,detect_box)
#Track box does not matched
elif otsu_box==True and helpers.box_ios(detect_box,track_box)>track.box_ios_thres and helpers.box_iou2(track_box,helpers.largest_contour(img))>track.otsu_iou_thres:
if debug:
print("Detect box is subset of track. Track box and Otsu are similar.")
print("Detection improved by tracker")
detect_box=track.current_tracker.box
improved=True
else:
if debug:
print("Tracker lost deleting the current tracker")
track.tracker_list.append(track.current_tracker)
track.current_tracker=None
#Improve detect_box by Otsu or any other way
if otsu_box==True:
ret,detect_box=helpers.detection_otsu(img,detect_box,draw=True,threshold=track.otsu_iou_thres)
#Update or create tracker
#Update if exist or matched
if track.current_tracker!=None:
final_box = detect_box
z = np.expand_dims(detect_box, axis=0).T
track.current_tracker.kalman_filter(z)
xx = track.current_tracker.x_state.T[0].tolist()
xx =[xx[0], xx[2], xx[4], xx[6]]
if improved:
final_box = xx
track.current_tracker.box =xx
track.current_tracker.hits += 1
track.current_tracker.no_losses =0
else:
final_box = detect_box
z = np.expand_dims(detect_box, axis=0).T
track.current_tracker = tracker.Tracker() # Create a new tracker
x = np.array([[z[0], 0, z[1], 0, z[2], 0, z[3], 0]]).T
track.current_tracker.x_state = x
track.current_tracker.predict_only()
xx = track.current_tracker.x_state
xx = xx.T[0].tolist()
xx =[xx[0], xx[2], xx[4], xx[6]]
track.current_tracker.box =xx
track.current_tracker.id = track.track_id_list.popleft() # assign an ID for the tracker
if debug:
print("New Tracker\n ID: ",track.current_tracker.id)
#Not Detection
else:
#Tracker alive or not
#alive
if track.current_tracker!=None:
if debug:
print("Tracker Alive")
track.current_tracker.predict_only()
xx = track.current_tracker.x_state
xx = xx.T[0].tolist()
xx =[xx[0], xx[2], xx[4], xx[6]]
if otsu_box==True:
# if False:
current_state=xx
flag,current_otsu=helpers.tracker_otsu(img,current_state,draw=True,threshold=track.otsu_iou_thres)
if not flag:
if debug:
print("Tracker does not matched with Otsu box, Tracker id",track.current_tracker.id)
xx=helpers.remove_small_box(xx,height_limit=150,width_limit=150)
if len(xx)==0:
if debug:
print("Small track box. Deleting...............")
track.tracker_list.append(track.current_tracker)
track.current_tracker=None
final_box = []
else:
track.current_tracker.no_losses+=1
track.current_tracker.box =xx
final_box = xx
else:
if debug:
print("Tracker box matched with Otsu box, Tracker id",track.current_tracker.id)
track.current_tracker.no_losses += 0.5
final_box = current_otsu
current_otsu = np.expand_dims(current_otsu, axis=0).T
track.current_tracker.kalman_filter(current_otsu)
xx = track.current_tracker.x_state.T[0].tolist()
xx =[xx[0], xx[2], xx[4], xx[6]]
track.current_tracker.box =xx
else:
if debug:
print("No Otsu")
xx=helpers.remove_small_box(xx,height_limit=150,width_limit=150)
if len(xx)==0:
if debug:
print("Small track box. Deleting...............")
track.tracker_list.append(track.current_tracker)
track.current_tracker=None
final_box = []
else:
track.current_tracker.no_losses += 1
track.current_tracker.box =xx
final_box = xx
#---------------------
#Person left the frames or not
#If left
#Not left, no detection
#Not active tracker
else:
if debug:
print("No tracked Box ")
#Final box
if track.current_tracker!=None:
if ((track.current_tracker.hits >= min_hits) and (track.current_tracker.no_losses <=max_age)):
# final_box = track.current_tracker.box
if debug:
print('updated box: ', final_box)
print()
if draw:
img= helpers.draw_box_label("Final",img, final_box,show_label=True) # Draw the bounding boxes on the
img= helpers.draw_box_label(track.current_tracker.id,img, track.current_tracker.box,box_color=(255, 255,0),show_label=False) # Draw the bounding boxes on the
elif track.current_tracker.no_losses >max_age:
if debug:
print('Tracker age criteria is not satisfied. Deleting..........')
track.tracker_list.append(track.current_tracker)
track.current_tracker=None
else:
if debug:
print('Tracker zero hit')
if draw:
img= helpers.draw_box_label("Final",img, final_box,show_label=True) # Draw the bounding boxes on the
if debug:
print("Final Box")
print(final_box)
return final_box,img
def tracking_frames(detection_threshold,frames,numbers,otsu_box=True):
"""
Function to track and return boxes
"""
detector_ = detector.PersonDetector(threshold=float(detection_threshold),model_path=config.detector_model_path)
track=single_tracker()
total=len(frames)
frame_num=[]
boxes=[]
for i in tqdm.tqdm(range(total)):
frame=frames[i]
number=numbers[i]
img=cv2.imread(frame)
box,new_img = pipeline_single_tracker(detector_,img,otsu_box,track,draw=False)
if len(box)!=0:
frame_num.append(number)
boxes.append(box)
return boxes,frame_num
def tracking_frames_to_csv(detector,frames,numbers,output_path,otsu_box=True,visualize=False):
"""
Function to track and save the corner coordinates in csv
input:
frames:- list of frames path in sorted order
        numbers: the sorted numbers as given in the image file names
Output_path: output csv path
otsu_box: True or False
visualize: True or False
"""
track=single_tracker()
filename=os.path.basename(output_path)
output_csvname=os.path.join(output_path,filename+'.csv')
outFile = open(output_csvname, 'w')
print("----------------------")
print("csv output path-",output_csvname)
print("----------------------")
outFile.write(','.join(['Frame number', 'Track box (y_up x_left y_down x_right)']) + '\n');
total=len(frames)
for i in tqdm.tqdm(range(total)):
frame=frames[i]
number=numbers[i]
img=cv2.imread(frame)
np.asarray(img)
box,new_img = pipeline_single_tracker(detector,img,otsu_box,track,draw=visualize)
if len(box)!=0:
outFile.write(','.join([str(number),box_to_str(box)]) + '\n');
if visualize:
cv2.imshow("frame",new_img)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
outFile.close()
cv2.destroyAllWindows()
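# Rough usage sketch (paths, dataset name and threshold are illustrative
# assumptions, not values taken from this repository):
#
#   det = detector.PersonDetector(threshold=0.3, model_path=config.detector_model_path)
#   frames, numbers = sort_frames(glob.glob('/data/Fall1/*.png'), 'TSF')
#   tracking_frames_to_csv(det, frames, numbers, '/output/Fall1', otsu_box=True)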
"""
Function to track and save the video visualizations
input:
frames:- list of frames path in sorted order
Output_path:
otsu_box:
visualize:
"""
def tracking_frames_to_video(detector,frames,output_path,frame_rate=10.0,otsu_box=True,visualize=False):
track=single_tracker()
output_filename=os.path.join(output_path,'Track.avi')
print("----------------------")
print("Video output path-",output_filename)
print("----------------------")
# start=time.time()
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter(output_filename,fourcc, frame_rate, (640,480))
for frame in tqdm.tqdm(frames):
img=cv2.imread(frame)
np.asarray(img)
# new_img = pipeline(det,img,otsu_box)
_,new_img = pipeline_single_tracker(detector,img,otsu_box,track,draw=True)
# cv2.imshow("frame",new_img)
out.write(new_img)
if visualize:
cv2.imshow("frame",new_img)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cv2.destroyAllWindows()
out.release()
if __name__ == "__main__":
# print("Available GPU devices")
parser = argparse.ArgumentParser(description='Person tracking on TSF dataset with Tensorflow API model and Kalman Filtering')
parser.add_argument('--detection_threshold', default=0.3,
help='Threshold for object detection')
parser.add_argument('--output_type', default='csv',
help='Type of output-csv or video')
parser.add_argument('--otsu_box',default=True,
help='Set True or False for contour box localization')
parser.add_argument('--visualize',default=False,
help='It will show the tracking boxes at run time')
parser.add_argument('--debug',default=False,
help='Print the intermediate steps for each image')
args = parser.parse_args()
otsu_box=False
if args.otsu_box=='True':
print("-------------------------")
print("Using Otsu Contour box")
print("-------------------------")
otsu_box=True
if args.debug=='True':
debug=True
visualize=False
if args.visualize=='True':
print("-------------------------")
print("Viusal mode: All the frames will be shown")
print("-------------------------")
visualize=True
#Initialization of tracker and detector
detector_ = detector.PersonDetector(threshold=float(args.detection_threshold),model_path=config.detector_model_path)
#Output video frame rate
frame_rate=10.0
root_drive = config.root_drive
track_root_folder=config.track_root_folder
output_type=args.output_type
dset=config.root_folder#The name of dataset should match the folder name
output_dir=root_drive+'/'+track_root_folder+'/'+output_type
#Getting input and output folder mapping
ADL_list,Fall_list=get_mapped_dir_lists(dset,output_dir=output_dir,d_type='frame')
if len(ADL_list)==0:
print("Dataset directory not found")
#Tracking fall videos
for input_path,output_path in tqdm.tqdm(Fall_list):
print("------------------")
print("input_path:",input_path)
print("------------------")
os.makedirs(output_path, exist_ok=True)
frames = glob.glob(input_path+'/*.jpg') + glob.glob(input_path+'/*.png')
frames,numbers = sort_frames(frames, dset)
if output_type=='video':
tracking_frames_to_video(detector_,frames,output_path,frame_rate,otsu_box,visualize)
elif output_type=='csv':
tracking_frames_to_csv(detector_,frames,numbers,output_path,otsu_box,visualize)
else:
print("Invalid output_type argument")
sys.exit()
#Tracking ADL videos
for input_path,output_path in tqdm.tqdm(ADL_list):
os.makedirs(output_path, exist_ok=True)
print("------------------")
print("input_path:",input_path)
print("------------------")
frames = glob.glob(input_path+'/*.jpg') + glob.glob(input_path+'/*.png')
frames,numbers = sort_frames(frames, dset)
if output_type=='video':
#tracking_frames_to_video(tracker,detector,frames,output_path,frame_rate=10.0,otsu_box=True,visualize=False)
            tracking_frames_to_video(detector_,frames,output_path,frame_rate,otsu_box,visualize)
elif output_type=='csv':
            tracking_frames_to_csv(detector_,frames,numbers,output_path,otsu_box,visualize)
else:
print("Invalid output_type argument")
sys.exit()
|
# -*- coding: utf-8 -*-
"""
This is a standalone script that writes a csv with columns Time in UTCG, Lat,
Lon, and Alt to a great arc propagator file (.pg). Inputs of vehicle ID and
full csv path are prompted from the user. Output is a .pg file in the same
directory that can be imported into any STK object with a great arc propagator.
"""
from agi.stk12.stkengine import STKEngine
from agi.stk12.stkutil import *
import csv
# Set up STK Engine for conversion utility to EpSec
stk = STKEngine.StartApplication(noGraphics=True)
root = stk.NewObjectRoot()
converter = root.ConversionUtility
epoch = converter.ConvertDate('EpSec', 'UTCG', '0')
# Example vehicle ID: GV1
vehicleID = input('Input vehicle ID: ')
# Example path: C:\LLA.csv
csvPath = input('Input full csv path: ')
# Read input file
file = open(csvPath)
inputData = list(csv.reader(file))
file.close()
# Create output file
outputPath = csvPath.rsplit('\\', 1)[0]
ephem = open(f'{outputPath}\\{vehicleID}.pg', 'w')
# Write header
numPoints = len(inputData) - 1
header = ['stk.v.5.0\n',
'BEGIN GreatArc\n',
'Method DetVelFromTime\n',
f'TimeOfFirstWaypoint {epoch}\n',
'ArcGranularity 0.01745\n',
'AltRef Terrain\n',
f'NumberOfWaypoints {numPoints}\n',
'BEGIN Waypoints\n']
ephem.writelines(header)
# Convert time to EpSec and write data points
line_count = 0
for row in inputData:
if line_count != 0:
epSecTime = converter.ConvertDate('UTCG', 'EpSec', row[0])
ephem.write(f'{epSecTime}\t{row[1]}\t{row[2]}\t0\t0\t0\n')
line_count += 1
# Write footer
footer = ['END Waypoints\n',
'END GreatArc\n']
ephem.writelines(footer)
# Close out
ephem.close()
stk.ShutDown() |
import os
import torch
from SRDataset import SRDataset
from SR_parser import parameter_parser
from tqdm import tqdm
from utils import save_result_pic
from models.HidingRes import HidingRes
opt = parameter_parser()
stage = "IniStage"
dataset_dir = "/home/ay3/houls/watermark_dataset/derain/"
StageDir = os.path.join(dataset_dir, stage)
mode = "train"
# "/home/ay3/houls/watermark_dataset/derain/IniStage/train"
modeStageDir = os.path.join(StageDir, mode)
testDir = os.path.join(dataset_dir, "test")
result_root = "/home/ay3/houls/Deep-Model-Watermarking/result"
result_stage = "derain_flower_Init"
result_time = "2021-09-30-11_20"
result_dir = os.path.join(result_root, result_stage, result_time)
rmodelname = "netR191.pth"
modelpath = os.path.join(result_dir, 'modelrun/outckpts', rmodelname)
# Directory of inputs fed to the SR model
# input_dir = os.path.join(result_dir, input_name)
# input_name = "test"
# input_dir = os.path.join(dataset_dir, "test")
input_dir = os.path.join(result_root, "derain_flower_SR/2021-10-05-21_22", "SRout_two")
output_dir = os.path.join(result_root, "derain_flower_SR/2021-10-05-21_22", "SRout_Rextractone")
if not os.path.exists(output_dir):
os.makedirs(output_dir)
netR = HidingRes(in_c=3, out_c=3)
netR.load_state_dict(torch.load(modelpath))
netR.cuda()
netR.eval()
Rdataset = SRDataset(opt, input_dir)
data_loader = torch.utils.data.DataLoader(
Rdataset, batch_size=1, shuffle=False, num_workers=int(opt.nThreads))
for i, data in tqdm(enumerate(data_loader)):
input_A = data['A'].cuda()
real_B = data['B'].cuda()
fake_B = data['B1'].cuda()
# B2 = data['B2'].cuda()
#print(f'input a size: {input_A.size()}')
this_batch_size = int(input_A.size()[0])
img_path = data['A_paths'][1]
watermark_B1 = netR(fake_B)
# watermark_B = netR(B2)
watermark_inputA = netR(input_A)
images_tensor = torch.cat([input_A, watermark_inputA, real_B, watermark_B1], axis=-1)
save_result_pic(images_tensor, img_path[0], "testSREmb", output_dir)
|
#!/usr/bin/env python
import os
import shutil
import sys
# Use the directory provided as a command line argument as the root to clean up;
# otherwise default to the current working directory
if len(sys.argv) > 1:
rootDir = sys.argv[1]
else:
rootDir = os.getcwd()
# Directory names to be removed
rmDirs = ["build", ".gradle", ".settings", ".cxx", ".idea", ".externalNativeBuild"]
# File extensions to be removed
rmFiles = '.iml'
# Walk the directory tree starting at the root
for root, subDirs, files in os.walk(rootDir):
for dir in subDirs:
if dir in rmDirs:
try:
shutil.rmtree(os.path.join(root, dir))
print("Delete Directory: " + os.path.join(root, dir))
except FileNotFoundError as err:
# Print error and continue
print(err)
print("Delete Failed: " + os.path.join(root, dir))
for file in files:
if file.endswith(rmFiles):
try:
os.remove(os.path.join(root, file))
print("Delete File: " + os.path.join(root, file))
except FileNotFoundError as err:
# Print error and continue
print(err)
print("Delete Failed: " + os.path.join(root, file))
|
import sys
import argparse
import RPi.GPIO as GPIO
import txaio
txaio.use_twisted()
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
from twisted.internet.task import LoopingCall
from twisted.internet.error import ReactorNotRunning
from autobahn import wamp
from autobahn.wamp.exception import ApplicationError
from autobahn.twisted.wamp import ApplicationSession
from autobahn.twisted.wamp import ApplicationRunner
from autobahn.twisted.choosereactor import install_reactor
def get_serial():
"""
Get the Pi's serial number.
"""
with open('/proc/cpuinfo') as fd:
for line in fd.read().splitlines():
line = line.strip()
if line.startswith('Serial'):
_, serial = line.split(':')
return '{}'.format(int(serial.strip(), 16))
class DispenserAdapter(ApplicationSession):
"""
Connects the Pi's GPIOs to WAMP.
"""
PINMODES = {
'bcm': GPIO.BCM,
'board': GPIO.BOARD
}
@inlineCallbacks
def onJoin(self, details):
self._serial = get_serial()
self._prefix = 'io.crossbar.demo.iotstarterkit.{}.dispenser'.format(self._serial)
self.log.info("Crossbar.io IoT Starterkit Serial No.: {serial}", serial=self._serial)
self.log.info("DispenserAdapter connected: {details}", details=details)
# get custom configuration
extra = self.config.extra
# Device ID and auxiliary info
self._digout_pins = extra.get("digout_pins", [])
self._digin_pins = extra.get("digin_pins", [])
self._scan_rate = extra.get("scan_rate", 30)
# init GPIO
GPIO.setwarnings(False)
pinmode = extra.get("pin_mode", "bcm")
if pinmode in DispenserAdapter.PINMODES:
GPIO.setmode(DispenserAdapter.PINMODES[pinmode])
else:
GPIO.setmode(GPIO.BCM)
GPIO.cleanup()
# setup GPIO pins
for digout_pin in self._digout_pins:
GPIO.setup(digout_pin, GPIO.OUT)
for digin_pin in self._digin_pins:
GPIO.setup(digin_pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
# setup pin state vectors
self._digout_state = [False for digout in self._digout_pins]
self._digin_state = [GPIO.input(digin_pin) == 1 for digin_pin in self._digin_pins]
# register methods on this object for remote calling via WAMP
for proc in [self.get_version,
self.set_digout,
self.get_digout,
self.trigger_digout,
self.toggle_digout,
self.get_digin]:
uri = '{}.{}'.format(self._prefix, proc.__name__)
yield self.register(proc, uri)
self.log.info("DispenserAdapter registered procedure {}".format(uri))
# start digin scanner
self._digin_scanner = LoopingCall(self._scan_digins)
self._digin_scanner.start(1./float(self._scan_rate))
# signal we are done with initializing our component
self.publish('{}.on_ready'.format(self._prefix))
# install a heartbeat logger
self._tick_no = 0
self._tick_loop = LoopingCall(self._tick)
self._tick_loop.start(5)
self.log.info("DispenserAdapter ready.")
def onLeave(self, details):
self.log.info("Session closed: {details}", details=details)
self.disconnect()
def onDisconnect(self):
self.log.info("Connection closed")
if self._tick_loop:
self._tick_loop.stop()
self._tick_loop = None
try:
reactor.stop()
except ReactorNotRunning:
pass
def _tick(self):
self._tick_no += 1
self.log.info('I am alive [tick {}]'.format(self._tick_no))
def get_version(self):
"""
Get Pi and board version information.
"""
version = {
'pi': GPIO.RPI_INFO,
'board': GPIO.RPI_INFO['P1_REVISION']
}
return version
def _check_digout_arg(self, digout):
if digout not in range(0, len(self._digout_pins)):
raise ApplicationError(ApplicationError.INVALID_ARGUMENT, "invalid value '{}' for digout".format(digout))
def set_digout(self, digout, state):
"""
Set a digout state.
"""
self._check_digout_arg(digout)
if type(state) != bool:
raise ApplicationError("ApplicationError.INVALID_ARGUMENT", "state must be a bool")
        # only process if state actually changes
if self._digout_state[digout] != state:
self._digout_state[digout] = state
# now set the digout value
GPIO.output(self._digout_pins[digout], GPIO.HIGH if state else GPIO.LOW)
# publish WAMP event
self.publish('{}.on_digout_changed'.format(self._prefix), digout=digout, state=state)
if state:
self.log.info("digout {} asserted".format(digout))
else:
self.log.info("digout {} deasserted".format(digout))
return True
else:
return False
def get_digout(self, digout=None):
"""
Get a digout state.
"""
if digout is not None:
self._check_digout_arg(digout)
return self._digout_state[digout]
else:
return self._digout_state
def trigger_digout(self, digout, period=500):
"""
Trigger a digout.
"""
self._check_digout_arg(digout)
self.set_digout(digout, True)
def clear():
self.set_digout(digout, False)
reactor.callLater(float(period)/1000., clear)
def toggle_digout(self, digout):
"""
Toggle a digout.
"""
self._check_digout_arg(digout)
self.set_digout(digout, not self._digout_state[digout])
return self._digout_state[digout]
def _check_digin_arg(self, digin):
if digin not in range(0, len(self._digin_pins)):
raise ApplicationError("com.example.invalid_argument", "No digin with ID {}".format(digin))
def get_digin(self, digin = None):
"""
Get a digin state.
"""
if digin is not None:
self._check_digin_arg(digin)
return self._digin_state[digin]
else:
return self._digin_state
def _scan_digins(self):
for digin in range(0, len(self._digin_pins)):
# read value from digin
state = GPIO.input(self._digin_pins[digin]) == 1
# only process if state has changed
if self._digin_state[digin] != state:
self._digin_state[digin] = state
# publish WAMP event
self.publish('{}.on_digin_changed'.format(self._prefix), digin=digin, state=state)
if state:
self.log.info("digin {} state asserted".format(digin))
else:
self.log.info("digin {} state unasserted".format(digin))
if __name__ == '__main__':
# parse command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--debug', action='store_true', help='Enable debug output.')
parser.add_argument("--router", type=str, default="wss://demo.crossbar.io/ws", help='WAMP router URL.')
parser.add_argument("--realm", type=str, default="crossbardemo", help='WAMP router realm.')
args = parser.parse_args()
if args.debug:
txaio.start_logging(level='debug')
else:
txaio.start_logging(level='info')
# custom configuration data
extra = {
# PIN numbering mode (use "bcm" or "board")
"pin_mode": "bcm",
# these Pins are wired to digouts
"digout_pins": [13, 19, 5, 6],
# these Pins are wired to digins
"digin_pins": [20, 21],
# we will scan the digins at this rate (Hz)
"scan_rate": 50
}
# create and start app runner for our app component ..
runner = ApplicationRunner(url=args.router, realm=args.realm, extra=extra)
runner.run(DispenserAdapter, auto_reconnect=True)
|
'''
3.8.3 Common Permutation
PC/UVa IDs: 110303/10252, Popularity: A, Success rate: average
'''
import collections
def commonPermutation(s1, s2):
c1 = collections.Counter(s1)
c2 = collections.Counter(s2)
p = {}
for i in c1:
if i in c2:
if c1[i] < c2[i]:
p[i] = c1[i]
else:
p[i] = c2[i]
result = []
for i in p:
result.append(i * p[i])
result.sort()
return ''.join(result)
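# Illustrative behaviour (outputs computed by hand; the result is the sorted
# characters common to both strings, counted with multiplicity):
#
#   commonPermutation("pretty", "women")  -> "e"
#   commonPermutation("walking", "down")  -> "nw"
#   commonPermutation("the", "street")    -> "et"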
if __name__ == '__main__':
f = open('input.txt')
for line in f:
if line == '':
break
s1 = line.strip()
s2 = f.readline().strip()
print(commonPermutation(s1, s2))
|
import os
import plistlib
import sqlite3
from scripts.artifact_report import ArtifactHtmlReport
from scripts.ilapfuncs import logfunc, logdevinfo, tsv, is_platform_windows
def get_dataArk(files_found, report_folder, seeker):
data_list = []
file_found = str(files_found[0])
with open(file_found, "rb") as fp:
pl = plistlib.load(fp)
for key, val in pl.items():
data_list.append((key, val))
if key == "-DeviceName":
logdevinfo(f"Device name: {val}")
if key == "-TimeZone":
logdevinfo(f"Timezone per Data Ark: {val}")
if key == "com.apple.iTunes.backup-LastBackupComputerName":
logdevinfo(f"Last backup computer name: {val}")
if key == ("com.apple.iTunes.backup-LastBackupComputerType"):
logdevinfo(f"Last backup computer type: {val}")
report = ArtifactHtmlReport('Data Ark')
report.start_artifact_report(report_folder, 'Data Ark')
report.add_script()
data_headers = ('Key','Values' )
report.write_artifact_data_table(data_headers, data_list, file_found)
report.end_artifact_report()
tsvname = 'Data Ark'
tsv(report_folder, data_headers, data_list, tsvname)
|
import requests
import json
import time
import sys
import os
def Salad_Earnings():
sys.stdout.write("\x1b]2;Downloading History\x07")
headers = {
"User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)'
' Salad/0.4.0 Chrome/78.0.3904.130 Electron/7.1.9 Safari/537.36'
}
with open('config.json') as f:
js = json.load(f)
salad_auth = js['salad_key']
salad_refresh_token = js['salad_refresh_token']
cookie = {
"sAccessToken": salad_auth,
"sIdRefreshToken": salad_refresh_token
}
r = requests.get(url='https://app-api.salad.io/api/v2/reports/1-day-earning-history', cookies=cookie,
headers=headers)
jason = r.json()
with open('data.json', 'w+') as f:
f.write(json.dumps(jason))
print('Downloading data...')
time.sleep(2)
os.system('python3 utils/History_show.py --asd -f data.json --smh -min -rev')
|
import json
import sys
import os
import contextlib
from subprocess import Popen
from subprocess import check_output
from subprocess import PIPE
from toolz import first, filter, last
from .container import Container
from .utils import indent
from .utils import TemporaryDirectory
@contextlib.contextmanager
def cd(path):
"""A context manager which changes the working directory to the given
path, and then changes it back to its previous value on exit.
"""
prev_cwd = os.getcwd()
os.chdir(path)
yield
os.chdir(prev_cwd)
class Hume:
def __init__(self, image, params=None):
self._params = params
self.set_container(image)
def set_container(self, image):
self.image = image
self._container = Container(image)
@classmethod
def build(cls, tag, path=".", dockerfile=None, verbose=False):
"""Transform a plain dockerfile into a Humefile, with
on-build instructions which will create a model.
"""
with cd(path):
dockerfile = dockerfile or "./Dockerfile"
try:
with open(dockerfile, "r") as f:
dockerfile_str = f.read()
except Exception as e:
dockerfile_str = dockerfile
humefile = to_humefile(dockerfile_str)
if verbose:
print("New Humefile:")
print(indent(humefile, "@@ "))
with open("./Humefile", "w+") as f:
f.write(humefile)
cmd = ["docker", "build", "-t", tag, "-f", "Humefile", "."]
            if verbose:
                print("Sending to `docker build`...")
            out = check_output(cmd).decode("utf-8")
            if verbose:
print(indent(out, " "))
return Hume(tag)
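    # Illustrative sketch (not from the original source): building a Hume image
    # from a project directory whose Dockerfile declares an ENTRYPOINT.
    # The tag and path below are placeholders.
    #
    #   hume = Hume.build("my-model", path="./my_project", verbose=True)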
    def _fit_docker_cmd(self, image, build_args=None, path="."):
        cmd = ["docker", "build", "-t", image]
        for key, value in (build_args or {}).items():
            cmd += ["--build-arg", "{}={}".format(key, value)]
        cmd += [path]
        return cmd
def fit(self, data, target=None, params=None, image=None, tag="fitted", inplace=True, verbose=True):
"""
Parameters
----------
data : file-like object or str
Either a file object or a url string that will provide the data for
fitting in a csv format.
target : str
The label of the target to fit with.
params : dict
A dictionary of model parameters.
image : str
Override the image to fit with.
tag : str
Set the output tag. Defaults to `<image>:fitted`
inplace : bool
Swap out underlying image with fitted version. By default `True`,
mirroring `sklearn`'s behavior.
verbose : bool
Show verbose output (default: True)
Return
------
self
"""
image = image or self.image
new_image = "{}:{}".format(image, tag)
params = params if params else self._params
        params_dump = json.dumps(params or {})
build_args = {"TRAIN_PARAMS": params_dump}
csv_ = data.read()
if target:
build_args['TARGET_LABEL'] = target
        with TemporaryDirectory() as tempdir:
            if verbose:
                print("Creating build context for Docker image at {}".format(tempdir))
            # Write the training data and Dockerfile into the temporary build
            # context instead of the current working directory.
            with open(os.path.join(tempdir, "traindata.csv"), "w+") as f:
                f.write(csv_)
            # COPY paths in the ONBUILD instructions are relative to the context.
            build_args["TRAIN_WITH"] = "traindata.csv"
            with open(os.path.join(tempdir, "Dockerfile"), "w+") as dckr:
                dckr.write("FROM {}".format(image))
            p = Popen(self._fit_docker_cmd(new_image, build_args, path=tempdir),
                      stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
p.wait()
if inplace:
self.set_container(new_image)
if verbose:
print(out)
print(err)
return self
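    # Hedged usage sketch (image name, parameters, and files below are
    # illustrative only, not part of the original source):
    #
    #   hume = Hume("my-model", params={"alpha": 0.1})
    #   with open("train.csv") as data:
    #       hume.fit(data, target="label", tag="fitted")
    #   print(hume.predict("1.0,2.0,3.0\n"))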
def params(self):
cmd = ["docker", "run", "--entrypoint", "/bin/sh", self.image, "-c", 'echo "$TRAIN_PARAMS"']
return json.loads(check_output(cmd).strip() or "{}")
#TODO: This is going to be tricky...
def set_param(self, param, value):
raise NotImplementedError
def get_param(self, param):
return self.params()[param]
def predict(self, sample):
"Predict with the container"
cmd = ["docker", "run", "-i", self.image, "predict"]
if hasattr(sample, "read"):
return check_output(cmd, stdin=sample)
        elif isinstance(sample, str):
            p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
            out, err = p.communicate(input=sample.encode())
if err:
raise Exception(err)
p.wait()
return out
else:
raise NotImplementedError
#TODO: Delegate the above code to self._container / a docker module
# return self._container.run(["predict", out])
def entrypoint(dockerfile):
"Return the entrypoint, if declared"
f = dockerfile.split("\n")[::-1] # reverse the lines
try:
entry_line = first(filter(lambda x: "ENTRYPOINT" in x, f))
    except StopIteration:
# No ENTRYPOINT line was found
return None
else:
res = last(entry_line.partition("ENTRYPOINT")).strip()
        try:
            # Exec-form entrypoints are JSON arrays; shell form falls back to split().
            return json.loads(res)
        except ValueError:
            return res.split()
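# Illustrative examples of entrypoint() above:
#
#   entrypoint('FROM python:3\nENTRYPOINT ["python", "run.py"]')
#   -> ["python", "run.py"]        (exec form, parsed as JSON)
#
#   entrypoint('FROM python:3\nENTRYPOINT python run.py')
#   -> ["python", "run.py"]        (shell form, split on whitespace)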
def fitline(entry, fit_cmd):
entry += [fit_cmd]
run_args = ['{}'.format(arg) for arg in entry]
return "ONBUILD RUN {}".format(" ".join(run_args))
def to_humefile(dockerfile, entry=None):
# Get the entrypoint (if any) to know what we need to pass
# to the ONBUILD RUN line.
entry = entry or entrypoint(dockerfile)
if not entry:
raise Exception("You must provide an entrypoint.")
out = dockerfile.split("\n")
out += [
"#<<<HUMEDOC",
"ONBUILD RUN mkdir /opt/hume",
# Data
"ONBUILD ARG TRAIN_WITH",
"ONBUILD ENV TRAIN_WITH ${TRAIN_WITH}",
"ONBUILD ENV HUME_TRAIN_DATA /opt/hume/data",
"ONBUILD COPY ${TRAIN_WITH} $HUME_TRAIN_DATA",
# Parameters
"ONBUILD ARG TRAIN_PARAMS=",
'ONBUILD ENV TRAIN_PARAMS "${TRAIN_PARAMS}"',
"ONBUILD ENV HUME_PARAMS_FILE /opt/hume/params",
"ONBUILD RUN echo $TRAIN_PARAMS",
'ONBUILD RUN echo $TRAIN_PARAMS > $HUME_PARAMS_FILE',
# Target label
"ONBUILD ARG TARGET_LABEL=target",
"ONBUILD ENV HUME_TARGET_LABEL ${TARGET_LABEL}",
fitline(entry, 'fit'),
"#HUMEDOC\n"
]
return "\n".join(out)
if __name__ == "__main__":
main()
|
# This module is automatically generated by autogen.sh. DO NOT EDIT.
from . import _Programming
class _Framework(_Programming):
_type = "framework"
_icon_dir = "resources/programming/framework"
class Angular(_Framework):
_icon = "angular.png"
class Backbone(_Framework):
_icon = "backbone.png"
class Django(_Framework):
_icon = "django.png"
class Ember(_Framework):
_icon = "ember.png"
class Fastapi(_Framework):
_icon = "fastapi.png"
class Flask(_Framework):
_icon = "flask.png"
class Flutter(_Framework):
_icon = "flutter.png"
class Laravel(_Framework):
_icon = "laravel.png"
class Micronaut(_Framework):
_icon = "micronaut.png"
class Rails(_Framework):
_icon = "rails.png"
class React(_Framework):
_icon = "react.png"
class Spring(_Framework):
_icon = "spring.png"
class Vue(_Framework):
_icon = "vue.png"
# Aliases
FastAPI = Fastapi
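# Illustrative usage sketch (not part of the generated module): these classes
# are diagrams nodes and are normally consumed through the diagrams DSL, e.g.:
#
#   from diagrams import Diagram
#   from diagrams.programming.framework import Django, React
#
#   with Diagram("Web stack", show=False):
#       React("frontend") >> Django("backend")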
|