| content | type |
|---|---|
import numpy as np
import pandas as pd
from sklearn.ensemble import ExtraTreesRegressor, GradientBoostingRegressor
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline, make_union
from sklearn.preprocessing import MaxAbsScaler
from tpot.builtins import StackingEstimator
# NOTE: Make sure that the class is labeled 'target' in the data file
tpot_data = pd.read_csv('PATH/TO/DATA/FILE', sep='COLUMN_SEPARATOR', dtype=np.float64)
features = tpot_data.drop('target', axis=1).values
training_features, testing_features, training_target, testing_target = \
train_test_split(features, tpot_data['target'].values, random_state=None)
# Average CV score on the training set was: -2018.8830873326278
exported_pipeline = make_pipeline(
MaxAbsScaler(),
StackingEstimator(estimator=ExtraTreesRegressor(bootstrap=False, max_features=0.9500000000000001, min_samples_leaf=6, min_samples_split=5, n_estimators=100)),
GradientBoostingRegressor(alpha=0.75, learning_rate=0.5, loss="huber", max_depth=4, max_features=0.7500000000000001, min_samples_leaf=10, min_samples_split=9, n_estimators=100, subsample=0.9000000000000001)
)
exported_pipeline.fit(training_features, training_target)
results = exported_pipeline.predict(testing_features)
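# Hedged addition (not part of the TPOT export): score the held-out predictions,
# assuming the standard scikit-learn metrics API.
from sklearn.metrics import mean_squared_error, r2_score

print("Test MSE:", mean_squared_error(testing_target, results))
print("Test R^2:", r2_score(testing_target, results))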
|
python
|
# 454. 4Sum II
import collections
class Solution:
def fourSumCount(self, A, B, C, D):
"""
:type A: List[int]
:type B: List[int]
:type C: List[int]
:type D: List[int]
:rtype: int
"""
ab = {}
for a in A:
for b in B:
ab[a+b] = ab.get(a+b, 0) + 1
count = 0
for c in C:
for d in D:
count += ab.get(-c-d, 0)
return count
# short solution
def fourSumCount2(self, A, B, C, D):
ab = collections.Counter([a+b for a in A for b in B])
return sum([ab[-c-d] for c in C for d in D])
sol = Solution()
print(sol.fourSumCount2([1,2], [-2,-1], [-1,2], [0,2]))
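# Illustration only: a brute-force O(n^4) reference check, assuming inputs are
# small, to confirm the O(n^2) pairwise-sum counting used above.
def four_sum_count_bruteforce(A, B, C, D):
    return sum(1 for a in A for b in B for c in C for d in D if a + b + c + d == 0)

assert sol.fourSumCount([1, 2], [-2, -1], [-1, 2], [0, 2]) == \
       four_sum_count_bruteforce([1, 2], [-2, -1], [-1, 2], [0, 2]) == 2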
|
python
|
import soundfile as sf
import numpy as np
import librosa
from scipy import signal
import pickle
import src.config as cfg
def to_mono(wav):
if wav.ndim == 1:
return wav
elif wav.ndim == 2:
return np.mean(wav, axis=-1)
def calculate_logmel(rd_fd):
wav, fs = sf.read(rd_fd)
wav = to_mono(wav)
#assert fs == cfg.fs
ham_win = np.hamming(cfg.n_fft)
    [f, t, x] = signal.spectrogram(x=wav,
window=ham_win,
nperseg=cfg.n_fft,
noverlap=0,
detrend=False,
return_onesided=True,
mode='magnitude') #Compute a spectrogram with consecutive Fourier transforms.
x = x.T
    print(x.shape)
if globals().get('melW') is None:
global melW
melW = librosa.filters.mel(sr=fs,
n_fft=cfg.n_fft,
n_mels=64,
fmin=0.,
fmax=22100)
x = np.dot(x, melW.T)
x = np.log(x + 1e-8)
    print(x)
    rd_fd += ".f"
    pickle.dump(x, open(rd_fd, 'wb'), protocol=pickle.HIGHEST_PROTOCOL)
def make_pred(rd_path):
calculate_logmel(rd_path)
import kera_pred
msg = kera_pred.others(rd_path+".f",cfg.ld_md)
return msg
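# Sketch (assumption, not part of the original project): librosa's built-in mel
# spectrogram gives a roughly equivalent feature (it defaults to a power rather
# than magnitude spectrogram) and keeps the FFT/mel settings in one call.
def calculate_logmel_librosa(rd_fd, n_mels=64):
    wav, fs = sf.read(rd_fd)
    wav = to_mono(wav)
    mel = librosa.feature.melspectrogram(y=wav, sr=fs, n_fft=cfg.n_fft,
                                         hop_length=cfg.n_fft, n_mels=n_mels)
    return np.log(mel.T + 1e-8)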
|
python
|
from datetime import datetime as dt
from ..ff.window import Window
class BrowserSession(object):
def __init__(self, ss_json):
self._windows = WindowSet(ss_json["windows"])
self._start_time = dt.fromtimestamp(ss_json["session"]["startTime"] / 1000)
self._selected_window = ss_json["selectedWindow"] - 1
return
def __repr__(self):
n_win = len(self.windows)
return f"BrowserSession of {n_win} windows, since {self.start_time}"
@property
def windows(self):
return self._windows
@property
def start_time(self):
return self._start_time
class WindowSet(list):
"""
A class which reads the 'windows' of a Firefox recovery JSON file, and
instantiates a set of Window classes for each of the listed entries.
"""
def __init__(self, json_list):
self.extend([Window(j) for j in json_list])
return
def __repr__(self):
n_win = len(self)
window_reprs = "\n\n".join([str(w) for w in self])
return f"WindowSet of {n_win} windows\n\n{window_reprs}"
|
python
|
import numpy as np
import xlrd
import xlwt

# Copy the first sheet of data1.xls into a new workbook named 'school',
# truncating the decimal part of every cell value (e.g. '85.0' -> '85').
workbook_in = xlrd.open_workbook('data1.xls')
workbook_out = xlwt.Workbook(encoding='ascii')
worksheet = workbook_out.add_sheet('school')
data = workbook_in.sheets()[0]
row = data.nrows
col = data.ncols
for i in range(row):
    for j in range(col):
        worksheet.write(i, j, label=str(data.cell(i, j).value).split('.')[0])
workbook_out.save('/Users/tinoryj/Desktop/ans.xls')
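# Quick sanity check (illustrative): read the file just written back with xlrd
# and print its shape and first cell to confirm the conversion.
check_sheet = xlrd.open_workbook('/Users/tinoryj/Desktop/ans.xls').sheets()[0]
print(check_sheet.nrows, check_sheet.ncols, check_sheet.cell(0, 0).value)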
|
python
|
import json
import sys
import os
import typer
from typing import List
import time
from .logger import get_logger
from .config.imports import check_imports
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
logger = get_logger()
cli = typer.Typer()
@cli.command('setup')
def setup_auto():
chk_libs = ['tensorflow', 'torch', 'transformers']
    typer.echo('Setting up libraries and checking which are installed')
installed_libs = check_imports()
for lib in chk_libs:
        _is_installed = f'{lib} - {installed_libs[lib]} is installed' if installed_libs[lib] else f'{lib} is not installed'
typer.echo(_is_installed)
if typer.confirm(f"Update {lib}?"):
os.system(f'pip install -q --upgrade {lib}')
if __name__ == "__main__":
cli()
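def _example_check_imports(libs=('tensorflow', 'torch', 'transformers')):
    """Sketch (assumption, not the real .config.imports.check_imports): returns
    {library: version-or-None}, which is the shape the setup command above
    expects when it prints and conditionally upgrades each library."""
    import importlib.metadata
    found = {}
    for lib in libs:
        try:
            found[lib] = importlib.metadata.version(lib)
        except importlib.metadata.PackageNotFoundError:
            found[lib] = None
    return found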
|
python
|
import re
import matplotlib.pyplot as plt
import numpy as np
import sklearn
from sklearn import svm
from sklearn import linear_model, tree
from sklearn.ensemble import RandomForestClassifier
from sklearn import neural_network
import copy
path = r"D:\THU\curriculum\Software Engineering\Course_Project\data_1109_0427_RGB_fixed.txt"
f = open(path, "r")
#Read data from file
channels = []
for line in f:
data = re.split(" ", line)
data = [float(i) for i in data]
channels.append(data)
#Smoothing
length = len(channels[0])
smoothed = copy.deepcopy(channels)
for i in [2,3,4]:
for j in range(length):
smoothed[i][j] = np.mean(channels[i][max(j - 5, 0):min(length - 1, j + 5)])
#Specify each channel
time = channels[0]
index = np.arange(len(time))
ppg = channels[1]
colors = np.transpose(smoothed[3:])
#Split the training set and test set
training_data = colors[:int(len(colors)*0.9)]
training_label = ppg[:int(len(colors)*0.9)]
test_data = colors[int(len(colors)*0.9):]
test_label = ppg[int(len(colors)*0.9):]
#Rescale the ppg data into the range of 0-1
training_label = np.divide(np.subtract(training_label,50),200)
test_label = np.divide(np.subtract(test_label,50),200)
#reg = linear_model.LinearRegression()
reg = linear_model.BayesianRidge()
reg.fit(training_data, training_label)
predicted_label = np.add(np.matmul(test_data, reg.coef_), reg.intercept_)
plt.plot(index[:200], test_label[:200])
plt.show()
plt.plot(index[:200], np.transpose(test_data)[0][:200])
plt.show()
print(np.shape(training_label))
plt.plot(index[:200], np.transpose(test_data)[1][:200])
plt.show()
print(np.shape(training_label))
plt.plot(index[:200], predicted_label[:200])
plt.show()
print(np.shape(training_label))
print(reg.coef_, reg.intercept_)
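# Illustrative follow-up (standard scikit-learn API): reg.predict() reproduces
# the manual matmul-plus-intercept above, and a simple error metric summarises
# the fit on the held-out segment.
from sklearn.metrics import mean_absolute_error
assert np.allclose(predicted_label, reg.predict(test_data))
print("Test MAE:", mean_absolute_error(test_label, predicted_label))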
|
python
|
import csv
import re
from os import path
import numpy as np
FIELDNAMES = 'timeStamp', 'response_time', 'request_name', "status_code", "responseMessage", "threadName", "dataType",\
"success", "failureMessage", "bytes", "sentBytes", "grpThreads", "allThreads", "URL", "Latency",\
"IdleTime", "Connect"
class JTLParser(object):
def parse_jtl(self):
log_file = "/tmp/reports/jmeter.jtl"
unparsed_counter = 0
requests = {}
if not path.exists(log_file):
return requests
start_timestamp, end_timestamp = float('inf'), 0
with open(log_file, 'r+', encoding="utf-8") as tsv:
entries = csv.DictReader(tsv, delimiter=",", fieldnames=FIELDNAMES, restval="not_found")
for entry in entries:
try:
if entry['request_name'] != 'label':
if re.search(r'-\d+$', entry['request_name']):
continue
if start_timestamp > int(entry['timeStamp']):
start_timestamp = int(entry['timeStamp']) - int(entry['response_time'])
if end_timestamp < int(entry['timeStamp']):
end_timestamp = int(entry['timeStamp'])
if entry['request_name'] not in requests:
data = {'request_name': entry['request_name'],
'response_time': [int(entry['response_time'])]}
if entry['success'] == 'true':
data['OK'], data['KO'] = 1, 0
else:
data['OK'], data['KO'] = 0, 1
requests[entry['request_name']] = data
else:
requests[entry['request_name']]['response_time'].append(int(entry['response_time']))
if entry['success'] == 'true':
requests[entry['request_name']]['OK'] += 1
else:
requests[entry['request_name']]['KO'] += 1
except Exception as e:
print(e)
unparsed_counter += 1
pass
if unparsed_counter > 0:
print("Unparsed errors: %d" % unparsed_counter)
for req in requests:
requests[req]['response_time'] = int(np.percentile(requests[req]['response_time'], 95, interpolation="linear"))
duration = int((end_timestamp - start_timestamp)/1000)
throughput = self.calculate_throughput(requests, duration)
error_rate = self.calculate_error_rate(requests)
results = {"requests": requests, "throughput": throughput, "error_rate": error_rate}
return results
@staticmethod
def calculate_throughput(requests, duration):
count = 0
for req in requests:
count += requests[req]['OK']
return round(float(count/duration), 2)
@staticmethod
def calculate_error_rate(requests):
count, failed = 0, 0
for req in requests:
count += requests[req]['OK']
count += requests[req]['KO']
failed += requests[req]['KO']
return round(float(failed/count) * 100, 2)
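# Usage sketch (paths and dict keys taken from the class above): parse the JTL
# file and print the aggregate numbers parse_jtl() returns.
if __name__ == '__main__':
    parsed = JTLParser().parse_jtl()
    if parsed:
        print("Throughput: {} req/s, error rate: {}%".format(parsed["throughput"], parsed["error_rate"]))
        for name, data in parsed["requests"].items():
            print(name, "p95:", data["response_time"], "OK:", data["OK"], "KO:", data["KO"])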
|
python
|
from setuptools import setup, find_packages
setup(
name='taxies',
version='0.1.dev',
packages=find_packages(),
include_package_data=True,
install_requires=[
'Click',
],
entry_points='''
[console_scripts]
taxies=taxies.scripts.taxies:cli
''',
)
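# Usage sketch (standard setuptools workflow, shown as shell comments):
#   pip install -e .   # installs the package in editable mode
#   taxies --help      # runs the console_scripts entry point defined above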
|
python
|
"""App.
"""
import logging
import sys
from django.apps import AppConfig
from configs.part_detection import DF_PD_VIDEO_SOURCE_IS_OPENCV
logger = logging.getLogger(__name__)
class AzurePartDetectionConfig(AppConfig):
"""App Config."""
name = "vision_on_edge.azure_part_detections"
def ready(self):
"""ready."""
if "runserver" in sys.argv:
# pylint: disable=unused-import, import-outside-toplevel
logger.info("ready while running server")
logger.info("Importing Signals")
from ..azure_part_detections.models import PartDetection, PDScenario
from ..azure_projects.models import Project
from ..cameras.models import Camera
from ..inference_modules.models import InferenceModule
from . import signals # noqa: F401
# pylint: enable=unused-import, import-outside-toplevel
create_demo = True
if create_demo:
project_obj = Project.objects.filter(is_demo=False).first()
inference_obj = InferenceModule.objects.first()
else:
project_obj = inference_obj = None
if PartDetection.objects.count() == 0:
PartDetection.objects.create(
name="Part Detection",
project=project_obj,
inference_module=inference_obj,
inference_source=(
"opencv" if DF_PD_VIDEO_SOURCE_IS_OPENCV else "lva"
),
)
PDScenario.objects.all().delete()
# =============================================
# Simple Part Detection ===
# =============================================
pd_scenario = PDScenario.objects.create(
name="Simple Part Detection",
inference_mode="PD",
project=Project.objects.get(name="Demo Part Detection Project"),
)
pd_scenario.parts.set(
Project.objects.get(
is_demo=True, name="Demo Part Detection Project"
).part_set.all()
)
# =============================================
# Part Counting ===
# =============================================
pc_scenario = PDScenario.objects.create(
name="Counting objects",
inference_mode="PC",
project=Project.objects.get(name="Demo Part Counting Project"),
)
pc_scenario.cameras.set(
Camera.objects.filter(
is_demo=True, name="Scenario 1 - Counting Objects"
)
)
pc_scenario.parts.set(
Project.objects.get(
is_demo=True, name="Demo Part Counting Project"
).part_set.all()
)
# =============================================
# Employee safety ===
# =============================================
es_scenario = PDScenario.objects.create(
name="Employee safety",
inference_mode="ES",
project=Project.objects.get(name="Demo Employee Safety Project"),
)
es_scenario.cameras.set(
Camera.objects.filter(is_demo=True, name="Scenario 2 - Employ Safety")
)
es_scenario.parts.set(
Project.objects.get(
is_demo=True, name="Demo Employee Safety Project"
).part_set.all()
)
# =============================================
# Defect Detection ===
# =============================================
dd_scenario = PDScenario.objects.create(
name="Defect detection",
inference_mode="DD",
project=Project.objects.get(name="Demo Defect Detection Project"),
)
dd_scenario.cameras.set(
Camera.objects.filter(
is_demo=True, name="Scenario 3 - Defect Detection"
)
)
dd_scenario.parts.set(
Project.objects.get(
is_demo=True, name="Demo Defect Detection Project"
).part_set.all()
)
# =============================================
# Empty Shelf Alert ===
# =============================================
esa_scenario = PDScenario.objects.create(
name="Empty shelf alert",
inference_mode="ESA",
project=Project.objects.get(name="Demo Empty Shelf Alert Project"),
)
esa_scenario.cameras.set(
Camera.objects.filter(
is_demo=True, name="Scenario 4 - Empty Shelf Alert"
)
)
esa_scenario.parts.set(
Project.objects.get(
is_demo=True, name="Demo Empty Shelf Alert Project"
).part_set.all()
)
# =============================================
# Total Customer Counting ===
# =============================================
tcc_scenario = PDScenario.objects.create(
name="People counting",
inference_mode="TCC",
project=Project.objects.get(
name="Demo Total Customer Counting Project"
),
)
tcc_scenario.cameras.set(
Camera.objects.filter(
is_demo=True, name="Scenario 5 - Total Customer Counting"
)
)
tcc_scenario.parts.set(
Project.objects.get(
is_demo=True, name="Demo Total Customer Counting Project"
).part_set.all()
)
# =============================================
# Crowded Queue Alert ===
# =============================================
cqa_scenario = PDScenario.objects.create(
name="Crowded queue alert",
inference_mode="CQA",
project=Project.objects.get(name="Demo Crowded Queue Alert Project"),
)
cqa_scenario.cameras.set(
Camera.objects.filter(
is_demo=True, name="Scenario 6 - Crowded Queue Alert"
)
)
cqa_scenario.parts.set(
Project.objects.get(
is_demo=True, name="Demo Crowded Queue Alert Project"
).part_set.all()
)
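            # Refactoring sketch (illustrative, not the shipped code): the demo
            # scenarios above differ only in name, inference mode, project name
            # and camera name, so they could also be created from a small table:
            #
            #   DEMO_SCENARIOS = [
            #       ("Counting objects", "PC", "Demo Part Counting Project",
            #        "Scenario 1 - Counting Objects"),
            #       # ... one tuple per scenario ...
            #   ]
            #   for name, mode, project_name, camera_name in DEMO_SCENARIOS:
            #       scenario = PDScenario.objects.create(
            #           name=name, inference_mode=mode,
            #           project=Project.objects.get(name=project_name))
            #       if camera_name:
            #           scenario.cameras.set(
            #               Camera.objects.filter(is_demo=True, name=camera_name))
            #       scenario.parts.set(
            #           Project.objects.get(is_demo=True, name=project_name).part_set.all())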
|
python
|
"""
Query suggestion hierarchical encoder-decoder code.
The code is inspired from nmt encdec code in groundhog
but we do not rely on groundhog infrastructure.
"""
__docformat__ = 'restructuredtext en'
__authors__ = ("Alessandro Sordoni")
__contact__ = "Alessandro Sordoni <[email protected]>"
import theano
import theano.tensor as T
import numpy as np
import cPickle
import logging
logger = logging.getLogger(__name__)
from theano.sandbox.rng_mrg import MRG_RandomStreams
from theano.tensor.nnet.conv3d2d import *
from collections import OrderedDict
from model import *
from utils import *
import operator
# Theano speed-up
theano.config.scan.allow_gc = False
def add_to_params(params, new_param):
params.append(new_param)
return new_param
class EncoderDecoderBase():
def __init__(self, state, rng, parent):
self.rng = rng
self.parent = parent
self.state = state
self.__dict__.update(state)
self.session_rec_activation = eval(self.session_rec_activation)
self.query_rec_activation = eval(self.query_rec_activation)
self.params = []
class Encoder(EncoderDecoderBase):
def init_params(self):
""" sent weights """
self.W_emb = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.idim, self.rankdim), name='W_emb'))
self.W_in = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.rankdim, self.qdim), name='W_in'))
self.W_hh = add_to_params(self.params, theano.shared(value=OrthogonalInit(self.rng, (self.qdim, self.qdim)), name='W_hh'))
self.b_hh = add_to_params(self.params, theano.shared(value=np.zeros((self.qdim,), dtype='float32'), name='b_hh'))
if self.query_step_type == "gated":
self.W_in_r = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.rankdim, self.qdim), name='W_in_r'))
self.W_in_z = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.rankdim, self.qdim), name='W_in_z'))
self.W_hh_r = add_to_params(self.params, theano.shared(value=OrthogonalInit(self.rng, (self.qdim, self.qdim)), name='W_hh_r'))
self.W_hh_z = add_to_params(self.params, theano.shared(value=OrthogonalInit(self.rng, (self.qdim, self.qdim)), name='W_hh_z'))
self.b_z = add_to_params(self.params, theano.shared(value=np.zeros((self.qdim,), dtype='float32'), name='b_z'))
self.b_r = add_to_params(self.params, theano.shared(value=np.zeros((self.qdim,), dtype='float32'), name='b_r'))
""" Context weights """
self.Ws_in = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.qdim, self.sdim), name='Ws_in'))
self.Ws_hh = add_to_params(self.params, theano.shared(value=OrthogonalInit(self.rng, (self.sdim, self.sdim)), name='Ws_hh'))
self.bs_hh = add_to_params(self.params, theano.shared(value=np.zeros((self.sdim,), dtype='float32'), name='bs_hh'))
if self.session_step_type == "gated":
self.Ws_in_r = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.qdim, self.sdim), name='Ws_in_r'))
self.Ws_in_z = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.qdim, self.sdim), name='Ws_in_z'))
self.Ws_hh_r = add_to_params(self.params, theano.shared(value=OrthogonalInit(self.rng, (self.sdim, self.sdim)), name='Ws_hh_r'))
self.Ws_hh_z = add_to_params(self.params, theano.shared(value=OrthogonalInit(self.rng, (self.sdim, self.sdim)), name='Ws_hh_z'))
self.bs_z = add_to_params(self.params, theano.shared(value=np.zeros((self.sdim,), dtype='float32'), name='bs_z'))
self.bs_r = add_to_params(self.params, theano.shared(value=np.zeros((self.sdim,), dtype='float32'), name='bs_r'))
def plain_query_step(self, x_t, m_t, h_tm1, hr_tm1):
if m_t.ndim >= 1:
m_t = m_t.dimshuffle(0, 'x')
h_t = self.query_rec_activation(T.dot(x_t, self.W_in) + T.dot(hr_tm1, self.W_hh) + self.b_hh)
hr_t = m_t * h_t
return h_t, hr_t,
def gated_query_step(self, x_t, m_t, h_tm1, hr_tm1):
if m_t.ndim >= 1:
m_t = m_t.dimshuffle(0, 'x')
r_t = T.nnet.sigmoid(T.dot(x_t, self.W_in_r) + T.dot(hr_tm1, self.W_hh_r) + self.b_r)
z_t = T.nnet.sigmoid(T.dot(x_t, self.W_in_z) + T.dot(hr_tm1, self.W_hh_z) + self.b_z)
h_tilde = self.query_rec_activation(T.dot(x_t, self.W_in) + T.dot(r_t * hr_tm1, self.W_hh) + self.b_hh)
h_t = (np.float32(1.0) - z_t) * hr_tm1 + z_t * h_tilde
hr_t = m_t * h_t
# return both reset state and non-reset state
return h_t, hr_t, r_t, z_t, h_tilde
def plain_session_step(self, h_t, m_t, hs_tm1):
if m_t.ndim >= 1:
m_t = m_t.dimshuffle(0, 'x')
hs_update = self.session_rec_activation(T.dot(h_t, self.Ws_in) + T.dot(hs_tm1, self.Ws_hh) + self.bs_hh)
hs_t = (m_t) * hs_tm1 + (1 - m_t) * hs_update
return hs_t,
def gated_session_step(self, h_t, m_t, hs_tm1):
rs_t = T.nnet.sigmoid(T.dot(h_t, self.Ws_in_r) + T.dot(hs_tm1, self.Ws_hh_r) + self.bs_r)
zs_t = T.nnet.sigmoid(T.dot(h_t, self.Ws_in_z) + T.dot(hs_tm1, self.Ws_hh_z) + self.bs_z)
hs_tilde = self.session_rec_activation(T.dot(h_t, self.Ws_in) + T.dot(rs_t * hs_tm1, self.Ws_hh) + self.bs_hh)
hs_update = (np.float32(1.) - zs_t) * hs_tm1 + zs_t * hs_tilde
if m_t.ndim >= 1:
m_t = m_t.dimshuffle(0, 'x')
hs_t = (m_t) * hs_tm1 + (1 - m_t) * hs_update
return hs_t, hs_tilde, rs_t, zs_t
def approx_embedder(self, x):
return self.W_emb[x]
def build_encoder(self, x, xmask=None, **kwargs):
one_step = False
if len(kwargs):
one_step = True
# if x.ndim == 2 then
# x = (n_steps, batch_size)
if x.ndim == 2:
batch_size = x.shape[1]
# else x = (word_1, word_2, word_3, ...)
# or x = (last_word_1, last_word_2, last_word_3, ..)
# in this case batch_size is
else:
batch_size = 1
# if it is not one_step then we initialize everything to 0
if not one_step:
h_0 = T.alloc(np.float32(0), batch_size, self.qdim)
hr_0 = T.alloc(np.float32(0), batch_size, self.qdim)
hs_0 = T.alloc(np.float32(0), batch_size, self.sdim)
# in sampling mode (i.e. one step) we require
else:
# in this case x.ndim != 2
assert x.ndim != 2
assert 'prev_h' in kwargs
assert 'prev_hr' in kwargs
assert 'prev_hs' in kwargs
h_0 = kwargs['prev_h']
hr_0 = kwargs['prev_hr']
hs_0 = kwargs['prev_hs']
xe = self.approx_embedder(x)
        if xmask is None:
xmask = T.neq(x, self.eoq_sym)
# Gated Encoder
if self.query_step_type == "gated":
f_enc = self.gated_query_step
o_enc_info = [h_0, hr_0, None, None, None]
else:
f_enc = self.plain_query_step
o_enc_info = [h_0, hr_0]
if self.session_step_type == "gated":
f_hier = self.gated_session_step
o_hier_info = [hs_0, None, None, None]
else:
f_hier = self.plain_session_step
o_hier_info = [hs_0]
# Run through all the sentence (encode everything)
if not one_step:
_res, _ = theano.scan(
f_enc, sequences=[xe, xmask], outputs_info=o_enc_info)
# Make just one step further
else:
_res = f_enc(xe, xmask, h_0, hr_0)
# Get the hidden state sequence
h = _res[0]
hr = _res[1]
# All hierarchical sentence
# The hs sequence is based on the original mask
if not one_step:
_res, _ = theano.scan(
f_hier, sequences=[h, xmask], outputs_info=o_hier_info)
# Just one step further
else:
_res = f_hier(h, xmask, hs_0)
if isinstance(_res, list) or isinstance(_res, tuple):
hs = _res[0]
else:
hs = _res
return (h, hr), hs, (_res[2], _res[3])
def __init__(self, state, rng, parent):
EncoderDecoderBase.__init__(self, state, rng, parent)
self.init_params()
class Decoder(EncoderDecoderBase):
EVALUATION = 0
BEAM_SEARCH = 1
def __init__(self, state, rng, parent, encoder):
EncoderDecoderBase.__init__(self, state, rng, parent)
# Take as input the encoder instance for the embeddings..
# To modify in the future
self.encoder = encoder
self.trng = MRG_RandomStreams(self.seed)
self.init_params()
def init_params(self):
""" Decoder weights """
self.Wd_emb = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.idim, self.rankdim), name='Wd_emb'))
self.Wd_hh = add_to_params(self.params, theano.shared(value=OrthogonalInit(self.rng, (self.qdim, self.qdim)), name='Wd_hh'))
self.Wd_in = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.rankdim, self.qdim), name='Wd_in'))
self.bd_hh = add_to_params(self.params, theano.shared(value=np.zeros((self.qdim,), dtype='float32'), name='bd_hh'))
self.Wd_s_0 = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.sdim, self.qdim), name='Wd_s_0'))
self.bd_s_0 = add_to_params(self.params, theano.shared(value=np.zeros((self.qdim,), dtype='float32'), name='bd_s_0'))
if self.decoder_bias_type == 'all':
self.Wd_s_q = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.sdim, self.qdim), name='Wd_s_q'))
if self.query_step_type == "gated":
self.Wd_in_r = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.rankdim, self.qdim), name='Wd_in_r'))
self.Wd_in_z = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.rankdim, self.qdim), name='Wd_in_z'))
self.Wd_hh_r = add_to_params(self.params, theano.shared(value=OrthogonalInit(self.rng, (self.qdim, self.qdim)), name='Wd_hh_r'))
self.Wd_hh_z = add_to_params(self.params, theano.shared(value=OrthogonalInit(self.rng, (self.qdim, self.qdim)), name='Wd_hh_z'))
self.bd_r = add_to_params(self.params, theano.shared(value=np.zeros((self.qdim,), dtype='float32'), name='bd_r'))
self.bd_z = add_to_params(self.params, theano.shared(value=np.zeros((self.qdim,), dtype='float32'), name='bd_z'))
if self.decoder_bias_type == 'all':
self.Wd_s_z = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.sdim, self.qdim), name='Wd_s_z'))
self.Wd_s_r = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.sdim, self.qdim), name='Wd_s_r'))
out_target_dim = self.qdim
if not self.maxout_out:
out_target_dim = self.rankdim
self.Wd_out = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.qdim, out_target_dim), name='Wd_out'))
self.bd_out = add_to_params(self.params, theano.shared(value=np.zeros((self.idim,), dtype='float32'), name='bd_out'))
# Set up deep output
if self.deep_out:
self.Wd_e_out = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.rankdim, out_target_dim), name='Wd_e_out'))
self.bd_e_out = add_to_params(self.params, theano.shared(value=np.zeros((out_target_dim,), dtype='float32'), name='bd_e_out'))
if self.decoder_bias_type != 'first':
self.Wd_s_out = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.sdim, out_target_dim), name='Wd_s_out'))
""" Rank """
if hasattr(self, 'train_rank'):
self.Wr_out = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.sdim, 1), name='Wr_out'))
self.br_out = add_to_params(self.params, theano.shared(value=np.zeros((1,), dtype='float32'), name='br_out'))
def build_rank_layer(self, hs):
return T.dot(hs, self.Wr_out) + self.br_out
def build_output_layer(self, hs, xd, hd):
pre_activ = T.dot(hd, self.Wd_out)
if self.deep_out:
pre_activ += T.dot(xd, self.Wd_e_out) + self.bd_e_out
if self.decoder_bias_type != 'first':
pre_activ += T.dot(hs, self.Wd_s_out)
# ^ if bias all, bias the deep output
if self.maxout_out:
pre_activ = Maxout(2)(pre_activ)
return pre_activ
def build_next_probs_predictor(self, hs, x, prev_hd):
"""
Return output probabilities given prev_words x, hierarchical pass hs, and previous hd
hs should always be the same (and should not be updated).
"""
return self.build_decoder(hs, x, mode=Decoder.BEAM_SEARCH, prev_hd=prev_hd)
def approx_embedder(self, x):
# Here we use the same embeddings learnt in the encoder !!!
return self.encoder.approx_embedder(x)
def output_softmax(self, pre_activ):
# returns a (timestep, bs, idim) matrix (huge)
return SoftMax(T.dot(pre_activ, self.Wd_emb.T) + self.bd_out)
def build_decoder(self, hs, x, xmask=None, y=None, y_neg=None, mode=EVALUATION, prev_hd=None, step_num=None):
# Check parameter consistency
if mode == Decoder.EVALUATION:
assert not prev_hd
assert y
else:
assert not y
assert prev_hd
# if mode == EVALUATION
# xd = (timesteps, batch_size, qdim)
#
# if mode != EVALUATION
# xd = (n_samples, dim)
xd = self.approx_embedder(x)
        if xmask is None:
xmask = T.neq(x, self.eoq_sym)
# we must zero out the </s> embedding
# i.e. the embedding x_{-1} is the 0 vector
# as well as hd_{-1} which will be reseted in the scan functions
if xd.ndim != 3:
assert mode != Decoder.EVALUATION
xd = (xd.dimshuffle((1, 0)) * xmask).dimshuffle((1, 0))
else:
assert mode == Decoder.EVALUATION
xd = (xd.dimshuffle((2,0,1)) * xmask).dimshuffle((1,2,0))
# Run the decoder
if mode == Decoder.EVALUATION:
hd_init = T.alloc(np.float32(0), x.shape[1], self.qdim)
else:
hd_init = prev_hd
if self.query_step_type == "gated":
f_dec = self.gated_step
o_dec_info = [hd_init, None, None, None]
else:
f_dec = self.plain_step
o_dec_info = [hd_init]
# If the mode of the decoder is EVALUATION
# then we evaluate by default all the sentence
# xd - i.e. xd.ndim == 3, xd = (timesteps, batch_size, qdim)
if mode == Decoder.EVALUATION:
_res, _ = theano.scan(f_dec,
sequences=[xd, xmask, hs],\
outputs_info=o_dec_info)
# else we evaluate only one step of the recurrence using the
# previous hidden states and the previous computed hierarchical
# states.
else:
_res = f_dec(xd, xmask, hs, prev_hd)
if isinstance(_res, list) or isinstance(_res, tuple):
hd = _res[0]
else:
hd = _res
pre_activ = self.build_output_layer(hs, xd, hd)
# EVALUATION : Return target_probs + all the predicted ranks
# target_probs.ndim == 3
if mode == Decoder.EVALUATION:
target_probs = GrabProbs(self.output_softmax(pre_activ), y)
return target_probs, hd, _res
# BEAM_SEARCH : Return output (the softmax layer) + the new hidden states
elif mode == Decoder.BEAM_SEARCH:
return self.output_softmax(pre_activ), hd
def gated_step(self, xd_t, m_t, hs_t, hd_tm1):
if m_t.ndim >= 1:
m_t = m_t.dimshuffle(0, 'x')
hd_tm1 = (m_t) * hd_tm1 + (1 - m_t) * self.query_rec_activation(T.dot(hs_t, self.Wd_s_0) + self.bd_s_0)
# hd_{t - 1} = tanh(W_s_0 hs_t + bd_s_0) else hd_{t - 1} is left unchanged (m_t = 1)
# In the 'all' decoder bias type each hidden state of the decoder
# RNN receives the hs_t vector as bias without modification
if self.decoder_bias_type == 'all':
rd_t = T.nnet.sigmoid(T.dot(xd_t, self.Wd_in_r) + T.dot(hd_tm1, self.Wd_hh_r) + T.dot(hs_t, self.Wd_s_r) + self.bd_r)
zd_t = T.nnet.sigmoid(T.dot(xd_t, self.Wd_in_z) + T.dot(hd_tm1, self.Wd_hh_z) + T.dot(hs_t, self.Wd_s_z) + self.bd_z)
hd_tilde = self.query_rec_activation(T.dot(xd_t, self.Wd_in)
+ T.dot(rd_t * hd_tm1, self.Wd_hh)
+ T.dot(hs_t, self.Wd_s_q)
+ self.bd_hh)
hd_t = (np.float32(1.) - zd_t) * hd_tm1 + zd_t * hd_tilde
output = (hd_t, rd_t, zd_t, hd_tilde)
else:
# Do not bias all the decoder (force to store very useful information in the first state)
rd_t = T.nnet.sigmoid(T.dot(xd_t, self.Wd_in_r) + T.dot(hd_tm1, self.Wd_hh_r) + self.bd_r)
zd_t = T.nnet.sigmoid(T.dot(xd_t, self.Wd_in_z) + T.dot(hd_tm1, self.Wd_hh_z) + self.bd_z)
hd_tilde = self.query_rec_activation(T.dot(xd_t, self.Wd_in)
+ T.dot(rd_t * hd_tm1, self.Wd_hh)
+ self.bd_hh)
hd_t = (np.float32(1.) - zd_t) * hd_tm1 + zd_t * hd_tilde
output = (hd_t, rd_t, zd_t, hd_tilde)
return output
def plain_step(self, xd_t, m_t, hs_t, hd_tm1):
if m_t.ndim >= 1:
m_t = m_t.dimshuffle(0, 'x')
# We already assume that xd are zeroed out
hd_tm1 = (m_t) * hd_tm1 + (1 - m_t) * self.query_rec_activation(T.dot(hs_t, self.Wd_s_0) + self.bd_s_0)
# ^ iff x_{t - 1} = </s> (m_t = 0) then x_{t-1} = 0
# and hd_{t - 1} = 0 else hd_{t - 1} is left unchanged (m_t = 1)
if self.decoder_bias_type == 'first':
# Do not bias all the decoder (force to store very useful information in the first state)
hd_t = self.query_rec_activation( T.dot(xd_t, self.Wd_in)
+ T.dot(hd_tm1, self.Wd_hh)
+ self.bd_hh )
output = (hd_t,)
elif self.decoder_bias_type == 'all':
hd_t = self.query_rec_activation( T.dot(xd_t, self.Wd_in)
+ T.dot(hd_tm1, self.Wd_hh)
+ T.dot(hs_t, self.Wd_s_q)
+ self.bd_hh )
output = (hd_t,)
return output
####
class SessionEncoderDecoder(Model):
def indices_to_words(self, seq, exclude_start_end=False):
"""
Converts a list of words to a list
of word ids. Use unk_sym if a word is not
known.
"""
def convert():
for word_index in seq:
if word_index > len(self.idx_to_str):
raise ValueError('Word index is too large for the model vocabulary!')
if word_index == self.eos_sym:
break
if not exclude_start_end or (word_index != self.eoq_sym and word_index != self.soq_sym):
yield self.idx_to_str[word_index]
return list(convert())
def words_to_indices(self, seq):
"""
Converts a list of words to a list
of word ids. Use unk_sym if a word is not
known.
"""
return [self.str_to_idx.get(word, self.unk_sym) for word in seq]
def compute_updates(self, training_cost, params):
updates = []
grads = T.grad(training_cost, params)
grads = OrderedDict(zip(params, grads))
# Clip stuff
        c = np.float32(self.cutoff)
clip_grads = []
norm_gs = T.sqrt(sum(T.sum(g ** 2) for p, g in grads.items()))
normalization = T.switch(T.ge(norm_gs, c), c / norm_gs, np.float32(1.))
notfinite = T.or_(T.isnan(norm_gs), T.isinf(norm_gs))
for p, g in grads.items():
            clip_grads.append((p, T.switch(notfinite, np.float32(.1) * p, g * normalization)))
grads = OrderedDict(clip_grads)
if self.updater == 'adagrad':
updates = Adagrad(grads, self.lr)
elif self.updater == 'sgd':
raise Exception("Sgd not implemented!")
elif self.updater == 'adadelta':
updates = Adadelta(grads)
elif self.updater == 'rmsprop':
updates = RMSProp(grads, self.lr)
elif self.updater == 'adam':
updates = Adam(grads)
else:
raise Exception("Updater not understood!")
return updates
def build_train_function(self):
if not hasattr(self, 'train_fn'):
# Compile functions
logger.debug("Building train function")
self.train_fn = theano.function(
inputs=[self.x_data, self.x_ranks, self.x_max_length, self.x_cost_mask],
outputs=self.training_cost, updates=self.updates, name="train_fn")
return self.train_fn
def build_eval_function(self):
if not hasattr(self, 'eval_fn'):
# Compile functions
logger.debug("Building evaluation function")
self.eval_fn = theano.function(inputs=[self.x_data, self.x_ranks, self.x_max_length, self.x_cost_mask],
outputs=self.training_cost, name="eval_fn")
return self.eval_fn
def build_score_function(self):
if not hasattr(self, 'score_fn'):
self.score_fn = theano.function(
inputs=[self.x_data, self.x_max_length],
outputs=[self.per_example_cost],
name="score_fn")
return self.score_fn
def build_rank_prediction_function(self):
if not hasattr(self, 'rank_fn'):
(h, hr), hs, _ = self.encoder.build_encoder(self.aug_x_data)
ranks = self.decoder.build_rank_layer(hs)
self.rank_fn = theano.function(
inputs=[self.x_data],
outputs=[ranks],
name="rank_fn")
return self.rank_fn
def build_get_states_function(self):
if not hasattr(self, 'get_states_fn'):
# Compile functions
logger.debug("Get states of the network")
outputs = [self.h, self.hs, self.hd, self.rs, self.us] + [x for x in self.decoder_states]
self.get_states_fn = theano.function(inputs=[self.x_data, self.x_max_length],
outputs=outputs, name="get_states_fn")
return self.get_states_fn
def build_next_probs_function(self):
if not hasattr(self, 'next_probs_fn'):
outputs, hd = self.decoder.build_next_probs_predictor(
self.beam_hs, self.beam_source, prev_hd=self.beam_hd)
self.next_probs_fn = theano.function(
inputs=[self.beam_hs, self.beam_source, self.beam_hd],
outputs=[outputs, hd],
name="next_probs_fn")
return self.next_probs_fn
def build_first_vector(self):
if not hasattr(self, 'first_vec_fn'):
(h, hr), hs, _ = self.encoder.build_encoder(self.aug_x_data)
hd0 = self.decoder.query_rec_activation(T.dot(hs, self.decoder.Wd_s_0) + self.decoder.bd_s_0)
self.first_vec_fn = theano.function(inputs=[self.x_data],
outputs=[h, hs, hd0], name="first_vec_fn")
return self.first_vec_fn
def build_encoder_function(self):
if not hasattr(self, 'encoder_fn'):
(h, hr), hs, _ = self.encoder.build_encoder(self.aug_x_data)
self.encoder_fn = theano.function(inputs=[self.x_data],
outputs=[h, hr, hs], name="encoder_fn")
return self.encoder_fn
def __init__(self, state):
Model.__init__(self)
self.state = state
# Compatibility towards older models
self.__dict__.update(state)
        self.rng = np.random.RandomState(state['seed'])
# Load dictionary
raw_dict = cPickle.load(open(self.dictionary, 'r'))
# Probabilities for each term in the corpus
self.noise_probs = [x[2] for x in sorted(raw_dict, key=operator.itemgetter(1))]
        self.noise_probs = np.array(self.noise_probs, dtype='float64')
        self.noise_probs /= np.sum(self.noise_probs)
        self.noise_probs = self.noise_probs ** 0.75
        self.noise_probs /= np.sum(self.noise_probs)
self.t_noise_probs = theano.shared(self.noise_probs.astype('float32'), 't_noise_probs')
# Dictionaries to convert str to idx and vice-versa
self.str_to_idx = dict([(tok, tok_id) for tok, tok_id, _ in raw_dict])
self.idx_to_str = dict([(tok_id, tok) for tok, tok_id, freq in raw_dict])
if '</q>' not in self.str_to_idx \
or '</s>' not in self.str_to_idx:
raise Exception("Error, malformed dictionary!")
# Number of words in the dictionary
self.idim = len(self.str_to_idx)
self.state['idim'] = self.idim
logger.debug("Initializing encoder")
self.encoder = Encoder(self.state, self.rng, self)
logger.debug("Initializing decoder")
self.decoder = Decoder(self.state, self.rng, self, self.encoder)
# Init params
self.params = self.encoder.params + self.decoder.params
assert len(set(self.params)) == (len(self.encoder.params) + len(self.decoder.params))
self.y_neg = T.itensor3('y_neg')
self.x_data = T.imatrix('x_data')
self.x_ranks = T.imatrix('x_ranks')
self.x_cost_mask = T.matrix('cost_mask')
self.x_max_length = T.iscalar('x_max_length')
# The training is done with a trick. We append a special </q> at the beginning of the dialog
# so that we can predict also the first sent in the dialog starting from the dialog beginning token (</q>).
self.aug_x_data = T.concatenate([T.alloc(np.int32(self.eoq_sym), 1, self.x_data.shape[1]), self.x_data])
training_x = self.aug_x_data[:self.x_max_length]
training_y = self.aug_x_data[1:self.x_max_length+1]
training_ranks = self.x_ranks[:self.x_max_length-1].flatten()
training_ranks_mask = T.neq(training_ranks, 0).flatten()
# Here we find the end-of-sentence tokens in the minibatch.
training_hs_mask = T.neq(training_x, self.eoq_sym)
training_x_cost_mask = self.x_cost_mask[:self.x_max_length].flatten()
# Backward compatibility
if 'decoder_bias_type' in self.state:
logger.debug("Decoder bias type {}".format(self.decoder_bias_type))
logger.info("Build encoder")
(self.h, _), self.hs, (self.rs, self.us) = \
self.encoder.build_encoder(training_x, xmask=training_hs_mask)
logger.info("Build decoder (EVAL)")
target_probs, self.hd, self.decoder_states = \
self.decoder.build_decoder(self.hs, training_x, xmask=training_hs_mask, \
y=training_y, mode=Decoder.EVALUATION)
logger.info("Build rank predictor")
self.predicted_ranks = self.decoder.build_rank_layer(self.hs)
# Prediction cost and rank cost
self.per_example_cost = -T.log2(target_probs).reshape((self.x_max_length, self.x_data.shape[1]))
self.rank_cost = T.sum(((self.predicted_ranks[1:].flatten() - training_ranks) ** 2) * (training_ranks_mask)) / T.sum(training_ranks_mask)
self.training_cost = T.sum(-T.log2(target_probs) * training_x_cost_mask) + np.float32(self.lambda_rank) * self.rank_cost
self.updates = self.compute_updates(self.training_cost / training_x.shape[1], self.params)
# Beam-search variables
self.beam_source = T.lvector("beam_source")
self.beam_hs = T.matrix("beam_hs")
self.beam_step_num = T.lscalar("beam_step_num")
self.beam_hd = T.matrix("beam_hd")
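# Reference sketch (NumPy, illustration only): the gated query/session/decoder
# steps above are standard GRU updates; with tanh as the recurrent activation,
# a single step on one example looks like this.
def _gru_step_numpy(x_t, h_prev, W_in, W_hh, b, W_in_r, W_hh_r, b_r, W_in_z, W_hh_z, b_z):
    sigmoid = lambda v: 1.0 / (1.0 + np.exp(-v))
    r_t = sigmoid(x_t.dot(W_in_r) + h_prev.dot(W_hh_r) + b_r)        # reset gate
    z_t = sigmoid(x_t.dot(W_in_z) + h_prev.dot(W_hh_z) + b_z)        # update gate
    h_tilde = np.tanh(x_t.dot(W_in) + (r_t * h_prev).dot(W_hh) + b)  # candidate state
    return (1.0 - z_t) * h_prev + z_t * h_tilde                      # interpolated new state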
|
python
|
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import re
import string
import shutil
import time
from collections import Counter
import pexpect
from wlauto import BigLittleDevice, RuntimeParameter, Parameter, settings
from wlauto.exceptions import ConfigError, DeviceError
from wlauto.utils.android import adb_connect, adb_disconnect, adb_list_devices
from wlauto.utils.serial_port import open_serial_connection
from wlauto.utils.misc import merge_dicts
from wlauto.utils.types import boolean
BOOT_FIRMWARE = {
'uefi': {
'SCC_0x010': '0x000003E0',
'reboot_attempts': 0,
},
'bootmon': {
'SCC_0x010': '0x000003D0',
'reboot_attempts': 2,
},
}
MODES = {
'mp_a7_only': {
'images_file': 'images_mp.txt',
'dtb': 'mp_a7',
'initrd': 'init_mp',
'kernel': 'kern_mp',
'SCC_0x700': '0x1032F003',
'cpus': ['a7', 'a7', 'a7'],
},
'mp_a7_bootcluster': {
'images_file': 'images_mp.txt',
'dtb': 'mp_a7bc',
'initrd': 'init_mp',
'kernel': 'kern_mp',
'SCC_0x700': '0x1032F003',
'cpus': ['a7', 'a7', 'a7', 'a15', 'a15'],
},
'mp_a15_only': {
'images_file': 'images_mp.txt',
'dtb': 'mp_a15',
'initrd': 'init_mp',
'kernel': 'kern_mp',
'SCC_0x700': '0x0032F003',
'cpus': ['a15', 'a15'],
},
'mp_a15_bootcluster': {
'images_file': 'images_mp.txt',
'dtb': 'mp_a15bc',
'initrd': 'init_mp',
'kernel': 'kern_mp',
'SCC_0x700': '0x0032F003',
'cpus': ['a15', 'a15', 'a7', 'a7', 'a7'],
},
'iks_cpu': {
'images_file': 'images_iks.txt',
'dtb': 'iks',
'initrd': 'init_iks',
'kernel': 'kern_iks',
'SCC_0x700': '0x1032F003',
'cpus': ['a7', 'a7'],
},
'iks_a15': {
'images_file': 'images_iks.txt',
'dtb': 'iks',
'initrd': 'init_iks',
'kernel': 'kern_iks',
'SCC_0x700': '0x0032F003',
'cpus': ['a15', 'a15'],
},
'iks_a7': {
'images_file': 'images_iks.txt',
'dtb': 'iks',
'initrd': 'init_iks',
'kernel': 'kern_iks',
'SCC_0x700': '0x0032F003',
'cpus': ['a7', 'a7'],
},
'iks_ns_a15': {
'images_file': 'images_iks.txt',
'dtb': 'iks',
'initrd': 'init_iks',
'kernel': 'kern_iks',
'SCC_0x700': '0x0032F003',
'cpus': ['a7', 'a7', 'a7', 'a15', 'a15'],
},
'iks_ns_a7': {
'images_file': 'images_iks.txt',
'dtb': 'iks',
'initrd': 'init_iks',
'kernel': 'kern_iks',
'SCC_0x700': '0x0032F003',
'cpus': ['a7', 'a7', 'a7', 'a15', 'a15'],
},
}
A7_ONLY_MODES = ['mp_a7_only', 'iks_a7', 'iks_cpu']
A15_ONLY_MODES = ['mp_a15_only', 'iks_a15']
DEFAULT_A7_GOVERNOR_TUNABLES = {
'interactive': {
'above_hispeed_delay': 80000,
'go_hispeed_load': 85,
'hispeed_freq': 800000,
'min_sample_time': 80000,
'timer_rate': 20000,
},
'ondemand': {
'sampling_rate': 50000,
},
}
DEFAULT_A15_GOVERNOR_TUNABLES = {
'interactive': {
'above_hispeed_delay': 80000,
'go_hispeed_load': 85,
'hispeed_freq': 1000000,
'min_sample_time': 80000,
'timer_rate': 20000,
},
'ondemand': {
'sampling_rate': 50000,
},
}
ADB_SHELL_TIMEOUT = 30
class _TC2DeviceConfig(object):
name = 'TC2 Configuration'
device_name = 'TC2'
def __init__(self, # pylint: disable=R0914,W0613
root_mount='/media/VEMSD',
disable_boot_configuration=False,
boot_firmware=None,
mode=None,
fs_medium='usb',
device_working_directory='/data/local/usecase',
bm_image='bm_v519r.axf',
serial_device='/dev/ttyS0',
serial_baud=38400,
serial_max_timeout=600,
serial_log=sys.stdout,
init_timeout=120,
always_delete_uefi_entry=True,
psci_enable=True,
host_working_directory=None,
a7_governor_tunables=None,
a15_governor_tunables=None,
adb_name=None,
# Compatibility with other android devices.
enable_screen_check=None, # pylint: disable=W0613
**kwargs
):
self.root_mount = root_mount
self.disable_boot_configuration = disable_boot_configuration
if not disable_boot_configuration:
self.boot_firmware = boot_firmware or 'uefi'
self.default_mode = mode or 'mp_a7_bootcluster'
elif boot_firmware or mode:
raise ConfigError('boot_firmware and/or mode cannot be specified when disable_boot_configuration is enabled.')
self.mode = self.default_mode
self.working_directory = device_working_directory
self.serial_device = serial_device
self.serial_baud = serial_baud
self.serial_max_timeout = serial_max_timeout
self.serial_log = serial_log
        self.bootmon_prompt = re.compile(r'^([KLM]:\\)?>', re.MULTILINE)  # e.g. matches 'K:\>'
self.fs_medium = fs_medium.lower()
self.bm_image = bm_image
self.init_timeout = init_timeout
self.always_delete_uefi_entry = always_delete_uefi_entry
self.psci_enable = psci_enable
self.resource_dir = os.path.join(os.path.dirname(__file__), 'resources')
self.board_dir = os.path.join(self.root_mount, 'SITE1', 'HBI0249A')
self.board_file = 'board.txt'
self.board_file_bak = 'board.bak'
self.images_file = 'images.txt'
self.host_working_directory = host_working_directory or settings.meta_directory
if not a7_governor_tunables:
self.a7_governor_tunables = DEFAULT_A7_GOVERNOR_TUNABLES
else:
self.a7_governor_tunables = merge_dicts(DEFAULT_A7_GOVERNOR_TUNABLES, a7_governor_tunables)
if not a15_governor_tunables:
self.a15_governor_tunables = DEFAULT_A15_GOVERNOR_TUNABLES
else:
self.a15_governor_tunables = merge_dicts(DEFAULT_A15_GOVERNOR_TUNABLES, a15_governor_tunables)
self.adb_name = adb_name
@property
def src_images_template_file(self):
return os.path.join(self.resource_dir, MODES[self.mode]['images_file'])
@property
def src_images_file(self):
return os.path.join(self.host_working_directory, 'images.txt')
@property
def src_board_template_file(self):
return os.path.join(self.resource_dir, 'board_template.txt')
@property
def src_board_file(self):
return os.path.join(self.host_working_directory, 'board.txt')
@property
def kernel_arguments(self):
kernel_args = ' console=ttyAMA0,38400 androidboot.console=ttyAMA0 selinux=0'
if self.fs_medium == 'usb':
kernel_args += ' androidboot.hardware=arm-versatileexpress-usb'
if 'iks' in self.mode:
kernel_args += ' no_bL_switcher=0'
return kernel_args
@property
def kernel(self):
return MODES[self.mode]['kernel']
@property
def initrd(self):
return MODES[self.mode]['initrd']
@property
def dtb(self):
return MODES[self.mode]['dtb']
@property
def SCC_0x700(self):
return MODES[self.mode]['SCC_0x700']
@property
def SCC_0x010(self):
return BOOT_FIRMWARE[self.boot_firmware]['SCC_0x010']
@property
def reboot_attempts(self):
return BOOT_FIRMWARE[self.boot_firmware]['reboot_attempts']
def validate(self):
valid_modes = MODES.keys()
if self.mode not in valid_modes:
message = 'Invalid mode: {}; must be in {}'.format(
self.mode, valid_modes)
raise ConfigError(message)
valid_boot_firmware = BOOT_FIRMWARE.keys()
if self.boot_firmware not in valid_boot_firmware:
message = 'Invalid boot_firmware: {}; must be in {}'.format(
self.boot_firmware,
valid_boot_firmware)
raise ConfigError(message)
if self.fs_medium not in ['usb', 'sdcard']:
message = 'Invalid filesystem medium: {} allowed values : usb, sdcard '.format(self.fs_medium)
raise ConfigError(message)
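# Illustrative helper (hypothetical, not part of Workload Automation): shows how
# _TC2DeviceConfig is typically parameterised and validated before use.
def _example_tc2_config():
    config = _TC2DeviceConfig(boot_firmware='uefi',
                              mode='mp_a7_bootcluster',
                              fs_medium='usb',
                              serial_device='/dev/ttyS0',
                              serial_baud=38400)
    config.validate()
    return config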
class TC2Device(BigLittleDevice):
name = 'TC2'
description = """
TC2 is a development board, which has three A7 cores and two A15 cores.
TC2 has a number of boot parameters which are:
:root_mount: Defaults to '/media/VEMSD'
:boot_firmware: It has only two boot firmware options, which are
uefi and bootmon. Defaults to 'uefi'.
:fs_medium: Defaults to 'usb'.
:device_working_directory: The direcitory that WA will be using to copy
files to. Defaults to 'data/local/usecase'
:serial_device: The serial device which TC2 is connected to. Defaults to
'/dev/ttyS0'.
:serial_baud: Defaults to 38400.
:serial_max_timeout: Serial timeout value in seconds. Defaults to 600.
:serial_log: Defaults to standard output.
:init_timeout: The timeout in seconds to init the device. Defaults set
to 30.
:always_delete_uefi_entry: If true, it will delete the ufi entry.
Defaults to True.
:psci_enable: Enabling the psci. Defaults to True.
:host_working_directory: The host working directory. Defaults to None.
:disable_boot_configuration: Disables boot configuration through images.txt and board.txt. When
this is ``True``, those two files will not be overwritten in VEMSD.
This option may be necessary if the firmware version in the ``TC2``
is not compatible with the templates in WA. Please note that enabling
this will prevent you form being able to set ``boot_firmware`` and
``mode`` parameters. Defaults to ``False``.
TC2 can also have a number of different booting mode, which are:
:mp_a7_only: Only the A7 cluster.
:mp_a7_bootcluster: Both A7 and A15 clusters, but it boots on A7
cluster.
:mp_a15_only: Only the A15 cluster.
:mp_a15_bootcluster: Both A7 and A15 clusters, but it boots on A15
clusters.
:iks_cpu: Only A7 cluster with only 2 cpus.
:iks_a15: Only A15 cluster.
:iks_a7: Same as iks_cpu
:iks_ns_a15: Both A7 and A15 clusters.
:iks_ns_a7: Both A7 and A15 clusters.
The difference between mp and iks is the scheduling policy.
TC2 takes the following runtime parameters
:a7_cores: Number of active A7 cores.
:a15_cores: Number of active A15 cores.
:a7_governor: CPUFreq governor for the A7 cluster.
:a15_governor: CPUFreq governor for the A15 cluster.
:a7_min_frequency: Minimum CPU frequency for the A7 cluster.
:a15_min_frequency: Minimum CPU frequency for the A15 cluster.
:a7_max_frequency: Maximum CPU frequency for the A7 cluster.
:a15_max_frequency: Maximum CPU frequency for the A7 cluster.
:irq_affinity: lambda x: Which cluster will receive IRQs.
:cpuidle: Whether idle states should be enabled.
:sysfile_values: A dict mapping a complete file path to the value that
should be echo'd into it. By default, the file will be
subsequently read to verify that the value was written
into it with DeviceError raised otherwise. For write-only
files, this check can be disabled by appending a ``!`` to
the end of the file path.
"""
has_gpu = False
a15_only_modes = A15_ONLY_MODES
a7_only_modes = A7_ONLY_MODES
not_configurable_modes = ['iks_a7', 'iks_cpu', 'iks_a15']
parameters = [
Parameter('core_names', mandatory=False, override=True,
description='This parameter will be ignored for TC2'),
Parameter('core_clusters', mandatory=False, override=True,
description='This parameter will be ignored for TC2'),
]
runtime_parameters = [
RuntimeParameter('irq_affinity', lambda d, x: d.set_irq_affinity(x.lower()), lambda: None),
RuntimeParameter('cpuidle', lambda d, x: d.enable_idle_states() if boolean(x) else d.disable_idle_states(),
lambda d: d.get_cpuidle())
]
def get_mode(self):
return self.config.mode
def set_mode(self, mode):
if self._has_booted:
raise DeviceError('Attempting to set boot mode when already booted.')
valid_modes = MODES.keys()
if mode is None:
mode = self.config.default_mode
if mode not in valid_modes:
message = 'Invalid mode: {}; must be in {}'.format(mode, valid_modes)
raise ConfigError(message)
self.config.mode = mode
mode = property(get_mode, set_mode)
def _get_core_names(self):
return MODES[self.mode]['cpus']
def _set_core_names(self, value):
pass
core_names = property(_get_core_names, _set_core_names)
def _get_core_clusters(self):
seen = set([])
core_clusters = []
cluster_id = -1
for core in MODES[self.mode]['cpus']:
if core not in seen:
seen.add(core)
cluster_id += 1
core_clusters.append(cluster_id)
return core_clusters
def _set_core_clusters(self, value):
pass
core_clusters = property(_get_core_clusters, _set_core_clusters)
@property
def cpu_cores(self):
return MODES[self.mode]['cpus']
@property
def max_a7_cores(self):
return Counter(MODES[self.mode]['cpus'])['a7']
@property
def max_a15_cores(self):
return Counter(MODES[self.mode]['cpus'])['a15']
@property
def a7_governor_tunables(self):
return self.config.a7_governor_tunables
@property
def a15_governor_tunables(self):
return self.config.a15_governor_tunables
def __init__(self, **kwargs):
super(TC2Device, self).__init__()
self.config = _TC2DeviceConfig(**kwargs)
self.working_directory = self.config.working_directory
self._serial = None
self._has_booted = None
def boot(self, **kwargs): # NOQA
mode = kwargs.get('os_mode', None)
self._is_ready = False
self._has_booted = False
self.mode = mode
self.logger.debug('Booting in {} mode'.format(self.mode))
with open_serial_connection(timeout=self.config.serial_max_timeout,
port=self.config.serial_device,
baudrate=self.config.serial_baud) as target:
if self.config.boot_firmware == 'bootmon':
self._boot_using_bootmon(target)
elif self.config.boot_firmware == 'uefi':
self._boot_using_uefi(target)
else:
message = 'Unexpected boot firmware: {}'.format(self.config.boot_firmware)
raise ConfigError(message)
try:
target.sendline('')
self.logger.debug('Waiting for the Android prompt.')
target.expect(self.android_prompt, timeout=40) # pylint: disable=E1101
except pexpect.TIMEOUT:
# Try a second time before giving up.
self.logger.debug('Did not get Android prompt, retrying...')
target.sendline('')
target.expect(self.android_prompt, timeout=10) # pylint: disable=E1101
self.logger.debug('Waiting for OS to initialize...')
started_waiting_time = time.time()
            time.sleep(20)  # we know it's not going to take less time than this.
boot_completed, got_ip_address = False, False
while True:
try:
if not boot_completed:
target.sendline('getprop sys.boot_completed')
boot_completed = target.expect(['0.*', '1.*'], timeout=10)
if not got_ip_address:
target.sendline('getprop dhcp.eth0.ipaddress')
# regexes are processed in order, so ip regex has to
# come first (as we only want to match new line if we
                        # don't match the IP). We do a "not" to make the logic
# consistent with boot_completed.
got_ip_address = not target.expect(['[1-9]\d*.\d+.\d+.\d+', '\n'], timeout=10)
except pexpect.TIMEOUT:
pass # We have our own timeout -- see below.
if boot_completed and got_ip_address:
break
time.sleep(5)
if (time.time() - started_waiting_time) > self.config.init_timeout:
raise DeviceError('Timed out waiting for the device to initialize.')
self._has_booted = True
def connect(self):
if not self._is_ready:
if self.config.adb_name:
self.adb_name = self.config.adb_name # pylint: disable=attribute-defined-outside-init
else:
with open_serial_connection(timeout=self.config.serial_max_timeout,
port=self.config.serial_device,
baudrate=self.config.serial_baud) as target:
# Get IP address and push the Gator and PMU logger.
target.sendline('su') # as of Android v5.0.2, Linux does not boot into root shell
target.sendline('netcfg')
ipaddr_re = re.compile('eth0 +UP +(.+)/.+', re.MULTILINE)
target.expect(ipaddr_re)
output = target.after
match = re.search('eth0 +UP +(.+)/.+', output)
if not match:
raise DeviceError('Could not get adb IP address.')
ipaddr = match.group(1)
# Connect to device using adb.
target.expect(self.android_prompt) # pylint: disable=E1101
self.adb_name = ipaddr + ":5555" # pylint: disable=W0201
if self.adb_name in adb_list_devices():
adb_disconnect(self.adb_name)
adb_connect(self.adb_name)
self._is_ready = True
self.execute("input keyevent 82", timeout=ADB_SHELL_TIMEOUT)
self.execute("svc power stayon true", timeout=ADB_SHELL_TIMEOUT)
def disconnect(self):
adb_disconnect(self.adb_name)
self._is_ready = False
# TC2-specific methods. You should avoid calling these in
# Workloads/Instruments as that would tie them to TC2 (and if that is
# the case, then you should set the supported_devices parameter in the
# Workload/Instrument accordingly). Most of these can be replace with a
# call to set_runtime_parameters.
def get_cpuidle(self):
return self.get_sysfile_value('/sys/devices/system/cpu/cpu0/cpuidle/state1/disable')
def enable_idle_states(self):
"""
Fully enables idle states on TC2.
See http://wiki.arm.com/Research/TC2SetupAndUsage ("Enabling Idle Modes" section)
and http://wiki.arm.com/ASD/ControllingPowerManagementInLinaroKernels
"""
# Enable C1 (cluster shutdown).
self.set_sysfile_value('/sys/devices/system/cpu/cpu0/cpuidle/state1/disable', 0, verify=False)
# Enable C0 on A15 cluster.
self.set_sysfile_value('/sys/kernel/debug/idle_debug/enable_idle', 0, verify=False)
# Enable C0 on A7 cluster.
self.set_sysfile_value('/sys/kernel/debug/idle_debug/enable_idle', 1, verify=False)
def disable_idle_states(self):
"""
Disable idle states on TC2.
See http://wiki.arm.com/Research/TC2SetupAndUsage ("Enabling Idle Modes" section)
and http://wiki.arm.com/ASD/ControllingPowerManagementInLinaroKernels
"""
# Disable C1 (cluster shutdown).
self.set_sysfile_value('/sys/devices/system/cpu/cpu0/cpuidle/state1/disable', 1, verify=False)
# Disable C0.
self.set_sysfile_value('/sys/kernel/debug/idle_debug/enable_idle', 0xFF, verify=False)
def set_irq_affinity(self, cluster):
"""
Set's IRQ affinity to the specified cluster.
This method will only work if the device mode is mp_a7_bootcluster or
mp_a15_bootcluster. This operation does not make sense if there is only one
cluster active (all IRQs will obviously go to that), and it will not work for
IKS kernel because clusters are not exposed to sysfs.
:param cluster: must be either 'a15' or 'a7'.
"""
if self.config.mode not in ('mp_a7_bootcluster', 'mp_a15_bootcluster'):
raise ConfigError('Cannot set IRQ affinity with mode {}'.format(self.config.mode))
if cluster == 'a7':
self.execute('/sbin/set_irq_affinity.sh 0xc07', check_exit_code=False)
elif cluster == 'a15':
self.execute('/sbin/set_irq_affinity.sh 0xc0f', check_exit_code=False)
else:
            raise ConfigError('cluster must be either "a15" or "a7"; got {}'.format(cluster))
def _boot_using_uefi(self, target):
self.logger.debug('Booting using UEFI.')
self._wait_for_vemsd_mount(target)
self._setup_before_reboot()
self._perform_uefi_reboot(target)
# Get to the UEFI menu.
self.logger.debug('Waiting for UEFI default selection.')
target.sendline('reboot')
target.expect('The default boot selection will start in'.rstrip())
time.sleep(1)
target.sendline(''.rstrip())
# If delete every time is specified, try to delete entry.
if self.config.always_delete_uefi_entry:
self._delete_uefi_entry(target, entry='workload_automation_MP')
self.config.always_delete_uefi_entry = False
# Specify argument to be passed specifying that psci is (or is not) enabled
if self.config.psci_enable:
psci_enable = ' psci=enable'
else:
psci_enable = ''
# Identify the workload automation entry.
selection_pattern = r'\[([0-9]*)\] '
try:
target.expect(re.compile(selection_pattern + 'workload_automation_MP'), timeout=5)
wl_menu_item = target.match.group(1)
except pexpect.TIMEOUT:
self._create_uefi_entry(target, psci_enable, entry_name='workload_automation_MP')
# At this point the board should be rebooted so we need to retry to boot
self._boot_using_uefi(target)
else: # Did not time out.
try:
#Identify the boot manager menu item
target.expect(re.compile(selection_pattern + 'Boot Manager'))
boot_manager_menu_item = target.match.group(1)
#Update FDT
target.sendline(boot_manager_menu_item)
target.expect(re.compile(selection_pattern + 'Update FDT path'), timeout=15)
update_fdt_menu_item = target.match.group(1)
target.sendline(update_fdt_menu_item)
target.expect(re.compile(selection_pattern + 'NOR Flash .*'), timeout=15)
bootmonfs_menu_item = target.match.group(1)
target.sendline(bootmonfs_menu_item)
target.expect('File path of the FDT blob:')
target.sendline(self.config.dtb)
                # Return to main menu and boot from wl automation
target.expect(re.compile(selection_pattern + 'Return to main menu'), timeout=15)
return_to_main_menu_item = target.match.group(1)
target.sendline(return_to_main_menu_item)
target.sendline(wl_menu_item)
except pexpect.TIMEOUT:
raise DeviceError('Timed out')
def _setup_before_reboot(self):
if not self.config.disable_boot_configuration:
self.logger.debug('Performing pre-boot setup.')
substitution = {
'SCC_0x010': self.config.SCC_0x010,
'SCC_0x700': self.config.SCC_0x700,
}
with open(self.config.src_board_template_file, 'r') as fh:
template_board_txt = string.Template(fh.read())
with open(self.config.src_board_file, 'w') as wfh:
wfh.write(template_board_txt.substitute(substitution))
with open(self.config.src_images_template_file, 'r') as fh:
template_images_txt = string.Template(fh.read())
with open(self.config.src_images_file, 'w') as wfh:
wfh.write(template_images_txt.substitute({'bm_image': self.config.bm_image}))
shutil.copyfile(self.config.src_board_file,
os.path.join(self.config.board_dir, self.config.board_file))
shutil.copyfile(self.config.src_images_file,
os.path.join(self.config.board_dir, self.config.images_file))
os.system('sync') # make sure everything is flushed to microSD
else:
            self.logger.debug('Boot configuration disabled; proceeding with existing board.txt and images.txt.')
def _delete_uefi_entry(self, target, entry): # pylint: disable=R0201
"""
        Deletes the UEFI boot entry specified by ``entry``.
        Precondition: serial port input must have been parsed at most up to the
        point before this entry is recognised (i.e. neither the entry nor the
        Boot Manager menu item has been consumed yet).
"""
try:
selection_pattern = r'\[([0-9]+)\] *'
try:
target.expect(re.compile(selection_pattern + entry), timeout=5)
wl_menu_item = target.match.group(1)
except pexpect.TIMEOUT:
return # Entry does not exist, nothing to delete here...
# Identify and select boot manager menu item
target.expect(selection_pattern + 'Boot Manager', timeout=15)
bootmanager_item = target.match.group(1)
target.sendline(bootmanager_item)
# Identify and select 'Remove entry'
target.expect(selection_pattern + 'Remove Boot Device Entry', timeout=15)
new_entry_item = target.match.group(1)
target.sendline(new_entry_item)
# Delete entry
target.expect(re.compile(selection_pattern + entry), timeout=5)
wl_menu_item = target.match.group(1)
target.sendline(wl_menu_item)
            # Return to main menu
target.expect(re.compile(selection_pattern + 'Return to main menu'), timeout=15)
return_to_main_menu_item = target.match.group(1)
target.sendline(return_to_main_menu_item)
except pexpect.TIMEOUT:
raise DeviceError('Timed out while deleting UEFI entry.')
def _create_uefi_entry(self, target, psci_enable, entry_name):
"""
Creates the default boot entry that is expected when booting in uefi mode.
"""
self._wait_for_vemsd_mount(target)
try:
            selection_pattern = r'\[([0-9]+)\] *'
# Identify and select boot manager menu item.
target.expect(selection_pattern + 'Boot Manager', timeout=15)
bootmanager_item = target.match.group(1)
target.sendline(bootmanager_item)
# Identify and select 'add new entry'.
target.expect(selection_pattern + 'Add Boot Device Entry', timeout=15)
new_entry_item = target.match.group(1)
target.sendline(new_entry_item)
# Identify and select BootMonFs.
target.expect(selection_pattern + 'NOR Flash .*', timeout=15)
BootMonFs_item = target.match.group(1)
target.sendline(BootMonFs_item)
# Specify the parameters of the new entry.
target.expect('.+the kernel', timeout=5)
target.sendline(self.config.kernel) # kernel path
            target.expect(r'Has FDT support\?.*\[y\/n\].*', timeout=5)
            time.sleep(0.5)
            target.sendline('y')  # Has FDT support? -> y
            target.expect(r'Add an initrd.*\[y\/n\].*', timeout=5)
time.sleep(0.5)
target.sendline('y') # add an initrd? -> y
target.expect('.+the initrd.*', timeout=5)
time.sleep(0.5)
target.sendline(self.config.initrd) # initrd path
target.expect('.+to the binary.*', timeout=5)
time.sleep(0.5)
_slow_sendline(target, self.config.kernel_arguments + psci_enable) # arguments to pass to binary
time.sleep(0.5)
target.expect('.+new Entry.+', timeout=5)
_slow_sendline(target, entry_name) # Entry name
target.expect('Choice.+', timeout=15)
time.sleep(2)
except pexpect.TIMEOUT:
raise DeviceError('Timed out while creating UEFI entry.')
self._perform_uefi_reboot(target)
def _perform_uefi_reboot(self, target):
self._wait_for_vemsd_mount(target)
open(os.path.join(self.config.root_mount, 'reboot.txt'), 'a').close()
def _wait_for_vemsd_mount(self, target, timeout=100):
attempts = 1 + self.config.reboot_attempts
if os.path.exists(os.path.join(self.config.root_mount, 'config.txt')):
return
self.logger.debug('Waiting for VEMSD to mount...')
for i in xrange(attempts):
if i: # Do not reboot on the first attempt.
target.sendline('reboot')
target.sendline('usb_on')
for _ in xrange(timeout):
time.sleep(1)
if os.path.exists(os.path.join(self.config.root_mount, 'config.txt')):
return
raise DeviceError('Timed out waiting for VEMSD to mount.')
def _boot_using_bootmon(self, target):
"""
        Boots TC2 using the bootmon interface.
"""
self.logger.debug('Booting using bootmon.')
try:
self._wait_for_vemsd_mount(target, timeout=20)
except DeviceError:
# OK, something's wrong. Reboot the board and try again.
self.logger.debug('VEMSD not mounted, attempting to power cycle device.')
target.sendline(' ')
state = target.expect(['Cmd> ', self.config.bootmon_prompt, self.android_prompt]) # pylint: disable=E1101
if state == 0 or state == 1:
# Reboot - Bootmon
target.sendline('reboot')
target.expect('Powering up system...')
elif state == 2:
target.sendline('reboot -n')
target.expect('Powering up system...')
else:
raise DeviceError('Unexpected board state {}; should be 0, 1 or 2'.format(state))
self._wait_for_vemsd_mount(target)
self._setup_before_reboot()
# Reboot - Bootmon
self.logger.debug('Rebooting into bootloader...')
open(os.path.join(self.config.root_mount, 'reboot.txt'), 'a').close()
target.expect('Powering up system...')
target.expect(self.config.bootmon_prompt)
# Wait for VEMSD to mount
self._wait_for_vemsd_mount(target)
#Boot Linux - Bootmon
target.sendline('fl linux fdt ' + self.config.dtb)
target.expect(self.config.bootmon_prompt)
target.sendline('fl linux initrd ' + self.config.initrd)
target.expect(self.config.bootmon_prompt)
#Workaround TC2 bootmon serial issue for loading large initrd blob
target.sendline(' ')
target.expect(self.config.bootmon_prompt)
target.sendline('fl linux boot ' + self.config.kernel + self.config.kernel_arguments)
# Utility functions.
def _slow_sendline(target, line):
for c in line:
target.send(c)
time.sleep(0.1)
target.sendline('')
|
python
|
# Source file name
SOURCE_FILE = "{filename}.hs"
# Output (compiled) file name
OUTPUT_FILE = "{filename}.out"
# Compile command line
COMPILE = "ghc {source} -o {output} {extra}"
# Run command line
RUN = 'sh -c "./{program} {redirect}"'
# Display name
DISPLAY = "Haskell"
# Version
VERSION = "GHC 8.0.2"
# Ace.js editor mode
ACE_MODE = "haskell"
|
python
|
# Generated by Django 2.1.7 on 2019-10-03 20:35
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0017_auto_20190921_1849'),
]
operations = [
migrations.RemoveField(
model_name='estruturacurricular',
name='ano_periodo',
),
migrations.RemoveField(
model_name='estruturacurricular',
name='sigla',
),
migrations.AddField(
model_name='estruturacurricular',
name='ano_entrada_vigor',
field=models.IntegerField(default=2019),
preserve_default=False,
),
migrations.AddField(
model_name='estruturacurricular',
name='ch_atividade_obrigatoria',
field=models.IntegerField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='estruturacurricular',
name='ch_complementar_minima',
field=models.IntegerField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='estruturacurricular',
name='ch_ideal_semestre',
field=models.IntegerField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='estruturacurricular',
name='ch_maxima_semestre',
field=models.IntegerField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='estruturacurricular',
name='ch_minima_semestre',
field=models.IntegerField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='estruturacurricular',
name='ch_nao_atividade_obrigatoria',
field=models.IntegerField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='estruturacurricular',
name='ch_optativas_minima',
field=models.IntegerField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='estruturacurricular',
name='ch_total_minima',
field=models.IntegerField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='estruturacurricular',
name='cr_ideal_semestre',
field=models.IntegerField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='estruturacurricular',
name='cr_maximo_semestre',
field=models.IntegerField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='estruturacurricular',
name='cr_minimo_semestre',
field=models.IntegerField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='estruturacurricular',
name='cr_nao_atividade_obrigatorio',
field=models.IntegerField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='estruturacurricular',
name='cr_total_minimo',
field=models.IntegerField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='estruturacurricular',
name='curso',
field=models.ForeignKey(default=7191770, on_delete=django.db.models.deletion.PROTECT, to='core.Curso'),
preserve_default=False,
),
migrations.AddField(
model_name='estruturacurricular',
name='id_curriculo',
field=models.IntegerField(default=0, unique=True),
preserve_default=False,
),
migrations.AddField(
model_name='estruturacurricular',
name='max_eletivos',
field=models.IntegerField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='estruturacurricular',
name='meses_conclusao_ideal',
field=models.IntegerField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='estruturacurricular',
name='meses_conclusao_maximo',
field=models.IntegerField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='estruturacurricular',
name='meses_conclusao_minimo',
field=models.IntegerField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='estruturacurricular',
name='observacao',
field=models.TextField(default='', max_length=500),
preserve_default=False,
),
migrations.AddField(
model_name='estruturacurricular',
name='periodo_entrada_vigor',
field=models.IntegerField(default=1),
preserve_default=False,
),
migrations.AddField(
model_name='estruturacurricular',
name='semestre_conclusao_ideal',
field=models.IntegerField(default=8),
preserve_default=False,
),
migrations.AddField(
model_name='estruturacurricular',
name='semestre_conclusao_maximo',
field=models.IntegerField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='estruturacurricular',
name='semestre_conclusao_minimo',
field=models.IntegerField(default=0),
preserve_default=False,
),
migrations.AlterField(
model_name='estruturacurricular',
name='codigo',
field=models.CharField(max_length=10, unique=True),
),
]
|
python
|
import torch
from torch.optim import Adam,SGD
from opt import opt
import math
import random
import collections
from torch.utils.data import sampler
import torch.nn as nn
def extract_feature( model, loader):
features = torch.FloatTensor()
for (inputs, labels) in loader:
ff = torch.FloatTensor(inputs.size(0),2048).zero_()
for i in range(2):
            if i == 1:
                # second pass: horizontally flip the image (simple test-time augmentation)
                inputs = inputs.index_select(3, torch.arange(inputs.size(3) - 1, -1, -1).long())
input_img = inputs.to('cuda')
outputs = model(input_img)
f = outputs[0].data.cpu()
ff = ff + f
fnorm = torch.norm(ff, p=2, dim=1, keepdim=True)
ff = ff.div(fnorm.expand_as(ff))
features = torch.cat((features, ff), 0)
return features
def get_optimizer(net):
if opt.freeze:
for p in net.parameters():
p.requires_grad = True
for q in net.backbone.parameters():
q.requires_grad = False
optimizer = Adam(filter(lambda p: p.requires_grad, net.parameters()), lr=opt.lr, weight_decay=5e-4,amsgrad=True)
else:
#optimizer = SGD(net.parameters(), lr=opt.lr,momentum=0.9, weight_decay=5e-4)
optimizer = Adam(net.parameters(), lr=opt.lr, weight_decay=5e-4, amsgrad=True)
return optimizer
class TripletLoss(nn.Module):
"""Triplet loss with hard positive/negative mining.
Reference:
Hermans et al. In Defense of the Triplet Loss for Person Re-Identification. arXiv:1703.07737.
Code imported from https://github.com/Cysu/open-reid/blob/master/reid/loss/triplet.py.
Args:
margin (float): margin for triplet.
"""
def __init__(self, margin=0.3, mutual_flag=False):
super(TripletLoss, self).__init__()
self.margin = margin
self.ranking_loss = nn.MarginRankingLoss(margin=margin)
self.mutual = mutual_flag
def forward(self, inputs, targets):
"""
Args:
inputs: feature matrix with shape (batch_size, feat_dim)
            targets: ground truth labels with shape (batch_size)
"""
n = inputs.size(0)
# inputs = 1. * inputs / (torch.norm(inputs, 2, dim=-1, keepdim=True).expand_as(inputs) + 1e-12)
# Compute pairwise distance, replace by the official when merged
dist = torch.pow(inputs, 2).sum(dim=1, keepdim=True).expand(n, n)
dist = dist + dist.t()
        dist.addmm_(inputs, inputs.t(), beta=1, alpha=-2)
dist = dist.clamp(min=1e-12).sqrt() # for numerical stability
# For each anchor, find the hardest positive and negative
mask = targets.expand(n, n).eq(targets.expand(n, n).t())
dist_ap, dist_an = [], []
for i in range(n):
dist_ap.append(dist[i][mask[i]].max().unsqueeze(0))
dist_an.append(dist[i][mask[i] == 0].min().unsqueeze(0))
dist_ap = torch.cat(dist_ap)
dist_an = torch.cat(dist_an)
# Compute ranking hinge loss
y = torch.ones_like(dist_an)
loss = self.ranking_loss(dist_an, dist_ap, y)
if self.mutual:
return loss, dist
return loss
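# Illustrative sketch (added): minimal use of TripletLoss with a P*K batch (here 4 identities
# x 2 images each), matching the identity-balanced sampling done by RandomSampler below.
# This helper is hypothetical and not part of the original training pipeline.
def _demo_triplet_loss():
    feats = torch.randn(8, 2048)                     # (batch_size, feat_dim) embeddings
    labels = torch.tensor([0, 0, 1, 1, 2, 2, 3, 3])  # identity label per sample
    return TripletLoss(margin=0.3)(feats, labels)    # scalar hinge loss over hardest triplets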
class RandomSampler(sampler.Sampler):
def __init__(self, data_source, batch_id, batch_image):
super(RandomSampler, self).__init__(data_source)
self.data_source = data_source
self.batch_image = batch_image
self.batch_id = batch_id
self._id2index = collections.defaultdict(list)
for idx, path in enumerate(data_source.imgs):
_id = data_source.id(path)
self._id2index[_id].append(idx)
def __iter__(self):
unique_ids = self.data_source.unique_ids
random.shuffle(unique_ids)
imgs = []
for _id in unique_ids:
imgs.extend(self._sample(self._id2index[_id], self.batch_image))
return iter(imgs)
def __len__(self):
return len(self._id2index) * self.batch_image
@staticmethod
def _sample(population, k):
if len(population) < k:
population = population * k
return random.sample(population, k)
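# Illustrative sketch (added, hypothetical): RandomSampler expects a dataset exposing
# .imgs, .unique_ids and .id(path); wired into a DataLoader it yields batches holding
# batch_id identities with batch_image images each, as assumed by TripletLoss above.
#   loader = torch.utils.data.DataLoader(dataset, batch_size=batch_id * batch_image,
#                                        sampler=RandomSampler(dataset, batch_id, batch_image))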
class RandomErasing(object):
""" Randomly selects a rectangle region in an image and erases its pixels.
'Random Erasing Data Augmentation' by Zhong et al.
See https://arxiv.org/pdf/1708.04896.pdf
Args:
probability: The probability that the Random Erasing operation will be performed.
sl: Minimum proportion of erased area against input image.
sh: Maximum proportion of erased area against input image.
r1: Minimum aspect ratio of erased area.
mean: Erasing value.
"""
def __init__(self, probability=0.5, sl=0.02, sh=0.4, r1=0.3, mean=[0.4914, 0.4822, 0.4465]):
self.probability = probability
self.mean = mean
self.sl = sl
self.sh = sh
self.r1 = r1
def __call__(self, img):
if random.uniform(0, 1) > self.probability:
return img
for attempt in range(100):
area = img.size()[1] * img.size()[2]
target_area = random.uniform(self.sl, self.sh) * area
aspect_ratio = random.uniform(self.r1, 1 / self.r1)
h = int(round(math.sqrt(target_area * aspect_ratio)))
w = int(round(math.sqrt(target_area / aspect_ratio)))
if w < img.size()[2] and h < img.size()[1]:
x1 = random.randint(0, img.size()[1] - h)
y1 = random.randint(0, img.size()[2] - w)
if img.size()[0] == 3:
img[0, x1:x1 + h, y1:y1 + w] = self.mean[0]
img[1, x1:x1 + h, y1:y1 + w] = self.mean[1]
img[2, x1:x1 + h, y1:y1 + w] = self.mean[2]
else:
img[0, x1:x1 + h, y1:y1 + w] = self.mean[0]
return img
return img
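# Illustrative sketch (added): RandomErasing operates on a CHW image tensor, so it is normally
# applied after ToTensor() in a transform pipeline. This demo function is hypothetical.
def _demo_random_erasing():
    img = torch.rand(3, 256, 128)            # CHW tensor, e.g. a person re-id crop
    eraser = RandomErasing(probability=1.0)  # probability=1.0 forces an erase for the demo
    return eraser(img)                       # one random rectangle is filled with the mean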
class SquareErasing(object):
def __init__(self,width=30,height=30):
self.width=width
self.height=height
def __call__(self, img):
channel=img.size()[0]
h=img.size()[1]
w=img.size()[2]
if channel==1:
img[0,0:self.height,0:self.width]=0
img[0,h-self.height:h,0:self.width]=0
img[0,0:self.height,w-self.width:w]=0
img[0,h-self.height:h,w-self.width:w]=0
else:
for i in range(3):
img[i, 0:self.height, 0:self.width] = 0
img[i, h - self.height:h, 0:self.width] = 0
img[i, 0:self.height, w - self.width:w] = 0
img[i, h - self.height:h, w - self.width:w] = 0
return img
|
python
|
# WRITE YOUR SOLUTION HERE:
def add_numbers_to_list(numbers: list):
if len(numbers) % 5 != 0:
        numbers.append(numbers[-1] + 1)
add_numbers_to_list(numbers)
if __name__=="__main__":
numbers = [1,3,4,5,10,11]
add_numbers_to_list(numbers)
print(numbers)
|
python
|
#!/usr/bin/env python
import os, subprocess
from autopkglib import Processor, ProcessorError
__all__ = ["IzPackExecutor"]
class IzPackExecutor(Processor):
"""Runs IzPack installer with all install options checked."""
input_variables = {
"app_root": {
"required": True,
"description": "Path where the app should be temporarily unpacked (installed in this case)"
},
"app_installer": {
"required": True,
"description": "Path to IzPack installer JAR"
}
}
output_variables = {
}
description = __doc__
def main(self):
real_path = os.path.realpath(__file__)
expect_path = real_path.replace(".pyc", "-install.expect").replace(".py", "-install.expect")
subprocess.call(["expect", expect_path, self.env["app_installer"], self.env["app_root"]])
zsh_path = real_path.replace(".pyc", "-get_version.zsh").replace(".py", "-get_version.zsh")
izpack_app_ver = subprocess.check_output(["zsh", zsh_path, self.env["app_root"]]).decode('ascii').replace("\r\n", "").replace("\r", "").replace("\n", "")
self.env["izpack_app_ver"] = izpack_app_ver
print(izpack_app_ver)
if __name__ == "__main__":
processor = IzPackExecutor()
processor.execute_shell()
|
python
|
"""
@author Yuto Watanabe
@version 1.0.0
Copyright (c) 2020 Yuto Watanabe
"""
|
python
|
import cProfile
import argparse
from app import Application
def parse_args():
parser = argparse.ArgumentParser(
description="A keyboard-oriented image viewer")
parser.add_argument("path", type=str, nargs='?', default="",
help="the file or directory to open")
parser.add_argument("--profile", action="store_true", default=False,
help="the file or directory to open")
return parser.parse_args()
def run(args):
try:
app = Application(args.path)
app.run()
except IOError as e:
print("error: failed to open file \"%s\"" % args.path)
def main():
args = parse_args()
if args.profile:
profiler = cProfile.Profile()
profiler.runcall(run, args)
profiler.print_stats(sort=1)
else:
run(args)
|
python
|
__author__ = 'Mario'
import wx
import wx.xrc
from Engine_Asian import AsianOption
###########################################################################
## Class MainPanel
###########################################################################
class PanelAsian ( wx.Panel ):
def __init__( self, parent ):
wx.Panel.__init__ ( self, parent, id = wx.ID_ANY, pos = wx.DefaultPosition, size = wx.Size( 500,300 ), style = wx.TAB_TRAVERSAL )
txtCtrlSizer = wx.BoxSizer( wx.VERTICAL )
self.StockPrice = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
txtCtrlSizer.Add( self.StockPrice, 0, wx.ALL, 5 )
self.StockPriceText = wx.StaticText(self, -1, 'Stock Price', pos = wx.Point(125, 10))
self.OptionPrice = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
txtCtrlSizer.Add( self.OptionPrice, 0, wx.ALL, 5 )
self.OptionStrikeText = wx.StaticText(self, -1, 'Option Strike Price', pos = wx.Point(125, 42))
self.OptionYears = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
txtCtrlSizer.Add( self.OptionYears, 0, wx.ALL, 5 )
self.OptionYearsText = wx.StaticText(self, -1, 'Option Time Length', pos = wx.Point(125, 75))
self.Riskfree = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
txtCtrlSizer.Add( self.Riskfree, 0, wx.ALL, 5 )
self.RiskFreeText = wx.StaticText(self, -1, 'Risk Free Rate', pos = wx.Point(125, 110))
self.Volatility = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
txtCtrlSizer.Add( self.Volatility, 0, wx.ALL, 5 )
self.VolatilityText = wx.StaticText(self, -1, 'Input Volatility', pos = wx.Point(125, 142))
self.Fixings = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0)
txtCtrlSizer.Add(self.Fixings, 0, wx.ALL, 5)
self.FixingsText = wx.StaticText(self, -1, 'Number of Price Fixings', pos = wx.Point(125, 174))
self.Iterations = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0)
txtCtrlSizer.Add(self.Iterations, 0, wx.ALL, 5)
self.IterationsText = wx.StaticText(self, -1, 'Number of Iterations', pos = wx.Point(125, 206))
Choices = ['Call', 'Put']
self.ChoiceBox = wx.Choice(self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, Choices, 0)
# self.ChoiceBox.SetSelection(0)
txtCtrlSizer.Add(self.ChoiceBox, 0, wx.ALL, 5)
buttonSizer = wx.BoxSizer( wx.HORIZONTAL )
self.computeButton = wx.Button( self, wx.ID_ANY, u"Compute", wx.DefaultPosition, wx.DefaultSize, 0 )
buttonSizer.Add( self.computeButton, 0, wx.ALL, 5 )
self.clearButton = wx.Button( self, wx.ID_ANY, u"Clear", wx.DefaultPosition, wx.DefaultSize, 0 )
buttonSizer.Add( self.clearButton, 0, wx.ALL, 5 )
## Bindings
self.computeButton.Bind(wx.EVT_BUTTON, self.OnCompute)
self.clearButton.Bind(wx.EVT_BUTTON, self.OnClear)
txtCtrlSizer.Add( buttonSizer, 1, wx.EXPAND, 5 )
self.SetSizer( txtCtrlSizer )
self.Layout()
def OnCompute(self, event):
stockPrice = self.StockPrice.GetValue()
optionStrike = self.OptionPrice.GetValue()
optionYears = self.OptionYears.GetValue()
Riskfree = self.Riskfree.GetValue()
Volatility = self.Volatility.GetValue()
Fixings = self.Fixings.GetValue()
Iter = self.Iterations.GetValue()
flag = 'c' if self.ChoiceBox.GetString(self.ChoiceBox.GetCurrentSelection()) == 'Call' else 'p'
        asian_option = AsianOption(stockPrice, Riskfree, Volatility, optionYears, Fixings, Iter, optionStrike, flag)
        result = asian_option.GetPrice()
        print('The Monte Carlo price of the Asian option is:', result[0])
        print('The associated standard deviation and standard error are:', result[1], result[2])
# print(stockPrice, optionStrike, optionYears, Riskfree, Volatility)
#
def OnClear(self, event):
self.StockPrice.Clear()
self.OptionPrice.Clear()
self.OptionYears.Clear()
self.Riskfree.Clear()
self.Volatility.Clear()
self.Fixings.Clear()
self.Iterations.Clear()
self.ChoiceBox.Clear()
# pass
def __del__( self ):
pass
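# Illustrative sketch (added, hypothetical): embedding this panel in a minimal frame.
# The frame title and size below are placeholders.
if __name__ == '__main__':
    app = wx.App(False)
    frame = wx.Frame(None, title='Asian Option Pricer', size=wx.Size(520, 340))
    PanelAsian(frame)
    frame.Show()
    app.MainLoop()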
|
python
|
import tqdm
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
from models.generator import ResNetGenerator
from train_script.utils import *
from utils.val import validation
from utils.quantize_model import *
def adjust_learning_rate(optimizer, epoch, base_lr):
lr = base_lr * (0.1 ** (epoch // 100))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def get_lr(optimizer):
for param_group in optimizer.param_groups:
return param_group['lr']
def train_GDFQ(fp_model, q_model, val_dataloder,
num_class=1000, batch_size = 32, img_size = 224,
warmup_epoch = 4, total_epoch = 400, iter_per_epoch = 200,
q_lr = 1e-6, g_lr = 1e-3,
beta=0.1, gamma=1, for_incep=False):
default_iter = 200
train_iter = default_iter
FloatTensor = torch.cuda.FloatTensor
LongTensor = torch.cuda.LongTensor
generator = ResNetGenerator(num_classes=num_class, dim_z=100, img_size=img_size)
fp_model.cuda()
# freeze fp model weight and bn
for param in fp_model.parameters():
param.requires_grad = False
fp_model = freeze_bn(fp_model)
generator.train()
q_model.train()
q_model = freeze_bn(q_model)
q_model = un_freeze_act(q_model)
# fp_model = nn.DataParallel(fp_model).cuda()
generator = nn.DataParallel(generator).cuda()
q_model = nn.DataParallel(q_model).cuda()
g_optimizer = torch.optim.Adam(generator.parameters(), lr=g_lr)
q_optimizer = torch.optim.SGD(q_model.parameters(), lr=q_lr, momentum=0.9, weight_decay = 1e-4)
hooks, hook_handles, bn_stats = [], [], []
# get number of BatchNorm layers in the model
layers = sum([
1 if isinstance(layer, nn.BatchNorm2d) else 0
for layer in fp_model.modules()
])
eps = 0.8
for n, m in fp_model.named_modules():
        # the last (linear) layer is not followed by batch norm, so ignore it
if isinstance(m, nn.Conv2d) and len(hook_handles) < layers:
hook = output_hook()
hooks.append(hook)
hook_handles.append(m.register_forward_hook(hook.hook))
if isinstance(m, nn.BatchNorm2d):
# get the statistics in the BatchNorm layers
bn_stats.append(
(m.running_mean.detach().clone().flatten().cuda(),
torch.sqrt(m.running_var + eps).detach().clone().flatten().cuda()))
assert len(hooks) == len(bn_stats)
criterion = nn.CrossEntropyLoss()
for epoch in range(total_epoch):
# both decay by 0.1 every 100 epoch
adjust_learning_rate(g_optimizer, epoch, g_lr)
adjust_learning_rate(q_optimizer, epoch, q_lr)
pbar = tqdm.trange(train_iter)
for _ in pbar:
input_mean = torch.zeros(1, 3).cuda()
input_std = torch.ones(1, 3).cuda()
fp_model.zero_grad()
g_optimizer.zero_grad()
train_gaussian_noise = np.random.normal(0, 1, (batch_size, 100))
train_gaussian_label = np.random.randint(0, num_class, batch_size)
input_data = Variable(FloatTensor(train_gaussian_noise)).cuda()
input_label = Variable(LongTensor(train_gaussian_label)).cuda()
fake_data = generator(input_data, input_label)
for hook in hooks:
hook.clear()
fake_label = fp_model(fake_data)
# BNS loss
mean_loss = 0
std_loss = 0
# compute the loss according to the BatchNorm statistics and the statistics of intermediate output
for cnt, (bn_stat, hook) in enumerate(zip(bn_stats, hooks)):
tmp_output = hook.outputs
bn_mean, bn_std = bn_stat[0], bn_stat[1]
# get batch's norm
tmp_mean = torch.mean(
tmp_output.view(
tmp_output.size(0),
tmp_output.size(1),
-1), dim=2)
tmp_std = torch.sqrt(
torch.var(
tmp_output.view(tmp_output.size(0),
tmp_output.size(1), -1),
dim=2
) + eps
)
mean_loss += own_loss(bn_mean, tmp_mean)
std_loss += own_loss(bn_std, tmp_std)
tmp_mean = torch.mean(fake_data.view(fake_data.size(0), 3,-1), dim=2)
tmp_std = torch.sqrt( torch.var(fake_data.view(fake_data.size(0), 3, -1), dim=2) + eps)
mean_loss += own_loss(input_mean, tmp_mean)
std_loss += own_loss(input_std, tmp_std)
bns_loss = mean_loss + std_loss
g_loss = criterion(fake_label, input_label)
g_loss = g_loss + beta * bns_loss
g_loss.backward()
g_optimizer.step()
# train q model
q_optimizer.zero_grad()
fp_model.zero_grad()
detach_fake_data = fake_data.detach()
# update activation
q_result = q_model(detach_fake_data)
if epoch >= warmup_epoch:
q_loss = criterion(q_result, input_label)
q_logit = F.log_softmax(q_model(detach_fake_data), dim = 1)
with torch.no_grad():
fp_logit = F.log_softmax(fp_model(detach_fake_data), dim = 1)
kd_loss = F.kl_div(q_logit, fp_logit, reduction='batchmean')
q_loss = q_loss + gamma * kd_loss
q_loss.backward()
q_optimizer.step()
pbar.set_description("epoch: {}, G_lr:{}, G_loss: {}, Q_lr:{}, Q_loss: {}".format(epoch+1,
get_lr(g_optimizer) , g_loss.item(),
get_lr(q_optimizer), q_loss.item()))
else:
pbar.set_description("epoch: {}, g_lr: {} ==> warm up ==> G_loss: {}".format(epoch+1, get_lr(g_optimizer), g_loss.item()))
if (epoch+1) < warmup_epoch:
pass
elif (epoch+1) == warmup_epoch:
print("Free activaiton after warm up")
q_model = freeze_act(q_model)
print("Eval after warmup")
q_top_1, q_top_5 = validation(val_dataloder, q_model, criterion)
else:
if (epoch+1) % 10 == 0:
q_top_1, q_top_5 = validation(val_dataloder, q_model, criterion)
torch.save(q_model.state_dict(), "q_model.pkl")
torch.save(generator.state_dict(), "generator.pkl")
for handle in hook_handles:
handle.remove()
return q_model
|
python
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://root@localhost/flask_db'
db = SQLAlchemy(app)
class UserDB(db.Model):
id = db.Column(db.Integer,primary_key=True)
username = db.Column(db.String(32),unique=True)
password = db.Column(db.String(32))
def __init__(self,username,password):
self.username = username
self.password = password
def add(self):
try:
db.session.add(self)
db.session.commit()
return self.id
        except Exception as e:
            db.session.rollback()
            return e
def isExisted(self):
temUser = UserDB.query.filter_by(username=self.username,password=self.password).first()
if temUser is None:
return 0
else:
return 1
class User(object):
def __init__(self,user_id,user_name):
self.user_id = user_id
self.user_name = user_name
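# Illustrative usage sketch (added, hypothetical): creates the table and registers a user.
# Assumes the MySQL database configured above is reachable; 'alice'/'secret' are placeholders.
if __name__ == '__main__':
    with app.app_context():
        db.create_all()
        new_id = UserDB('alice', 'secret').add()
        print('created user id:', new_id)
        print('exists:', UserDB('alice', 'secret').isExisted())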
|
python
|
from .DtnAbstractParser import DtnAbstractParser
from enum import Enum
from pydantic import PositiveInt, PositiveFloat
import sys
from typing import List, Optional, Set, Union
class RouterMode(str, Enum):
FAST = 'fast'
SLOW = 'slow'
class RouterAlgorithm(str, Enum):
CGR = 'cgr'
BFS = 'bfs'
class DtnLookupRouterParser(DtnAbstractParser):
""" Validator for YAML configuration parameters of DtnCGRouter """
# Excel file containing routes
routes: str
# Router mode
mode: RouterMode = RouterMode.FAST
    # If True, all routes will be recomputed even if a route
    # file is provided
recompute_routes: bool = False
# Excluded routes specified as a list
# Example: [['MOC', 'PSH', 'MCC'], ['MCC', 'MRO', 'MCC']]
excluded_routes: Optional[List[List[str]]] = None
# Maximum number of hops a valid route can have
max_relay_hops: PositiveInt = sys.maxsize
# Number of cores to use during the computation of the routes
num_cores: PositiveInt = 1
# Maximum number of neighbors to send a critical bundle.
# e.g. if a node has 10 neighbors and ``max_crit=2``, then only the
# two best neighbors will be used
max_crit: Optional[int] = None
# List of nodes that can be used as relays
relays: Union[Set[str], List[str], str] = set()
# Maximum speed of any node in the system in [miles/sec]
# Based on latest SABR specification
max_speed: PositiveFloat = 125
# Algorithm to use for route computation.
algorithm: RouterAlgorithm = RouterAlgorithm.BFS
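    # Illustrative sketch (added, hypothetical): a YAML fragment these fields would validate.
    # Only keys declared above are used; the enclosing section name and all values are placeholders.
    #
    #   router:
    #     routes: routes.xlsx
    #     mode: fast
    #     recompute_routes: false
    #     max_relay_hops: 3
    #     num_cores: 4
    #     relays: [PSH, MRO]
    #     algorithm: bfs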
|
python
|
import numpy as np
import tensorflow as tf
class InferenceGraph:
def __init__(self):
pass
def run_inference_for_single_input_frame(self, model, input_frame,log, log_path):
"""
Method Name: run_inference_for_single_input_frame
Description: This function make prediction on the given input frame and provides us the results
in a dictionary format
Output: output_dict
"""
log_file = open(log_path + 'run_inference_for_single_input_frame.txt', 'a+')
try:
input_tensor = tf.convert_to_tensor(input_frame)
# Initialize the model with a default set of data attributes that were used to build it
model_fn = model.signatures['serving_default']
# Make predictions for the input_frame from the model
output_dict = model_fn(input_tensor)
            # Pop num_detections out of the dictionary because of its 1D shape=(1,)
num_detections = int(output_dict.pop('num_detections'))
# Convert the output dictionary tensor values in numpy array
output_dict = {key: value[0, :num_detections].numpy() for key, value in output_dict.items()}
output_dict['num_detections'] = num_detections
output_dict['detection_classes'] = output_dict['detection_classes'].astype(np.int16)
log.log(log_file, 'Prediction from the input frame was successful')
log_file.close()
return output_dict
except Exception as e:
log.log(log_file, 'Error during prediction from the input frame')
log.log(log_file, str(e))
log_file.close()
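# Illustrative sketch (added): how the method above is typically driven. 'saved_model_dir',
# 'logger' and the 'logs/' directory are hypothetical placeholders, not part of this project.
def _demo_inference(saved_model_dir, logger):
    model = tf.saved_model.load(saved_model_dir)        # exported object-detection SavedModel
    frame = np.zeros((1, 300, 300, 3), dtype=np.uint8)  # one batched dummy frame (NHWC, uint8)
    return InferenceGraph().run_inference_for_single_input_frame(model, frame, logger, 'logs/')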
|
python
|
import selenium
import glob
from numpy import arange
from random import sample
from sys import exit
from time import sleep
from progress.spinner import Spinner
from progress.bar import ChargingBar
from threading import Thread
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
def worker():
finish = False
sp = Spinner('Loading ')
cont = 0
while(not finish):
sleep(1)
cont +=1
if(cont == 60):
finish = True
sp.next()
return
class YouMotivate:
def __init__(self):
        # UNCOMMENT FOR A TEMPORARY 60-SECOND LOADING SPINNER {
# print('Managing Firefox Info')
# t = Thread(target=worker)
# t.start()
# }
opts = Options()
# UNCOMMENT FOR adding firefox user info {
users = glob.glob(r"C:\Users\*")
print("PC USERS:")
users = [user.split("\\")[len(user.split("\\"))-1] for user in users]
print(users)
print("Choose one: ")
user = input()
if(not user in users):
print("That user does not exist")
exit()
binary = FirefoxBinary(r'C:\Program Files\Mozilla Firefox\firefox.exe')
profiles = glob.glob('C:\\Users\\'+str(user)+'\\AppData\\Roaming\\Mozilla\\Firefox\\Profiles\\*')
profiles = [profile.split("\\")[len(profile.split("\\"))-1] for profile in profiles]
print("choose profile (normally the one with default-release): ")
print(profiles)
profile = input()
if(not profile in profiles):
print("That profile does not exist")
exit()
fp = ('C:\\Users\\'+str(user)+'\\AppData\\Roaming\\Mozilla\\Firefox\\Profiles\\'+str(profile))
opts.profile = fp
# }
self.driver = webdriver.Firefox(options=opts,
executable_path='geckodriver')
        print('Firefox Info Loaded successfully')
print('Opening Youtube...')
self.driver.get("https://www.youtube.com/playlist?list=FLHcrPEhUkZW37RI7c5FQvtw")
sleep(4)
#get num of videos in the list
num = self.driver.find_element_by_xpath('/html/body/ytd-app/div/ytd-page-manager/ytd-browse/ytd-playlist-sidebar-renderer/div/ytd-playlist-sidebar-primary-info-renderer/div[1]/yt-formatted-string[1]')
num = int(num.text.split(' ')[0])
# print('NUM OF VIDEOS:\t' + str(num))
vids = sample(list(arange(1,num+1)), 3)
# print('CHOOSEN:\t' + str(vids))
#choose those videos and open it in new tabs
bar = ChargingBar(' Opening videos', max=len(vids))
for i in vids:
vid_ref = self.driver.find_element_by_xpath('/html/body/ytd-app/div/ytd-page-manager/ytd-browse/ytd-two-column-browse-results-renderer/div[1]/ytd-section-list-renderer/div[2]/ytd-item-section-renderer/div[3]/ytd-playlist-video-list-renderer/div[3]/ytd-playlist-video-renderer['+str(i)+']/div[2]/a')
ref = vid_ref.get_attribute("href")
# print(ref)
self.driver.execute_script("window.open('"+str(ref)+"', 'new_window_"+str(i)+"')")
# self.driver.execute_script('browser.tabs.create({url:"'+str(ref)+'"});')
bar.next()
bar.finish()
ym = YouMotivate()
exit()
|
python
|
from .page_article import *
|
python
|
SECRET_KEY = "it's-a-secret-to-everyone"
INSTALLED_APPS = [
'channels',
'channels_irc',
]
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
}
}
CHANNEL_LAYERS = {
"default": {
"BACKEND": "channels.layers.InMemoryChannelLayer",
},
}
|
python
|
# -*- coding: utf-8 -*-
import io
import os
import time
import fcntl
import socket
import struct
import picamera
import threading
from Led import *
from Servo import *
from Thread import *
from Buzzer import *
from Control import *
from ADS7830 import *
from Ultrasonic import *
from Command import COMMAND as cmd
class Server:
def __init__(self):
self.tcp_flag=False
self.led=Led()
self.servo=Servo()
self.adc=ADS7830()
self.buzzer=Buzzer()
self.control=Control()
self.sonic=Ultrasonic()
self.control.Thread_conditiona.start()
self.battery_voltage=[8.4,8.4,8.4,8.4,8.4]
def get_interface_ip(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(s.fileno(),
0x8915,
struct.pack('256s',b'wlan0'[:15])
)[20:24])
def turn_on_server(self):
        # IP address
        HOST=self.get_interface_ip()
        # Port 8001 is used for video transmission
self.server_socket = socket.socket()
self.server_socket.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEPORT,1)
self.server_socket.bind((HOST, 8001))
self.server_socket.listen(1)
        # Port 5001 is used for instruction sending and receiving
self.server_socket1 = socket.socket()
self.server_socket1.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEPORT,1)
self.server_socket1.bind((HOST, 5001))
self.server_socket1.listen(1)
print('Server address: '+HOST)
def turn_off_server(self):
try:
self.connection.close()
self.connection1.close()
except :
print ('\n'+"No client connection")
def reset_server(self):
self.turn_off_server()
self.turn_on_server()
self.video=threading.Thread(target=self.transmission_video)
self.instruction=threading.Thread(target=self.receive_instruction)
self.video.start()
self.instruction.start()
def send_data(self,connect,data):
try:
connect.send(data.encode('utf-8'))
#print("send",data)
except Exception as e:
print(e)
def transmission_video(self):
try:
self.connection,self.client_address = self.server_socket.accept()
self.connection=self.connection.makefile('wb')
except:
pass
self.server_socket.close()
try:
with picamera.PiCamera() as camera:
camera.resolution = (400,300) # pi camera resolution
camera.framerate = 15 # 15 frames/sec
camera.saturation = 80 # Set image video saturation
camera.brightness = 50 # Set the brightness of the image (50 indicates the state of white balance)
start = time.time()
stream = io.BytesIO()
# send jpeg format video stream
print ("Start transmit ... ")
for foo in camera.capture_continuous(stream, 'jpeg', use_video_port = True):
try:
self.connection.flush()
stream.seek(0)
b = stream.read()
lengthBin = struct.pack('L', len(b))
self.connection.write(lengthBin)
self.connection.write(b)
stream.seek(0)
stream.truncate()
except BaseException as e:
#print (e)
print ("End transmit ... " )
break
except BaseException as e:
#print(e)
print ("Camera unintall")
def measuring_voltage(self,connect):
try:
for i in range(5):
self.battery_voltage[i]=round(self.adc.power(0),2)
command=cmd.CMD_POWER+'#'+str(max(self.battery_voltage))+"\n"
self.send_data(connect,command)
            self.sendRelaxFlag()
self.battery_reminder()
except Exception as e:
print(e)
def battery_reminder(self):
if max(self.battery_voltage) < 6.4:
self.turn_off_server()
self.control.relax(True)
print("The batteries power are too low. Please recharge the batteries or replace batteries.")
print("Close the server")
os._exit(0)
    def sendRelaxFlag(self):
if self.control.move_flag!=2:
command=cmd.CMD_RELAX+"#"+str(self.control.move_flag)+"\n"
self.send_data(self.connection1,command)
self.control.move_flag= 2
def receive_instruction(self):
try:
self.connection1,self.client_address1 = self.server_socket1.accept()
print ("Client connection successful !")
except:
print ("Client connect failed")
self.server_socket1.close()
while True:
try:
allData=self.connection1.recv(1024).decode('utf-8')
#print(allData)
except:
if self.tcp_flag:
if max(self.battery_voltage) > 6.4:
self.reset_server()
break
else:
break
if allData=="" and self.tcp_flag:
self.reset_server()
break
else:
cmdArray=allData.split('\n')
#print(cmdArray)
if cmdArray[-1] !="":
                    cmdArray=cmdArray[:-1]
for oneCmd in cmdArray:
data=oneCmd.split("#")
if data==None or data[0]=='':
continue
elif cmd.CMD_BUZZER in data:
self.buzzer.run(data[1])
elif cmd.CMD_LED in data:
try:
stop_thread(thread_led)
except:
pass
thread_led=threading.Thread(target=self.led.light,args=(data,))
thread_led.start()
elif cmd.CMD_LED_MOD in data:
try:
stop_thread(thread_led)
except:
pass
thread_led=threading.Thread(target=self.led.light,args=(data,))
thread_led.start()
elif cmd.CMD_HEAD in data:
self.servo.setServoAngle(15,int(data[1]))
elif cmd.CMD_SONIC in data:
command=cmd.CMD_SONIC+'#'+str(self.sonic.getDistance())+"\n"
self.send_data(self.connection1,command)
elif cmd.CMD_POWER in data:
self.measuring_voltage(self.connection1)
elif cmd.CMD_WORKING_TIME in data:
if self.control.move_timeout!=0 and self.control.relax_flag==True:
if self.control.move_count >180:
command=cmd.CMD_WORKING_TIME+'#'+str(180)+'#'+str(round(self.control.move_count-180))+"\n"
else:
if self.control.move_count==0:
command=cmd.CMD_WORKING_TIME+'#'+str(round(self.control.move_count))+'#'+str(round((time.time()-self.control.move_timeout)+60))+"\n"
else:
command=cmd.CMD_WORKING_TIME+'#'+str(round(self.control.move_count))+'#'+str(round(time.time()-self.control.move_timeout))+"\n"
else:
command=cmd.CMD_WORKING_TIME+'#'+str(round(self.control.move_count))+'#'+str(0)+"\n"
self.send_data(self.connection1,command)
else:
self.control.order=data
self.control.timeout=time.time()
try:
stop_thread(thread_power)
except:
pass
try:
stop_thread(thread_led)
except:
pass
print("close_recv")
self.control.relax_flag=False
self.control.order[0]=cmd.CMD_RELAX
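# Illustrative startup sketch (added, hypothetical): typical way to drive this class,
# mirroring the thread layout used in reset_server() above.
#   server = Server()
#   server.turn_on_server()
#   threading.Thread(target=server.transmission_video).start()
#   threading.Thread(target=server.receive_instruction).start()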
if __name__ == '__main__':
pass
|
python
|
import os
data_path_root = "E:\Projects\projectHO\data"
stock_list_path = os.path.join(data_path_root, "stock_list.csv")
split_span = 365 * 5 # split dates to download pre-historical data
retry_count = 5 # downloader max retry count
log_path = "E:\Projects\projectHO\log"
|
python
|
# -*- coding: utf-8 -*-
# Copyright (c) 2014 Baidu.com, Inc. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the
# License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
"""
This module for test.
"""
import os
import sys
import unittest
import uuid
file_path = os.path.normpath(os.path.dirname(__file__))
sys.path.append(file_path + '/../../')
import baidubce
from baidubce.auth.bce_credentials import BceCredentials
from baidubce.bce_client_configuration import BceClientConfiguration
from baidubce.services.vpc import vpc_client
if sys.version < '3':
reload(sys)
sys.setdefaultencoding('utf-8')
# config parameters
vpc_id = 'vpc-51csm6rxs9mg'
def generate_client_token_by_uuid():
"""
The default method to generate the random string for client_token
if the optional parameter client_token is not specified by the user.
:return:
:rtype string
"""
return str(uuid.uuid4())
generate_client_token = generate_client_token_by_uuid
class TestVpcClient(unittest.TestCase):
"""
unit test
"""
def setUp(self):
"""
set up
"""
HOST = b'bcc.bj.baidubce.com'
AK = b''
SK = b''
config = BceClientConfiguration(credentials=BceCredentials(AK, SK), endpoint=HOST)
self.the_client = vpc_client.VpcClient(config)
def test_create_vpc(self):
"""
test case for create_vpc
"""
client_token = generate_client_token()
vpc_name = 'test_vpc_name' + client_token
vpc_cidr = '192.168.240.0/20'
        description = 'test_vpc_description' + client_token
self.assertEqual(
type(self.the_client.create_vpc(vpc_name,
vpc_cidr,
description,
client_token=client_token)),
baidubce.bce_response.BceResponse)
def test_list_vpcs(self):
"""
test case for list_vpcs
"""
print(self.the_client.list_vpcs())
def test_get_vpc(self):
"""
test case for get_vpc
"""
self.assertEqual(
type(self.the_client.get_vpc(vpc_id)),
baidubce.bce_response.BceResponse)
def test_delete_vpc(self):
"""
test case for delete_vpc
"""
self.assertEqual(
type(self.the_client.delete_vpc(vpc_id)),
baidubce.bce_response.BceResponse)
def test_update_vpc(self):
"""
test case for delete_vpc
"""
self.assertEqual(
type(self.the_client.update_vpc(vpc_id, 'test_update_name', 'test_update_description')),
baidubce.bce_response.BceResponse)
if __name__ == "__main__":
suite = unittest.TestSuite()
# suite.addTest(TestVpcClient("test_create_vpc"))
# suite.addTest(TestVpcClient("test_list_vpcs"))
# suite.addTest(TestVpcClient("test_get_vpc"))
# suite.addTest(TestVpcClient("test_delete_vpc"))
suite.addTest(TestVpcClient("test_update_vpc"))
runner = unittest.TextTestRunner()
runner.run(suite)
|
python
|
#!/usr/bin/python3 -OO
# Copyright 2007-2022 The SABnzbd-Team <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
tests.test_newsunpack - Tests of various functions in newspack
"""
import glob
import logging
import os.path
import shutil
from unittest.mock import call
from sabnzbd.filesystem import build_filelists
from tests.testhelper import *
import sabnzbd
import sabnzbd.newsunpack as newsunpack
from sabnzbd.constants import JOB_ADMIN
class TestNewsUnpackFunctions:
def test_is_sfv_file(self):
assert newsunpack.is_sfv_file("tests/data/good_sfv_unicode.sfv")
assert newsunpack.is_sfv_file("tests/data/one_line.sfv")
assert not newsunpack.is_sfv_file("tests/data/only_comments.sfv")
assert not newsunpack.is_sfv_file("tests/data/random.bin")
def test_is_sevenfile(self):
# False, because the command is not set
assert not newsunpack.SEVENZIP_COMMAND
assert not newsunpack.is_sevenfile("tests/data/test_7zip/testfile.7z")
# Set the command to get some real results
newsunpack.find_programs(".")
assert newsunpack.SEVENZIP_COMMAND
assert not newsunpack.is_sevenfile("tests/data/only_comments.sfv")
assert not newsunpack.is_sevenfile("tests/data/random.bin")
assert not newsunpack.is_sevenfile("tests/data/par2file/basic_16k.par2")
assert newsunpack.is_sevenfile("tests/data/test_7zip/testfile.7z")
def test_sevenzip(self):
testzip = newsunpack.SevenZip("tests/data/test_7zip/testfile.7z")
assert testzip.namelist() == ["testfile.bin"]
# Basic check that we can get data from the 7zip
assert len(testzip.open(testzip.namelist()[0]).read()) == 102400
# Test with a non-7zip file
with pytest.raises(TypeError):
newsunpack.SevenZip("tests/data/basic_rar5/testfile.rar")
@pytest.mark.usefixtures("clean_cache_dir")
class TestPar2Repair:
@staticmethod
def _run_par2repair(test_dir, caplog, break_file=None, remove_file=None):
# Create data-directory with copy of our test-files
temp_test_dir = os.path.join(SAB_CACHE_DIR, "par2repair_temp")
test_dir_admin = os.path.join(temp_test_dir, JOB_ADMIN)
os.mkdir(temp_test_dir)
assert os.path.exists(temp_test_dir)
os.mkdir(test_dir_admin)
assert os.path.exists(test_dir_admin)
# Copy all test files
for file in glob.glob(test_dir + "/*"):
shutil.copy(file, temp_test_dir)
# Break a specific file, if requested
if break_file:
with open(os.path.join(temp_test_dir, break_file), "wb") as bf:
bf.seek(10)
bf.write(b"booh")
# Remove a specific file, if requested
if remove_file:
os.unlink(os.path.join(temp_test_dir, remove_file))
# Make sure all programs are found
newsunpack.find_programs(".")
# Needed to store the POpen-reference
sabnzbd.PostProcessor = mock.Mock()
# Mock basic NZO structure
nzo = mock.Mock()
nzo.download_path = temp_test_dir
nzo.admin_path = test_dir_admin
nzo.fail_msg = ""
nzo.extrapars = {"test": []}
nzo.md5packs = {"test": None}
for file in glob.glob(test_dir + "/*.par2"):
# Simple NZF mock for the filename
parfile = mock.Mock()
parfile.filename = os.path.basename(file)
nzo.extrapars["test"].append(parfile)
# We want to collect all updates
nzo.set_action_line = mock.Mock()
nzo.set_unpack_info = mock.Mock()
nzo.renamed_file = mock.Mock()
# Run repair
with caplog.at_level(logging.DEBUG):
readd, result = newsunpack.par2_repair(nzo=nzo, setname="test")
# Verify we only have the rar-files left
dir_contents = os.listdir(temp_test_dir)
dir_contents.sort()
# Always cleanup, to be sure
shutil.rmtree(temp_test_dir)
assert not os.path.exists(temp_test_dir)
# Verify result
assert result
assert not readd
# Verify history updates
nzo.set_unpack_info.assert_has_calls(
[
call("Repair", "[test] Verified in 0 seconds, repair is required"),
call("Repair", "[test] Repaired in 0 seconds"),
]
)
# Check externally
return nzo, dir_contents
def test_basic(self, caplog):
# Run code
nzo, dir_contents = self._run_par2repair("tests/data/par2repair/basic", caplog)
assert dir_contents == [
"__ADMIN__",
"notarealfile.rar",
"par2test.part1.rar",
"par2test.part2.rar",
"par2test.part3.rar",
"par2test.part4.rar",
"par2test.part5.rar",
"par2test.part6.rar",
]
# Verify renames
nzo.renamed_file.assert_has_calls(
[
call(
{
"par2test.part3.rar": "foorbar.rar",
"par2test.part4.rar": "stillrarbutnotagoodname.txt",
"par2test.part1.rar": "par2test.part1.11.rar",
}
)
]
)
if sabnzbd.WIN32:
# Multipar output status updates
nzo.set_action_line.assert_has_calls(
[
call("Repair", "Quick Checking"),
call("Repair", "Starting Repair"),
call("Checking", "01/06"),
call("Checking", "02/06"),
call("Checking", "03/06"),
call("Checking", "04/06"),
call("Checking", "05/06"),
call("Checking", "06/06"),
# We only know total of missing files, so not how many will be found
call("Checking extra files", "01/05"),
call("Checking extra files", "02/05"),
call("Verifying", "01/03"),
call("Verifying", "02/03"),
call("Verifying", "03/03"),
call("Repairing", " 0%"),
call("Repairing", "100% "),
call("Verifying repair", "01/03"),
call("Verifying repair", "02/03"),
call("Verifying repair", "03/03"),
]
)
else:
# par2cmdline output status updates
# Verify output in chunks, as it outputs every single % during repair
nzo.set_action_line.assert_has_calls(
[
call("Repair", "Quick Checking"),
call("Repair", "Starting Repair"),
call("Verifying", "01/06"),
call("Verifying", "02/06"),
call("Verifying", "03/06"),
call("Verifying", "04/06"),
call("Verifying", "05/06"),
call("Verifying", "06/06"),
call("Checking extra files", "01"),
call("Checking extra files", "02"),
call("Checking extra files", "03"),
call("Repairing", " 0%"),
]
)
nzo.set_action_line.assert_has_calls(
[
call("Repairing", "100% "),
call("Verifying repair", "01/03"),
call("Verifying repair", "02/03"),
call("Verifying repair", "03/03"),
]
)
def test_filejoin(self, caplog):
# Run code
nzo, dir_contents = self._run_par2repair("tests/data/par2repair/filejoin", caplog)
# All joinable files will be removed
assert dir_contents == ["__ADMIN__", "par2test.bin"]
# There are no renames in case of filejoin by par2repair!
nzo.renamed_file.assert_not_called()
if sabnzbd.WIN32:
# Multipar output status updates, which is limited because Multipar doesn't say much..
nzo.set_action_line.assert_has_calls(
[
call("Repair", "Quick Checking"),
call("Repair", "Starting Repair"),
call("Checking", "01/01"),
call("Verifying", "01"),
call("Verifying", "02"),
call("Verifying", "03"),
call("Verifying", "04"),
call("Verifying", "05"),
call("Verifying", "06"),
call("Verifying", "07"),
call("Verifying", "08"),
call("Verifying", "09"),
call("Verifying", "10"),
call("Verifying", "11"),
call("Joining", "11"),
call("Verifying repair", "01/01"),
]
)
else:
# par2cmdline output status updates
# Verify output in chunks, as it outputs every single % during repair
nzo.set_action_line.assert_has_calls(
[
call("Repair", "Quick Checking"),
call("Repair", "Starting Repair"),
call("Verifying", "01/01"),
call("Checking extra files", "01"),
call("Checking extra files", "02"),
call("Checking extra files", "03"),
call("Checking extra files", "04"),
call("Checking extra files", "05"),
call("Checking extra files", "06"),
call("Checking extra files", "07"),
call("Checking extra files", "08"),
call("Checking extra files", "09"),
call("Checking extra files", "10"),
call("Checking extra files", "11"),
call("Repairing", " 0%"),
]
)
nzo.set_action_line.assert_has_calls(
[
call("Repairing", "100% "),
call("Verifying repair", "01/01"),
]
)
def test_broken_filejoin(self, caplog):
# Run code
nzo, dir_contents = self._run_par2repair(
"tests/data/par2repair/filejoin", caplog, break_file="par2test.bin.005", remove_file="par2test.bin.010"
)
# There are no renames in case of filejoin by par2repair!
nzo.renamed_file.assert_not_called()
# All joinable files should be removed
assert dir_contents == ["__ADMIN__", "par2test.bin"]
if sabnzbd.WIN32:
# Multipar output status updates, which is limited because Multipar doesn't say much..
nzo.set_action_line.assert_has_calls(
[
call("Repair", "Quick Checking"),
call("Repair", "Starting Repair"),
call("Checking", "01/01"),
call("Verifying", "01"),
call("Verifying", "02"),
call("Verifying", "03"),
call("Verifying", "04"),
call("Verifying", "05"),
call("Verifying", "06"),
call("Verifying", "07"),
call("Verifying", "08"),
call("Verifying", "09"),
call("Repairing", " 0%"),
call("Repairing", "100% "),
call("Verifying repair", "01/01"),
]
)
else:
# Verify output in chunks, as it outputs every single % during repair
nzo.set_action_line.assert_has_calls(
[
call("Repair", "Quick Checking"),
call("Repair", "Starting Repair"),
call("Verifying", "01/01"),
call("Checking extra files", "01"),
call("Checking extra files", "02"),
call("Checking extra files", "03"),
call("Checking extra files", "04"),
call("Checking extra files", "05"),
call("Checking extra files", "06"),
call("Checking extra files", "07"),
call("Checking extra files", "08"),
call("Checking extra files", "09"),
call("Repairing", " 0%"),
]
)
nzo.set_action_line.assert_has_calls(
[
call("Repairing", "100% "),
call("Verifying repair", "01/01"),
]
)
|
python
|
'''
generic functions
'''
# Python has no def-overloading; functools.singledispatch provides generic functions instead.
from functools import singledispatch
@singledispatch
def myfunc(x):
    raise TypeError('unsupported argument type: {}'.format(type(x).__name__))
@myfunc.register
def _(x: int):
    print(x * 100)
@myfunc.register
def _(x: str):
    print(x + 'world')
def main():
    myfunc(10)
    myfunc('hello')
python
|
# Third-Party Imports
from django.core.exceptions import ValidationError
from django.db.models import ProtectedError
# App Imports
from core.tests import CoreBaseTestCase
from ..models import AssetCategory
class AssetCategoryModelTest(CoreBaseTestCase):
""" Tests for the Asset Category Model """
def test_can_save_a_category(self):
AssetCategory.objects.create(name="Electronics")
new_category = AssetCategory.objects.get(name="Electronics")
new_category_count = AssetCategory.objects.count()
self.assertEqual(new_category_count, 2)
self.assertIn(new_category.name, "Electronics")
def test_cannot_add_existing_category_name(self):
self.assertEqual(AssetCategory.objects.count(), 1)
cat_name = AssetCategory.objects.first().name
with self.assertRaises(ValidationError):
AssetCategory.objects.create(name=cat_name)
self.assertEqual(AssetCategory.objects.count(), 1)
def test_can_edit_a_category(self):
self.category.name = "Accessory"
self.category.save()
self.assertIn("Accessory", self.category.name)
def test_asset_category_model_string_representation(self):
self.assertEqual(str(self.category), self.category.name)
def test_cannot_delete_category_with_existing_subcategories(self):
count_before_delete = AssetCategory.objects.count()
with self.assertRaises(ProtectedError):
self.category.delete()
count_after_delete = AssetCategory.objects.count()
self.assertEqual(count_before_delete, count_after_delete)
def test_can_delete_category_without_existing_subcategories(self):
new_category_without_subcategories = AssetCategory.objects.create(
name="New Category"
)
count_before_delete = AssetCategory.objects.count()
new_category_without_subcategories.delete()
count_after_delete = AssetCategory.objects.count()
self.assertEqual(count_after_delete, count_before_delete - 1)
|
python
|
from unittest.mock import Mock, patch, call
import pytest
from sqlalchemy import column
from sqlalchemy import text
from sqlalchemy import func
from sqlalchemy import not_
from sqlalchemy_filters import operators
from sqlalchemy_filters.exceptions import InvalidParamError
from tests.utils import compares_expressions
def compile_sql(expression):
return expression.compile(compile_kwargs={"literal_binds": True})
def test_register_operator():
op = Mock()
@operators.register_operator(sql_operator=op)
class F:
pass
assert F().operator is op
@pytest.mark.parametrize("version", ["1", "1.1", "1.2", "1.3", "1.3.22"])
def test_sa_1_4_compatible_for_older_versions(version):
with patch.object(operators, "SQLALCHEMY_VERSION", version):
f = Mock()
assert operators.sa_1_4_compatible(f) is f
@pytest.mark.parametrize(
"sql_exp, params",
[
[text(""), column("a").is_(None)],
[text("1 = 1"), column("a").is_(None)],
],
)
def test_sa_1_4_compatible(sql_exp, params):
with patch.object(operators, "SQLALCHEMY_VERSION", "1.4"):
to_sql = Mock()
self = Mock(get_sql_expression=Mock(return_value=sql_exp), params=[params])
to_sql_v2 = operators.sa_1_4_compatible(to_sql)
to_sql_v2(self)
assert not to_sql.called
assert self.operator.called_once
param1, param2 = self.operator.call_args[0]
assert compares_expressions(param1, params)
assert compares_expressions(param2, sql_exp)
@pytest.mark.parametrize(
"sql_exp, params",
[
[column("a").is_(None), text("")],
[column("a").is_(None), text("1 = 1")],
],
)
def test_sa_1_4_compatible_should_not_alter_params(sql_exp, params):
with patch.object(operators, "SQLALCHEMY_VERSION", "1.4"):
to_sql = Mock()
self = Mock(get_sql_expression=Mock(return_value=sql_exp), params=[params])
to_sql_v2 = operators.sa_1_4_compatible(to_sql)
to_sql_v2(self)
assert to_sql.called
assert not self.operator.called
assert to_sql.call_args == call(self)
def test_operator_init():
op = operators.BaseOperator(sql_expression="A", params=["B"])
assert op.sql_expression == "A"
assert op.params == ["B"]
assert str(op.get_sql_expression()) == '"A"'
def test_equals_operator():
_column = column("my_column")
op = operators.EqualsOperator(sql_expression=_column, params=["A"])
assert op.to_sql().compare(_column == "A")
def test_is_operator():
_column = column("my_column")
op = operators.IsOperator(sql_expression=_column, params=["A"])
assert op.to_sql().compare(_column.is_("A"))
def test_is_not_operator():
    _column = column("my_column")
    # the original used IsOperator here; IsNotOperator is assumed to be the intended class
    op = operators.IsNotOperator(sql_expression=_column, params=["A"])
    assert op.to_sql().compare(not_(_column.is_("A")))
def test_gte_operator():
_column = column("my_column")
op = operators.GTEOperator(sql_expression=_column, params=["A"])
assert op.to_sql().compare(_column >= "A")
def test_lte_operator():
_column = column("my_column")
op = operators.LTEOperator(sql_expression=_column, params=["A"])
assert op.to_sql().compare(_column <= "A")
def test_starts_with_operator():
_column = column("my_column")
op = operators.StartsWithOperator(sql_expression=_column, params=["A"])
assert op.to_sql().compare(_column.startswith("A"))
def test_istarts_with_operator():
_column = column("my_column")
op = operators.IStartsWithOperator(sql_expression=_column, params=["A"])
expected = compile_sql(func.lower(_column).startswith(func.lower("A")))
assert str(compile_sql(op.to_sql())) == str(expected)
def test_ends_with_operator():
_column = column("my_column")
op = operators.EndsWithOperator(sql_expression=_column, params=["A"])
assert op.to_sql().compare(_column.endswith("A"))
def test_iends_with_operator():
_column = column("my_column")
op = operators.IEndsWithOperator(sql_expression=_column, params=["A"])
expected = compile_sql(func.lower(_column).endswith(func.lower("A")))
assert str(compile_sql(op.to_sql())) == str(expected)
def test_contains_operator():
_column = column("my_column")
op = operators.ContainsOperator(sql_expression=_column, params=["A"])
assert op.to_sql().compare(_column.contains("A"))
@pytest.mark.parametrize("value", [[], ["A"], ["A", "B"]])
def test_in_operator(value):
_column = column("my_column")
op = operators.INOperator(sql_expression=_column, params=value)
assert op.to_sql().compare(_column.in_(value))
def test_range_operator():
_column = column("my_column")
op = operators.RangeOperator(sql_expression=_column, params=[0, 10])
assert op.to_sql().compare(_column.between(0, 10))
def test_in_operator_invalid_params():
with pytest.raises(InvalidParamError) as exc:
operators.BaseOperator(sql_expression="A", params="A")
assert str(exc.value) == "BaseOperator.params expected to be a list, got str."
def test_range_operator_invalid_params():
with pytest.raises(InvalidParamError) as exc:
operators.RangeOperator(sql_expression="A", params=[])
assert str(exc.value) == "RangeOperator.params should have exactly 2 values, got 0."
|
python
|
import auditor
from event_manager.events import tensorboard
auditor.subscribe(tensorboard.TensorboardStartedEvent)
auditor.subscribe(tensorboard.TensorboardStartedTriggeredEvent)
auditor.subscribe(tensorboard.TensorboardSoppedEvent)
auditor.subscribe(tensorboard.TensorboardSoppedTriggeredEvent)
auditor.subscribe(tensorboard.TensorboardViewedEvent)
auditor.subscribe(tensorboard.TensorboardBookmarkedEvent)
auditor.subscribe(tensorboard.TensorboardUnBookmarkedEvent)
auditor.subscribe(tensorboard.TensorboardNewStatusEvent)
auditor.subscribe(tensorboard.TensorboardFailedEvent)
auditor.subscribe(tensorboard.TensorboardSucceededEvent)
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 13 09:50:51 2017
@author: mkonrad
"""
import math
import pytest
from hypothesis import given
import hypothesis.strategies as st
import numpy as np
from pdftabextract.geom import (pt, ptdist, vecangle, vecrotate, overlap, lineintersect,
rect, rectcenter, rectarea, rectintersect,
normalize_angle, normalize_angle_halfcircle,
project_polarcoord_lines)
FMIN = np.finfo(np.float32).min
FMAX = np.finfo(np.float32).max
def test_pt():
x = 0
y = 1
pt0 = pt(x, y)
assert type(pt0) is np.ndarray
    assert pt0.dtype == float
assert pt0[0] == x
assert pt0[1] == y
    pt1 = pt(x, y, int)
    assert pt1.dtype == int
assert pt1[0] == x
assert pt1[1] == y
def test_ptdist():
p1 = pt(0, 0)
p2 = pt(1, 0)
p3 = pt(1, 1)
assert ptdist(p1, p1) == 0
assert ptdist(p1, p2) == 1
assert ptdist(p2, p1) == ptdist(p1, p2)
assert ptdist(p1, p3) == math.sqrt(2)
def test_vecangle():
v1 = pt(1, 0)
v2 = pt(2, 0)
v3 = pt(1, 1)
v4 = pt(0, 1)
v5 = pt(0, -1)
assert np.isnan(vecangle(pt(0, 0), v1)) # pt(0, 0) is vec of no length
assert vecangle(v1, v2) == 0
assert round(vecangle(v1, v3), 4) == round(math.radians(45), 4)
assert vecangle(v2, v4) == vecangle(v1, v4) == math.radians(90)
assert vecangle(v2, v5) == math.radians(90) # always the smaller angle
@given(st.floats(min_value=FMIN, max_value=FMAX),
st.floats(min_value=FMIN, max_value=FMAX),
st.floats(min_value=FMIN, max_value=FMAX),
st.floats(min_value=FMIN, max_value=FMAX))
def test_vecangle_2(x1, y1, x2, y2):
v0 = pt(0, 0)
v1 = pt(x1, y1)
v2 = pt(x2, y2)
try:
alpha = vecangle(v1, v2)
except ValueError: # math domain error in some edge cases?
return
if np.allclose(v1, v0) or np.allclose(v2, v0):
assert np.isnan(alpha)
else:
assert 0 <= alpha <= np.pi
def test_vecrotate():
assert np.array_equal(vecrotate(pt(0, 0), 0.123), pt(0, 0))
assert np.allclose(vecrotate(pt(1, 0), math.radians(90)), pt(0, 1))
assert np.allclose(vecrotate(pt(1, 0), math.radians(90), about=pt(1, 1)), pt(2, 1))
def test_overlap():
assert overlap(0, 1, 0, 1) is True
assert overlap(0, 0, 1, 1) is False
assert overlap(0, 10, 5, 15) is True
assert overlap(-10, 10, -20, -10) is True
assert overlap(-9, 10, -20, -10) is False
def test_lineintersect():
# first with check_in_segm = True
X = lineintersect(pt(0, 0), pt(0, 0), pt(0, 0), pt(0, 0)) # coincident I
assert sum(np.isnan(X)) == len(X)
X = lineintersect(pt(0, 0), pt(0, 1), pt(0, 0), pt(0, 1)) # coincident II
assert sum(np.isnan(X)) == len(X)
assert lineintersect(pt(0, 0), pt(0, 1), pt(1, 0), pt(1, 1)) is None # parallel, non coincident
    assert lineintersect(pt(0, 0), pt(0, 1), pt(1, 1), pt(2, 2)) is None  # non-parallel, no intersection
    assert lineintersect(pt(0, 0), pt(2, 2), pt(0, 5), pt(5, 0)) is None  # non-parallel, no intersection II
assert np.array_equal(lineintersect(pt(0, 0), pt(0, 1), pt(0, 1), pt(2, 2)), pt(0, 1)) # intersection - touch
assert np.array_equal(lineintersect(pt(0, 0), pt(2, 2), pt(0, 2), pt(2, 0)), pt(1, 1)) # intersection
# now with check_in_segm = False
X = lineintersect(pt(0, 0), pt(0, 0), pt(0, 0), pt(0, 0), False) # coincident I
assert sum(np.isnan(X)) == len(X)
X = lineintersect(pt(0, 0), pt(0, 1), pt(0, 0), pt(0, 1), False) # coincident II
assert sum(np.isnan(X)) == len(X)
X = lineintersect(pt(0, 0), pt(1, 1), pt(2, 2), pt(3, 3), False) # coincident III
assert sum(np.isnan(X)) == len(X)
assert np.array_equal(lineintersect(pt(0, 0), pt(0, 1), pt(1, 1), pt(2, 2), False), pt(0, 0)) # intersection (out of segments)
assert np.array_equal(lineintersect(pt(0, 0), pt(0, 1), pt(0, 1), pt(2, 2), False), pt(0, 1)) # intersection - touch
assert np.array_equal(lineintersect(pt(0, 0), pt(2, 2), pt(0, 2), pt(2, 0), False), pt(1, 1)) # intersection
def test_rect():
with pytest.raises(ValueError):
rect(pt(0, 0), pt(1, 1, dtype=np.int)) # dtypes do not match
with pytest.raises(ValueError):
rect(pt(0, 0), pt(0, 0)) # doesn't form rect
with pytest.raises(ValueError):
rect(pt(1, 1), pt(0, 0)) # doesn't form rect
with pytest.raises(ValueError):
rect(pt(0, 0), pt(1, 0)) # doesn't form rect
a = pt(0, 0)
b = pt(1, 1)
r = rect(a, b)
assert r.dtype == a.dtype == b.dtype
assert np.array_equal(r[0], a)
assert np.array_equal(r[1], b)
a = pt(-3, -1)
b = pt(8, 1.2)
r = rect(a, b)
assert r.dtype == a.dtype == b.dtype
assert np.array_equal(r[0], a)
assert np.array_equal(r[1], b)
def test_rectcenter():
a = pt(0, 0)
b = pt(1, 1)
r = rect(a, b)
center = rectcenter(r)
assert type(center) is np.ndarray
assert np.array_equal(center, pt(0.5, 0.5))
a = pt(-3, -1)
b = pt(2, 5)
r = rect(a, b)
assert np.array_equal(rectcenter(r), pt(-0.5, 2))
def test_rectarea():
a = pt(0, 0)
b = pt(1, 1)
r = rect(a, b)
assert rectarea(r) == 1
a = pt(-3, -1)
b = pt(2, 5)
r = rect(a, b)
assert rectarea(r) == 30
def test_rectintersect():
a = rect(pt(0, 0), pt(1, 1))
b = rect(pt(-3, -1), pt(2, 5))
assert rectintersect(a, a) == rectarea(a)
assert rectintersect(b, b) == rectarea(b)
assert rectintersect(a, a, norm_intersect_area='a') == 1
assert rectintersect(a, a, norm_intersect_area='b') == 1
with pytest.raises(ValueError):
rectintersect(a, a, norm_intersect_area='c')
# complete intersect
assert rectintersect(a, b) == rectarea(a)
assert rectintersect(b, a) == rectarea(a)
assert rectintersect(a, b, norm_intersect_area='a') == 1
assert rectintersect(b, a, norm_intersect_area='b') == 1
assert rectintersect(b, a, norm_intersect_area='a') < 1
assert rectintersect(a, b, norm_intersect_area='b') < 1
# partial intersect
a = rect(pt(0, 0), pt(1, 1))
b = rect(pt(0.5, 0.5), pt(1.5, 1.5))
assert rectintersect(a, b) == 0.25
assert rectintersect(a, b, norm_intersect_area='a') == 0.25
assert rectintersect(a, b, norm_intersect_area='b') == 0.25
b = rect(pt(0.75, 0.5), pt(1.5, 1.5))
assert rectintersect(a, b) == 0.125
# touch
a = rect(pt(0, 0), pt(1, 1))
b = rect(pt(1, 1), pt(1.5, 1.5))
assert rectintersect(a, b) == 0
# no intersection
a = rect(pt(0, 0), pt(1, 1))
b = rect(pt(1.1, 1.1), pt(1.5, 1.5))
assert rectintersect(a, b) is None
def test_normalize_angle():
for i in range(-10, 10):
theta = i * np.pi
norm = normalize_angle(theta)
assert 0 <= norm < 2 * np.pi
assert norm / np.pi == i % 2
def test_normalize_angle_halfcircle():
for i in range(-10, 10):
theta = 0.5 * i * np.pi
norm = normalize_angle_halfcircle(theta)
assert 0 <= norm < np.pi
assert norm / np.pi * 2 == i % 2
@given(
st.lists(st.lists(st.floats(allow_nan=False, allow_infinity=False), min_size=2, max_size=2)),
st.integers(),
st.integers()
)
def test_project_polarcoord_lines(hough_lines, img_w, img_h):
if img_w <= 0 or img_h <= 0:
with pytest.raises(ValueError):
project_polarcoord_lines(hough_lines, img_w, img_h)
return
else:
res = project_polarcoord_lines(hough_lines, img_w, img_h)
assert type(res) is list
assert len(res) == len(hough_lines)
for pts in res:
assert len(pts) == 2
assert type(pts[0]) == type(pts[1]) == np.ndarray
assert len(pts[0]) == len(pts[1]) == 2
|
python
|
"""
Open stuff in Chromium
"""
from albertv0 import Item, ProcAction, ClipAction
import json
from shutil import which
__iid__ = "PythonInterface/v0.1"
__prettyname__ = "Web Browser"
__version__ = "1.0"
__trigger__ = "web "
__author__ = "Michael Farber Brodsky"
def handleQuery(query):
if query.isTriggered:
stripped = query.string.strip()
items = []
url = stripped
        if not (url.startswith("http://") or url.startswith("https://")):
            url = "https://" + url
normal_item = Item(
id="website-"+stripped,
text=stripped,
subtext="open in chromium",
completion=stripped,
actions=[
ProcAction(text="Open this", commandline=["/usr/bin/chromium", "--app="+url])
]
)
items.append(normal_item)
return items
|
python
|
from urllib.request import urlopen
SIMPLE_URL = "http://www.dinopass.com/password/simple"
COMPLEX_URL = "http://www.dinopass.com/password/strong"
def GetSimplePassword():
response = urlopen(SIMPLE_URL)
bytes = response.readline()
return bytes.decode()
def GetComplexPassword():
response = urlopen(COMPLEX_URL)
bytes = response.readline()
return bytes.decode()
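# Usage sketch (not part of the original module): fetch one password of each kind.
# Requires network access to dinopass.com.
if __name__ == "__main__":
    print("simple: ", GetSimplePassword())
    print("complex:", GetComplexPassword())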
|
python
|
# Copyright (C) 2020 FireEye, Inc. All Rights Reserved.
import io
import os
from urllib.parse import urlparse
from io import BytesIO
from speakeasy.errors import NetworkEmuError
def is_empty(bio):
if len(bio.getbuffer()) == bio.tell():
return True
return False
def normalize_response_path(path):
def _get_speakeasy_root():
return os.path.join(os.path.dirname(__file__), os.pardir)
root_var = "$ROOT$"
if root_var in path:
root = _get_speakeasy_root()
return path.replace(root_var, root)
return path
class Socket(object):
"""
Represents a Windows network socket
"""
def __init__(self, fd, family, stype, protocol, flags):
self.fd = fd
self.family = family
self.type = stype
self.protocol = protocol
self.flags = flags
self.connected_host = ""
self.connected_port = 0
self.curr_packet = BytesIO(b"")
self.packet_queue = []
def get_fd(self):
return self.fd
def get_type(self):
return self.type
def set_connection_info(self, host, port):
self.connected_host = host
self.connected_port = port
def get_connection_info(self):
return (self.connected_host, self.connected_port)
def fill_recv_queue(self, responses):
for resp in responses:
mode = resp.get("mode", "")
if mode.lower() == "default":
default_resp_path = resp.get("path")
if default_resp_path:
default_resp_path = normalize_response_path(default_resp_path)
with open(default_resp_path, "rb") as f:
self.curr_packet = BytesIO(f.read())
def get_recv_data(self, size, peek=False):
data = self.curr_packet.read(size)
if not peek:
return data
elif peek:
self.curr_packet.seek(-size, os.SEEK_CUR)
return data
class WSKSocket(Socket):
"""
Represents a WSK socket used in kernel mode applications
"""
def __init__(self, fd, family, stype, protocol, flags):
        super(WSKSocket, self).__init__(fd, family, stype, protocol, flags)
class WininetComponent(object):
"""
Base class used for WinInet connections
"""
curr_handle = 0x20
config = None
def __init__(self):
super(WininetComponent, self).__init__()
self.handle = self.new_handle()
def new_handle(self):
tmp = WininetComponent.curr_handle
WininetComponent.curr_handle += 4
return tmp
def get_handle(self):
return self.handle
class WininetRequest(WininetComponent):
"""
WinInet request object
"""
def __init__(self, session, verb, objname, ver, ref, accepts, flags, ctx):
super(WininetRequest, self).__init__()
        # The WinInet APIs default to an HTTP "GET" if no verb is specified
if not verb:
self.verb = "get"
else:
self.verb = verb.lower()
self.objname = objname
if not self.objname:
self.objname = ""
self.objname = urlparse(self.objname)
self.session = session
if not ver:
ver = "HTTP/1.1"
self.ver = ver
self.referrer = ref
self.accept_types = accepts
self.flags = flags
self.ctx = ctx
self.response = None
def get_session(self):
return self.session
def get_server(self):
return self.get_session().server
def get_port(self):
return self.get_session().port
def get_instance(self):
sess = self.get_session()
return sess.get_instance()
def is_secure(self):
if "INTERNET_FLAG_SECURE" in self.flags:
return True
return False
def format_http_request(self, headers=None):
request_string = ""
if headers:
request_string += headers
inst = self.get_instance()
sess = self.get_session()
host = sess.server
request_string += "Host: %s\n" % (host)
ua = inst.get_user_agent()
if ua:
request_string += "User-Agent: %s\n" % (ua)
if "INTERNET_FLAG_KEEP_CONNECTION" in self.flags:
request_string += "Connection: Keep-Alive\n"
else:
request_string += "Connection: Close\n"
if "INTERNET_FLAG_DONT_CACHE" in self.flags:
request_string += "Cache-Control: no-cache\n"
return request_string
def get_response_size(self):
resp = self.get_response()
off = resp.tell()
size = len(resp.read())
resp.seek(off, io.SEEK_SET)
return size
def get_response(self):
"""
Check the configuration file so see if there is a
handler for the current WinInet request
"""
cfg = WininetComponent.config
if self.response:
return self.response
http = cfg.get("http")
if not http:
raise NetworkEmuError("No HTTP configuration supplied")
resps = http.get("responses")
if not resps:
raise NetworkEmuError("No HTTP responses supplied")
self.response = None
for res in resps:
verb = res.get("verb", "")
if verb.lower() == self.verb:
resp_files = res.get("files", [])
if resp_files:
for file in resp_files:
mode = file.get("mode", "")
if mode.lower() == "by_ext":
ext = file.get("ext", "")
fn, obj_ext = os.path.splitext(self.objname.path)
if ext.lower().strip(".") == obj_ext.lower().strip("."):
path = file.get("path")
path = normalize_response_path(path)
with open(path, "rb") as f:
self.response = BytesIO(f.read())
elif mode.lower() == "default":
default_resp_path = file.get("path")
default_resp_path = normalize_response_path(
default_resp_path
)
if not self.response and default_resp_path:
default_resp_path = normalize_response_path(default_resp_path)
with open(default_resp_path, "rb") as f:
self.response = BytesIO(f.read())
return self.response
def get_object_path(self):
return self.objname
class WininetSession(WininetComponent):
def __init__(self, instance, server, port, user, password, service, flags, ctx):
super(WininetSession, self).__init__()
self.server = server
self.port = port
self.user = user
self.password = password
self.service = service
self.flags = flags
self.ctx = ctx
self.requests = {}
self.instance = instance
def get_instance(self):
return self.instance
def get_flags(self):
return self.flags
def new_request(self, verb, objname, ver, ref, accepts, flags, ctx):
req = WininetRequest(self, verb, objname, ver, ref, accepts, flags, ctx)
hdl = req.get_handle()
self.requests.update({hdl: req})
return req
class WininetInstance(WininetComponent):
def __init__(self, user_agent, access, proxy, bypass, flags):
super(WininetInstance, self).__init__()
self.user_agent = user_agent
self.access = access
self.proxy = proxy
self.bypass = bypass
self.flags = flags
self.sessions = {}
    def get_session(self, sess_handle):
        return self.sessions.get(sess_handle)
def add_session(self, handle, session):
self.sessions.update({handle: session})
def new_session(self, server, port, user, password, service, flags, ctx):
sess = WininetSession(self, server, port, user, password, service, flags, ctx)
hdl = sess.get_handle()
self.sessions.update({hdl: sess})
return sess
def get_user_agent(self):
return self.user_agent
class NetworkManager(object):
"""
Class that manages network connections during emulation
"""
def __init__(self, config):
super(NetworkManager, self).__init__()
self.sockets = {}
self.wininets = {}
self.curr_fd = 4
self.curr_handle = 0x20
self.config = config
self.dns = {}
WininetComponent.config = config
self.dns = self.config.get("dns")
def new_socket(self, family, stype, protocol, flags):
fd = self.curr_fd
sock = Socket(fd, family, stype, protocol, flags)
self.curr_fd += 4
if self.config:
winsock = self.config.get("winsock")
if winsock:
responses = winsock.get("responses")
if responses:
sock.fill_recv_queue(responses)
self.sockets.update({fd: sock})
return sock
def name_lookup(self, domain):
if not self.dns:
return None
names = self.dns.get("names")
# Do we have an IP for this name?
if domain.lower() not in names.keys():
# use the default IP (if any)
return names.get("default")
return names.get(domain)
def get_dns_txt(self, domain):
"""
Return a configured DNS TXT record (if any)
"""
def _read_txt_data(txt):
path = txt.get("path")
if path:
path = normalize_response_path(path)
with open(path, "rb") as f:
return f.read()
if not self.dns:
return None
txts = self.dns.get("txt", [])
txt = [t for t in txts if t.get("name", "") == domain]
if txt:
return _read_txt_data(txt[0])
txt = [t for t in txts if t.get("name", "") == "default"]
if txt:
return _read_txt_data(txt[0])
def ip_lookup(self, ip):
for item in self.dns:
if item["response"] == ip:
return item["query"]
return None
def new_wininet_inst(self, user_agent, access, proxy, bypass, flags):
wini = WininetInstance(user_agent, access, proxy, bypass, flags)
self.wininets.update({wini.get_handle(): wini})
return wini
def get_wininet_object(self, handle):
for hinst, inst in self.wininets.items():
if hinst == handle:
return inst
for hsess, sess in inst.sessions.items():
if hsess == handle:
return sess
for hreq, req in sess.requests.items():
if hreq == handle:
return req
def close_wininet_object(self, handle):
if self.wininets.get(handle):
self.wininets.pop(handle)
def get_socket(self, fd):
return self.sockets.get(fd)
def close_socket(self, fd):
self.sockets.pop(fd)
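# Example configuration sketch (assumption): the nested layout below is inferred from the
# accessors above (NetworkManager.name_lookup / get_dns_txt / new_socket and
# WininetRequest.get_response); the concrete paths and addresses are placeholders,
# not values shipped with speakeasy.
_EXAMPLE_NETWORK_CONFIG = {
    "dns": {
        "names": {"default": "10.0.0.1", "badsite.example": "10.0.0.2"},
        "txt": [{"name": "default", "path": "$ROOT$/network/dns_default.txt"}],
    },
    "winsock": {
        "responses": [{"mode": "default", "path": "$ROOT$/network/winsock_default.bin"}],
    },
    "http": {
        "responses": [
            {
                "verb": "GET",
                "files": [
                    {"mode": "by_ext", "ext": ".exe", "path": "$ROOT$/network/resp.exe"},
                    {"mode": "default", "path": "$ROOT$/network/resp_default.bin"},
                ],
            }
        ],
    },
}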
|
python
|
from os.path import join, dirname, realpath
from re import match
def parse_input():
planties = set()
with open(join(dirname(realpath(__file__)), "input.txt")) as f:
# Parse initial config
m = match("initial state: (?P<init>[#.]+)", f.readline())
for i, c in enumerate(m.group("init")):
if c == '#':
planties.add(i)
# Empty line
f.readline()
# Rest of the lines are rules
rule_dict = {}
for rule in f:
m = match("(?P<in>[#.]{5}) => (?P<out>[#.])", rule)
rule_in = [True if c == '#' else False for c in m.group("in")]
rule_out = True if m.group("out") == '#' else False
rule_dict[tuple(rule_in)] = rule_out
return planties, rule_dict
def grow(p):
new_p = set()
for i in range(min(p)-2, max(p)+3):
needed_rule = tuple([True if j in p else False for j in range(i-2, i+3)])
if rules[needed_rule]:
new_p.add(i)
return new_p
plants, rules = parse_input()
nr_of_generations = 50000000000
gen = 0
new_sum = previous_sum = 0
new_delta = previous_delta = 0
while gen < nr_of_generations:
plants = grow(plants)
gen += 1
previous_sum = new_sum
new_sum = sum(plants)
previous_delta = new_delta
new_delta = new_sum - previous_sum
if previous_delta == new_delta:
print("[GENERATION {}] Delta stayed the same (at {})".format(gen, new_delta))
break
final_sum = new_sum + (new_delta * (nr_of_generations - gen))
print("Sum of all plant indices is: {}".format(final_sum))
|
python
|
#!/usr/bin/env python
"""
Generate an input MAF file for MutSigCV by converting SNV/INDEL calls by the Genomon2 pipeline.
The input is a multi-sample variant call in post_analysis/ directory.
Usage: python genomon2maf.py merge_mutation_filt.txt
"""
__author__ = "Masashi Fujita <[email protected]>"
__version__ = "0.0.1"
__date__ = "September 17, 2019"
import sys
import argparse
import pandas as pd
import numpy as np
func_dict = {
'intronic': 'Intron',
'ncRNA_exonic': 'RNA',
'ncRNA_intronic': 'RNA',
'ncRNA_splicing': 'RNA',
'splicing': 'Splice_Site',
'UTR3': '3\'UTR',
'UTR5': '5\'UTR'
}
exonic_func_dict = {
'frameshift deletion': 'Frame_Shift_Del',
'frameshift insertion': 'Frame_Shift_Ins',
'nonframeshift deletion': 'In_Frame_Del',
'nonframeshift insertion': 'In_Frame_Ins',
'nonsynonymous SNV': 'Missense_Mutation',
'stopgain': 'Nonsense_Mutation',
'stoploss': 'Nonstop_Mutation',
'synonymous SNV': 'Silent'
}
def fetch_variant_classification(row):
func = row['Func.refGene']
exonicFunc = row['ExonicFunc.refGene']
    if func == 'exonic' and exonicFunc in exonic_func_dict:
return exonic_func_dict[exonicFunc]
elif func in func_dict:
return func_dict[func]
else:
return np.nan
def fetch_variant_type(row):
ref = row['Reference_Allele']
alt = row['Tumor_Seq_Allele1']
if ref == "-":
return "INS"
elif alt == "-":
return "DEL"
else:
return "SNP"
#
# parse args
#
parser = argparse.ArgumentParser(description='Convert Genomon2 SNV/INDEL calls to MAF.')
parser.add_argument('infile', metavar='genomon_file', help='Genomon2 SNV/INDEL file for a single sample')
parser.add_argument('--out', '-o', metavar='MAF', help='Output MAF file [default: stdout]')
args = parser.parse_args()
#
# open input
#
df = pd.read_csv(args.infile, sep='\t', skiprows=3, dtype={'Chr': 'str'}, low_memory=False)
#
# format into MAF
#
df.rename(columns={
'Gene.refGene': 'Hugo_Symbol',
'Chr': 'Chromosome',
'Start': 'Start_position',
'End': 'End_position',
'Ref': 'Reference_Allele',
'Alt': 'Tumor_Seq_Allele1',
'id': 'Tumor_Sample_Barcode'
}, inplace=True)
df['NCBI_Build'] = '37'
df['Tumor_Seq_Allele2'] = df['Tumor_Seq_Allele1']
df['Variant_Classification'] = df.apply(lambda row: fetch_variant_classification(row), axis=1)
df['Variant_Type'] = df.apply(lambda row: fetch_variant_type(row), axis=1)
df = df[['Hugo_Symbol', 'NCBI_Build', 'Chromosome', 'Start_position', 'End_position', 'Variant_Classification', 'Variant_Type',
'Reference_Allele', 'Tumor_Seq_Allele1', 'Tumor_Seq_Allele2', 'Tumor_Sample_Barcode',
'ExonicFunc.refGene', 'Func.refGene']]
#
# save
#
if args.out is not None:
    f_out = open(args.out, 'w')
else:
    f_out = sys.stdout
df.to_csv(f_out, sep='\t', header=True, index=False, na_rep="NA")
if f_out is not sys.stdout:
    f_out.close()
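# Mapping sketch (illustrative, values taken from the dicts above): an ANNOVAR 'exonic' +
# 'stopgain' row becomes 'Nonsense_Mutation', while an 'intronic' row maps to 'Intron'
# via func_dict.
# e.g. fetch_variant_classification({'Func.refGene': 'exonic',
#                                    'ExonicFunc.refGene': 'stopgain'}) -> 'Nonsense_Mutation'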
|
python
|
import asyncio
from pypuss.app import Master
async def main():
my_app = Master()
await my_app.run()
if __name__ == '__main__':
print("[info]", "started running")
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(main())
except BaseException:
print("[info]", "shutting down")
|
python
|
from rest_framework import generics, status, permissions, mixins
from .models import Post, Vote
from .serializers import PostSerializer, VoteSerializer
from .permissions import IsPostAuthorOrReadOnly
from helpers.response import Response
class PostView(generics.ListCreateAPIView):
serializer_class = PostSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly, )
queryset = Post.objects.all()
def perform_create(self, serializer):
serializer.save(author=self.request.user)
def list(self, request):
queryset = self.get_queryset()
serializer = PostSerializer(queryset, many=True)
return Response({'posts': serializer.data, 'total': queryset.count()}, status=status.HTTP_200_OK)
def create(self, request):
serializer = PostSerializer(data=request.data)
if serializer.is_valid():
self.perform_create(serializer)
return Response(data=serializer.data, status=status.HTTP_201_CREATED)
return Response(errors=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class PostDetailView(generics.RetrieveUpdateDestroyAPIView):
serializer_class = PostSerializer
permission_classes = (permissions.IsAuthenticated, IsPostAuthorOrReadOnly)
queryset = Post.objects.all()
lookup_url_kwarg = 'id'
lookup_field = 'id'
def get(self, request, id):
try:
post = Post.objects.get(id=id)
serializer = PostSerializer(post)
return Response(data=serializer.data, status=status.HTTP_200_OK)
except Post.DoesNotExist:
return Response(errors={'detail': 'post does not exist'}, status=status.HTTP_404_NOT_FOUND)
def perform_update(self, serializer):
serializer.save(author=self.request.user)
def update(self, request, *args, **kwargs):
partial = kwargs.pop('partial', False)
instance = self.get_object()
serializer = PostSerializer(
instance, data=request.data, partial=partial)
if serializer.is_valid():
self.perform_update(serializer)
return Response(data=serializer.data, status=status.HTTP_200_OK)
return Response(errors=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def destroy(self, request, *args, **kwargs):
instance = self.get_object()
self.perform_destroy(instance)
return Response(data={'detail': 'post deleted'}, status=status.HTTP_204_NO_CONTENT)
class VoteView(generics.CreateAPIView, mixins.DestroyModelMixin):
serializer_class = VoteSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
def get_queryset(self):
voter = self.request.user
post = Post.objects.get(id=self.kwargs['id'])
return Vote.objects.filter(voter=voter, post=post)
def perform_create(self, serializer):
post = Post.objects.get(id=self.kwargs['id'])
serializer.save(voter=self.request.user, post=post)
    def create(self, request, *args, **kwargs):
        serializer = VoteSerializer(data=request.data)
        if self.get_queryset().exists():
            return Response(errors={'detail': 'you have already voted for this post'}, status=status.HTTP_400_BAD_REQUEST)
        if serializer.is_valid():
            self.perform_create(serializer)
            return Response(data=serializer.data, status=status.HTTP_201_CREATED)
        return Response(errors=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, *args, **kwargs):
if self.get_queryset().exists():
self.get_queryset().delete()
return Response(data={'detail': 'vote deleted'}, status=status.HTTP_204_NO_CONTENT)
return Response(errors={'detail': 'you have not voted for this post'}, status=status.HTTP_400_BAD_REQUEST)
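# Wiring sketch (hypothetical urls.py, not part of this module): how these views are
# assumed to be routed; the '<int:id>' converter matches lookup_url_kwarg = 'id' and the
# self.kwargs['id'] lookups above.
#
#   from django.urls import path
#   from .views import PostView, PostDetailView, VoteView
#
#   urlpatterns = [
#       path('posts/', PostView.as_view()),
#       path('posts/<int:id>/', PostDetailView.as_view()),
#       path('posts/<int:id>/votes/', VoteView.as_view()),
#   ]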
|
python
|
import numpy as np
import xarray as xr
from constants import R_earth
from xr_DataArrays import dll_from_arb_da
def xr_int_global(da, AREA, DZ):
""" global volume integral *[m^3] """
(z, lat, lon) = dll_from_arb_da(da)
return (da*AREA*DZ).sum(dim=[z, lat, lon]) # 0D
def xr_int_global_level(da, AREA, DZ):
""" global volume integral *[m^3] """
(z, lat, lon) = dll_from_arb_da(da)
return (da*AREA*DZ).sum(dim=[lat, lon]) # 1D (z)
def xr_int_vertical(da, DZ):
""" vertical integral *[m] """
(z, lat, lon) = dll_from_arb_da(da)
return (da*DZ).sum(dim=z) # 2D (lat, lon)
def xr_int_zonal(da, HTN, LATS, AREA, DZ):
""" integral along depth and zonal coordinates *[m^2] rectangular grid"""
shape = np.shape(da)
assert shape[-2:]==np.shape(HTN)[-2:]
assert shape[-2:]==np.shape(DZ)[-2:]
(z, lat, lon) = dll_from_arb_da(da)
if shape[-1] in [900, 320]: # rectangular `ocn_low` or `ocn_rect` grid
# print(np.shape(da))
# print(np.shape(HTN))
# print(np.shape(DZ))
int_zonal = (da*HTN*DZ).sum(dim=[z, lon]) # 1D (lat)
elif shape[-1]==3600: # tripolar grid
int_vert = xr_int_vertical(da, DZ) # 2D
int_zonal = xr_zonal_int_bins(int_vert, LATS, AREA)
return int_zonal
def xr_int_zonal_level(da, HTN, LATS, AREA, DZ, dx=1):
""" zonal integrals for each level *[m] rectangular grid"""
(z, lat, lon) = dll_from_arb_da(da)
shape = np.shape(da)
assert shape[-2:]==np.shape(HTN)[-2:]
assert shape[-2:]==np.shape(DZ)[-2:]
if shape[-1] in [900, 320]: # rectangular grid
int_zonal_level = (da*HTN).sum(dim=[lon]) # 2D (z, lat)
elif shape[-1]==3600: # tripolar grid
lat_bins, lat_centers, lat_width = lat_binning(dx)
km = len(da[z])
dz = DZ.max(dim=(lon,lat))
# construct new xr DataArray
# assert 'time' in da.coords
lat_bin_name = f'TLAT_bins'
if da.coords['time'].size==1: # single time files
array = np.zeros((km, len(lat_centers)))
coords = {z: da.coords[z], lat_bin_name: lat_centers}
int_zonal_level = xr.DataArray(data=array, coords=coords, dims=(z, lat_bin_name))
for k in range(km):
da_k = (da[k,:,:]*DZ[k,:,:]).drop('z_t')
int_zonal_level[k,:] = xr_zonal_int_bins(da_k, LATS, AREA)/dz[k]
else:
array = np.zeros((da.coords['time'].size, km, len(lat_centers)))
coords = {'time': da.coords['time'], z: da.coords[z], lat_bin_name: lat_centers}
int_zonal_level = xr.DataArray(data=array, coords=coords, dims=('time', z, lat_bin_name))
for k in range(km):
da_k = (da[:,k,:,:]*DZ[k,:,:]).drop('z_t')
int_zonal_level[:,k,:] = xr_zonal_int_bins(da_k, LATS, AREA)/dz[k]
return int_zonal_level
def xr_zonal_int_bins(da, LATS, AREA, dx=1):
""" integral over dx wide latitude bins
integrates da with AREA, then divides by width of zonal strip dx
input:
da .. 2D xr DataArray to be "zonally" integrated
LATS .. 2D xr DataArray latitude values of each cell
AREA .. 2D xr DataArray
dx .. width of latitude bands in degrees
lat_name .. xa/AREA coordinate name of the latitude variable
output:
xa_zonal_int .. 1D xr DataArray
lat centers can be accessed through xa_zonal_int.coords[f'{lat_name}_bins']
"""
assert type(da)==xr.core.dataarray.DataArray
assert type(AREA)==xr.core.dataarray.DataArray
assert np.shape(da)[-2:]==np.shape(AREA)
(z, lat, lon) = dll_from_arb_da(da)
lat_bins, lat_centers, lat_width = lat_binning(dx)
da_new = da*AREA
da_zonal_int = da_new.groupby_bins(LATS, lat_bins, labels=lat_centers).sum(dim=f'stacked_{lat}_{lon}')/lat_width
return da_zonal_int
def lat_binning(dx):
""" create latitude bins """
lat_width = dx*R_earth*np.pi/180
lat_bins = np.arange(-90, 90+dx, dx)
lat_centers = np.arange(-90+dx/2, 90, dx)
return lat_bins, lat_centers, lat_width
def xr_vol_int(xa, AREA, DZ, levels=False, zonal=False):
""" volume integral of xarray *[m^3]
input:
xa .. 3D xr DataArray with data to be integrated
AREA .. 2D xr DataArray of cell areas
DZ .. 3D xr DataArray of cell depths
        levels .. option to output results for all levels
zonal .. option to output zonal integrals
output:
integral .. float integral
int_levels .. integrals of each level
xa_zonal_int .. 1D array of vert.+zonally integrated quantity
xa_zonal_level_int .. 2D (km, lat_bin) *[m^2] (integrated in depth and lon)
xa_zonal_level_mean .. 2D (km, lat_bin) *[m^1]
(weighted by bottom cell depth)
"""
assert type(xa)==xr.core.dataarray.DataArray
assert len(np.shape(xa))==3
assert type(AREA)==xr.core.dataarray.DataArray
assert type(DZ)==xr.core.dataarray.DataArray
assert np.shape(AREA)==np.shape(xa)[-2:]
assert np.shape(DZ)==np.shape(xa)[-3:]
if zonal==True:
dx = 1 # latitude bin width
if np.shape(DZ)==(2,3,4): # simple test case
lat_name = 'y'
elif np.shape(DZ)==(42,2400,3600): # hires ocean
lat_name = 'nlat'
elif np.shape(DZ)==(30,384,576): # atm fields
lat_name = 'lat'
else:
raise ValueError('unknown shape: lat_name not implemented')
assert lat_name in DZ.coords
if levels==False:
integral = np.sum(xa[:,:,:]*AREA[:,:]*DZ[:,:,:]).item()
if zonal==False: # just global integral
return integral
elif zonal==True:
xa_vert = xr_int_along_axis(xa, DZ, 0)
xa_zonal_int = xr_zonal_int(xa_vert, AREA, dx, lat_name)
return integral, xa_zonal_int
elif levels==True:
km = len(xa[:,0,0])
int_levels = np.zeros((km))
for k in range(km):
int_levels[k] = np.sum(xa[k,:,:]*AREA[:,:]*DZ[k,:,:]).item()
integral = np.sum(int_levels)
if zonal==False:
return integral, int_levels
if zonal==True:
ONES = AREA.copy()
ONES[:,:] = 1.
for k in range(km):
xa_zonal_int = xr_zonal_int(xa[k,:,:]*DZ[k,:,:], AREA, dx, lat_name)
DZ_zonal_int = xr_zonal_int(DZ[k,:,:] , ONES, dx, lat_name)
if k==0:
xa_zonal_level_int = np.zeros((km, len(xa_zonal_int)))
xa_zonal_level_mean = np.zeros((km, len(xa_zonal_int)))
xa_zonal_level_int[k,:] = xa_zonal_int
xa_zonal_level_mean[k,:] = xa_zonal_int/DZ_zonal_int
return integral, int_levels, xa_zonal_level_int, xa_zonal_level_mean
def xr_int_along_axis(xa, DZ, axis):
""" integral of xr DataArray along a specific axis
input:
xa .. 3D xr DataArray of quantity to be integrated
DZ .. 3D xr DataArray of vertical cell extents [m]
axis .. int axis to be integrated over
output:
int .. 2D xr DataArray of integrated quantitity
"""
assert type(axis)==np.dtype(int) or axis in xa.dims
assert np.shape(xa)==np.shape(DZ)
assert axis<=len(np.shape(xa))
integral = np.sum(xa*DZ, axis=axis)
return integral
def xr_vol_int_regional(xa, AREA, DZ, MASK):
""" volumen integral with regional MASK
input:
xa, AREA, DZ .. same as in 'xr_vol_int'
MASK .. 2D xr DataArray of booleans with the same dimensions as xa
output:
integral, int_levels .. same as in 'xr_vol_int'
"""
assert type(xa)==xr.core.dataarray.DataArray
assert type(AREA)==xr.core.dataarray.DataArray
assert type(DZ)==xr.core.dataarray.DataArray
assert np.shape(AREA)==np.shape(xa)[-2:]
assert np.shape(DZ)==np.shape(xa)[-3:]
assert np.dtype(MASK)==np.dtype('bool')
# determine min/max i/j of masked region
(imin, imax, jmin, jmax) = find_regional_coord_extent(MASK)
xa_reg = xa.where(MASK)[:,jmin:jmax+1,imin:imax+1]
AREA_reg = AREA.where(MASK)[jmin:jmax+1,imin:imax+1]
DZ_reg = DZ.where(MASK)[:,jmin:jmax+1,imin:imax+1]
integral, int_levels = xr_vol_int(xa_reg, AREA_reg, DZ_reg)
return integral, int_levels
def find_regional_coord_extent(MASK):
""" finds coordinates of a boolean mask
input:
MASK .. 2D xr DataArray of booleans
output:
(imin, imax, jmin, jmax) .. lon/lat extent of True area
"""
assert type(MASK)==xr.core.dataarray.DataArray
jmin = np.where(MASK)[0].min()
jmax = np.where(MASK)[0].max()
imin = np.where(MASK)[1].min()
imax = np.where(MASK)[1].max()
return (imin, imax, jmin, jmax)
def xr_vol_mean(xa, AREA, DZ):
""" mean over quantity stored in xa
input:
xa .. 3D xr DataArray of quantity
AREA .. 2D xr DataArray of cell surface area
DZ .. 3D xr DataArray of cell depths
output:
mean .. (float)
"""
assert type(xa)==xr.core.dataarray.DataArray
assert type(DZ)==xr.core.dataarray.DataArray
assert type(AREA)==xr.core.dataarray.DataArray
assert np.shape(xa)==np.shape(DZ)
assert np.shape(xa[0,:,:])==np.shape(AREA)
integral = xr_vol_int(xa, AREA, DZ, levels=False, zonal=False)
ONES = xa.copy()
ONES[:,:,:] = 1.
volume = xr_vol_int(ONES, AREA, DZ, levels=False, zonal=False)
mean = integral/volume
return mean
def xr_surf_int(xa, AREA):
""" surface integral of xarray DataArray *[m^2]
input:
xa .. 2D xr DataArray
AREA .. 2D xr DataArray of surface area
output:
integral .. float integrated
"""
assert type(xa)==xr.core.dataarray.DataArray
assert type(AREA)==xr.core.dataarray.DataArray
assert np.shape(xa)==np.shape(AREA)
assert len(np.shape(xa))==2
integral = np.sum(xa*AREA)
return integral.item()
def xr_surf_mean(xa, AREA):
""" mean over a surface *[1]
input:
xa .. 2D xr DataArray of quantity
AREA .. 2D xr DataArrayof cell surfaces
output:
mean .. (float) mean of quantity in xa
"""
assert type(xa)==xr.core.dataarray.DataArray
assert type(AREA)==xr.core.dataarray.DataArray
assert np.shape(xa)==np.shape(AREA)
assert len(np.shape(xa))==2
integral = xr_surf_int(xa, AREA)
ONES = xa.copy()
ONES[:,:] = 1.
surface = xr_surf_int(ONES, AREA)
mean = integral/surface
return mean
def xr_zonal_mean(xa, AREA, dx, lat_name):
""" area weighted mean over dx wide latitude bins
input:
xa .. 2D xr DataArray
AREA .. 2D xr DataArray
dx .. width of latitude bands
lat_name .. xa/AREA coordinate name of the latitude variable
output:
xa_zonal_mean .. 1D xr DataArray
"""
assert type(xa)==xr.core.dataarray.DataArray
assert type(AREA)==xr.core.dataarray.DataArray
assert len(np.shape(xa))==2
assert np.shape(xa)==np.shape(AREA)
assert dx>180/len(AREA[0,:])
xa_zonal_int = xr_zonal_int(xa, AREA, dx, lat_name)
AREA_zonal_int = xr_zonal_int(AREA/AREA, AREA, dx, lat_name)
xa_zonal_mean = xa_zonal_int/AREA_zonal_int
return xa_zonal_mean
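# Usage sketch (not from the original module): a minimal, self-contained example of
# xr_surf_int / xr_surf_mean on a tiny synthetic grid. The dimension names are
# hypothetical; only the shapes and DataArray types matter to these functions.
if __name__ == '__main__':
    _da = xr.DataArray(np.full((2, 3), 2.0), dims=('lat', 'lon'))
    _area = xr.DataArray(np.ones((2, 3)), dims=('lat', 'lon'))
    print(xr_surf_int(_da, _area))   # 12.0 = sum of 2.0 * 1.0 over 6 cells
    print(xr_surf_mean(_da, _area))  # 2.0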
|
python
|
class Packet_Ops():
def __init__( self ):
self.frame_id = 1
self.rx_packet_table = {
0x88: self.parse_command_resp,
0x8a: self.parse_modem_status,
0x8b: self.parse_tx_status,
0x8d: self.parse_route_information,
            0x8e: self.aggregate_address_update,
0x90: self.parse_rx_indicator,
0x91: self.parse_rx_explict_indicator,
0x92: self.parse_sample_rx_indicator,
0x95: self.parse_node_identifier,
0x97: self.parse_remote_commad_resp }
    def construct_packet( self, input_data ):  # input_data is a list of byte values
return_value = []
return_value.append(0x7e)
length = len(input_data)
return_value.append( (length >>8 ) & 0xff )
return_value.append( length & 0xff )
sum = 0
for i in input_data:
return_value.append(i)
sum = sum + i
return_value.append((0xff-sum)&0xff)
return return_value
def generate_AT_cmd( self, command, parameters = None, frame_id = None ):
data = []
if frame_id == None:
frame_id = self.frame_id
else:
self.frame_id = frame_id
data.append( 0x8)
data.append( frame_id )
data.append(ord(command[0] ))
data.append(ord(command[1]))
if parameters != None:
data.extend(parameters)
self.frame_id = self.frame_id + 1
return self.construct_packet(data)
def generate_ATQueue_cmd( self, command, parameters = None, frame_id = None ):
data = []
if frame_id == None:
frame_id = self.frame_id
else:
self.frame_id = frame_id
data.append( 0x9)
data.append( frame_id )
data.append(ord(command[0] ))
data.append(ord(command[1]))
if parameters != None:
data.extend(parameters)
self.frame_id = self.frame_id + 1
return self.construct_packet(data)
def generate_TX_req( self, destination_address, tx_data, broadcast_radius = 0, transmit_option = 0, frame_id = None ):
data = []
if frame_id == None:
frame_id = self.frame_id
else:
self.frame_id = frame_id
data.append( 0x10)
data.append( frame_id )
data.extend( destination_address )
data.extend( [0xff, 0xfe] )
data.append( broadcast_radius )
data.append( transmit_option )
data.extend(tx_data)
self.frame_id = self.frame_id + 1
return self.construct_packet(data)
def generate_Explicit_Tx_cmd( self, destination_address, source_endpoint, dest_endpoint, cluster_id, profile_id, tx_data, broadcast_radius = 0, transmit_option = 0, frame_id = None ):
data = []
if frame_id == None:
frame_id = self.frame_id
else:
self.frame_id = frame_id
data.append( 0x11)
data.append( frame_id )
data.extend( destination_address)
data.extend( [ 0xff, 0xfe ] )
data.append( source_endpoint)
data.append( dest_endpoint )
data.extend( cluster_id)
data.extend( profile_id)
data.append( broadcast_radius )
data.append( transmit_option )
data.extend(tx_data)
self.frame_id = self.frame_id + 1
return self.construct_packet(data)
def generate_Remote_cmd( self,destination_address, command, parameters = None, frame_id = None, update_flag = True ):
data = []
if frame_id == None:
frame_id = self.frame_id
else:
self.frame_id = frame_id
data.append( 0x17)
data.append( frame_id )
data.extend( destination_address)
data.extend( [ 0xff , 0xfe ] )
if update_flag == True:
data.append(2)
else:
data.append(0)
data.append(ord(command[0] ))
data.append(ord(command[1]))
if parameters != None:
data.extend(parameters)
self.frame_id = self.frame_id + 1
return self.construct_packet(data)
def parse_command_resp( self, data ):
return_value = {}
return_value["frame_type"] = data[0]
return_value["frame_id"] = data[1]
return_value["at_command"] = [ chr(data[2]),chr(data[3]) ]
return_value["command_status"] = data[4]
if len(data) >= 5:
return_value["command_data"] = data[5:]
else:
return_value["command_data"] = None
return return_value
def parse_modem_status( self, data ):
return_value = {}
return_value["frame_type"] = data[0]
return_value["status"] = data[1]
return return_value
def parse_tx_status( self, data ):
return_value = {}
return_value["frame_type"] = data[0]
return_value["frame_id"] = data[1]
return_value["retry_count"] = data[4]
return_value["delivery_status"] = data[5]
return_value["discovery_status"] = data[6]
return return_value
def parse_route_information( self, data ):
return_value = {}
return_value["frame_type"] = data[0]
return_value["source_event"] = data[1]
return_value["length"] = data[2]
return_value["time_stamp"] = data[3:7]
return_value["ack_timeout_count"] = data[7]
return_value["addresses"] = []
        number = data[2] - 7
        address_number = number // 8
        index_start = 10
for i in range( 0,address_number):
return_value["addresses"].append( data[index_start : index_start+8 ] )
index_start = index_start+8
return return_value
    def aggregate_address_update( self, data ):
return_value = {}
return_value["frame_type"] = data[0]
return_value["format_id"] = data[1]
return_value["new_address"] = data[2:10]
return_value["old_address"] = data[10:18]
return return_value
def parse_rx_indicator( self, data ):
return_value = {}
return_value["frame_type"] = data[0]
return_value["address"] = data[1:9]
return_value["receive_option"] = data[11]
return_value["data"] = data[12:]
return return_value
def parse_rx_explict_indicator( self, data ):
return_value = {}
return_value["frame_type"] = data[0]
return_value["address"] = data[1:9]
return_value["source_endpoint"] = data[11]
return_value["destination_endpoint"] = data[12]
return_value["cluster_id"] = data[13]
return_value["profile_id"] = data[14:16]
return_value["receive_options"] = data[16]
return_value["data"] = data[17:]
return return_value
#implemented later
def parse_sample_rx_indicator( self, data ):
pass
    # implemented later
def parse_node_identifier( self,data ):
pass
def parse_remote_commad_resp( self, data ):
return_value = {}
return_value["frame_type"] = data[0]
return_value["frame_id"] = data[1]
return_value["address"] = data[2:10]
return_value["at_command"] = [ chr(data[12]), chr(data[13]) ]
return_value["command_status"] = data[14]
return_value["command_data"] = data[15:]
return return_value
def parse_packet( self,data ):
#print "parse_packet data",data
        if data[0] in self.rx_packet_table:
return [ True, self.rx_packet_table[data[0] ](data) ]
else:
return [ False]
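# Usage sketch (not part of the original module): build an AT command frame and feed a
# hand-crafted AT command response payload back through parse_packet. The response bytes
# here are illustrative, not captured radio traffic.
if __name__ == "__main__":
    ops = Packet_Ops()
    frame = ops.generate_AT_cmd("NI")
    print("AT frame:", [hex(b) for b in frame])
    # 0x88 = AT command response; fields follow parse_command_resp's layout
    parsed = ops.parse_packet([0x88, 0x01, ord('N'), ord('I'), 0x00, 0x41])
    print("parsed:", parsed)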
|
python
|
from importlib import import_module
import six
from six.moves import reload_module
from typing import Optional, Union # noqa
from configloader import ConfigLoader
from wrapt import ObjectProxy
"""
Usage:
from flexisettings.conf import settings
There are two important env vars:
`<inital_namespace>_CONFIG_NAMESPACE`
Sets the prefix used for loading further config values from env
and config file. Set when instantiating the `Settings` object.
`<inital_namespace>_APP_CONFIG`
Import path to a python object to load futher config values from.
Defaults to None. e.g. 'django.conf.settings' or 'celeryconfig'.
Although we can load further keys from the env, now prefixed using our
custom namespace, by preference all further keys should be loaded from a
python obj because all values loaded from env will be strings, there is no
way to automatically type cast them.
Keys in the config obj must be prefixed with the namespace, but will be
provided in `settings` without the prefix.
e.g. if you set your env to:
MYAPP_CONFIG_NAMESPACE=MYAPP_EVENTS
MYAPP_APP_CONFIG=django.conf.settings
and in your Django settings you have:
MYAPP_EVENTS_SERIALIZER = 'json'
then:
from event_consumer.conf import settings
print(settings.SERIALIZER)
> json
"""
if six.PY2:
import_error_cls = ImportError
else:
import_error_cls = ModuleNotFoundError # noqa
class Settings(ObjectProxy):
def __init__(self, initial_namespace=None, defaults=None):
# type: (Optional[str], Optional[str]) -> None
self._self_initial_namespace = initial_namespace
self._self_defaults = defaults
config = _load_config(initial_namespace, defaults)
super(Settings, self).__init__(config)
def __dir__(self):
base = super(Settings, self).__dir__()
return base + list(self.keys())
def _reload(self):
# type: () -> None
if self._self_defaults:
# we have to reload the `defaults` module otherwise
# changed values (e.g. loaded from env var) won't show up
try:
module = import_module(self._self_defaults)
except import_error_cls:
# must have been a '{module.attr}' import path
module = import_module(self._self_defaults.rsplit('.', 1)[0])
reload_module(module)
self._replace_wrapped(
_load_config(self._self_initial_namespace, self._self_defaults)
)
def _replace_wrapped(self, new):
# type: (ConfigLoader) -> None
self.__wrapped__ = new
def _load_config(initial_namespace=None, defaults=None):
# type: (Optional[str], Optional[str]) -> ConfigLoader
"""
Kwargs:
initial_namespace:
defaults:
"""
    # load defaults
    config = ConfigLoader()
    if defaults:
        config.update_from_object(defaults)
namespace = getattr(config, 'CONFIG_NAMESPACE', initial_namespace)
app_config = getattr(config, 'APP_CONFIG', None)
# load customised config
if app_config:
if namespace is None:
config.update_from_object(app_config)
else:
_temp = ConfigLoader()
_temp.update_from_object(app_config, lambda key: key.startswith(namespace))
config.update(_temp.namespace(namespace))
return config
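# Usage sketch (hypothetical names, not part of this module): 'myapp.defaults' is an
# assumed defaults module defining e.g. MYAPP_CONFIG_NAMESPACE and MYAPP_* keys.
#
#   from flexisettings.conf import Settings
#   settings = Settings(initial_namespace='MYAPP', defaults='myapp.defaults')
#   settings.SERIALIZER   # resolved from the defaults module and any APP_CONFIG object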
|
python
|
"""
Contains a class that converts the pre-processed binary file into a numpy array. The logic behind
each conversion step is thoroughly documented through comments in the code.
"""
import pickle
import os
import struct
import logging
import hashlib
import numpy as np
from tqdm import tqdm
import matplotlib
class Preprocessor():
""" Reads a pre-processed binary file (see README) into a pretty numpy array, which can be
    fed to a ML model.
:param settings: a dictionary of data-related simulation settings
"""
def __init__(self, settings):
self.input_file = settings['input_file']
self.preprocessed_file = settings['preprocessed_file']
self.run_sanity_checks = settings['run_sanity_checks']
# Inputs that dictate the dataset ID:
self.max_time = settings['max_time']
self.sample_freq = settings['sample_freq']
self.beamformings = settings['beamformings']
self.power_offset = settings['power_offset']
self.power_scale = settings['power_scale']
self.pos_grid = settings['pos_grid']
self.pos_shift = settings['pos_shift']
self.keep_timeslots = settings['keep_timeslots']
# Precomputes other needed variables, and initializes others
self.dataset_id = get_dataset_id(settings)
self.time_slots = self.max_time * self.sample_freq
self.features_size = self.time_slots * self.beamformings
self.features = None
self.labels = None
def check_existing_dataset(self):
""" Checks whether the dataset we are trying to create already exists
:returns: Boolean flag, with `True` meaning that the dataset already exists
"""
dataset_exists = False
if os.path.isfile(self.preprocessed_file):
with open(self.preprocessed_file, 'rb') as dataset_file:
_, _, target_dataset_id = pickle.load(dataset_file)
if target_dataset_id == self.dataset_id:
dataset_exists = True
return dataset_exists
def create_bff_dataset(self):
""" Creates a BFF experiments-ready dataset. The dataset contains `X`, the matrix
containing the received radiation, and `y`, the true position for that received
radiation.
        :returns: `X` and `y`. `X` is a matrix with dimensions (number_of_positions x
        radiation_samples_per_position). `y` is a matrix with dimensions (number_of_positions x 2).
        The radiation samples per position's size is given by the number of used beamformings
        times the sampling frequency times the receiving time per beamforming
"""
labels_size = 2
sample_size = int(self.features_size + labels_size)
# Converts the dataset into features/labels
logging.info("Converting the dataset into features/labels...")
features, labels = self._data_to_dataset(sample_size)
# Converting the features/labels into numpy arrays
logging.info("Converting features/labels into numpy arrays...")
self.features = np.array(features)
self.labels = np.array(labels)
del features, labels
# Printing the ranges of the features
logging.info("[label] x range: %s - %s",
self.labels[:, 0].min(), self.labels[:, 0].max())
logging.info("[label] y range: %s - %s",
self.labels[:, 1].min(), self.labels[:, 1].max())
logging.info("[features] power range: %s - %s",
self.features[:].min(), self.features[:].max())
# Removes unwanted timeslots
self._delete_timeslots()
# Removes dataless positions
self._remove_dataless_positions()
def _data_to_dataset(self, sample_size):
""" `create_bff_dataset` auxiliary function. Converts the raw (floating point)
input data into features and labels, that will be further filtered. The features
that come out of this operation should have a range close to [0, 1] -- please
set the simulation parameters accordingly.
:param sample_size: length of the input data for each position in the dataset
"""
# Unpacks stored variables
x_shift, y_shift = self.pos_shift
x_grid, y_grid = self.pos_grid
if x_grid != y_grid:
logging.warning("WARNING: the area on which the experiments are going to be "
"performes is not square. The distance metric during the model training "
"will be ill defined. (e.g. the validation error in meters will not be correct)"
"\nPlease keep that in mind (or add that exception to the code :D).")
# Loads the binary mode dataset (the step that creates this binary data will
# be rewritten in python in the near future)
logging.info("Loading the binary dataset from %s...", self.input_file)
with open(self.input_file, mode='rb') as file:
data_binary = file.read()
# Converts the binary dataset into float 32
logging.info("Converting the binary data to float_32...")
logging.info("[** this may take a couple of minutes and it will not print any progress **]")
binary_size = os.path.getsize(self.input_file)
size_bytes = int(binary_size/4)
data = struct.unpack('f'*size_bytes, data_binary)
del data_binary
num_samples = int(size_bytes / sample_size)
features = []
labels = []
# For each sample in the data
logging.info("Creating features and labels from the float_32 data...")
for sample_idx in tqdm(range(num_samples)):
tmp_features = []
tmp_labels = []
data_start_pos = sample_idx * sample_size
# For each data item in the sample
# (0 = Position data - X)
# (1 = Position data - Y)
# (2, ..., sample_size-1 = Feature data)
for data_idx in range(sample_size):
item = data[data_start_pos + data_idx]
if data_idx == 0:
item += x_shift
item /= x_grid
tmp_labels.append(item)
elif data_idx == 1:
item += y_shift
item /= y_grid
tmp_labels.append(item)
else:
# Important notes regarding feature data:
# 1) item == 0 -> there is no data here (there are no values > 0)
# 2) The check for the minimum power threshold (e.g. -100 dBm) is performed
# after the noise is added, not here.
# 3) Nevertheless, to speed up downstream operations code, filters out values with
# very little power. For the default simulation parameters, this filters samples
# with less than -170 dBm. Since the default "minimum_power" is -125 dBm [check
# an example for the meaning of this variable], this means we can reliably test
# (log-normal) noises with STD up to 15 dB [margin = (-125) - -170 = 45 dB =
# 3*STD of 15 dB]
if -(self.power_offset) < item < 0:
tmp_features.append((item + self.power_offset) * self.power_scale)
else:
assert item <= 0.0, "There cannot be any value here above 0.0 (got {})"\
.format(item)
tmp_features.append(0.0)
features.append(tmp_features)
labels.append(tmp_labels)
return features, labels
def _delete_timeslots(self):
""" Removes unwanted timeslots (Keep in mind that this feature's usefulness is super
dataset-dependent! In my experiments, I removed the timeslots with very little data,
corresponding to less than 1% of the non-zero features)
"""
if self.keep_timeslots:
logging.warning("Removing unwanted timeslots (keeping timeslots with indexes between"
" '%s' and '%s')", self.keep_timeslots[0], self.keep_timeslots[1]-1)
mask = np.ones(self.features.shape[1], dtype=bool)
ts_to_keep = [ts for ts in range(*self.keep_timeslots)]
ts_to_delete = [ts for ts in range(self.time_slots) if ts not in ts_to_keep]
logging.info("Time slots to remove: %s", ts_to_delete)
for idx in tqdm(range(self.features.shape[1])):
# DIM 1 = BF, DIM 2 = TS
if idx % self.time_slots in ts_to_delete:
mask[idx] = False
# Removes those slots from the data
logging.info("Shape before TS reduction: %s", self.features.shape)
self.features = self.features[:, mask]
logging.info("Shape after TS reduction: %s", self.features.shape)
def _remove_dataless_positions(self):
""" Removes invalid [x, y] positions (invalid positions = positions with no data,
i.e. only zeroes)
"""
logging.info("Detecting the invalid (data-less) positions... ")
mask = np.ones(self.features.shape[0], dtype=bool)
removed_pos = 0
for idx in tqdm(range(self.features.shape[0])):
if sum(self.features[idx, :]) == 0:
mask[idx] = False
removed_pos += 1
self.features = self.features[mask, :]
self.labels = self.labels[mask, :]
logging.info("%s data-less positions removed.", removed_pos)
def store_dataset(self):
""" Stores the result of data preprocessing
"""
# Final data reports
logging.info("Usable positions: %s", self.features.shape[0])
target_folder = os.path.split(self.preprocessed_file)[0]
if not os.path.exists(target_folder):
logging.info("Target folder (%s) not found, creating it...", target_folder)
os.makedirs(target_folder)
logging.info("Storing the result ...")
with open(self.preprocessed_file, 'wb') as data_file:
pickle.dump([self.features, self.labels, self.dataset_id], data_file)
# Optional: plots the existing data points on a 2D image
if self.run_sanity_checks:
logging.info("Preparing plot to double-check existing data points...")
# Creates (N+1) by (M+1) matrix. This means that its indexes go from 0 through N/M
to_plot = np.full([int(self.pos_grid[0]) + 1, int(self.pos_grid[1]) + 1], 0.0)
for pos_idx in tqdm(range(self.labels.shape[0])):
# Scales 0-1 to 0-N/M
pos_x = int(round(self.labels[pos_idx, 0] * self.pos_grid[0]))
pos_y = int(round(self.labels[pos_idx, 1] * self.pos_grid[1]))
# Flips Y (to correctly plot with imshow)
to_plot[pos_x, int(round(self.pos_grid[1] - pos_y))] = 1.0
# Local import to avoid messing non-gaphical interfaces
matplotlib.use('agg')
import matplotlib.pyplot as plt
plt.imshow(np.transpose(to_plot))
image_locaton = os.path.join(target_folder, 'existing_data_points.pdf')
plt.savefig(image_locaton)
logging.info("Done! (Check %s)", image_locaton)
def load_dataset(self):
""" Loads the previously stored dataset, returning it
:returns: previously stored features and labels
"""
assert self.check_existing_dataset(), "The dataset with the specified path ({}) either "\
"does not exists or was built with different simulation settings. Please run the "\
"data preprocessing step with the new simulation settings!".format(
self.preprocessed_file)
with open(self.preprocessed_file, 'rb') as dataset_file:
features, labels, _ = pickle.load(dataset_file)
return features, labels
def get_dataset_id(settings):
""" Creates and returns an unique ID (for practical purposes), given the data parameters.
The main use of this ID is to make sure we are using the correct data source,
and that the data parameters weren't changed halfway through the simulation sequence.
"""
hashing_features = [
settings['max_time'],
settings['sample_freq'],
settings['beamformings'],
settings['power_offset'],
settings['power_scale'],
settings['pos_grid'],
settings['pos_shift'],
settings['keep_timeslots'],
]
hash_sha256 = hashlib.sha256()
for feature in hashing_features:
if isinstance(feature, list):
inner_features = feature
else:
inner_features = [feature]
for item in inner_features:
if isinstance(item, float):
item = "{:.4f}".format(item)
hash_sha256.update(bytes(item, encoding='utf8'))
else:
hash_sha256.update(bytes(item))
return hash_sha256.hexdigest()
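# Usage sketch (assumed values): the settings dict keys consumed by Preprocessor, as read
# in __init__ and get_dataset_id above. The paths and numbers below are placeholders; a
# real run needs an existing pre-processed binary at 'input_file'.
if __name__ == '__main__':
    _settings = {
        'input_file': 'data/raw.bin',
        'preprocessed_file': 'data/preprocessed.pkl',
        'run_sanity_checks': False,
        'max_time': 6,
        'sample_freq': 20,
        'beamformings': 32,
        'power_offset': 170,
        'power_scale': 0.01,
        'pos_grid': [400.0, 400.0],
        'pos_shift': [200.0, 200.0],
        'keep_timeslots': [0, 120],
    }
    preprocessor = Preprocessor(_settings)
    if not preprocessor.check_existing_dataset():
        preprocessor.create_bff_dataset()
        preprocessor.store_dataset()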
|
python
|
# ConnexionApiGenerator.py - Creates an object implementing a Connexion API
import os
from jinja2 import Environment, Template, FileSystemLoader
from smoacks.sconfig import sconfig
class ConnexionApiGenerator:
def __init__(self, app_object):
self._app_object = app_object
self.name = self._app_object.name
def getJinjaDict(self):
# Establish constant values and the overall dictionary structure
result = {
'name': self.name,
'idList': None,
'pkList': None,
'idCount': self._app_object._idCount,
'hasSearch': False
}
# Loop through the properties and update the structure where needed
properties = self._app_object.getAllProperties()
for prop in properties:
if prop.isId:
result['name_id'] = prop.name
if not result['idList']:
result['idList'] = prop.name
result['idStrings'] = "'" + prop.name + "'"
result['pkList'] = self.name + '.' + prop.name
else:
result['idList'] += ', ' + prop.name
result['idStrings'] += ", '" + prop.name + "'"
result['pkList'] += ', ' + self.name + '.' + prop.name
if prop.searchField:
result['hasSearch'] = True
result['search_field'] = prop.name
if self._app_object._idCount == 1:
result['primary_keys'] = result['pkList']
result['id_list'] = result['idList']
result['id_strings'] = result['idStrings']
else:
result['primary_keys'] = '[' + result['pkList'] + ']'
result['id_list'] = '[' + result['idList'] + ']'
result['id_strings'] = '[' + result['idStrings'] + ']'
result.update(sconfig['env_defaults'])
return result
def render(self):
env = Environment(
loader = FileSystemLoader('templates')
)
template = env.get_template('ConnexionAPIs.jinja')
filedir = os.path.join(sconfig['structure']['root'], sconfig['structure']['apiobjectdir'])
if not os.path.isdir(filedir):
os.makedirs(filedir, exist_ok=True)
outfilename = os.path.join(filedir, "{}s.py".format(self._app_object.getSnakeName()))
if not os.path.isfile(outfilename):
outfile = open(outfilename, "w")
outfile.write(template.render(self.getJinjaDict()))
outfile.close()
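# Illustrative usage sketch (the app-object variable is an assumption):
#   generator = ConnexionApiGenerator(app_object)
#   context = generator.getJinjaDict()  # e.g. for a single-id object, context['primary_keys'] is '<Name>.<id>'
#   generator.render()                  # writes <snake_name>s.py into the configured apiobjectdir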
|
python
|
"""PyTest fixtures and helper functions, etc."""
import pprint
import uuid
from configparser import ConfigParser
from configparser import ExtendedInterpolation
from inspect import getframeinfo
from pathlib import Path
import pytest
# =========================================================
# H E L P E R S
# =========================================================
class Helpers:
"""Generic helper class.
This class provides utility methods that can be accessed
from within test functions.
"""
@staticmethod
def pp(capsys, data, frame=None):
"""(Magic) Pretty Print function."""
with capsys.disabled():
_PP_ = pprint.PrettyPrinter(indent=4)
print("\n")
if frame is not None:
print(f"LINE #: {getframeinfo(frame).lineno}\n")
_PP_.pprint(data)
# =========================================================
# G L O B A L P Y T E S T F I X T U R E S
# =========================================================
_KWD_TEST_SCTN_ = "test_sctn" # NOTE: ConfigParser converts all keys to
_KWD_TEST_KEY_ = "test_key" # lower case as they follow .ini syntax
_KWD_TEST_VAL_ = "test_val" # rules for key attributes
_DEFAULT_ATTRIBS_DICT_ = {
_KWD_TEST_KEY_: _KWD_TEST_VAL_,
"k11": "v11",
"k12": "v12",
}
_DEFAULT_CONFIG_DICT_ = {_KWD_TEST_SCTN_: _DEFAULT_ATTRIBS_DICT_}
_DEFAULT_CONFIG_STR_ = (
f"{_KWD_TEST_SCTN_}|{_KWD_TEST_KEY_}:{_KWD_TEST_VAL_},k11:v11,k12:v12"
)
_DEFAULT_CHANNELS_STR_ = "f451_twitter|f451_slack"
_DEFAULT_TEST_SECRETS_ = {
"f451_mailgun": {
"priv_api_key": "_YOUR_PRIVATE_API_KEY_",
"publ_val_key": "_YOUR_PUBLIC_API_KEY_",
"webhook_sign_key": "_YOUR_WEBHOOK_SIGNING_KEY_",
"from_domain": "_YOUR_DOMAIN_NAME_",
},
"f451_slack": {
"signing_secret": "_YOUR_SLACK_SIGNING_SECRET_",
"auth_token": "_YOUR_SLACK_AUTH_TOKEN_",
"app_token": "_YOUR_SLACK_APP_TOKEN_",
},
"f451_twilio": {
"acct_sid": "_YOUR_TWILIO_SID_",
"auth_token": "_YOUR_TWILIO_TOKEN_",
"from_phone": "_YOUR_TWILIO_FROM_PHN_",
},
"f451_twitter": {
"user_key": "_YOUR_TWITTER_USER_KEY_",
"user_secret": "_YOUR_TWITTER_SECRET_KEY_",
"auth_token": "_YOUR_TWITTER_AUTH_TOKEN_",
"auth_secret": "_YOUR_TWITTER_AUTH_SECRET_",
},
}
_DEFAULT_TEST_CONFIG_ = {
"f451_main": {
"channels": _DEFAULT_CHANNELS_STR_,
"channel_map": "email:f451_mailgun|sms:f451_twilio|twitter:f451_twitter|slack:f451_slack|forums:f451_slack", # noqa: B950
},
"f451_mailgun": {
"from_name": "_DEFAULT_FROM_NAME_",
"from_email": "[email protected]",
},
"f451_slack": {
"from_name": "_DEFAULT_FROM_NAME_",
},
"f451_twitter": {},
"f451_twilio": {
"to_phone": "_DEFAULT_TO_PHONE_",
},
}
@pytest.fixture()
def default_test_section():
"""Return default test values."""
return _KWD_TEST_SCTN_
@pytest.fixture()
def default_test_key():
"""Return default test values."""
return _KWD_TEST_KEY_
@pytest.fixture()
def default_test_val():
"""Return default test values."""
return _KWD_TEST_VAL_
@pytest.fixture()
def valid_config():
"""Return valid config values."""
parser = ConfigParser(interpolation=ExtendedInterpolation())
parser.read_dict(_DEFAULT_CONFIG_DICT_)
return parser
@pytest.fixture()
def valid_config_dict():
"""Return valid config values as `dict`."""
return _DEFAULT_CONFIG_DICT_
@pytest.fixture()
def valid_config_string():
"""Return valid config values as `str`."""
return _DEFAULT_CONFIG_STR_
@pytest.fixture()
def valid_attribs_dict():
"""Return attributes."""
return _DEFAULT_ATTRIBS_DICT_
@pytest.fixture()
def new_config_file(tmpdir_factory):
"""Only create the filename, but not the actual file."""
configFile = tmpdir_factory.mktemp("test").join(f"{uuid.uuid4().hex}.ini")
configFile.write("[section]\nkey = value")
return str(configFile)
@pytest.fixture(scope="session")
def new_attachment_file(tmpdir_factory):
"""Create an actual dummy file."""
testFile = tmpdir_factory.mktemp("test").join(f"{uuid.uuid4().hex}.txt")
testFile.write("THIS IS A TEST FILE")
return str(testFile)
@pytest.fixture(scope="session")
def new_media_file():
"""Link to an actual test image file."""
testFile = Path("test/test-image-small.gif")
return str(testFile)
@pytest.fixture()
def helpers():
"""Return `Helper` object.
This makes it easier to use helper functions inside tests.
"""
return Helpers
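# Illustrative usage inside a test (the test function below is hypothetical):
#   def test_something(helpers, capsys):
#       helpers.pp(capsys, {"answer": 42})  # pretty-prints the data despite output capturing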
@pytest.fixture()
def default_test_msg(prefix="", suffix="", sep=" "):
"""Create a random test string."""
return sep.join([prefix, uuid.uuid4().hex, suffix])
@pytest.fixture()
def invalid_file():
"""Create an invalid filename string."""
return "/tmp/INVALID.FILE" # noqa: S108
@pytest.fixture()
def invalid_string():
"""Create an invalid string."""
return "INVALID_STRING"
@pytest.fixture()
def valid_settings():
"""Return valid config values."""
parser = ConfigParser()
parser.read_dict(_DEFAULT_TEST_CONFIG_)
parser.read_dict(_DEFAULT_TEST_SECRETS_)
return parser
@pytest.fixture()
def default_channels_string():
"""Return test values."""
return _DEFAULT_CHANNELS_STR_
|
python
|
import copy
import itertools
import re
import threading
import html5lib
import requests
import urllib.parse
from bs4 import BeautifulSoup
from typing import List
try:
import basesite
except (ModuleNotFoundError, ImportError) as e:
from . import basesite
class DaocaorenshuwuSite(basesite.BaseSite):
def __init__(self):
self.site_info = basesite.SiteInfo(
type='文学',
statue='上线版本',
url='https://www.daocaorenshuwu.com',
name='稻草人书屋',
brief_name='稻草人',
version='1.1',
max_threading_number=10,  # each chapter is downloaded with multiple threads (get_chapter_content), so the total thread count is capped at 10
)
super().__init__(self.site_info)
self.base_url = 'https://www.daocaorenshuwu.com'
self.encoding = 'utf-8'
self.search_url = 'https://www.daocaorenshuwu.com/plus/search.php?q=%s'
self.session = requests.session()
@basesite.print_in_out
def get_books(self, search_info: str) -> List[basesite.Book]:
url = self.search_url % urllib.parse.quote(search_info)
r = self.try_get_url(self.session, url, try_timeout=5)
soup = BeautifulSoup(r.content, 'html.parser')
book_soup_list = soup.select('tbody > tr')
search_book_results = []
for book_soup in book_soup_list:
td_soup_list = book_soup.select('td')
book_url = self.base_url + td_soup_list[0].select_one('a').attrs['href']
if book_url.find('search.html') != -1:
continue
book_name = td_soup_list[0].text
book_author = td_soup_list[1].text
book_brief = "无"
book = basesite.Book(site=self, url=book_url, name=book_name, author=book_author,
brief=book_brief)
search_book_results.append(book)
return search_book_results
@basesite.print_in_out
def get_chapters(self, book: basesite.Book) -> List[basesite.Chapter]:
r = self.try_get_url(self.session, book.url)
if r is None:
return []
soup = BeautifulSoup(r.content, 'html.parser')
chapter_soup_list = soup.select('div#all-chapter div.panel-body div.item a')
chapters = [basesite.Chapter(site=self,
url='https:' + chapter.attrs['href'],
title=chapter.text)
for chapter in chapter_soup_list]
return chapters
def get_chapter_content(self, chapter: basesite.Chapter) -> str:
class _InnerDown(threading.Thread):
def __init__(self, func, session_, url):
super().__init__()
self.func = func
self.session = copy.deepcopy(session_)
self.url = url
self.r = None
def run(self) -> None:
self.r = self.func(self.session, self.url)
self.session.close()
session = copy.deepcopy(self.session)
partial_url = chapter.url[:-5]
# step 1: download the first and the second page to determine the total number of pages
tasks = [_InnerDown(self.try_get_url, session, chapter.url),
_InnerDown(self.try_get_url, session, partial_url + "_2.html")]
for task in tasks:
task.start()
for task in tasks:
task.join()
r1, r2 = tasks[0].r, tasks[1].r
if r1 is None:
session.close()
return f'\r\n{chapter.title}\r\n下载失败'
soup1 = BeautifulSoup(r1.content, 'html5lib')  # the page markup is malformed, so the faster html.parser cannot be used
has_multipages = False
try:
if soup1.select('div.text-center')[0].select('button.btn-info')[2].text.find('下一页') >= 0:
has_multipages = True
except IndexError:
pass
if has_multipages:
if r2 is None:
session.close()
return f'\r\n{chapter.title}\r\n下载失败'
soup2 = BeautifulSoup(r2.content, 'html5lib')
page_info = soup2.select_one('div.book-type li.active').text
pages = int(re.search(r'/(\d+)页', page_info).group(1))  # total number of pages parsed from the pager text
soup_list = [soup1, soup2]
else:
pages = 1
soup_list = [soup1]
# step 2: download the remaining pages with multiple threads
url_list = ([f'{partial_url}_{i}.html' for i in range(3, pages + 1)])
tasks = [_InnerDown(self.try_get_url, session, url) for url in url_list]
for task in tasks:
task.start()
for task in tasks:
task.join()
session.close()
for task in tasks:
if task.r is None:
return f'\r\n{chapter.title}\r\n下载失败'
else:
soup_list.append(BeautifulSoup(task.r.content, 'html5lib'))
# step 3: merge the downloaded content
content_list = []
for soup in soup_list:
content_soup = soup.select_one('div#cont-text')
for i in content_soup.select('script,style,[class]'):
i.decompose()
content_list.append(content_soup.text.strip())
# return f'\r\n{chapter.title}\r\n{"".join(content_list)}'
return "\r\n".join(content_list)
def save_chapter(self, chapter, filename):
content = self.get_chapter_content(chapter)
with open(filename, 'w', encoding=self.encoding) as f:
f.write(content)
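# Illustrative usage sketch (search keyword and output filename are assumptions):
#   site = DaocaorenshuwuSite()
#   books = site.get_books('keyword')
#   chapters = site.get_chapters(books[0])
#   site.save_chapter(chapters[0], 'chapter_001.txt')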
|
python
|
from flask import render_template, flash, redirect, url_for, session
from flask_login import login_user, current_user, login_required
from app import app, db, lm
from app.models.tables import User, Challenges
from app.models.forms import FlagForm
import datetime
from notifications import *
@lm.user_loader
def load_user(id):
return User.query.filter_by(id=id).first()
@app.route("/index")
@app.route("/")
def index():
return render_template('index.html')
@app.route("/dashboard", methods=["GET", "POST"])
@login_required
def dashboard():
errors = []
users = User.query.filter_by(username=current_user.username).first()
chall = Challenges.query.all()
if not current_user.is_authenticated:
# if the user is not logged in, send them to the login page
return redirect(url_for('login'))
form = FlagForm()
chall = Challenges.query.all()
if form.validate_on_submit():
chall1 = Challenges.query.filter_by(flag=form.flag.data).first()
if not chall1:
flash(FLAG_INCORRECT)
return redirect(url_for('dashboard'))
if chall1.flag == form.flag.data:
user = User.query.filter_by(username=current_user.username).first()
if str(chall1.id) in user.solved:
flash(FLAG_SUBMITTED_ALREADY)
print(chall)
return render_template('dashboard/index.html', form=form, users=users, chall=chall)
user.score = str(int(user.score) + int(chall1.value))
user.solved = user.solved + str(chall1.id) + ', '
user.lastSubmit = datetime.datetime.utcnow()
db.session.commit()
flash(FLAG_SUCCESS)
return redirect(url_for('dashboard'))
return render_template('dashboard/index.html', form=form, users=users, chall=chall )
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
import re
import requests
from bs4 import BeautifulSoup
def get_last_series(url: str) -> int:
rs = requests.get(url)
root = BeautifulSoup(rs.content, 'html.parser')
fields_str = root.select_one('ul.flist').get_text(strip=True)
if not fields_str:
raise Exception('Could not find the anime field description!')
m = re.search(r'Добавлена:\s*(\d+)\s*серия', fields_str)  # matches the Russian site text "Added: episode <N>"
if not m:
raise Exception('Could not find the number of the latest episode!')
return int(m.group(1))
if __name__ == '__main__':
url = 'https://anivost.org/24-chernyy-klever.html'
print(get_last_series(url))
# 170
|
python
|
import os
from pathlib import Path
from shutil import rmtree
from tests.lib.base import DeloggerTestBase
from tests.lib.urlopen_mock import UrlopenMock
class TestPresets(DeloggerTestBase):
def teardown_method(self):
log_dir = self.OUTPUT_DIRPATH
if not Path(log_dir).is_dir():
return False
rmtree(log_dir)
def test_info(self, capsys):
from delogger.presets.info import logger
self.execute_log(logger)
self.check_normal_stream_log(logger, capsys, is_color=True)
assert getattr(logger, "debuglog")
def test_debug(self, capsys):
from delogger.presets.debug import logger
self.execute_log(logger)
self.check_debug_stream_log(logger, capsys, is_color=True)
run_rotating_hdlr = logger.handlers[1]
self.check_log_file(run_rotating_hdlr.filepath)
assert getattr(logger, "debuglog")
def test_debug_stream(self, capsys):
from delogger.presets.debug_stream import logger
self.execute_log(logger)
self.check_debug_stream_log(logger, capsys, is_color=True)
assert getattr(logger, "debuglog")
def test_output(self, capsys):
from delogger.presets.output import logger
self.execute_log(logger)
assert getattr(logger, "debuglog")
assert Path(self.OUTPUT_DIRPATH).is_dir()
def test_output_env(self, capsys):
urlopen_mock = UrlopenMock()
filepath = f"{self.OUTPUT_DIRPATH}/test_output.log"
os.environ["DELOGGER_NAME"] = "test_output_env"
os.environ["DELOGGER_FILEPATH"] = filepath
os.environ["DELOGGER_SLACK_WEBHOOK"] = "http://slack_webhook"
from delogger.presets.output import OutputPresets
logger = OutputPresets("no_name").get_logger()
del os.environ["DELOGGER_NAME"]
del os.environ["DELOGGER_FILEPATH"]
del os.environ["DELOGGER_SLACK_WEBHOOK"]
self.execute_log(logger)
assert getattr(logger, "debuglog")
assert Path(filepath).is_file()
assert urlopen_mock.call_count == 4
def test_profiler(self):
from delogger.presets.profiler import logger
self.execute_log(logger)
assert getattr(logger, "debuglog")
assert getattr(logger, "line_profile")
assert getattr(logger, "add_line_profile")
assert getattr(logger, "memory_profile")
assert getattr(logger, "line_memory_profile")
|
python
|
"""Top-level package for the 'dltf' framework.
Running ``import dltf`` will recursively import all important subpackages and modules.
"""
import logging
import dogs_vs_cats.src.inception_resnet_v2
logger = logging.getLogger("dogs_vs_cats")
__url__ = "https://github.com/ShiNik/DeepLearning_Tensorflow"
__version__ = "0.1.0"
|
python
|
"""Class for symbolic expression object or program."""
import array
import os
import warnings
from textwrap import indent
import numpy as np
from sympy.parsing.sympy_parser import parse_expr
from sympy import pretty
from dsr.functions import PlaceholderConstant
from dsr.const import make_const_optimizer
from dsr.utils import cached_property
import dsr.utils as U
def _finish_tokens(tokens):
"""
Complete the pre-order traversal.
Parameters
----------
tokens : list of integers
A list of integers corresponding to tokens in the library. The list
defines an expression's pre-order traversal.
Returns
_______
tokens : list of integers
A list of integers corresponding to tokens in the library. The list
defines an expression's pre-order traversal. "Dangling" programs are
completed with repeated "x1" until the expression completes.
"""
arities = np.array([Program.library.arities[t] for t in tokens])
dangling = 1 + np.cumsum(arities - 1)
if 0 in dangling:
expr_length = 1 + np.argmax(dangling == 0)
tokens = tokens[:expr_length]
else:
# Extend with first variable until complete
tokens = np.append(tokens, np.random.choice(Program.library.input_tokens, size=dangling[-1]))
return tokens
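# Illustrative example (token indices are assumptions about the configured library):
# for a binary "add" token followed by a terminal "x1" token, the arities are [2, 0],
# so dangling = [2, 1]; since no prefix closes the expression, one randomly chosen
# input token is appended to complete the dangling branch.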
def from_str_tokens(str_tokens, optimize, skip_cache=False):
"""
Memoized function to generate a Program from a list of str and/or float.
See from_tokens() for details.
Parameters
----------
str_tokens : str | list of (str | float)
Either a comma-separated string of tokens and/or floats, or a list of
str and/or floats.
optimize : bool
See from_tokens().
skip_cache : bool
See from_tokens().
Returns
-------
program : Program
See from_tokens().
"""
# Convert str to list of str
if isinstance(str_tokens, str):
str_tokens = str_tokens.split(",")
# Convert list of str|float to list of tokens
if isinstance(str_tokens, list):
traversal = []
constants = []
for s in str_tokens:
if s in Program.library.names:
t = Program.library.names.index(s.lower())
elif U.is_float(s):
assert "const" not in str_tokens, "Currently does not support both placeholder and hard-coded constants."
assert not optimize, "Currently does not support optimization with hard-coded constants."
t = Program.library.const_token
constants.append(float(s))
else:
raise ValueError("Did not recognize token {}.".format(s))
traversal.append(t)
traversal = np.array(traversal, dtype=np.int32)
else:
raise ValueError("Input must be list or string.")
# Generate base Program (with "const" for constants)
p = from_tokens(traversal, optimize=optimize, skip_cache=skip_cache)
# Replace any constants
p.set_constants(constants)
return p
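# Illustrative calls (token names assume they exist in the configured library):
#   from_str_tokens("add,x1,const", optimize=True)    # placeholder constant, optimized
#   from_str_tokens("mul,x1,2.0", optimize=False)     # hard-coded constant, no optimization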
def from_tokens(tokens, optimize, skip_cache=False):
"""
Memoized function to generate a Program from a list of tokens.
Since some tokens are nonfunctional, this first computes the corresponding
traversal. If that traversal exists in the cache, the corresponding Program
is returned. Otherwise, a new Program is returned.
Parameters
----------
tokens : list of integers
A list of integers corresponding to tokens in the library. The list
defines an expression's pre-order traversal. "Dangling" programs are
completed with repeated "x1" until the expression completes.
optimize : bool
Whether to optimize the program before returning it.
skip_cache : bool
Whether to bypass the cache when creating the program.
Returns
-------
program : Program
The Program corresponding to the tokens, either pulled from memoization
or generated from scratch.
"""
# Truncate expressions that complete early; extend ones that don't complete.
tokens = _finish_tokens(tokens)
# For stochastic Tasks, there is no cache; always generate a new Program.
# For deterministic Programs, if the Program is in the cache, return it;
# otherwise, create a new one and add it to the cache.
if skip_cache:
p = Program(tokens, optimize=optimize)
elif Program.task.stochastic:
p = Program(tokens, optimize=optimize)
else:
key = tokens.tostring()
if key in Program.cache:
p = Program.cache[key]
p.count += 1
else:
p = Program(tokens, optimize=optimize)
Program.cache[key] = p
return p
class Program(object):
"""
The executable program representing the symbolic expression.
The program comprises unary/binary operators, constant placeholders
(to-be-optimized), input variables, and hard-coded constants.
Parameters
----------
tokens : list of integers
A list of integers corresponding to tokens in the library. "Dangling"
programs are completed with repeated "x1" until the expression
completes.
optimize : bool
Whether to optimize the program upon initializing it.
Attributes
----------
traversal : list
List of operators (type: Function) and terminals (type: int, float, or
str ("const")) encoding the pre-order traversal of the expression tree.
tokens : np.ndarray (dtype: int)
Array of integers whose values correspond to indices
const_pos : list of int
A list of indices of constant placeholders along the traversal.
float_pos : list of int
A list of indices of constant placeholders or floating-point constants
along the traversal.
sympy_expr : str
The (lazily calculated) SymPy expression corresponding to the program.
Used for pretty printing _only_.
base_r : float
The base reward (reward without penalty) of the program on the training
data.
complexity : float
The (lazily calculated) complexity of the program.
r : float
The (lazily calculated) reward of the program on the training data.
count : int
The number of times this Program has been sampled.
str : str
String representation of tokens. Useful as unique identifier.
"""
# Static variables
task = None # Task
library = None # Library
const_optimizer = None # Function to optimize constants
cache = {}
# Cython-related static variables
have_cython = None # Do we have cython installed
execute = None # Link to execute. Either cython or python
cyfunc = None # Link to cyfunc lib since we do an include inline
def __init__(self, tokens, optimize):
"""
Builds the Program from a list of Tokens, optimizes the Constants
against the reward function, and evaluates the reward.
"""
self.traversal = [Program.library[t] for t in tokens]
self.const_pos = [i for i, t in enumerate(tokens) if Program.library[t].name == "const"] # Just constant placeholder positions
self.len_traversal = len(self.traversal)
if self.have_cython and self.len_traversal > 1:
self.is_input_var = array.array('i', [t.input_var is not None for t in self.traversal])
self.invalid = False
self.str = tokens.tostring()
if optimize:
_ = self.optimize()
self.count = 1
def cython_execute(self, X):
"""Executes the program according to X using Cython.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
y_hats : array-like, shape = [n_samples]
The result of executing the program on X.
"""
if self.len_traversal > 1:
return self.cyfunc.execute(X, self.len_traversal, self.traversal, self.is_input_var)
else:
return self.python_execute(X)
def python_execute(self, X):
"""Executes the program according to X using Python.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
y_hats : array-like, shape = [n_samples]
The result of executing the program on X.
"""
# # Check for single-node programs
# node = self.traversal[0]
# if isinstance(node, float):
# return np.repeat(node, X.shape[0])
# if isinstance(node, int):
# return X[:, node]
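# Stack-based evaluation of the pre-order traversal: every token is pushed as a new
# frame, and once a frame holds a token plus `arity` evaluated arguments it is
# collapsed into an intermediate result that is appended to the parent frame.
# Illustrative trace for a traversal like [add, x1, x2]: x1 and x2 each collapse to
# their column of X, then add is applied to both results.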
apply_stack = []
for node in self.traversal:
apply_stack.append([node])
while len(apply_stack[-1]) == apply_stack[-1][0].arity + 1:
# Apply functions that have sufficient arguments
token = apply_stack[-1][0]
terminals = apply_stack[-1][1:]
# terminals = [np.repeat(t, X.shape[0]) if isinstance(t, float)
# else X[:, t] if isinstance(t, int)
# else t for t in apply_stack[-1][1:]]
if token.input_var is not None:
intermediate_result = X[:, token.input_var]
else:
intermediate_result = token(*terminals)
if len(apply_stack) != 1:
apply_stack.pop()
apply_stack[-1].append(intermediate_result)
else:
return intermediate_result
# We should never get here
assert False, "Function should never get here!"
return None
def optimize(self):
"""
Optimizes the constant tokens against the training data and returns the
optimized constants.
This function generates an objective function based on the training
dataset, reward function, and constant optimizer. It ignores penalties
because the Program structure is fixed, thus penalties are all the same.
It then optimizes the constants of the program and returns the optimized
constants.
Returns
-------
optimized_constants : vector
Array of optimized constants.
"""
# Create the objective function, which is a function of the constants being optimized
def f(consts):
self.set_constants(consts)
r = self.task.reward_function(self)
obj = -r # Constant optimizer minimizes the objective function
# Need to reset to False so that a single invalid call during
# constant optimization doesn't render the whole Program invalid.
self.invalid = False
return obj
assert self.execute is not None, "set_execute needs to be called first"
if len(self.const_pos) > 0:
# Do the optimization
x0 = np.ones(len(self.const_pos)) # Initial guess
optimized_constants = Program.const_optimizer(f, x0)
self.set_constants(optimized_constants)
else:
# No need to optimize if there are no constants
optimized_constants = []
return optimized_constants
def set_constants(self, consts):
"""Sets the program's constants to the given values"""
for i, const in enumerate(consts):
# Create a new instance of PlaceholderConstant instead of changing
# the "values" attribute, otherwise all Programs will have the same
# instance and just overwrite each other's value.
self.traversal[self.const_pos[i]] = PlaceholderConstant(const)
@classmethod
def clear_cache(cls):
"""Clears the class' cache"""
cls.cache = {}
@classmethod
def set_task(cls, task):
"""Sets the class' Task"""
Program.task = task
Program.library = task.library
@classmethod
def set_const_optimizer(cls, name, **kwargs):
"""Sets the class' constant optimizer"""
const_optimizer = make_const_optimizer(name, **kwargs)
Program.const_optimizer = const_optimizer
@classmethod
def set_complexity_penalty(cls, name, weight):
"""Sets the class' complexity penalty"""
all_functions = {
# No penalty
None : lambda p : 0.0,
# Length of tree
"length" : lambda p : len(p)
}
assert name in all_functions, "Unrecognized complexity penalty name"
if weight == 0:
Program.complexity_penalty = lambda p : 0.0
else:
Program.complexity_penalty = lambda p : weight * all_functions[name](p)
@classmethod
def set_execute(cls, protected):
"""Sets which execute method to use"""
"""
If cython ran, we will have a 'c' file generated. The dynamic libary can be
given different names, so it's not reliable for testing if cython ran.
"""
cpath = os.path.join(os.path.dirname(__file__),'cyfunc.c')
if os.path.isfile(cpath):
from . import cyfunc
Program.cyfunc = cyfunc
execute_function = Program.cython_execute
Program.have_cython = True
else:
execute_function = Program.python_execute
Program.have_cython = False
if protected:
Program.execute = execute_function
else:
class InvalidLog():
"""Log class to catch and record numpy warning messages"""
def __init__(self):
self.error_type = None # One of ['divide', 'overflow', 'underflow', 'invalid']
self.error_node = None # E.g. 'exp', 'log', 'true_divide'
self.new_entry = False # Flag for whether a warning has been encountered during a call to Program.execute()
def write(self, message):
"""This is called by numpy when encountering a warning"""
if not self.new_entry: # Only record the first warning encounter
message = message.strip().split(' ')
self.error_type = message[1]
self.error_node = message[-1]
self.new_entry = True
def update(self, p):
"""If a floating-point error was encountered, set Program.invalid
to True and record the error type and error node."""
if self.new_entry:
p.invalid = True
p.error_type = self.error_type
p.error_node = self.error_node
self.new_entry = False
invalid_log = InvalidLog()
np.seterrcall(invalid_log) # Tells numpy to call InvalidLog.write() when encountering a warning
# Define closure for execute function
def unsafe_execute(p, X):
"""This is a wrapper for execute_function. If a floating-point error
would be hit, a warning is logged instead, p.invalid is set to True,
and the appropriate nan/inf value is returned. It's up to the task's
reward function to decide how to handle nans/infs."""
with np.errstate(all='log'):
y = execute_function(p, X)
invalid_log.update(p)
return y
Program.execute = unsafe_execute
@cached_property
def complexity(self):
"""Evaluates and returns the complexity of the program"""
return Program.complexity_penalty(self.traversal)
@cached_property
def base_r(self):
"""Evaluates and returns the base reward of the program on the training
set"""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return self.task.reward_function(self)
@cached_property
def r(self):
"""Evaluates and returns the reward of the program on the training
set"""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return self.base_r - self.complexity
@cached_property
def evaluate(self):
"""Evaluates and returns the evaluation metrics of the program."""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return self.task.evaluate(self)
@cached_property
def complexity_eureqa(self):
"""Computes sum of token complexity based on Eureqa complexity measures."""
complexity = sum([t.complexity for t in self.traversal])
return complexity
@cached_property
def sympy_expr(self):
"""
Returns the attribute self.sympy_expr.
This is actually a bit complicated because we have to go: traversal -->
tree --> serialized tree --> SymPy expression
"""
tree = self.traversal.copy()
tree = build_tree(tree)
tree = convert_to_sympy(tree)
try:
expr = parse_expr(tree.__repr__()) # SymPy expression
except:
expr = "N/A"
return expr
def pretty(self):
"""Returns pretty printed string of the program"""
return pretty(self.sympy_expr)
def print_stats(self):
"""Prints the statistics of the program"""
print("\tReward: {}".format(self.r))
print("\tBase reward: {}".format(self.base_r))
print("\tCount: {}".format(self.count))
print("\tInvalid: {}".format(self.invalid))
print("\tTraversal: {}".format(self))
print("\tExpression:")
print("{}\n".format(indent(self.pretty(), '\t ')))
def __repr__(self):
"""Prints the program's traversal"""
return ','.join([repr(t) for t in self.traversal])
###############################################################################
# Everything below this line is currently only being used for pretty printing #
###############################################################################
# Possible library elements that sympy capitalizes
capital = ["add", "mul", "pow"]
class Node(object):
"""Basic tree class supporting printing"""
def __init__(self, val):
self.val = val
self.children = []
def __repr__(self):
children_repr = ",".join(repr(child) for child in self.children)
if len(self.children) == 0:
return self.val # Avoids unnecessary parentheses, e.g. x1()
return "{}({})".format(self.val, children_repr)
def build_tree(traversal):
"""Recursively builds tree from pre-order traversal"""
op = traversal.pop(0)
n_children = op.arity
val = repr(op)
if val in capital:
val = val.capitalize()
node = Node(val)
for _ in range(n_children):
node.children.append(build_tree(traversal))
return node
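# Illustrative example (token names are assumptions): the pre-order traversal
# [div, x1, x2] builds Node("div") with children x1 and x2; convert_to_sympy below
# then rewrites it as Mul(x1, Pow(x2, -1)) so parse_expr can handle it.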
def convert_to_sympy(node):
"""Adjusts trees to only use node values supported by sympy"""
if node.val == "div":
node.val = "Mul"
new_right = Node("Pow")
new_right.children.append(node.children[1])
new_right.children.append(Node("-1"))
node.children[1] = new_right
elif node.val == "sub":
node.val = "Add"
new_right = Node("Mul")
new_right.children.append(node.children[1])
new_right.children.append(Node("-1"))
node.children[1] = new_right
elif node.val == "inv":
node.val = Node("Pow")
node.children.append(Node("-1"))
elif node.val == "neg":
node.val = Node("Mul")
node.children.append(Node("-1"))
elif node.val == "n2":
node.val = "Pow"
node.children.append(Node("2"))
elif node.val == "n3":
node.val = "Pow"
node.children.append(Node("3"))
elif node.val == "n4":
node.val = "Pow"
node.children.append(Node("4"))
for child in node.children:
convert_to_sympy(child)
return node
|
python
|
import os.path
import sys
import warnings
from setuptools import find_packages, setup
if sys.version_info < (2, 7):
raise NotImplementedError(
"""\n
##############################################################
# globus-sdk does not support python versions older than 2.7 #
##############################################################"""
)
# warn on older/untested python3s
# it's not disallowed, but it could be an issue for some people
if sys.version_info > (3,) and sys.version_info < (3, 5):
warnings.warn(
"Installing globus-sdk on Python 3 versions older than 3.5 "
"may result in degraded functionality or even errors."
)
# single source of truth for package version
version_ns = {}
with open(os.path.join("globus_sdk", "version.py")) as f:
exec(f.read(), version_ns)
setup(
name="globus-sdk",
version=version_ns["__version__"],
description="Globus SDK for Python",
long_description=open("README.rst").read(),
author="Globus Team",
author_email="[email protected]",
url="https://github.com/globus/globus-sdk-python",
packages=find_packages(exclude=["tests", "tests.*"]),
install_requires=[
"requests>=2.9.2,<3.0.0",
"six>=1.10.0,<2.0.0",
"pyjwt[crypto]>=1.5.3,<2.0.0",
],
extras_require={
# empty extra included to support older installs
"jwt": [],
# the development extra is for SDK developers only
"development": [
# drive testing with tox
"tox>=3.5.3,<4.0",
# linting
"flake8>=3.0,<4.0",
'isort>=5.1.4,<6.0;python_version>="3.6"',
# black requires py3.6+
# refrain from using 19.10b0 or later until
# https://github.com/psf/black/issues/1288
# is fixed
'black==19.3b0;python_version>="3.6"',
# flake-bugbear requires py3.6+
'flake8-bugbear==20.1.4;python_version>="3.6"',
# testing
"pytest<5.0",
"pytest-cov<3.0",
"pytest-xdist<2.0",
# mock on py2, py3.4 and py3.5
# not just py2: py3 versions of mock don't all have the same
# interface!
'mock==2.0.0;python_version<"3.6"',
# mocking HTTP responses
"httpretty==0.9.5",
# builds + uploads to pypi
'twine==3.2.0;python_version>="3.6"',
'wheel==0.34.2;python_version>="3.6"',
# docs
'sphinx==3.1.2;python_version>="3.6"',
'sphinx-material==0.0.30;python_version>="3.6"',
],
},
include_package_data=True,
keywords=["globus", "file transfer"],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Communications :: File Sharing",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Software Development :: Libraries :: Python Modules",
],
)
|
python
|
import json
from collections import defaultdict
from decimal import Decimal
import bleach
import dateutil.parser
import pytz
from django.dispatch import receiver
from django.urls import reverse
from django.utils.formats import date_format
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.translation import pgettext_lazy, ugettext_lazy as _
from i18nfield.strings import LazyI18nString
from pretix.base.models import (
CheckinList, Event, ItemVariation, LogEntry, OrderPosition,
)
from pretix.base.signals import logentry_display
from pretix.base.templatetags.money import money_filter
OVERVIEW_BLACKLIST = [
'pretix.plugins.sendmail.order.email.sent'
]
def _display_order_changed(event: Event, logentry: LogEntry):
data = json.loads(logentry.data)
text = _('The order has been changed:')
if logentry.action_type == 'pretix.event.order.changed.item':
old_item = str(event.items.get(pk=data['old_item']))
if data['old_variation']:
old_item += ' - ' + str(ItemVariation.objects.get(item__event=event, pk=data['old_variation']))
new_item = str(event.items.get(pk=data['new_item']))
if data['new_variation']:
new_item += ' - ' + str(ItemVariation.objects.get(item__event=event, pk=data['new_variation']))
return text + ' ' + _('Position #{posid}: {old_item} ({old_price}) changed '
'to {new_item} ({new_price}).').format(
posid=data.get('positionid', '?'),
old_item=old_item, new_item=new_item,
old_price=money_filter(Decimal(data['old_price']), event.currency),
new_price=money_filter(Decimal(data['new_price']), event.currency),
)
elif logentry.action_type == 'pretix.event.order.changed.subevent':
old_se = str(event.subevents.get(pk=data['old_subevent']))
new_se = str(event.subevents.get(pk=data['new_subevent']))
return text + ' ' + _('Position #{posid}: Event date "{old_event}" ({old_price}) changed '
'to "{new_event}" ({new_price}).').format(
posid=data.get('positionid', '?'),
old_event=old_se, new_event=new_se,
old_price=money_filter(Decimal(data['old_price']), event.currency),
new_price=money_filter(Decimal(data['new_price']), event.currency),
)
elif logentry.action_type == 'pretix.event.order.changed.price':
return text + ' ' + _('Price of position #{posid} changed from {old_price} '
'to {new_price}.').format(
posid=data.get('positionid', '?'),
old_price=money_filter(Decimal(data['old_price']), event.currency),
new_price=money_filter(Decimal(data['new_price']), event.currency),
)
elif logentry.action_type == 'pretix.event.order.changed.cancel':
old_item = str(event.items.get(pk=data['old_item']))
if data['old_variation']:
old_item += ' - ' + str(ItemVariation.objects.get(pk=data['old_variation']))
return text + ' ' + _('Position #{posid} ({old_item}, {old_price}) canceled.').format(
posid=data.get('positionid', '?'),
old_item=old_item,
old_price=money_filter(Decimal(data['old_price']), event.currency),
)
elif logentry.action_type == 'pretix.event.order.changed.add':
item = str(event.items.get(pk=data['item']))
if data['variation']:
item += ' - ' + str(ItemVariation.objects.get(item__event=event, pk=data['variation']))
if data['addon_to']:
addon_to = OrderPosition.objects.get(order__event=event, pk=data['addon_to'])
return text + ' ' + _('Position #{posid} created: {item} ({price}) as an add-on to '
'position #{addon_to}.').format(
posid=data.get('positionid', '?'),
item=item, addon_to=addon_to.positionid,
price=money_filter(Decimal(data['price']), event.currency),
)
else:
return text + ' ' + _('Position #{posid} created: {item} ({price}).').format(
posid=data.get('positionid', '?'),
item=item,
price=money_filter(Decimal(data['price']), event.currency),
)
elif logentry.action_type == 'pretix.event.order.changed.secret':
return text + ' ' + _('A new secret has been generated for position #{posid}.').format(
posid=data.get('positionid', '?'),
)
elif logentry.action_type == 'pretix.event.order.changed.split':
old_item = str(event.items.get(pk=data['old_item']))
if data['old_variation']:
old_item += ' - ' + str(ItemVariation.objects.get(pk=data['old_variation']))
url = reverse('control:event.order', kwargs={
'event': event.slug,
'organizer': event.organizer.slug,
'code': data['new_order']
})
return mark_safe(escape(text) + ' ' + _('Position #{posid} ({old_item}, {old_price}) split into new order: {order}').format(
old_item=escape(old_item),
posid=data.get('positionid', '?'),
order='<a href="{}">{}</a>'.format(url, data['new_order']),
old_price=money_filter(Decimal(data['old_price']), event.currency),
))
elif logentry.action_type == 'pretix.event.order.changed.split_from':
return _('This order has been created by splitting the order {order}').format(
order=data['original_order'],
)
def _display_checkin(event, logentry):
data = logentry.parsed_data
show_dt = False
if 'datetime' in data:
dt = dateutil.parser.parse(data.get('datetime'))
show_dt = abs((logentry.datetime - dt).total_seconds()) > 60 or 'forced' in data
tz = pytz.timezone(event.settings.timezone)
dt_formatted = date_format(dt.astimezone(tz), "SHORT_DATETIME_FORMAT")
if 'list' in data:
try:
checkin_list = event.checkin_lists.get(pk=data.get('list')).name
except CheckinList.DoesNotExist:
checkin_list = _("(unknown)")
else:
checkin_list = _("(unknown)")
if data.get('first'):
if show_dt:
return _('Position #{posid} has been checked in at {datetime} for list "{list}".').format(
posid=data.get('positionid'),
datetime=dt_formatted,
list=checkin_list
)
else:
return _('Position #{posid} has been checked in for list "{list}".').format(
posid=data.get('positionid'),
list=checkin_list
)
else:
if data.get('forced'):
return _(
'A scan for position #{posid} at {datetime} for list "{list}" has been uploaded even though it has '
'been scanned already.'
).format(
posid=data.get('positionid'),
datetime=dt_formatted,
list=checkin_list
)
return _(
'Position #{posid} has been scanned and rejected because it has already been scanned before '
'on list "{list}".'
).format(
posid=data.get('positionid'),
list=checkin_list
)
@receiver(signal=logentry_display, dispatch_uid="pretixcontrol_logentry_display")
def pretixcontrol_logentry_display(sender: Event, logentry: LogEntry, **kwargs):
plains = {
'pretix.event.comment': _('The event\'s internal comment has been updated.'),
'pretix.event.order.modified': _('The order details have been changed.'),
'pretix.event.order.unpaid': _('The order has been marked as unpaid.'),
'pretix.event.order.secret.changed': _('The order\'s secret has been changed.'),
'pretix.event.order.expirychanged': _('The order\'s expiry date has been changed.'),
'pretix.event.order.expired': _('The order has been marked as expired.'),
'pretix.event.order.paid': _('The order has been marked as paid.'),
'pretix.event.order.refunded': _('The order has been refunded.'),
'pretix.event.order.canceled': _('The order has been canceled.'),
'pretix.event.order.deleted': _('The test mode order {code} has been deleted.'),
'pretix.event.order.placed': _('The order has been created.'),
'pretix.event.order.placed.require_approval': _('The order requires approval before it can continue to be processed.'),
'pretix.event.order.approved': _('The order has been approved.'),
'pretix.event.order.denied': _('The order has been denied.'),
'pretix.event.order.contact.changed': _('The email address has been changed from "{old_email}" '
'to "{new_email}".'),
'pretix.event.order.locale.changed': _('The order locale has been changed.'),
'pretix.event.order.invoice.generated': _('The invoice has been generated.'),
'pretix.event.order.invoice.regenerated': _('The invoice has been regenerated.'),
'pretix.event.order.invoice.reissued': _('The invoice has been reissued.'),
'pretix.event.order.comment': _('The order\'s internal comment has been updated.'),
'pretix.event.order.checkin_attention': _('The order\'s flag to require attention at check-in has been '
'toggled.'),
'pretix.event.order.payment.changed': _('A new payment {local_id} has been started instead of the previous one.'),
'pretix.event.order.email.sent': _('An email of an unidentified type has been sent.'),
'pretix.event.order.email.error': _('Sending of an email has failed.'),
'pretix.event.order.email.attachments.skipped': _('The email has been sent without attachments since they '
'would have been too large to be likely to arrive.'),
'pretix.event.order.email.custom_sent': _('A custom email has been sent.'),
'pretix.event.order.email.download_reminder_sent': _('An email has been sent with a reminder that the ticket '
'is available for download.'),
'pretix.event.order.email.expire_warning_sent': _('An email has been sent with a warning that the order is about '
'to expire.'),
'pretix.event.order.email.order_canceled': _('An email has been sent to notify the user that the order has been canceled.'),
'pretix.event.order.email.order_changed': _('An email has been sent to notify the user that the order has been changed.'),
'pretix.event.order.email.order_free': _('An email has been sent to notify the user that the order has been received.'),
'pretix.event.order.email.order_paid': _('An email has been sent to notify the user that payment has been received.'),
'pretix.event.order.email.order_denied': _('An email has been sent to notify the user that the order has been denied.'),
'pretix.event.order.email.order_approved': _('An email has been sent to notify the user that the order has '
'been approved.'),
'pretix.event.order.email.order_placed': _('An email has been sent to notify the user that the order has been received and requires payment.'),
'pretix.event.order.email.order_placed_require_approval': _('An email has been sent to notify the user that '
'the order has been received and requires '
'approval.'),
'pretix.event.order.email.resend': _('An email with a link to the order detail page has been resent to the user.'),
'pretix.event.order.payment.confirmed': _('Payment {local_id} has been confirmed.'),
'pretix.event.order.payment.canceled': _('Payment {local_id} has been canceled.'),
'pretix.event.order.payment.started': _('Payment {local_id} has been started.'),
'pretix.event.order.payment.failed': _('Payment {local_id} has failed.'),
'pretix.event.order.quotaexceeded': _('The order could not be marked as paid: {message}'),
'pretix.event.order.overpaid': _('The order has been overpaid.'),
'pretix.event.order.refund.created': _('Refund {local_id} has been created.'),
'pretix.event.order.refund.created.externally': _('Refund {local_id} has been created by an external entity.'),
'pretix.event.order.refund.requested': _('The customer requested you to issue a refund.'),
'pretix.event.order.refund.done': _('Refund {local_id} has been completed.'),
'pretix.event.order.refund.canceled': _('Refund {local_id} has been canceled.'),
'pretix.event.order.refund.failed': _('Refund {local_id} has failed.'),
'pretix.control.auth.user.created': _('The user has been created.'),
'pretix.user.settings.2fa.enabled': _('Two-factor authentication has been enabled.'),
'pretix.user.settings.2fa.disabled': _('Two-factor authentication has been disabled.'),
'pretix.user.settings.2fa.regenemergency': _('Your two-factor emergency codes have been regenerated.'),
'pretix.user.settings.2fa.device.added': _('A new two-factor authentication device "{name}" has been added to '
'your account.'),
'pretix.user.settings.2fa.device.deleted': _('The two-factor authentication device "{name}" has been removed '
'from your account.'),
'pretix.user.settings.notifications.enabled': _('Notifications have been enabled.'),
'pretix.user.settings.notifications.disabled': _('Notifications have been disabled.'),
'pretix.user.settings.notifications.changed': _('Your notification settings have been changed.'),
'pretix.user.anonymized': _('This user has been anonymized.'),
'pretix.user.oauth.authorized': _('The application "{application_name}" has been authorized to access your '
'account.'),
'pretix.control.auth.user.forgot_password.mail_sent': _('Password reset mail sent.'),
'pretix.control.auth.user.forgot_password.recovered': _('The password has been reset.'),
'pretix.organizer.deleted': _('The organizer "{name}" has been deleted.'),
'pretix.voucher.added': _('The voucher has been created.'),
'pretix.voucher.added.waitinglist': _('The voucher has been created and sent to a person on the waiting list.'),
'pretix.voucher.changed': _('The voucher has been changed.'),
'pretix.voucher.deleted': _('The voucher has been deleted.'),
'pretix.voucher.redeemed': _('The voucher has been redeemed in order {order_code}.'),
'pretix.event.item.added': _('The product has been created.'),
'pretix.event.item.changed': _('The product has been changed.'),
'pretix.event.item.deleted': _('The product has been deleted.'),
'pretix.event.item.variation.added': _('The variation "{value}" has been created.'),
'pretix.event.item.variation.deleted': _('The variation "{value}" has been deleted.'),
'pretix.event.item.variation.changed': _('The variation "{value}" has been changed.'),
'pretix.event.item.addons.added': _('An add-on has been added to this product.'),
'pretix.event.item.addons.removed': _('An add-on has been removed from this product.'),
'pretix.event.item.addons.changed': _('An add-on has been changed on this product.'),
'pretix.event.quota.added': _('The quota has been added.'),
'pretix.event.quota.deleted': _('The quota has been deleted.'),
'pretix.event.quota.changed': _('The quota has been changed.'),
'pretix.event.category.added': _('The category has been added.'),
'pretix.event.category.deleted': _('The category has been deleted.'),
'pretix.event.category.changed': _('The category has been changed.'),
'pretix.event.question.added': _('The question has been added.'),
'pretix.event.question.deleted': _('The question has been deleted.'),
'pretix.event.question.changed': _('The question has been changed.'),
'pretix.event.taxrule.added': _('The tax rule has been added.'),
'pretix.event.taxrule.deleted': _('The tax rule has been deleted.'),
'pretix.event.taxrule.changed': _('The tax rule has been changed.'),
'pretix.event.checkinlist.added': _('The check-in list has been added.'),
'pretix.event.checkinlist.deleted': _('The check-in list has been deleted.'),
'pretix.event.checkinlist.changed': _('The check-in list has been changed.'),
'pretix.event.settings': _('The event settings have been changed.'),
'pretix.event.tickets.settings': _('The ticket download settings have been changed.'),
'pretix.event.plugins.enabled': _('A plugin has been enabled.'),
'pretix.event.plugins.disabled': _('A plugin has been disabled.'),
'pretix.event.live.activated': _('The shop has been taken live.'),
'pretix.event.live.deactivated': _('The shop has been taken offline.'),
'pretix.event.testmode.activated': _('The shop has been taken into test mode.'),
'pretix.event.testmode.deactivated': _('The test mode has been disabled.'),
'pretix.event.added': _('The event has been created.'),
'pretix.event.changed': _('The event settings have been changed.'),
'pretix.event.question.option.added': _('An answer option has been added to the question.'),
'pretix.event.question.option.deleted': _('An answer option has been removed from the question.'),
'pretix.event.question.option.changed': _('An answer option has been changed.'),
'pretix.event.permissions.added': _('A user has been added to the event team.'),
'pretix.event.permissions.invited': _('A user has been invited to the event team.'),
'pretix.event.permissions.changed': _('A user\'s permissions have been changed.'),
'pretix.event.permissions.deleted': _('A user has been removed from the event team.'),
'pretix.waitinglist.voucher': _('A voucher has been sent to a person on the waiting list.'),
'pretix.event.orders.waitinglist.deleted': _('An entry has been removed from the waiting list.'),
'pretix.event.orders.waitinglist.changed': _('An entry has been changed on the waiting list.'),
'pretix.event.orders.waitinglist.added': _('An entry has been added to the waiting list.'),
'pretix.team.created': _('The team has been created.'),
'pretix.team.changed': _('The team settings have been changed.'),
'pretix.team.deleted': _('The team has been deleted.'),
'pretix.subevent.deleted': pgettext_lazy('subevent', 'The event date has been deleted.'),
'pretix.subevent.changed': pgettext_lazy('subevent', 'The event date has been changed.'),
'pretix.subevent.added': pgettext_lazy('subevent', 'The event date has been created.'),
'pretix.subevent.quota.added': pgettext_lazy('subevent', 'A quota has been added to the event date.'),
'pretix.subevent.quota.changed': pgettext_lazy('subevent', 'A quota has been changed on the event date.'),
'pretix.subevent.quota.deleted': pgettext_lazy('subevent', 'A quota has been removed from the event date.'),
'pretix.device.created': _('The device has been created.'),
'pretix.device.changed': _('The device has been changed.'),
'pretix.device.revoked': _('Access of the device has been revoked.'),
'pretix.device.initialized': _('The device has been initialized.'),
'pretix.device.keyroll': _('The access token of the device has been regenerated.'),
'pretix.device.updated': _('The device has notified the server of a hardware or software update.'),
}
data = json.loads(logentry.data)
if logentry.action_type.startswith('pretix.event.item.variation'):
if 'value' not in data:
# Backwards compatibility
var = ItemVariation.objects.filter(id=data['id']).first()
if var:
data['value'] = str(var.value)
else:
data['value'] = '?'
else:
data['value'] = LazyI18nString(data['value'])
if logentry.action_type in plains:
data = defaultdict(lambda: '?', data)
return plains[logentry.action_type].format_map(data)
if logentry.action_type.startswith('pretix.event.order.changed'):
return _display_order_changed(sender, logentry)
if logentry.action_type.startswith('pretix.event.payment.provider.'):
return _('The settings of a payment provider have been changed.')
if logentry.action_type.startswith('pretix.event.tickets.provider.'):
return _('The settings of a ticket output provider have been changed.')
if logentry.action_type == 'pretix.event.order.consent':
return _('The user confirmed the following message: "{}"').format(
bleach.clean(logentry.parsed_data.get('msg'), tags=[], strip=True)
)
if logentry.action_type == 'pretix.event.checkin':
return _display_checkin(sender, logentry)
if logentry.action_type == 'pretix.control.views.checkin':
# deprecated
dt = dateutil.parser.parse(data.get('datetime'))
tz = pytz.timezone(sender.settings.timezone)
dt_formatted = date_format(dt.astimezone(tz), "SHORT_DATETIME_FORMAT")
if 'list' in data:
try:
checkin_list = sender.checkin_lists.get(pk=data.get('list')).name
except CheckinList.DoesNotExist:
checkin_list = _("(unknown)")
else:
checkin_list = _("(unknown)")
if data.get('first'):
return _('Position #{posid} has been checked in manually at {datetime} on list "{list}".').format(
posid=data.get('positionid'),
datetime=dt_formatted,
list=checkin_list,
)
return _('Position #{posid} has been checked in again at {datetime} on list "{list}".').format(
posid=data.get('positionid'),
datetime=dt_formatted,
list=checkin_list
)
if logentry.action_type in ('pretix.control.views.checkin.reverted', 'pretix.event.checkin.reverted'):
if 'list' in data:
try:
checkin_list = sender.checkin_lists.get(pk=data.get('list')).name
except CheckinList.DoesNotExist:
checkin_list = _("(unknown)")
else:
checkin_list = _("(unknown)")
return _('The check-in of position #{posid} on list "{list}" has been reverted.').format(
posid=data.get('positionid'),
list=checkin_list,
)
if logentry.action_type == 'pretix.team.member.added':
return _('{user} has been added to the team.').format(user=data.get('email'))
if logentry.action_type == 'pretix.team.member.removed':
return _('{user} has been removed from the team.').format(user=data.get('email'))
if logentry.action_type == 'pretix.team.member.joined':
return _('{user} has joined the team using the invite sent to {email}.').format(
user=data.get('email'), email=data.get('invite_email')
)
if logentry.action_type == 'pretix.team.invite.created':
return _('{user} has been invited to the team.').format(user=data.get('email'))
if logentry.action_type == 'pretix.team.invite.resent':
return _('Invite for {user} has been resent.').format(user=data.get('email'))
if logentry.action_type == 'pretix.team.invite.deleted':
return _('The invite for {user} has been revoked.').format(user=data.get('email'))
if logentry.action_type == 'pretix.team.token.created':
return _('The token "{name}" has been created.').format(name=data.get('name'))
if logentry.action_type == 'pretix.team.token.deleted':
return _('The token "{name}" has been revoked.').format(name=data.get('name'))
if logentry.action_type == 'pretix.user.settings.changed':
text = str(_('Your account settings have been changed.'))
if 'email' in data:
text = text + ' ' + str(_('Your email address has been changed to {email}.').format(email=data['email']))
if 'new_pw' in data:
text = text + ' ' + str(_('Your password has been changed.'))
if data.get('is_active') is True:
text = text + ' ' + str(_('Your account has been enabled.'))
elif data.get('is_active') is False:
text = text + ' ' + str(_('Your account has been disabled.'))
return text
if logentry.action_type == 'pretix.control.auth.user.impersonated':
return str(_('You impersonated {}.')).format(data['other_email'])
if logentry.action_type == 'pretix.control.auth.user.impersonate_stopped':
return str(_('You stopped impersonating {}.')).format(data['other_email'])
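# Illustrative example (the LogEntry instance is hypothetical): an entry with
# action_type 'pretix.event.order.paid' and empty data is resolved through the
# `plains` mapping above and rendered as "The order has been marked as paid."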
|
python
|
import mcradar as mcr
import xarray as xr
import numpy as np
import pandas as pd
import os
from IPython.core.debugger import Tracer ; debug=Tracer() #insert this line somewhere to debug
def getApectRatio(radii):
# input radii [mm]
# Auer et al. 1970 (The Dimension of Ice Crystals in Natural Clouds)
diameter = 2 * radii *1e3 # calculating the diameter in [mu m]
h = 2.020 * (diameter)**0.449
as_ratio = h / diameter
return as_ratio
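# Quick sanity check of the relation above (illustrative, using the formula as written):
# for radii = 0.5 mm the diameter is 1000 µm, h = 2.020 * 1000**0.449 ≈ 45 µm,
# i.e. an aspect ratio of roughly 0.045 (thin plate-like crystals).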
#reading the data file
dataPath = "data"
fileName = "mass2fr_0300-0600min_avtstep_5.ncdf"
filePath = os.path.join(dataPath, fileName)
data = xr.open_dataset(filePath)
#fake time
time = np.ones_like(data.dim_SP_all_av150)
#calculating the aspec ratio
sPhi = np.ones_like(data.dim_SP_all_av150)*np.nan
sPhi = getApectRatio(data.diam * 1e3)
sPhi[data.mm.values > 1]=0.6
#converting to pandas dataframe
dataTable = data.to_dataframe()
dataTable = dataTable.rename(columns={'m_tot':'mTot', 'height':'sHeight',
'vt':'vel', 'diam':'dia','xi':'sMult'})
#settings
dicSettings = mcr.loadSettings(dataPath='_', freq=np.array([9.6e9]),
maxHeight=3000, minHeight=2500,
heightRes=5)
#adding required variables
dataTable['radii'] = dataTable['dia'] / 2.# particle radius in m
dataTable['time']=time
PSD_method="bin" #"bin": count SP and their multiplicity in height and size bins; "1D_KDE": #DOES NOT WORK YET!! 1-dimensional kernel density estimate, "discrete_SP": calculate scattering properties of each SP individually
if PSD_method in ["bin","1D_KDE"]:
#some definitions
nbins = 100 #number of used bins
n_heights = 50
model_top = 3850 #[m] #TODO: read this from output
minR =-4 #minimum R considered (log10-space)
maxR = 0 #maximum R considered (log10-space)
area_box = 5 #[m2] #TODO: read this from output
Rgrid=np.logspace(minR,maxR,nbins)
Rgrid_log=np.linspace(minR,maxR,nbins)
Rgrid_logdiff=Rgrid_log[1]-Rgrid_log[0]
heightvec_bound = np.linspace(0,model_top,n_heights)
#heightvec_bound = np.linspace(2900,3000,5) #TODO: remove (only for debugging)
    Vbox = area_box * (heightvec_bound[1] - heightvec_bound[0])  # [m3]
reducedDataTable = pd.DataFrame()
for i_height in range(len(heightvec_bound)-1):
print("calculate h=",heightvec_bound[i_height])
#initialize as many dataFrame as categories
#one category must have the same particle properties (mass, velocity) at the same size
dataBINmono = pd.DataFrame(data={"Rgrid": Rgrid}) #initialize dataFrame
dataBINagg = pd.DataFrame(data={"Rgrid": Rgrid}) #initialize dataFrame
#select subset of particles in a given height range
condition_in_height = np.logical_and(heightvec_bound[i_height]<dataTable["sHeight"],heightvec_bound[i_height+1]>dataTable["sHeight"])
#select monomers and aggregates
cond_mono=np.logical_and(dataTable["mm"]==1, condition_in_height) #monomers
cond_agg=np.logical_and(dataTable["mm"]>1, condition_in_height) #aggregates
datamono = dataTable[cond_mono]
dataagg = dataTable[cond_agg]
for key in ["sMult","vel","mTot"]:
dataBINmono[key] = np.zeros_like(Rgrid)
dataBINagg[key] = np.zeros_like(Rgrid)
for i_rad,rad in enumerate(Rgrid[:-1]):
inbinmono = np.logical_and(Rgrid[i_rad]<datamono["radii"],Rgrid[i_rad+1]>datamono["radii"])
inbinagg = np.logical_and(Rgrid[i_rad]<dataagg["radii"],Rgrid[i_rad+1]>dataagg["radii"])
if sum(inbinmono)>0:
for var_key in ["mTot","vel"]:
dataBINmono[var_key][i_rad] = datamono[inbinmono].iloc[0][var_key] #mass in grams #TODO: calculate m (either by picking a particle from inside the bin or from the m-D relation or build an average of the particle)
if sum(inbinagg)>0:
for var_key in ["mTot","vel"]:
dataBINagg[var_key][i_rad] = dataagg[inbinagg].iloc[0][var_key] #mass in grams #TODO: calculate m (either by picking a particle from inside the bin or from the m-D relation or build an average of the particle)
if PSD_method=="bin":
for i_rad,rad in enumerate(Rgrid[:-1]):
inbinmono = np.logical_and(Rgrid[i_rad]<datamono["radii"],Rgrid[i_rad+1]>datamono["radii"])
inbinagg = np.logical_and(Rgrid[i_rad]<dataagg["radii"],Rgrid[i_rad+1]>dataagg["radii"])
if sum(inbinmono)>0:
dataBINmono["sMult"][i_rad] = np.nansum(datamono[inbinmono]["sMult"])
if sum(inbinagg)>0:
dataBINagg["sMult"][i_rad] = np.nansum(dataagg[inbinagg]["sMult"])
#print(i_rad,dataBINmono["sMult"][i_rad],dataBINagg["sMult"][i_rad],dataBINagg["mTot"][i_rad],dataBINagg["vel"][i_rad])
elif PSD_method=="1D_KDE": #does not work yet!!
#calculating number density [#/m3]
#MONOMERS
dataBINmono["sMult"] = mcr.tableOperator.kernel_estimate(dataTable["radii"][cond_mono],np.log(Rgrid),weight=dataTable["sMult"][cond_mono],sigma0=0.001)*Rgrid_logdiff #/Vbox #TODO:
#AGGREGATES
dataBINagg["sMult"] = mcr.tableOperator.kernel_estimate(dataTable["radii"][cond_agg],np.log(Rgrid),weight=dataTable["sMult"][cond_agg])*Rgrid_logdiff/Vbox
#for i_rad,Mult in enumerate(dataBINagg["sMult"]):
# print(i_rad,dataBINmono["sMult"][i_rad],dataBINagg["sMult"][i_rad],dataBINagg["mTot"][i_rad],dataBINagg["vel"][i_rad])
#some general properties and conversions which are independent of the actual SP-list
dataBINmono['radii_mm'] = dataBINmono['Rgrid'] * 1e3 # particle radius in mm
dataBINagg['radii_mm'] = dataBINagg['Rgrid'] * 1e3 # particle radius in mm
        dataBINmono['sPhi'] = getAspectRatio(dataBINmono.radii_mm)
dataBINagg['sPhi'] = 0.6
for df in [dataBINmono,dataBINagg]:
            df['dia_cm'] = df['Rgrid'] * 1e2 * 2  # particle diameter in cm
df['time']=np.ones_like(df.radii_mm)
            df['sHeight'] = (heightvec_bound[i_height] + heightvec_bound[i_height+1]) / 2  # bin-centre height
            df['mTot_g'] = df['mTot'] * 1e3  # mass in grams
#calculating density
df = mcr.tableOperator.calcRho(df)
df = df[(df['sPhi'] >= 0.015)] #TODO: this kills everything larger than 3.8mm
reducedDataTable = pd.concat([reducedDataTable, df])
reducedDataTable = reducedDataTable[(reducedDataTable['sMult']>1.0)]
print(reducedDataTable)
print("?")
#starting the simulation
output = mcr.fullRadar(dicSettings, reducedDataTable)
print(output)
elif PSD_method=="discrete_SP":
#adding required variables
dataTable['radii_mm'] = dataTable['dia'] * 1e3 /2.# particle radius in mm
dataTable['mTot_g'] = dataTable['mTot'] * 1e3 # mass in grams
dataTable['dia_cm'] = dataTable['dia'] * 1e2 # diameter in centimeters
dataTable['sPhi']=sPhi
dataTable = dataTable[(dataTable['sPhi'] >= 0.015)]
# dataTable['sMult']=1 #(it deactivates the multiplicity)
#calculating density
dataTable = mcr.tableOperator.calcRho(dataTable)
#settings
dicSettings = mcr.loadSettings(dataPath='_', freq=np.array([9.6e9]),
maxHeight=3000, minHeight=2500,
heightRes=5)
#starting the simulation
output = mcr.fullRadar(dicSettings, dataTable)
#saving the data
#output.to_netcdf('comp_smult1.nc')
output.to_netcdf('comp.nc')
debug()
|
python
|
import numpy as np
"""
v_l object. c*r^{n-2}*exp{-e*r^2}
"""
class rnExp:
def __init__(self, n, e, c):
self.n = np.asarray(n)
self.e = np.asarray(e)
self.c = np.asarray(c)
def __call__(self, r):
return np.sum(
r[:, np.newaxis] ** self.n
* self.c
* np.exp(-self.e * r[:, np.newaxis] ** 2),
axis=1,
)
def generate_ecp_functors(coeffs):
"""
Returns a functor, with keys as the angular momenta:
-1 stands for the nonlocal part, 0,1,2,... are the s,p,d channels, etc.
Parameters:
mol._ecp[atom_name][1] (coefficients of the ECP)
Returns:
v_l function, with key = angular momentum
"""
d = {}
for c in coeffs:
el = c[0]
rn = []
exponent = []
coefficient = []
for n, expand in enumerate(c[1]):
# print("r",n-2,"coeff",expand)
for line in expand:
rn.append(n - 2)
exponent.append(line[0])
coefficient.append(line[1])
d[el] = rnExp(rn, exponent, coefficient)
return d
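# Hedged usage sketch (the nested-list layout below only mimics the expected
# mol._ecp coefficient format; the numbers are made up for illustration):
#   coeffs = [[-1, [[], [], [[1.0, 2.0]], []]]]   # one nonlocal channel, r^0 term
#   v = generate_ecp_functors(coeffs)
#   v[-1](np.array([0.5, 1.0]))                   # evaluate that channel on a radius grid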
#########################################################################
def P_l(x, l):
"""
Legendre functions,
returns a nconf x naip array for a given l, x=r_ea(i)
Parameters:
x: nconf array, l: integer
Returns:
P_l values: nconf x naip array
"""
if l == 0:
return np.ones(x.shape)
elif l == 1:
return x
elif l == 2:
return 0.5 * (3 * x * x - np.ones(x.shape))
elif l == 3:
return 0.5 * (5 * x * x * x - 3 * x)
elif l == 4:
return 0.125 * (35 * x * x * x * x - 30 * x * x + 3 * np.ones(x.shape))
else:
return np.zeros(x.shape)
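# Hedged sanity check (not executed here): P_l above should agree with numpy's
# Legendre evaluation for 0 <= l <= 4, e.g.
#   x = np.linspace(-1, 1, 5)
#   np.allclose(P_l(x, 3), np.polynomial.legendre.legval(x, [0, 0, 0, 1]))  # -> True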
def get_r_ea(mol, configs, e, at):
"""
Returns a nconf x 3 array, distances between electron e and atom at
Parameters:
      e,at: integers, electron and atom indices
configs: nconf x nelec x 3 array
Returns:
epos-apos, electron-atom distances
"""
epos = configs[:, e, :]
nconf = configs.shape[0]
apos = np.outer(
np.ones(nconf), np.array(mol._atom[at][1])
) # nconf x 3 array, position of atom at
return epos - apos
def get_r_ea_i(mol, epos_rot, e, at):
"""
Returns a nconf x naip x 3 array, distances between the rotated electron (e) and the atom at
Parameters:
epos_rot: rotated positions of electron e, nconf x naip x 3
Returns:
epos_rot-apos, (rotated) electron-atom distances
"""
nconf, naip = epos_rot.shape[0:2]
apos = np.zeros(
[nconf, naip, 3]
) # position of the atom, broadcasted into nconf x naip x 3
for aip in range(naip):
apos[:, aip, :] = np.outer(np.ones(nconf), np.array(mol._atom[at][1]))
return epos_rot - apos
def get_v_l(mol, configs, e, at):
"""
Returns list of the l's, and a nconf x nl array, v_l values for each l: l= 0,1,2,...,-1
"""
nconf = configs.shape[0]
at_name = mol._atom[at][0]
r_ea = np.linalg.norm(get_r_ea(mol, configs, e, at), axis=1)
vl = generate_ecp_functors(mol._ecp[at_name][1])
Lmax = len(vl)
v_l = np.zeros([nconf, Lmax])
for l in vl.keys(): # -1,0,1,...
v_l[:, l] = vl[l](r_ea)
return vl.keys(), v_l
def get_wf_ratio(wf, epos_rot, e):
"""
Returns a nconf x naip array, which is the Psi(r_e(i))/Psi(r_e) values
"""
nconf, naip = epos_rot.shape[0:2]
wf_ratio = np.zeros([nconf, naip])
for aip in range(naip):
wf_ratio[:, aip] = wf.testvalue(e, epos_rot[:, aip, :])
return wf_ratio
def get_P_l(mol, configs, weights, epos_rot, l_list, e, at):
"""
Returns a nconf x naip x nl array, which is the legendre function values for each l channel.
The factor (2l+1) and the quadrature weights are included.
Parameters:
l_list: [-1,0,1,...] list of given angular momenta
weights: integration weights
Return:
P_l values: nconf x naip x nl array
"""
# at_name = mol._atom[at][0]
nconf, naip = epos_rot.shape[0:2]
P_l_val = np.zeros([nconf, naip, len(l_list)])
r_ea = get_r_ea(mol, configs, e, at) # nconf x 3
r_ea_i = get_r_ea_i(mol, epos_rot, e, at) # nconf x naip x 3
rdotR = np.zeros(r_ea_i.shape[0:2]) # nconf x naip
# get the cosine values
for aip in range(naip):
rdotR[:, aip] = (
r_ea[:, 0] * r_ea_i[:, aip, 0]
+ r_ea[:, 1] * r_ea_i[:, aip, 1]
+ r_ea[:, 2] * r_ea_i[:, aip, 2]
)
rdotR[:, aip] /= np.linalg.norm(r_ea, axis=1) * np.linalg.norm(
r_ea_i[:, aip, :], axis=1
)
# print('cosine values',rdotR)
# already included the factor (2l+1), and the integration weights here
for l in l_list:
P_l_val[:, :, l] = (
(2 * l + 1) * P_l(rdotR, l) * np.outer(np.ones(nconf), weights)
)
return P_l_val
#########################################################################
def ecp_ea(mol, configs, wf, e, at):
"""
Returns the ECP value between electron e and atom at, local+nonlocal.
"""
l_list, v_l = get_v_l(mol, configs, e, at)
naip = 6
if len(l_list) > 2:
naip = 12
weights, epos_rot = get_rot(mol, configs, e, at, naip)
P_l = get_P_l(mol, configs, weights, epos_rot, l_list, e, at)
ratio = get_wf_ratio(wf, epos_rot, e)
    ecp_val = np.einsum("ij,ik,ijk->i", ratio, v_l, P_l)  # contract over quadrature points (j) and l channels (k)
# compute the local part
local_l = -1
ecp_val += v_l[:, local_l]
return ecp_val
def ecp(mol, configs, wf):
"""
Returns the ECP value, summed over all the electrons and atoms.
"""
nconf, nelec = configs.shape[0:2]
ecp_tot = np.zeros(nconf)
if mol._ecp != {}:
for e in range(nelec):
for at in range(len(mol._atom)):
ecp_tot += ecp_ea(mol, configs, wf, e, at)
return ecp_tot
#################### Quadrature Rules ############################
def get_rot(mol, configs, e, at, naip):
"""
Returns the integration weights (naip), and the positions of the rotated electron e (nconf x naip x 3)
Parameters:
configs[:,e,:]: epos of the electron e to be rotated
Returns:
weights: naip array
epos_rot: positions of the rotated electron, nconf x naip x 3
"""
nconf = configs.shape[0]
apos = np.outer(np.ones(nconf), np.array(mol._atom[at][1]))
r_ea_vec = get_r_ea(mol, configs, e, at)
r_ea = np.linalg.norm(r_ea_vec, axis=1)
# t and p are sampled randomly over a sphere around the atom
t = np.random.uniform(low=0.0, high=np.pi, size=nconf)
p = np.random.uniform(low=0.0, high=2 * np.pi, size=nconf)
# rotated unit vectors:
i_rot, j_rot, k_rot = (
np.zeros([nconf, 3]),
np.zeros([nconf, 3]),
np.zeros([nconf, 3]),
)
i_rot[:, 0] = np.cos(p - np.pi / 2.0)
i_rot[:, 1] = np.sin(p - np.pi / 2.0)
j_rot[:, 0] = np.sin(t + np.pi / 2.0) * np.cos(p)
j_rot[:, 1] = np.sin(t + np.pi / 2.0) * np.sin(p)
j_rot[:, 2] = np.cos(t + np.pi / 2.0)
k_rot[:, 0] = np.sin(t) * np.cos(p)
k_rot[:, 1] = np.sin(t) * np.sin(p)
k_rot[:, 2] = np.cos(t)
d1, d2 = np.zeros(naip), np.zeros(naip)
if naip == 6:
d1[1] = np.pi
d1[2] = np.pi / 2.0
d1[3] = np.pi / 2.0
d2[3] = np.pi
d1[4] = np.pi / 2.0
d2[4] = np.pi / 2.0
d1[5] = np.pi / 2.0
d2[5] = 3.0 * np.pi / 2.0
elif naip == 12:
d1[1] = np.pi
fi0 = np.pi / 5.0
tha = np.arccos(1.0 / np.sqrt(5.0))
for i in range(5):
rk2 = 2 * i
d1[i + 2] = tha
d2[i + 2] = rk2 * fi0
d1[i + 7] = np.pi - tha
d2[i + 7] = (rk2 + 1) * fi0
epos_rot = np.zeros([nconf, naip, 3])
for aip in range(naip):
for d in range(3):
epos_rot[:, aip, d] = apos[:, d] + r_ea * (
np.sin(d1[aip]) * np.cos(d2[aip]) * i_rot[:, d]
+ np.sin(d1[aip]) * np.sin(d2[aip]) * j_rot[:, d]
+ np.cos(d1[aip]) * k_rot[:, d]
)
weights = 1.0 / naip * np.ones(naip)
return weights, epos_rot
|
python
|
#!/usr/bin/python3.8
import os
import requests
import argparse
import queue
import threading
import logging
DOWNLOAD_THREADS = 6
IO_THREADS = 2
download_queue = queue.Queue()
write_queue = queue.Queue()
files_lock = threading.Lock()
def download_worker(download_queue, write_queue, url, chunk_size):
finished = False
logging.info("Download thread started")
while not finished:
chunk_offset = download_queue.get()
if chunk_offset is not None:
chunk_end = chunk_offset + chunk_size - 1
logging.debug(f"GET: {url} - {chunk_offset}-{chunk_end}")
resume_headers = {}
if chunk_size != 0:
resume_headers = {"Range":f"bytes={chunk_offset}-{chunk_end}"}
r = requests.get(url, stream=True, headers=resume_headers)
chunk = r.content
write_queue.put((chunk_offset, chunk_size, chunk))
else:
finished = True
download_queue.task_done()
def io_worker(write_queue, file_size, file_chunk_size, files):
finished = False
while not finished:
task = write_queue.get()
if task is not None:
chunk_offset, chunk_size, chunk = task
file_start = file_chunk_size * (chunk_offset // file_chunk_size) # Calculate which file the received chunk belongs to
file_offset = chunk_offset - file_start # Calculate where in the file to put the chunk
filename = f"{file_start}.dat"
logging.debug(f"File: {filename}. {chunk_offset} -> {file_start}+{file_offset}")
if file_offset + chunk_size > file_chunk_size: # If not all of the chunk fits in current file, put rest back for later
overflow = file_offset + chunk_size - file_chunk_size
new_offset = chunk_offset + file_chunk_size - file_offset
logging.debug(f"File: {filename}. Splitting {overflow} bytes to next file at {new_offset} ")
write_queue.put((new_offset, overflow, chunk[-overflow:]))
chunk = chunk[:-overflow]
chunk_size = file_chunk_size - file_offset
with files_lock:
with files[file_start]:
if os.path.isfile(filename):
with open(filename, "r+b") as f:
f.seek(file_offset)
f.write(chunk)
else:
with open(filename, "wb") as f:
logging.debug(f"Starting new file {filename}")
if file_size < file_start + file_chunk_size:
newfile = file_size - file_start
else:
newfile = file_chunk_size
f.truncate(newfile)
f.seek(file_offset)
f.write(chunk)
else:
finished = True
write_queue.task_done()
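# Worked example of the splitting logic above (numbers are illustrative only):
# with file_chunk_size = 1000, a 300-byte chunk arriving at chunk_offset = 900
# belongs to "0.dat" at file_offset 900; since 900 + 300 > 1000, the last 200
# bytes are re-queued at offset 1000 and end up at the start of "1000.dat".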
def main(url, http_chunk, file_chunk):
logging.basicConfig(level=logging.DEBUG, format='%(relativeCreated)6d %(threadName)s %(message)s')
r = requests.head(url, allow_redirects=True)
if not r.ok:
logging.error(f"Server returned {r.status_code}, aborting")
return
size = int(r.headers.get("Content-Length", -1))
ranges = r.headers.get("Accept-Ranges","none")
if ranges.lower() == "none":
logging.error("Remote server doesn't support download ranges, aborting")
return
http_chunks = [i * http_chunk for i in range( 1 + size // http_chunk)]
file_chunks = {i * file_chunk : threading.Lock() for i in range( 1 + size // file_chunk)}
logging.debug(f"{size}: {http_chunks}")
for chunk_start in http_chunks:
download_queue.put(chunk_start)
download_workers = []
io_workers = []
for thread_no in range(DOWNLOAD_THREADS):
t = threading.Thread(target=download_worker, kwargs={ "download_queue": download_queue,
"write_queue": write_queue,
"url": url,
"chunk_size": http_chunk})
t.start()
download_workers.append(t)
    # one sentinel per worker so that every download thread terminates
    for _ in range(DOWNLOAD_THREADS):
        download_queue.put(None)
for thread_no in range(IO_THREADS):
t = threading.Thread(target=io_worker, kwargs={ "write_queue": write_queue,
"file_size": size,
"file_chunk_size": file_chunk,
"files": file_chunks},
daemon=True)
t.start()
io_workers.append(t)
download_queue.join()
write_queue.join()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("url", help="URL to download")
parser.add_argument("http_chunk", help="Size (in bytes) of download chunk", type=int)
parser.add_argument("file_chunk", help="Size (in bytes) of output file chunk", type=int)
args = parser.parse_args()
main(args.url, args.http_chunk, args.file_chunk)
|
python
|
from spendfrom import determine_db_dir
class Options(object):
def __init__(self):
self.fromaddresses = list()
self.toaddress = None
self.new = False
self.datadir = determine_db_dir()
self.conffile = "crown.conf"
self.fee = "0.025"
self.amount = None
self.upto = None
self.select = None
self.passphrase = None
self.pswdcanceled = False
self.testnet = False
|
python
|
# Need to import path to test/fixtures and test/scripts/
# Ex : export PYTHONPATH='$PATH:/root/test/fixtures/:/root/test/scripts/'
#
# To run tests, you can do 'python -m testtools.run tests'. To run specific tests,
# You can do 'python -m testtools.run -l tests'
# Set the env variable PARAMS_FILE to point to your ini file. Else it will try to pick params.ini in PWD
#
import os
from novaclient import client as mynovaclient
from novaclient import exceptions as novaException
import unittest
import fixtures
import testtools
import traceback
from contrail_test_init import *
from vn_test import *
from quantum_test import *
from vnc_api_test import *
from nova_test import *
from vm_test import *
from connections import ContrailConnections
from floating_ip import *
from policy_test import *
from multiple_vn_vm_test import *
from contrail_fixtures import *
from tcutils.wrappers import preposttest_wrapper
from tcutils.poc import (TemplateTestCase, template, Call)
from test_arguments import *
class TestSanityFixture(testtools.TestCase, fixtures.TestWithFixtures):
__metaclass__ = TemplateTestCase
# @classmethod
def setUp(self):
super(TestSanityFixture, self).setUp()
if 'PARAMS_FILE' in os.environ:
self.ini_file = os.environ.get('PARAMS_FILE')
else:
self.ini_file = 'params.ini'
self.inputs = self.useFixture(ContrailTestInit(self.ini_file))
self.connections = ContrailConnections(self.inputs)
self.quantum_fixture = self.connections.quantum_fixture
self.nova_fixture = self.connections.nova_fixture
self.vnc_lib = self.connections.vnc_lib
self.logger = self.inputs.logger
self.agent_inspect = self.connections.agent_inspect
self.cn_inspect = self.connections.cn_inspect
# end setUpClass
def cleanUp(self):
super(TestSanityFixture, self).cleanUp()
# end cleanUp
def runTest(self):
pass
# end runTest
@template(env.test_vn_add_delete_params)
@preposttest_wrapper
def test_vn_add_delete(self, vn_name, vn_subnets):
'''Test to validate VN creation and deletion.
'''
vn_obj = self.useFixture(
VNFixture(
project_name=self.inputs.project_name, connections=self.connections,
vn_name=vn_name, inputs=self.inputs, subnets=vn_subnets))
assert vn_obj.verify_on_setup()
assert vn_obj
return True
# end
# end TestSanityFixture
|
python
|
# -*- coding: utf-8 -*-
"""
@author: Jatin Goel
"""
import os
from flask import Flask
APP = Flask(__name__)
APP.config['SQLALCHEMY_DATABASE_URI'] = (
'sqlite:///'
f'{os.path.join(os.getcwd(), "database.db")}'
).replace("\\", "/")
APP.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
APP.config['SECRET_KEY'] = 'SpiceworksHealthCheckerApp'
|
python
|
import unittest
from code.core.enumerations import *
import json
class TestEnumBase(unittest.TestCase):
@classmethod
def dummy(cls, One = 1, Two = 2, Three = 3, Four = 4):
pass
# class TestEnumBase
class TestEnum(TestEnumBase):
@enumeration
class ParsingSample:
One = 1
Two = 2
Three = 3
DifficultCase = TestEnumBase.dummy(
One = 10
)
MoreDifficultCase = TestEnumBase.dummy( \
Two = 11)
EvenMoreDifficultCase = TestEnumBase.dummy( \
Three = 13)
Last = Enum.Auto
# class ParsingSample
def testParsing(self):
enum = self.ParsingSample
count = len(enum)
self.assertEqual(count, 7)
# testParsing
def testJson(self):
enum = self.ParsingSample
count = len(enum)
dictionary = enum.dictionary()
jsonString = json.dumps(dictionary)
self.assertEqual(jsonString,
'{"One": 1, "Two": 2, "Three": 3, "DifficultCase": null, "MoreDifficultCase": null, "EvenMoreDifficultCase": null, "Last": 4}')
# testJson
def testConversions(self):
enum = self.ParsingSample
length = len(enum)
sameLength = enum.length()
firstMember = enum[0]
secondMember = enum("Two") # not a constructor!
self.assertEqual(firstMember, enum.One)
self.assertEqual(secondMember, enum.Two)
        self.assertEqual(length, 7)
        self.assertEqual(length, sameLength)
# testConversions
# class TestEnum
if __name__ == '__main__':
unittest.main()
|
python
|
import json
def load_expressions():
"""Returns the expressions_file loaded from JSON"""
with open('../data/BotCycle/expressions.json') as expressions_file:
return json.load(expressions_file)
def load_intents():
"""Returns a list of names of the intents"""
with open('../data/BotCycle/entities/intent.json') as intents_file:
intents = json.load(intents_file)
return list(map(lambda x: x['value'], intents['data']['values']))
def get_train_data(expressions):
"""Returns a list of tuples (text, intent)"""
array = expressions['data']
result = []
# substitute the entities with their name
for sentence in array:
text = sentence['text']
intent = None
for entity in sentence['entities']:
if entity['entity'] != 'intent':
text = text.replace(entity['value'].strip(
'"'), entity['entity'].upper())
else:
intent = entity['value'].strip('"')
result.append((text, intent))
# print(result)
return result
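# Hedged example of the substitution above (hypothetical Wit.ai-style entry):
# a sentence 'take me to Turin' annotated with a location entity ("Turin") and
# an intent entity ("search_bike") is returned as
# ('take me to LOCATION', 'search_bike').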
def main():
expressions = load_expressions()
return get_train_data(expressions)
if __name__ == '__main__':
print(main())
|
python
|
import os
import h5py
import numpy as np
def copy_file_struct(f, hmap, fo):
"""
Iteratively copy a HDF5 file structure to a new file
Arguments:
-f : File from which to copy [OPEN HDF5 file handle]
-hmap : Current file object of interest to copy from
-fo : File to copy structure to [OPEN HDF5 file handle]
"""
# First, see if object is a group
if type(hmap) == h5py._hl.group.Group:
name = hmap.name.encode("ascii")
# Copy attributes associated with group
atts = f[name].attrs.keys()
if len(atts) > 0:
group = fo.create_group(name)
for v in atts:
group.attrs[v] = f[name].attrs[v]
# Now deal with subgroups and datasets
for w in hmap:
if type(f[name][w]) == h5py._hl.group.Group:
atts = f[name][w].attrs.keys()
if len(atts) > 0:
group = fo.create_group("{0}/{1}".format(name, w))
for v in atts:
group.attrs[v] = f[name][w].attrs[v]
copy_file_struct(f, f[name][w], fo)
elif type(f[name][w]) == h5py._hl.dataset.Dataset:
tmp = np.zeros(f[name][w][:].shape, dtype=f[name][w][:].dtype)
tmp[:] = f[name][w][:]
dset = fo.create_dataset(
"{0}/{1}".format(name.decode("utf-8"), w),
data=tmp,
dtype=f[name][w][:].dtype,
)
del tmp
atts = f[name][w].attrs.keys()
if len(atts) > 0:
for v in atts:
dset.attrs[v] = f[name][w].attrs[v]
# Otherwise deal with the dataset
elif type(hmap) == h5py._hl.dataset.Dataset:
name = hmap.name.encode("ascii")
tmp = np.zeros(f[name][:].shape, dtype=f[name][:].dtype)
tmp[:] = f[name][:]
dset = fo.create_dataset(name.decode("utf-8"), data=tmp, dtype=f[name][:].dtype)
atts = f[name].attrs.keys()
if len(atts) > 0:
for v in atts:
dset.attrs[v] = f[name].attrs[v]
del tmp
return
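# Hedged usage sketch (file names below are placeholders):
#   with h5py.File("source.hdf5", "r") as f, h5py.File("copy.hdf5", "w") as fo:
#       for key in sorted(f.keys()):
#           copy_file_struct(f, f[key], fo)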
def merge_outputs(outputs):
"""
Merge all outputs from different processors into one file
Arguments:
-outputs : Search term for output files of interest [STRING]
"""
print(" > Merging {0}*".format(outputs), flush=True)
# Find outputs of interest
files = []
for y in os.listdir("output/"):
if y.startswith(outputs):
files.append("output/{0}".format(y))
files.sort()
# Create output file
flib = "output/{0}.hdf5".format(outputs)
outf = h5py.File(flib, "w")
# Loop over files of interest, iteratively copying data to consolidated output file
for y in files:
if y == flib:
continue
print(" -{0}".format(y), flush=True)
f = h5py.File(y, "r")
fk = sorted(f.keys())
for z in fk:
print(" --> {0}".format(z), flush=True)
copy_file_struct(f, f[z], outf)
f.close()
os.remove(y)
outf.close()
del files, flib
return
|
python
|
"""
The child process.
"""
import time
from asyncio import get_event_loop
from typing import Callable, Optional
from prompt_toolkit.eventloop import call_soon_threadsafe
from .backends import Backend
from .key_mappings import prompt_toolkit_key_to_vt100_key
from .screen import BetterScreen
from .stream import BetterStream
__all__ = ["Process"]
class Process:
"""
Child process.
Functionality for parsing the vt100 output (the Pyte screen and stream), as
well as sending input to the process.
Usage:
p = Process(loop, ...):
p.start()
:param invalidate: When the screen content changes, and the renderer needs
to redraw the output, this callback is called.
:param bell_func: Called when the process does a `bell`.
:param done_callback: Called when the process terminates.
:param has_priority: Callable that returns True when this Process should
get priority in the event loop. (When this pane has the focus.)
Otherwise output can be delayed.
"""
def __init__(
self,
invalidate: Callable[[], None],
backend: Backend,
bell_func: Optional[Callable[[], None]] = None,
done_callback: Optional[Callable[[], None]] = None,
has_priority: Optional[Callable[[], bool]] = None,
) -> None:
self.loop = get_event_loop()
self.invalidate = invalidate
self.backend = backend
self.done_callback = done_callback
self.has_priority = has_priority or (lambda: True)
self.suspended = False
self._reader_connected = False
# Create terminal interface.
self.backend.add_input_ready_callback(self._read)
if done_callback is not None:
self.backend.ready_f.add_done_callback(lambda _: done_callback())
# Create output stream and attach to screen
self.sx = 0
self.sy = 0
self.screen = BetterScreen(
self.sx, self.sy, write_process_input=self.write_input, bell_func=bell_func
)
self.stream = BetterStream(self.screen)
self.stream.attach(self.screen)
def start(self) -> None:
"""
Start the process: fork child.
"""
self.set_size(120, 24)
self.backend.start()
self.backend.connect_reader()
def set_size(self, width: int, height: int) -> None:
"""
Set terminal size.
"""
if (self.sx, self.sy) != (width, height):
self.backend.set_size(width, height)
self.screen.resize(lines=height, columns=width)
self.screen.lines = height
self.screen.columns = width
self.sx = width
self.sy = height
def write_input(self, data: str, paste: bool = False) -> None:
"""
Write user key strokes to the input.
:param data: (text, not bytes.) The input.
:param paste: When True, and the process running here understands
bracketed paste. Send as pasted text.
"""
# send as bracketed paste?
if paste and self.screen.bracketed_paste_enabled:
data = "\x1b[200~" + data + "\x1b[201~"
self.backend.write_text(data)
def write_key(self, key: str) -> None:
"""
Write prompt_toolkit Key.
"""
data = prompt_toolkit_key_to_vt100_key(
key, application_mode=self.screen.in_application_mode
)
self.write_input(data)
def _read(self) -> None:
"""
Read callback, called by the loop.
"""
d = self.backend.read_text(4096)
assert isinstance(d, str), "got %r" % type(d)
# Make sure not to read too much at once. (Otherwise, this
# could block the event loop.)
if not self.backend.closed:
def process() -> None:
self.stream.feed(d)
self.invalidate()
# Feed directly, if this process has priority. (That is when this
# pane has the focus in any of the clients.)
if self.has_priority():
process()
# Otherwise, postpone processing until we have CPU time available.
else:
self.backend.disconnect_reader()
def do_asap():
" Process output and reconnect to event loop. "
process()
if not self.suspended:
self.backend.connect_reader()
# When the event loop is saturated because of CPU, we will
# postpone this processing max 'x' seconds.
# '1' seems like a reasonable value, because that way we say
# that we will process max 1k/1s in case of saturation.
# That should be enough to prevent the UI from feeling
# unresponsive.
timestamp = time.time() + 1
call_soon_threadsafe(do_asap, max_postpone_time=timestamp)
else:
# End of stream. Remove child.
self.backend.disconnect_reader()
def suspend(self) -> None:
"""
Suspend process. Stop reading stdout. (Called when going into copy mode.)
"""
if not self.suspended:
self.suspended = True
self.backend.disconnect_reader()
def resume(self) -> None:
"""
Resume from 'suspend'.
"""
if self.suspended:
self.backend.connect_reader()
self.suspended = False
def get_cwd(self) -> str:
"""
The current working directory for this process. (Or `None` when
unknown.)
"""
return self.backend.get_cwd()
def get_name(self) -> str:
"""
The name for this process. (Or `None` when unknown.)
"""
# TODO: Maybe cache for short time.
return self.backend.get_name()
def kill(self) -> None:
"""
Kill process.
"""
self.backend.kill()
@property
def is_terminated(self) -> bool:
return self.backend.closed
|
python
|
# Generated by Django 3.2 on 2022-02-19 13:03
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('products', '0005_remove_product_isfavourite'),
]
operations = [
migrations.CreateModel(
name='Reviews',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('review_title', models.TextField(max_length=254)),
('review_body', models.TextField(max_length=1000)),
('date_added', models.DateTimeField(auto_now_add=True)),
('active', models.BooleanField(default=True)),
('author', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
('product', models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='reviews', to='products.product')),
],
options={
'ordering': ['-review_title'],
},
),
]
|
python
|
# -*- coding: utf-8 -*-
import scrapy
class AdzanItem(scrapy.Item):
c = scrapy.Field() # city
d = scrapy.Field() # day
s = scrapy.Field() # shubuh
t = scrapy.Field() # terbit
z = scrapy.Field() # zuhur
a = scrapy.Field() # ashr
m = scrapy.Field() # maghrib
i = scrapy.Field() # isya
|
python
|
import os
import sys
from os import path
from flask import Flask
from flask_login import LoginManager
from flask_sqlalchemy import SQLAlchemy
from flask_recaptcha import ReCaptcha
app = Flask(__name__)
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
#config for Forms, Register and FCM tokens
app.config.update(dict(
    DEBUG=True,
    SECRET_KEY='you-will-never-guess',
    SECURITY_PASSWORD_SALT='my_precious_two',
    FCM_APP_TOKEN='AAAAXUWoieY:APA91bGcVQ67M5mAEl7e2OSb5yKko8J17NH7GZtOspoq9NKjnHMyD9RiCePjLKUHfyBzn4II0aVJx_JnyyBHQijdbT6sYwxAoDrI15bZX_0FdBpHKgAVqMBpKMQAxIggXxakcZ3It54f',
    RECAPTCHA_ENABLED=True,
    RECAPTCHA_SITE_KEY='6LetACUUAAAAAPckPB-tmBZdLo9eZDp5tacC1XA9',
    RECAPTCHA_SECRET_KEY='6LetACUUAAAAAMUPZ3N1gjDO1AHxq8AVAXau9Fg-',
    RECAPTCHA_THEME='light',
))
#recaptcha init
recaptcha = ReCaptcha()
recaptcha.init_app(app)
#conection to database
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ['DATABASE_URL']
db = SQLAlchemy(app)
#Configure Flask-Login
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = 'login'
### move to other module and resolve problem with second import models (Table 'user' is already defined for this MetaData instance)
import config_celery
#Configure Celery
app.config.update(CELERY_BROKER_URL=os.environ['REDIS_URL'], CELERY_RESULT_BACKEND=os.environ['REDIS_URL'])
celery = config_celery.make_celery(app)
###
#User types
#adm - admin, usr - regular user, oth - for later use
def enum(**enums):
return type('Enum', (), enums)
UserType = enum(adm=1, usr=2, oth=3)
ServiceState = enum(up=1, down=2, unspecified=3)
from webapp import tasks
from webapp import views
|
python
|
"""
Standalone program to copy a file to azure, and return the URL to access the file.
The idea is that you can run from the command line and escape to shell because
sometimes the write takes awhile. Then look in the log file to get the URL for
sharing.
To test:
run copy_to_azure
To copy a file:
python copy_to_azure.py -fn [string of full path] > az.log &
Note that the last line of screen output from extract_box gives you
the [string of full path].
"""
from azure.storage.blob import BlobServiceClient
from azure.storage.blob import PublicAccess
from lo_tools import Lfun
import argparse
from time import time
from datetime import datetime
from pathlib import Path
# get and process arguments
parser = argparse.ArgumentParser()
parser.add_argument('-fn', type=str) # full path to input file
parser.add_argument('-out_name', type=str) # name of output file
parser.add_argument('-container_name', type=str, default='pm-share')
args = parser.parse_args()
fn = args.fn
out_name = args.out_name
container_name = args.container_name
Ldir = Lfun.Lstart()
if fn == None:
fn = Ldir['data'] / 'accounts' / 'az_testfile.txt'
else:
fn = Path(fn)
if out_name == None:
out_name = fn.name
# screen output
print(' copy_to_azure '.center(60,'='))
print(datetime.now().strftime('%Y.%m.%d %H:%M:%S'))
print('Copying: %s' % (fn))
tt0 = time()
# get the blob_service
account_fn = Ldir['data'] / 'accounts' / 'azure_pm_2015.05.25.txt'
with account_fn.open() as aa:
# make sure to strip newline characters
account = aa.readline().strip()
key = aa.readline().strip()
connection_string = ('DefaultEndpointsProtocol=https' +
';AccountName=' + account +
';AccountKey=' + key +
';EndpointSuffix=core.windows.net')
blob_service = BlobServiceClient.from_connection_string(conn_str=connection_string)
try: # create the container if needed
blob_service.create_container(container_name, public_access=PublicAccess.Container)
except:
pass # assume error is because container exists
# write it to Azure
try:
from azure.storage.blob import BlobClient
blob = BlobClient.from_connection_string(conn_str=connection_string,
container_name=container_name, blob_name=out_name)
with fn.open('rb') as data:
blob.upload_blob(data, overwrite=True)
az_url = ('https://pm2.blob.core.windows.net/' + container_name + '/' + out_name)
print('URL to access file: %s' % (az_url))
print('Took %0.2f sec' % (time()-tt0))
except Exception as e:
print('Copy failed: %s' % (str(e)))
|
python
|
#!/usr/bin/env python
# Copied from https://github.com/xantares/mingw-ldd/blob/master/mingw-ldd.py
# Modified to point to right prefix location on Fedora.
# WTFPL - Do What the Fuck You Want to Public License
from __future__ import print_function
import pefile
import os
import sys
def get_dependency(filename):
deps = []
pe = pefile.PE(filename)
for imp in pe.DIRECTORY_ENTRY_IMPORT:
deps.append(imp.dll.decode())
return deps
def dep_tree(root, prefix=None):
if not prefix:
arch = get_arch(root)
#print('Arch =', arch)
prefix = '/usr/'+arch+'-w64-mingw32/sys-root/mingw/bin'
#print('Using default prefix', prefix)
dep_dlls = dict()
def dep_tree_impl(root, prefix):
for dll in get_dependency(root):
if dll in dep_dlls:
continue
full_path = os.path.join(prefix, dll)
if os.path.exists(full_path):
dep_dlls[dll] = full_path
dep_tree_impl(full_path, prefix=prefix)
else:
dep_dlls[dll] = 'not found'
dep_tree_impl(root, prefix)
return (dep_dlls)
def get_arch(filename):
type2arch= {pefile.OPTIONAL_HEADER_MAGIC_PE: 'i686',
pefile.OPTIONAL_HEADER_MAGIC_PE_PLUS: 'x86_64'}
pe = pefile.PE(filename)
try:
return type2arch[pe.PE_TYPE]
except KeyError:
sys.stderr.write('Error: unknown architecture')
sys.exit(1)
if __name__ == '__main__':
filename = sys.argv[1]
for dll, full_path in dep_tree(filename).items():
print(' ' * 7, dll, '=>', full_path)
|
python
|
import torch
import torch.utils.data as tud
import numpy as np
from utils import create_space
import os
# from PIL import Image, ImageFile
import torchvision.transforms.functional as TF
from collections import defaultdict
import rasterio
from rasterio.windows import Window
import pyproj
# ImageFile.LOAD_TRUNCATED_IMAGES = True
class LandsatViirs(tud.Dataset):
"""
A data loader that samples pairs of Landsat
and VIIRS images for matching land areas.
"""
def __init__(
self, df, landsat_transform, viirs_transform
):
self.df = df
self.landsat_transform = landsat_transform
self.viirs_transform = viirs_transform
self.idxs = df.index.to_list()
def __len__(self):
return len(self.df)
def __zero_pad_pathrow(self, path, row):
return ("000" + str(path))[-3:] + ("000" + str(row))[-3:]
def __getitem__(self, idx):
idx = self.idxs[idx]
cols = ['# lon', 'lat', 'path', 'row', 'B2_link', 'B3_link', 'B4_link']
lon, lat, path, row, B2_url, B3_url, B4_url = self.df.loc[idx, cols]
# 0 pad !!!! < --------------------------------------------------------v-------
pathrow = self.__zero_pad_pathrow(path, row)
landsat = self.landsat_transform((lon, lat), pathrow,
(B2_url, B3_url, B4_url))
viirs = self.viirs_transform((lon, lat))
return landsat, viirs
class LandsatTransform:
"""
A callable object that, given a pair of coordinates, returns the
Landsat image formatted as a 3D Tensor [bands, height, width].
"""
def __init__(self, xdim=224, ydim=224):
"""
Inputs: None
"""
self.coord_imgs = {}
# self.pathrow_tifs = {}
self.xdim = xdim
self.ydim = ydim
if not os.path.isdir('landsat'):
os.mkdir('landsat')
def _get_tif(self, img_urls, pathrow):
rgb_path = 'landsat/' + pathrow + '.tiff'
if not os.path.isfile(rgb_path):
self._fetch_tif_from_s3(img_urls, pathrow)
return rgb_path
def _fetch_tif_from_s3(self, img_urls, pathrow):
"""
Given a tuple of urls to R, G, and B TIF bands for a single scene,
return a single TIF
"""
rgb_path = 'landsat/' + pathrow + '.tiff'
r_url, g_url, b_url = img_urls
b2 = rasterio.open(r_url)
b3 = rasterio.open(g_url)
b4 = rasterio.open(b_url)
with rasterio.open(rgb_path, 'w+', driver='Gtiff', width=b2.width,
height=b2.height, count=3, crs=b2.crs,
                           transform=b2.transform, dtype='float32') as rgb:
rgb.write(b2.read(1), 1)
rgb.write(b3.read(1), 2)
rgb.write(b4.read(1), 3)
b2.close()
b3.close()
b4.close()
return
def __call__(self, coord, pathrow, img_urls):
"""
        Extracts the sub-array from the Landsat scene covering the bounding box
        around the provided coordinates, and returns a resized 3D tensor.
Input:
coord (tuple of 2 floats)
pathrow (str)
img_urls: tuple of urls to R, G, and B TIF bands to a single scene
Returns a 3D tensor.
"""
        r_url, g_url, b_url = img_urls
lon, lat = coord
# If we already have the 3d tensor, just return it
if coord in self.coord_imgs:
return self.coord_imgs[coord].new_tensor()
else:
# Get TIF with 3 bands
tif = rasterio.open(self._get_tif(img_urls, pathrow), 'r')
# Extract 224x224 subarray from landsat scene
min_lat, min_lon, max_lat, max_lon = create_space(lat, lon)
utm = pyproj.Proj(tif.crs)
lonlat = pyproj.Proj(init='epsg:4326')
east, north = pyproj.transform(lonlat, utm, max_lon, max_lat)
west, south = pyproj.transform(lonlat, utm, min_lon, min_lat)
north_idx, west_idx = tif.index(west, north)
south_idx, east_idx = tif.index(east, south)
raw_array = tif.read(window=Window(
west_idx, north_idx,
abs(west_idx - east_idx),
abs(north_idx - south_idx)
))
tif.close()
# Convert array to tensor
landsat_tensor = torch.tensor(raw_array).type(torch.FloatTensor)
# resize tensor
landsat_tensor = TF.resize(
landsat_tensor,
size = (self.ydim, self.xdim)
)
# Add tensor to dict
self.coord_imgs[coord] = landsat_tensor
return landsat_tensor.new_tensor()
class ViirsTransform:
"""
    A callable object that, given a pair of coordinates, returns the
    VIIRS Day/Night Band image formatted as a 3D Tensor [bands, height, width].
"""
def __init__(self, tif, subsetBBox=None):
"""
Inputs:
tif (rasterio.DatasetReader)
subsetBBox: tuple of 2 coords (float,float) representing
((min lon, min lat), (max lon, max lat)) of the desired subset.
All points must be in WGS84.
"""
self.tif = tif
if subsetBBox:
(west, south), (east, north) = subsetBBox
miny, minx = tif.index(west, south)
maxy, maxx = tif.index(east, north)
height = abs(maxy - miny)
width = abs(maxx - minx)
self.col_offset = minx
self.row_offset = maxy
self.tif_data = tif.read(window=Window(minx, maxy, width, height))
else:
self.col_offset = 0
            self.row_offset = 0
self.tif_data = tif.read()
def __call__(self, coord):
"""
        Extracts the 21x21 sub-array from the full VIIRS tile set
corresponding to the provided coordinates,
and returns a normalized 3D tensor.
Normalization:
output = ln(input + 1)/ln(max)
Where max = 92,000 radiance in our dataset.
Maps to [0,1] and reduces outsize influence of outliers.
Input:
coord (tuple of 2 floats)
Returns a 3D tensor.
"""
lon, lat = coord
min_lat, min_lon, max_lat, max_lon = create_space(lat, lon)
row, col = self.tif.index(min_lon, max_lat)
row -= self.row_offset
col -= self.col_offset
array = self.tif_data[:, row:row+21, col:col+21]
viirs_tensor = torch.tensor(array.reshape((-1,21,21))).type(torch.FloatTensor)
return torch.clamp(torch.log(viirs_tensor + 1) / 11.43, min=0, max=1)
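# Note on the constant above: 11.43 is approximately ln(92000), the maximum
# radiance quoted in the docstring, so the brightest pixel maps to ~1.0 before
# clamping: np.log(92000 + 1) / 11.43 ~= 1.0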
############################# DEPRECATED #####################################
# class deprecated_LandsatTransform:
# """
# A callable object that, given a pair of coordinates, returns a the
# Landsat image formatted as a 3D Tensor [bands, height, width].
# """
# def __init__(self, base_path, width=256, height=256):
# self.base_path = base_path
# self.width = width
# self.height = height
# def __call__(self, image_name, country='ng'):
# path = '/'.join([self.base_path, country, image_name])
# img = Image.open(path)
# img = img.resize((self.width, self.height))
# return TF.pil_to_tensor(img.convert('RGB')).type(torch.FloatTensor)/255
# class deprecated_ViirsTransform:
# """
# A callable object that, given a pair of coordinates, returns a the
# VIIIRS Day/Night Band image formatted as a 3D Tensor [bands, height, width].
# """
# def __init__(self, tifs):
# """
# Inputs:
# tif
# """
# self.tifs = {
# 'ng' : tifs[1],
# 'eth' : tifs[1],
# 'mw' : tifs[0]
# }
# tif0_data = tifs[0].get_data()
# tif1_data = tifs[1].get_data()
# self.arrays = {
# 'ng' : tif1_data,
# 'eth' : tif1_data,
# 'mw' : tif0_data
# }
# def __call__(self, coord, country):
# """
# Extracts the 21x21 sub-array from the the full VIIRS tile set
# corresponding to the provided coordinates,
# and returns a normalized 3D tensor.
# Normalization:
# output = ln(input + 1)/ln(max)
# Where max = 92,000 radiance in our dataset.
# Maps to [0,1] and reduces outsize influence of outliers.
# Input:
# coord (tuple of 2 floats)
# country (str): One of ['eth', 'mw', 'ng']
# Returns a 3D tensor.
# """
# min_lat, min_lon, max_lat, max_lon = create_space(
# coord[0], coord[1])
# xminPixel, ymaxPixel = self.tifs[country].proj_to_raster(min_lon, min_lat)
# xminPixel, ymaxPixel = int(xminPixel), int(ymaxPixel)
# array = self.arrays[country][:, ymaxPixel-21:ymaxPixel, xminPixel:xminPixel+21]
# viirs_tensor = torch.tensor(array.reshape((-1,21,21))).type(torch.FloatTensor)
# return torch.clamp(
# (torch.log(viirs_tensor + 1) / 11.43),
# min=0, max=1
# )
# class LandsatDataset(tud.Subset):
# """
# A data loader that samples pairs of Landsat
# and VIIRS images for matching land areas.
# """
# def __init__(
# self, df, landsat_transform
# ):
# self.df = df
# self.landsat_transform = landsat_transform
# self.idxs = df.index.to_list()
# def __len__(self):
# return len(self.df)
# def __getitem__(self, idx):
# idx = self.idxs[idx]
# cols = ['image_lat', 'image_lon', 'image_name', 'country']
# lat, lon, img, country = self.df.loc[idx, cols]
# landsat = self.landsat_transform(img, country)
# return landsat
# class ViirsDataset(tud.Subset):
# """
# A data loader that samples pairs of Landsat
# and VIIRS images for matching land areas.
# """
# def __init__(
# self, df, viirs_transform
# ):
# self.df = df
# self.viirs_transform = viirs_transform
# self.idxs = df.index.to_list()
# def __len__(self):
# return len(self.df)
# def __getitem__(self, idx):
# idx = self.idxs[idx]
# cols = ['image_lat', 'image_lon', 'image_name', 'country']
# lat, lon, img, country = self.df.loc[idx, cols]
# viirs = self.viirs_transform((lat, lon), country)
# return viirs
# class Landsat(tud.Dataset):
# """
# A data loader that samples pairs of Landsat images.
# """
# def __init__(self, df, landsat_transform):
# self.df = df
# self.landsat_transform = landsat_transform
# self.idxs = df.index.to_list()
# def __len__(self):
# return len(self.df)
# def __getitem__(self, idx):
# idx = self.idxs[idx]
# img, country = self.df.loc[idx, ['image_name', 'country']]
# landsat = self.landsat_transform(img, country)
# return landsat, landsat
# class ToTensor(object):
# def __init__(self, bands, height, width):
# self.x = width
# self.y = height
# self.z = bands
# def __call__(self, sample):
# m = torch.from_numpy(sample).type(torch.float).reshape(
# (self.z, self.y, self.x))
# return m
# class MNIST(tud.Dataset):
# def __init__(self, X, transform=ToTensor):
# self.X = X
# self.transform = transform(1, 28, 28)
# def __len__(self):
# return len(self.X)
# def __getitem__(self, idx):
# x = self.X[idx]
# if self.transform:
# x = self.transform(x)
# return (x, x)
|
python
|
from distutils.core import setup
setup(
name='page-objects',
version='1.1.0',
packages=['page_objects'],
url='https://github.com/krizo/page-objects.git',
license='MIT',
author='Edward Easton',
author_email='[email protected]',
description='Page Objects for Python', requires=['selenium', 'pytest', 'mock']
)
|
python
|
from enum import Enum
from typing import Union
class CONNECTION_STYLE(Enum):
angle = 1
angle_3 = 2
arc = 3
arc_3 = 4
bar = 5
def get_name(self) -> str:
return {
'angle': 'angle',
'angle_3': 'angle3',
'arc': 'arc',
'arc_3': 'arc3',
'bar': 'bar'
}[self.name]
@staticmethod
def get_connection_style(
connection_style: Union[str, 'CONNECTION_STYLE']
) -> str:
if connection_style and isinstance(connection_style, CONNECTION_STYLE):
connection_style = connection_style.get_name()
return connection_style
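# Hedged usage sketch: both forms below should yield the matplotlib-style name 'arc3':
#   CONNECTION_STYLE.get_connection_style(CONNECTION_STYLE.arc_3)  # -> 'arc3'
#   CONNECTION_STYLE.arc_3.get_name()                              # -> 'arc3'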
|
python
|
# Librerias Future
from __future__ import unicode_literals
# Librerias Django
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import render
from django.views.generic import DetailView, ListView
# Librerias de terceros
from apps.website.submodels.post import PyPost
def index(request):
return render(request, 'index.html')
def shop(request):
return render(request, 'shop.html')
def product(request):
return render(request, 'product.html')
def post(request):
return render(request, 'post.html')
def license(request):
return render(request, 'license.html')
def UnderConstruction(request):
return render(request, 'under_construction.html')
"""
BLOG
"""
POST_FIELDS = [
{'string': 'Título', 'field': 'title'},
{'string': 'Creado en', 'field': 'created_on'},
{'string': 'Contenido', 'field': 'content'},
]
POST_FIELDS_SHORT = ['title','content','created_on']
class BlogView(LoginRequiredMixin, ListView):
login_url = "login"
model = PyPost
template_name = 'blog.html'
fields = POST_FIELDS
paginate_by = 8
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
return context
class PostDetailView(LoginRequiredMixin, DetailView):
login_url = "login"
model = PyPost
template_name = 'post.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
return context
|
python
|
######################################################################################
### This is a read-only file to allow participants to run their code locally. ###
### It will be over-writter during the evaluation, Please do not make any changes ###
### to this file. ###
######################################################################################
import traceback
import os
import signal
from contextlib import contextmanager
from os import listdir
from os.path import isfile, join
import soundfile as sf
import numpy as np
class TimeoutException(Exception): pass
class MusicDemixingPredictor:
def __init__(self, model_name='baseline'):
self.test_data_path = os.getenv("TEST_DATASET_PATH", os.getcwd() + "/input/")
self.results_data_path = os.getenv("RESULTS_DATASET_PATH", os.getcwd() + "/output")
self.results = []
self.current_music_name = None
self.model_name = model_name
def get_all_music_names(self):
return [file.split('.')[0] for file in listdir(self.test_data_path) if isfile(join(self.test_data_path, file))]
def get_music_file_location(self, music_name, instrument=None):
if instrument is None:
return join(self.test_data_path, f"{music_name}.wav")
if not os.path.exists(self.results_data_path): os.makedirs(self.results_data_path)
return join(self.results_data_path, f"{music_name}_{instrument}.wav")
def evaluation(self):
self.prediction_setup()
music_names = self.get_all_music_names()
for music_name in music_names:
print(f'{music_name}: {self.get_music_file_location(music_name)}')
self.prediction(mixture_file_path=self.get_music_file_location(music_name),
bass_file_path=self.get_music_file_location(music_name, "bass"),
drums_file_path=self.get_music_file_location(music_name, "drums"),
other_file_path=self.get_music_file_location(music_name, "other"),
vocals_file_path=self.get_music_file_location(music_name, "vocals"),
)
def run(self):
try:
self.evaluation()
except Exception as e:
print(traceback.format_exc())
|
python
|
print('Cliente da Vagalume API Iniciado: \n')
nome_artista = input('Digite o nome de um Artista\n')
import busca
resultado = busca.artista(nome_artista)
newinput = input('Digite:\n-pos: para saber a posição do artista no ranking\n-album: para saber o ultimo álbum do artista\n-frequencia: para saber as palavras mais frequentes nas letras mais acessadas do artista\n-musicas n: para exibir as n musicas mais acessadas do artista (n = 0 para todas)\n')
if newinput == '-pos':
print ('\nA posição do artista é '+ resultado.rank.pos)
elif newinput == '-frequencia':
print('\nAs palavras mais frequêntes são:\n' + resultado.toplyrics.freq_palavras_toplyrics)
elif newinput == '-album':
print('\nO ultimo album do artista é: '+resultado.albuns.ultimo)
else:
newlist = newinput.split()
    resultado2 = busca.artista(nome_artista, int(newlist[1]))
    if newlist[0] == '-musicas':
        print('\nAs ' + newlist[1] + ' músicas mais acessadas do artista são: \n' + resultado2.toplyrics.ntoplyrics)
|
python
|
import json
import logging
from homeassistant.components.http import HomeAssistantView
from .util import DOMAIN, XIAOAI_API, matcher_query_state, find_entity
from .xiaoai import (XiaoAIAudioItem, XiaoAIDirective, XiaoAIOpenResponse,
XiaoAIResponse, XiaoAIStream, XiaoAIToSpeak, XiaoAITTSItem,
xiaoai_request, xiaoai_response)
_LOGGER = logging.getLogger(__name__)
# Text reply
def build_text_message(to_speak, is_session_end, open_mic):
xiao_ai_response = XiaoAIResponse(
to_speak=XiaoAIToSpeak(type_=0, text=to_speak),
open_mic=open_mic)
response = xiaoai_response(XiaoAIOpenResponse(version='1.0',
is_session_end=is_session_end,
response=xiao_ai_response))
return response
# Music reply
def build_music_message(to_speak, mp3_urls):
all_list = []
if to_speak is not None:
info_tts = XiaoAIDirective(
type_='tts',
tts_item=XiaoAITTSItem(
type_='0', text=to_speak
))
all_list.append(info_tts)
for url in mp3_urls:
info_audio = XiaoAIDirective(
type_='audio',
audio_item=XiaoAIAudioItem(stream=XiaoAIStream(url=url))
)
all_list.append(info_audio)
xiao_ai_response = XiaoAIResponse(directives=all_list, open_mic=False)
response = xiaoai_response(XiaoAIOpenResponse(
version='1.0', is_session_end=True, response=xiao_ai_response))
return response
# Format conversion: parse the incoming request
def parse_input(event, hass):
req = xiaoai_request(event)
text = req.query
    # Check whether the current user is the configured owner
cfg = hass.data['conversation_voice'].api_config.get_config()
user_id = cfg.get('user_id', '')
if user_id != '' and user_id != req.session.user.user_id:
return build_text_message('我真的好笨笨哦,不知道你在说啥,换个方式叫我吧', is_session_end=True, open_mic=False)
    # Slot: req.request.slot_info.intent_name
intent_name = ''
if hasattr(req.request.slot_info, 'intent_name'):
intent_name = req.request.slot_info.intent_name
    # Message text: req.query
if req.request.type == 0:
        # Skill-entry request
if intent_name == 'Mi_Welcome':
return build_text_message('欢迎使用您的家庭助理', is_session_end=False, open_mic=True)
        # Initial handling of the recognized content
return conversation_process(hass, text, cfg)
elif req.request.type == 1:
        # Exit intent
if intent_name == 'Mi_Exit' or ['没事了', '退下', '没有了', '没有', '没用了', '没了', '没有呢'].count(text) > 0:
return build_text_message('再见了您!', is_session_end=True, open_mic=False)
else:
return conversation_process(hass, text, cfg)
elif req.request.type == 2:
return build_text_message('再见了您!', is_session_end=True, open_mic=False)
return build_text_message('我没听懂欸', is_session_end=True, open_mic=False)
# Message handling
def conversation_process(hass, text, cfg):
open_mic = cfg.get('open_mic', True)
is_session_end = (open_mic == False)
hass.async_create_task(hass.services.async_call('conversation', 'process', {'source': 'XiaoAi','text': text}))
    # If a configured state query matches, do not fall through to the system intent
result = matcher_query_state(text)
if result is not None:
friendly_name = result
state = find_entity(hass, friendly_name)
if state is not None:
message = f'{friendly_name}的状态是{state.state}'
if open_mic:
message += ',请问还有什么事吗?'
return build_text_message(message, is_session_end, open_mic)
message = '收到'
if open_mic:
message += ',还有什么事吗?'
return build_text_message(message, is_session_end, open_mic)
# Gateway view
class XiaoaiGateView(HomeAssistantView):
url = XIAOAI_API
name = DOMAIN
requires_auth = False
async def post(self, request):
data = await request.json()
_LOGGER.info('======= 小爱API接口信息 =========')
_LOGGER.info(data)
_LOGGER.info('======= 小爱API接口信息 =========')
hass = request.app["hass"]
response = parse_input(data, hass)
return self.json(json.loads(response))
|
python
|
from apistar import Include, Route
from settings.config import DEBUG
from views.spider import callback
from views.crate import crate_list, crate_detail
routes = [
Route('/', 'GET', crate_list),
Route('/crate/{crate_id}', 'GET', crate_detail),
Route('/spider', 'POST', callback),
]
if DEBUG:
from apistar.handlers import docs_urls, static_urls
routes += [
Include('/docs', docs_urls),
Include('/static', static_urls),
]
|
python
|
#!/usr/bin/env python3
"""Générateur de service AVAHI pour Freebox en mode bridge
Lit les informations depuis http://mafreebox.freebox.fr/api_version
Crée un fichier utilisable comme service avahi.
Permet d’utiliser Freebox Compagnon avec une Freebox en mode Bridge :
https://dev.freebox.fr/bugs/task/22301
"""
import argparse
import sys
import logging
import logging.handlers
import os
import urllib.request, json
logger = logging.getLogger(os.path.splitext(os.path.basename(sys.argv[0]))[0])
class CustomFormatter(argparse.RawDescriptionHelpFormatter,
argparse.ArgumentDefaultsHelpFormatter):
pass
def parse_args(args=sys.argv[1:]):
"""Parse arguments."""
parser = argparse.ArgumentParser(
description=sys.modules[__name__].__doc__,
formatter_class=CustomFormatter)
parser.add_argument("output_file", help="Write to")
g = parser.add_mutually_exclusive_group()
g.add_argument("--debug", "-d", action="store_true",
default=False,
help="enable debugging")
g.add_argument("--silent", "-s", action="store_true",
default=False,
help="don't log to console")
return parser.parse_args(args)
def setup_logging(options):
"""Configure logging."""
root = logging.getLogger("")
root.setLevel(logging.WARNING)
logger.setLevel(options.debug and logging.DEBUG or logging.INFO)
if not options.silent:
ch = logging.StreamHandler()
ch.setFormatter(logging.Formatter(
"%(levelname)s[%(name)s] %(message)s"))
root.addHandler(ch)
def get_fbx_params(api_url):
"""Read parms from Freebox API url
Returns an array of strings"""
with urllib.request.urlopen(api_url) as url:
data = json.loads(url.read().decode())
logger.debug("Downloaded content: {}".format(data))
params=[]
for param, value in data.items():
params.append("{}={}".format(param,value))
logger.debug("Array of params: {}".format(params))
return params
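# Hedged example (keys are illustrative, not necessarily the real Freebox payload):
# a JSON body like {"api_version": "8.0", "device_name": "Freebox Server"} would
# be flattened to ["api_version=8.0", "device_name=Freebox Server"].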
def write_to_service_file(file_name,params):
"""Write Avahi service file"""
with open(file_name,'w') as output:
output.write("""<service-group>
<name replace-wildcards="yes">Freebox Server</name>
<service protocol="ipv4">
<type>_fbx-api._tcp</type>
<port>80</port>
<host-name>mafreebox.freebox.fr</host-name>
""")
for param in params:
output.write(" <txt-record>{}</txt-record>\n".format(param))
output.write(""" </service>
</service-group>""")
if __name__ == "__main__":
options = parse_args()
setup_logging(options)
try:
logger.debug("Parameters: {}".format(options.output_file))
fbx_params = get_fbx_params("http://mafreebox.freebox.fr/api_version")
write_to_service_file(options.output_file,fbx_params)
except Exception as e:
logger.exception("%s", e)
sys.exit(1)
sys.exit(0)
|
python
|
import matplotlib.pyplot as plt
from matplotlib import ticker
from matplotlib.colors import LogNorm
import matplotlib
import numpy as np
get_ipython().run_line_magic('matplotlib', 'inline')
def Plotting(ebs,aps,Na,stime,mus,Np,Ne,Nmu):
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(7, 5))  # single axes: the rest of the function draws on one panel
#extent=[ebs.min(), ebs.max(), aps.min(), aps.max()]
#for i, mu in enumerate(mus):
#for j in range(0,Np):\
    Arr = []
    for i in range(0, Nmu):
        j = 0  # assumption: particle index from the commented-out loop above; fixed to 0 so the code runs
        Arr.append(np.concatenate(stime[i, :, j, :]))  # .ravel()
    Arr = np.array(Arr).reshape([Ne, Na, Nmu])
print(Arr)
extent=[ebs.min(), ebs.max(), aps.min(), aps.max()]
ax.set_xlim(extent[0], extent[1])
ax.set_ylim(extent[2], extent[3])
ax.set_xlabel("Binary Eccentricity $e_b$ ")
ax.set_ylabel("Test particle semimajor axis $a_p$")
    # 'stime1' was undefined in the original; a 2-D slice of the collected
    # survival times is assumed here (transpose it if the axes come out swapped).
    im = ax.imshow(Arr[:,:,0], aspect='auto', origin="lower", interpolation='nearest', cmap="viridis", extent=extent)
# ebs = np.linspace(0.,0.7,Ne)
ab_s = np.zeros(Na)
    mu = mus[0]  # 'MU' was undefined in the original; the first mass ratio is used as a stand-in
for i,eb in enumerate(ebs):
ab_s[i] = 1.6 + 5.1*eb-2.22*(eb**2)+4.12*mu-4.27*eb*mu-5.09*(mu**2)+4.61*(eb**2)*mu**2
# ab_s[i] = 2.278 + 3.824*eb - 1.71*(eb**2)
plt.plot(ebs,ab_s,'c', marker = "^",markersize = 7)
plt.xlabel('$e_b$')
plt.ylabel('$a_b(a_c$)')
plt.title('MU = {} : Critical semimajor axis $a_c$ as a function of eccentricity $e_b$'.format(mu))
cb = plt.colorbar(im, ax=ax)
cb.solids.set_rasterized(True)
cb.set_label("Particle Survival Times")
plt.show()
#plt.savefig("Classic_results.pdf")
|
python
|
import re
import math
import operator
import collections
import unittest
__version__ = (0, 0, 11)
_non_word_re = re.compile(r'[^\w, ]+')
__all__ = ('FuzzySet',)
class FuzzySet(object):
def __init__(self, iterable=(), gram_size_lower=2, gram_size_upper=3):
self.exact_set = {}
self.match_dict = collections.defaultdict(list)
self.items = {}
self.gram_size_lower = gram_size_lower
self.gram_size_upper = gram_size_upper
for i in range(gram_size_lower, gram_size_upper + 1):
self.items[i] = []
for value in iterable:
self.add(value)
def add(self, value):
lvalue = value.lower()
if lvalue in self.exact_set:
return False
for i in range(self.gram_size_lower, self.gram_size_upper + 1):
self.__add(value, i)
def __add(self, value, gram_size):
lvalue = value.lower()
items = self.items[gram_size]
idx = len(items)
items.append(0)
grams = _gram_counter(lvalue, gram_size)
norm = math.sqrt(sum(x**2 for x in grams.values()))
for gram, occ in grams.items():
self.match_dict[gram].append((idx, occ))
items[idx] = (norm, lvalue)
self.exact_set[lvalue] = value
def __getitem__(self, value):
return self._getitem(value, exact_match_only=True, min_match_score=0.5)
def _getitem(self, value, exact_match_only, min_match_score):
lvalue = value.lower()
exact_match = self.exact_set.get(lvalue)
if exact_match_only and exact_match:
return [(1, exact_match)]
for i in range(self.gram_size_upper, self.gram_size_lower - 1, -1):
results = self.__get(value, i, min_match_score)
if exact_match:
assert exact_match in [row for val, row in results]
if results:
return results
raise KeyError(value)
def __get(self, value, gram_size, min_match_score=0.5):
lvalue = value.lower()
matches = collections.defaultdict(float)
grams = _gram_counter(lvalue, gram_size)
items = self.items[gram_size]
norm = math.sqrt(sum(x**2 for x in grams.values()))
for gram, occ in grams.items():
for idx, other_occ in self.match_dict.get(gram, ()):
matches[idx] += occ * other_occ
if not matches:
return None
# cosine similarity
results = [(match_score / (norm * items[idx][0]), items[idx][1])
for idx, match_score in matches.items()]
results.sort(reverse=True, key=operator.itemgetter(0))
return [(score, self.exact_set[lval]) for score, lval in results
if score >= min_match_score]
def get(self, key, default=None, exact_match_only=True, min_match_score=0.5):
try:
return self._getitem(key, exact_match_only, min_match_score)
except KeyError:
return default
def __nonzero__(self):
return bool(self.exact_set)
def __len__(self):
return len(self.exact_set)
def _gram_counter(value, gram_size=2):
result = collections.defaultdict(int)
for value in _iterate_grams(value, gram_size):
result[value] += 1
return result
def _iterate_grams(value, gram_size=2):
simplified = '-' + value + '-'
len_diff = gram_size - len(simplified)
if len_diff > 0:
        simplified += '-' * len_diff  # pad the n-gram source itself (padding 'value' had no effect)
for i in range(len(simplified) - gram_size + 1):
yield simplified[i:i + gram_size]
class FuzzySetTest(unittest.TestCase):
def get_from_set(self, fuzzy_set, search_term, expected_rows, exact_match_only=False, min_match_score=0.5):
rows = fuzzy_set.get(search_term, [], exact_match_only=exact_match_only, min_match_score=min_match_score)
vals = [val for _, val in rows]
self.assertEqual(expected_rows, vals)
def test_simple(self):
rows = [
"Ala ma kota",
"Ala ma psa",
"Zuzia ma psa",
"Zuzia ma kanarka"
]
fuzzy_set = FuzzySet(rows)
self.get_from_set(fuzzy_set, "ia ma psa", ["Zuzia ma psa", "Ala ma psa"])
def test_fuzzy_set_return_all_matching_rows_even_when_exact_match_is_there(self):
rows = [
"Ala ma kota",
"Ala ma kota.",
]
fuzzy_set = FuzzySet(rows)
self.get_from_set(fuzzy_set, "Ala ma kota", ["Ala ma kota"], exact_match_only=True)
self.get_from_set(fuzzy_set, "Ala ma kota", ["Ala ma kota", "Ala ma kota."], exact_match_only=False)
def test_fuzzy_set_works_well_for_short_words(self):
rows = [
"}",
"{,",
",{",
"a",
"b",
"c",
"xyz",
"xyzabc",
]
fuzzy_set = FuzzySet(rows)
self.get_from_set(fuzzy_set, "}", ["}"])
self.get_from_set(fuzzy_set, "{", ["{,", ",{"], min_match_score=0.35)
self.get_from_set(fuzzy_set, "{", [], min_match_score=0.5)
self.get_from_set(fuzzy_set, "ab", [], min_match_score=0.5)
self.get_from_set(fuzzy_set, "ab", ["a", "b"], min_match_score=0.35)
self.get_from_set(fuzzy_set, "ab", ["a", "b"], min_match_score=0.35)
self.get_from_set(fuzzy_set, "xy", ["xyz"], min_match_score=0.35)
# TODO conclusion - use 0.35 for 1 or 2 sign words and 0.5 or more for rest
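# --- Usage sketch (added for illustration; not part of the original module) ---
# Shows how the character n-gram decomposition and the cosine-similarity lookup
# fit together. The sample strings below are made up.
def _fuzzy_demo():
    demo_set = FuzzySet(["apple pie", "apple tart", "banana bread"])
    print(list(_iterate_grams("apple", 3)))                   # ['-ap', 'app', 'ppl', 'ple', 'le-']
    print(demo_set.get("aple pie", exact_match_only=False))   # (score, value) pairs, best match first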
|
python
|
import requests
import autoscaler.conf.engine_config as eng
import os
def remove_old_create_new(f_name, header):
if os.access(f_name, os.R_OK):
os.remove(f_name)
write_to_file(header, f_name)
def write_to_file(stats, f_name):
    # use a context manager so the file handle is always closed
    with open(f_name, "a") as csv_file:
        csv_file.write(stats)
def get_dpid(ip):
ryu_controller = eng.RYU_CONTROLLER
url = 'http://'+ryu_controller+'/v1.0/topology/switches'
data = requests.get(url).json()
#print("data: %s" %str(data))
for item in data:
if ip == item["ip"]:
return item["dpid"]
def calc_bw(prev, curr):
prev = float(prev)
curr = float(curr)
bw = ((curr - prev)/eng.NETWORK_MONITORING_INTERVAL)
return bw
def calc_bw_left(prev, curr):
bw = calc_bw(prev, curr)
max_bw = 125000000 #500000000 Bits
bw_left = max_bw - bw
return bw_left
# Reference: https://stackoverflow.com/questions/12523586/python-format-size-application-converting-b-to-kb-mb-gb-tb/37423778
def bytes_2_human_readable_bits(number_of_bytes):
if number_of_bytes < 0:
raise ValueError("!!! number_of_bytes can't be smaller than 0 !!!")
step_to_greater_unit = 1000.
number_of_bits = float(number_of_bytes)*8
unit = 'bits/s'
if (number_of_bits / step_to_greater_unit) >= 1:
number_of_bits /= step_to_greater_unit
unit = 'Kb/s'
if (number_of_bits / step_to_greater_unit) >= 1:
number_of_bits /= step_to_greater_unit
unit = 'Mb/s'
if (number_of_bits / step_to_greater_unit) >= 1:
number_of_bits /= step_to_greater_unit
unit = 'Gb/s'
if (number_of_bits / step_to_greater_unit) >= 1:
number_of_bits /= step_to_greater_unit
unit = 'Tb/s'
number_of_bits = "%.3f" % number_of_bits
return str(number_of_bits) + ' ' + unit
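# --- Usage sketch (added for illustration; the byte counts below are made up) ---
# Two cumulative byte counters sampled NETWORK_MONITORING_INTERVAL seconds apart
# are turned into a rate and pretty-printed; the result depends on that interval.
def _bw_demo():
    prev_bytes, curr_bytes = 1000000, 3500000
    rate = calc_bw(prev_bytes, curr_bytes)            # bytes/s over the monitoring interval
    print(bytes_2_human_readable_bits(rate))          # e.g. "4.000 Mb/s" for a 5 s interval
    print(calc_bw_left(prev_bytes, curr_bytes))       # headroom against the 125,000,000 byte/s cap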
|
python
|
from pathlib import Path
from math import ceil
from sys import maxsize
from pprint import pprint
def parse_reactions(raw_text):
reactions = dict()
r_inputs = reactions["inputs"] = list()
r_outputs = reactions["outputs"] = list()
for line in raw_text.split("\n"):
if len(line) == 0:
continue
inputs, output = line.strip().split(" => ")
output_value, output_key = output.split(" ")
r_outputs.append({output_key: int(output_value)})
pairs = dict()
for pair in inputs.split(", "):
value, key = pair.split(" ")
pairs[key] = int(value)
r_inputs.append(pairs)
return reactions
def get_chemical_quantity(reactions, chemical, fuel=1):
inputs = reactions.get("inputs")
outputs = reactions.get("outputs")
if chemical == "FUEL":
return fuel
quantity = 0
for p, pairs in enumerate(inputs):
if chemical in pairs:
output = outputs[p]
for output_chemical, output_quantity in output.items():
args = reactions, output_chemical, fuel
chemical_quantity = get_chemical_quantity(*args)
current_quantity = pairs.get(chemical)
used_quantity = ceil(chemical_quantity / output_quantity)
quantity = quantity + used_quantity * current_quantity
return quantity
def get_fuel_units(reactions):
min_fuel = 1
max_fuel = maxsize - 1
available_ore = 10**12
while (max_fuel - min_fuel) > 1:
make = (min_fuel + max_fuel) // 2
ores = get_chemical_quantity(reactions, "ORE", make)
if ores <= available_ore:
min_fuel = make
else:
max_fuel = make
return min_fuel
if __name__ == "__main__":
#reactions = parse_reactions("\n".join([
# "157 ORE => 5 NZVS",
# "165 ORE => 6 DCFZ",
# "44 XJWVT, 5 KHKGT, 1 QDVJ, 29 NZVS, 9 GPVTF, 48 HKGWZ => 1 FUEL",
# "12 HKGWZ, 1 GPVTF, 8 PSHF => 9 QDVJ","179 ORE => 7 PSHF",
# "177 ORE => 5 HKGWZ",
# "7 DCFZ, 7 PSHF => 2 XJWVT",
# "165 ORE => 2 GPVTF",
# "3 DCFZ, 7 NZVS, 5 HKGWZ, 10 PSHF => 8 KHKGT"
#]))
reactions = parse_reactions(Path("../etc/aoc14.txt").read_text())
count = get_chemical_quantity(reactions, "ORE")
print("Part 1:", count)
fuel = get_fuel_units(reactions)
print("Part 2:", fuel)
|
python
|
from dependency_injector.wiring import inject, Provide
from ...container import Container
from ...service import Service
@inject
def test_function(service: Service = Provide[Container.service]):
return service
|
python
|
from .models import Podcasts,Votes,Comments,Profile
from django import forms
class RateForm(forms.ModelForm):
class Meta:
model=Votes
exclude=['user','podcast']
class PostForm(forms.ModelForm):
class Meta:
model=Podcasts
exclude=['user','design','usability','content']
class ReviewForm(forms.ModelForm):
class Meta:
model=Comments
exclude=['user','pro_id']
class UpdateForm(forms.ModelForm):
class Meta:
model=Profile
exclude=['user']
|
python
|
"""Test suite for the pyproject_cookiecutter_test package."""
|
python
|
import tensorflow as tf
from einops.layers import tensorflow as tfeinsum
from tensorflow.keras import initializers, layers
from .layers import DiagonalAffine, PatchEmbed, PerSampleDropPath
def mlp_block(x, hidden_units, out_units, activation, dropout_rate, name=None):
x = layers.Dense(units=hidden_units, name=name + "_dense_1")(x)
x = layers.Activation(activation, name=name + "_act_1")(x)
x = layers.Dropout(dropout_rate, name=name + "_dropout_1")(x)
x = layers.Dense(units=out_units, name=name + "_dense_2")(x)
x = layers.Dropout(dropout_rate, name=name + "_dropout_2")(x)
return x
def layers_scale_mlp_blocks(
x,
dims,
dropout_rate,
drop_path_rate,
activation,
init_values,
num_patches,
name=None,
):
inputs = tf.identity(x)
x = DiagonalAffine(dims, name=name + "_affine_1")(x)
x = tf.transpose(x, (0, 2, 1), name=name + "_transpose_1")
x = layers.Dense(num_patches, name=name + "_dense_1")(x)
x = tf.transpose(x, (0, 2, 1), name=name + "_transpose_2")
x = DiagonalAffine(
dims,
alpha_initializer=initializers.Constant(tf.fill([dims], init_values)),
use_beta=False,
name=name + "_affine_2",
)(x)
x = PerSampleDropPath(drop_path_rate, name=name + "_drop_path_1")(x)
x = layers.Add(name=name + "_add_1")([inputs, x])
z = x
x = DiagonalAffine(dims, name=name + "_affine_3")(x)
x = mlp_block(x, 4 * dims, dims, activation, dropout_rate, name=name + "_mlp")
x = DiagonalAffine(
dims,
alpha_initializer=initializers.Constant(tf.fill([dims], init_values)),
use_beta=False,
name=name + "_affine_4",
)(x)
x = PerSampleDropPath(drop_path_rate, name=name + "_drop_path_2")(x)
x = layers.Add(name=name + "_add_2")([z, x])
return x
def resmlp(
input_shape=(224, 224, 3),
patch_width=16,
patch_height=16,
num_classes=1000,
embed_dims=768,
depth=12,
dropout_rate=0.0,
drop_path_rate=0.0,
init_scale=1e-4,
activation="gelu",
include_top=True,
model_name=None,
):
inputs = x = tf.keras.layers.Input(shape=input_shape)
x = PatchEmbed(
x,
patch_width=patch_width,
patch_height=patch_height,
embed_dims=embed_dims,
name="patch_embedding",
)
shape = x.get_shape()
for i in range(depth):
x = layers_scale_mlp_blocks(
x,
dims=embed_dims,
dropout_rate=dropout_rate,
drop_path_rate=drop_path_rate,
init_values=init_scale,
activation=activation,
num_patches=shape[1],
name=f"block_{i}",
)
x = DiagonalAffine(dims=embed_dims, name="feature_affine")(x)
if include_top:
x = tfeinsum.Reduce("b n c -> b c", "mean")(x)
x = layers.Dense(num_classes, name="predictions")(x)
model = tf.keras.models.Model(inputs, x, name=model_name)
return model
def ResMlp12(
input_shape=(224, 224, 3),
patch_width=16,
patch_height=16,
embed_dims=384,
model_name="resmlp12",
**kwargs,
):
return resmlp(
input_shape=input_shape,
patch_width=patch_width,
patch_height=patch_height,
embed_dims=embed_dims,
depth=12,
init_scale=1e-1,
model_name=model_name,
**kwargs,
)
def ResMlp24(
input_shape=(224, 224, 3),
patch_width=16,
patch_height=16,
embed_dims=384,
model_name="resmlp24",
**kwargs,
):
return resmlp(
input_shape=input_shape,
patch_width=patch_width,
patch_height=patch_height,
embed_dims=embed_dims,
depth=24,
init_scale=1e-5,
model_name=model_name,
**kwargs,
)
def ResMlp36(
input_shape=(224, 224, 3),
patch_width=16,
patch_height=16,
embed_dims=384,
model_name="resmlp36",
**kwargs,
):
return resmlp(
input_shape=input_shape,
patch_width=patch_width,
patch_height=patch_height,
embed_dims=embed_dims,
depth=36,
init_scale=1e-6,
model_name=model_name,
**kwargs,
)
def ResMlpB24(
input_shape=(224, 224, 3),
patch_width=8,
patch_height=8,
embed_dims=768,
model_name="resmlpB24",
**kwargs,
):
return resmlp(
input_shape=input_shape,
patch_width=patch_width,
patch_height=patch_height,
embed_dims=embed_dims,
depth=24,
init_scale=1e-6,
model_name=model_name,
**kwargs,
)
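# --- Usage sketch (added for illustration; requires the local .layers module) ---
# Builds the smallest variant with random weights and prints its layer summary,
# which is enough to check that the blocks wire together.
def _resmlp_demo():
    model = ResMlp12(input_shape=(224, 224, 3), num_classes=1000)
    model.summary()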
|
python
|
#!/usr/bin/python
# import RPi.GPIO as GPIO
import spidev, time, math, random, logging
class Gibson_LED_Driver:
## name constants ##
proglist = {'static':0, 'fadeupdown':1, 'flyingcolor':2, 'throb':3, 'randomstatic':4, 'spin':5}
colorlist = {'single':0, 'alternating':1, 'wide-alternating':2, 'colorwheel':3, 'rgb':4, 'random':5}
def __init__(self, spi_x=0, spi_y=0, nLeds=50):
self.spi = spidev.SpiDev()
self.spi.open(spi_x,spi_y)
self.spi.max_speed_hz = 25000000
# set the number of LEDs in string
self.nLeds = nLeds
#fixing scope
self.correctPixels = []
# store every LED as a pixel
self.pixels = []
for b in range(0,nLeds):
self.pixels.append(1)
self.pixels.append(0)
self.pixels.append(0)
self.pixels.append(0)
#make the duplicate for the fade function
self.pixelsCopy = list(self.pixels)
#init program values
self.stepnum=0
self.progname = Gibson_LED_Driver.proglist['static']
self.colorpattern = Gibson_LED_Driver.colorlist['single']
self.color = (255,100,255)
self.altcolor = (255,0,255)
#default calibration
self.progsleep = 0.1
self.width = 3
#log the start up
logging.info("New Gibson_LED_Driver initiated @" + str(time.time()))
logging.info("Programs: "+str(self.proglist.keys()))
logging.info("Color Patterns: "+str(self.colorlist.keys()))
## global settings aka 'calibration'
#speed is in Hz or steps/second
def calibrate(self, speed, width):
logging.info("calibrate speed: "+str(speed)+" & width: "+str(width))
self.progsleep = 1.0/float(speed)
self.width = int(width)
# not super necessary as it should close on its own
def close(self):
logging.info("Closing")
if (self.spi != None):
self.spi.close()
self.f = None
def brightnessCorrect(self, val):
##this corrects the luminosity of LEDs - they are not linear ##
## This should be done in a look-up table for speed, but all we really need is 30 frames
## per second to appear fluid so no one should notice
## not perfect yet, but much better than direct metering
#return int(math.ceil(math.pow(2,(val*4) / 128) - 1))
#return int(math.ceil((math.pow(2,val/255) - 1) * 255))
return int(val)
## This function actually puts the bits on the wire
## in some cases a [1,0,0,0] string may be read as an end of line
## for SPI and the rest of the LED's will not update - still researching
def update(self):
# logging.debug("update")
#correct for LED brightness s-curve instead of linear
self.correctPixels = []
for i in range(0, self.nLeds):
#4 values for every pixel (y, R, G, B)
self.correctPixels.append(1)
red = self.brightnessCorrect(self.pixels[i*4 + 1])
gr = self.brightnessCorrect(self.pixels[i*4 + 2])
blu = self.brightnessCorrect(self.pixels[i*4 + 3])
self.correctPixels.append(red)
self.correctPixels.append(gr)
self.correctPixels.append(blu)
#print str(self.correctPixels)
self.spi.writebytes(self.correctPixels)
#self.spi.writebytes(self.pixels)
self.spi.writebytes([0,0,0,0])
def setPixel(self, index, color):
# if(color[0] == 0 and color[1] == 0 and color[2] == 0):
# #fixing the blackout glitch when the color is black, possibly not an issue
# color = (1,1,1)
self.pixels[index*4:index*4+4] = (1, color[0], color[1], color[2])
def getPixelColor(self, index):
thiscolor = []
thiscolor.append(self.pixels[index*4 + 1])
thiscolor.append(self.pixels[index*4 + 2])
thiscolor.append(self.pixels[index*4 + 3])
return thiscolor
def getPixelCopyColor(self, index):
thiscolor = []
thiscolor.append(self.pixelsCopy[index*4 + 1])
thiscolor.append(self.pixelsCopy[index*4 + 2])
thiscolor.append(self.pixelsCopy[index*4 + 3])
return thiscolor
def setAll(self, color):
for i in range(0, self.nLeds):
self.setPixel(i,color)
def fadeupdown(self, speed):
for i in range(1,256):
self.setAll([i,i,i])
self.update()
time.sleep(speed/1000.0)
for i in reversed(range(1,256)):
self.setAll([i,i,i])
self.update()
time.sleep(speed/1000.0)
# this sends a color orbiting the strand once
def flyingcolor(self, color, speed=50):
for p in range(self.nLeds):
temppixel = self.getPixelColor(p)
self.setPixel(p, color)
self.update()
self.setPixel(p, temppixel)
time.sleep(speed/1000.0)
    def throb(self, speed):
        # NOT IMPLEMENTED YET: intended to pulse brightness along a sine curve
        pass
def static(self, color):
self.setAll(color)
self.update()
def randomstatic(self):
for p in range(self.nLeds):
randomred = random.randint(1,255)
randomgreen = random.randint(1,255)
randomblue = random.randint(1,255)
randcolor = [randomred, randomgreen, randomblue]
self.setPixel(p, randcolor)
#self.update()
def rgb(self):
for p in range(self.nLeds):
if p%3 == 0:
self.setPixel(p, [255,0,0])
elif p%3 == 1:
self.setPixel(p, [0,255,0])
else:
self.setPixel(p, [0,0,255])
#self.update()
def colorwheel(self):
self.setPixel(0, [125,255,0]) #
self.setPixel(1, [157,255,0])
self.setPixel(2, [189,255,0])
self.setPixel(3, [255,255,0]) #
self.setPixel(4, [125,222,0])
self.setPixel(5, [125,190,0])
self.setPixel(6, [125,158,0])
self.setPixel(7, [255,125,0]) #
self.setPixel(8, [255,94,0])
self.setPixel(9, [255,62,0])
self.setPixel(10,[255,30,0])
self.setPixel(11,[255,0,0]) #
self.setPixel(12,[255,0,30])
self.setPixel(13,[255,0,62])
self.setPixel(14,[255,0,94])
self.setPixel(15,[255,0,126]) #
self.setPixel(16,[255,0,158])
self.setPixel(17,[255,0,190])
self.setPixel(18,[255,0,222])
self.setPixel(19,[255,0,255]) #
self.setPixel(20,[222,0,255])
self.setPixel(21,[190,0,255])
self.setPixel(22,[158,0,255])
self.setPixel(23,[124,0,255]) #
self.setPixel(24,[94,0,255])
self.setPixel(25,[62,0,255])
self.setPixel(26,[32,0,255])
self.setPixel(27,[0,0,255]) #
self.setPixel(28,[32,0,255])
self.setPixel(29,[62,0,255])
self.setPixel(30,[94,0,255])
self.setPixel(31,[124,0,255]) #
self.setPixel(32,[83,43,255])
self.setPixel(33,[42,83,255])
self.setPixel(34,[0,125,255]) #
self.setPixel(35,[0,158,255])
self.setPixel(36,[0,190,255])
self.setPixel(37,[0,222,255])
self.setPixel(38,[0,255,255]) #
self.setPixel(39,[0,255,222])
self.setPixel(40,[0,255,190])
self.setPixel(41,[0,255,157])
self.setPixel(42,[0,255,123]) #
self.setPixel(43,[0,255,94])
self.setPixel(44,[0,255,62])
self.setPixel(45,[0,255,30])
self.setPixel(46,[0,255,0]) #
self.setPixel(47,[32,255,0])
self.setPixel(48,[64,255,0])
self.setPixel(49,[96,255,0])
#self.update()
def spiniteration(self, msdelay=25):
        tempcolor = self.getPixelColor(self.nLeds - 1)  # last pixel in the string rather than a hard-coded 50-LED index
for p in reversed(range(1,self.nLeds)):
self.setPixel(p, self.getPixelColor(p-1))
self.setPixel(0,tempcolor)
time.sleep(msdelay/1000.0)
self.update()
def alternate(self, color1, color2):
for p in range(self.nLeds):
if p%self.width == 0:
self.setPixel(p, color1)
else:
self.setPixel(p, color2)
#self.update()
def resetStepNum(self):
self.stepnum = 0
def setColor(self,name,color1=[255,255,255],color2=[255,0,255]):
# print("setting color")
try:
self.color=(color1[0], color1[1], color1[2])
self.altcolor=(color2[0], color2[1], color2[2])
self.colorpattern = self.colorlist[name]
# print("Setting color: "+str(self.color)+" alt color: "+str(self.altcolor)+" and pattern: "+str(self.colorpattern))
logging.debug("Setting color: "+str(self.color)+" alt color: "+str(self.altcolor)+" and pattern: "+str(self.colorpattern))
except KeyError as e:
logging.error(e)
self.colorpattern = self.colorlist['single']
        except Exception as e:
logging.error(e)
# print(e)
self.colorpattern = self.colorlist['single']
# print("..color complete")
def setProgram(self,name):
# logging.info("Set program to: "+name)
self.resetStepNum()
try:
self.progname = Gibson_LED_Driver.proglist[name]
except KeyError as e:
logging.error(e)
self.progname = self.proglist['static']
# logging.debug("progname: "+str(self.progname))
# logging.debug("colorpattern: "+str(self.colorpattern))
# logging.info("Set colorpattern to: "+str(self.colorpattern))
if(self.colorpattern == self.colorlist['alternating']):
for p in range(self.nLeds):
if p%2 == 0:
self.setPixel(p, self.color)
else:
self.setPixel(p, self.altcolor)
elif(self.colorpattern == self.colorlist['colorwheel']):
self.colorwheel()
elif(self.colorpattern == self.colorlist['wide-alternating']):
for p in range(self.nLeds):
if p%(2*self.width) < self.width:
self.setPixel(p, self.color)
else:
self.setPixel(p, self.altcolor)
elif(self.colorpattern == self.colorlist['rgb']):
self.rgb()
elif(self.colorpattern == self.colorlist['random']):
self.randomstatic()
elif(self.colorpattern == self.colorlist['single']):
# logging.debug('Color pattern single '+str(self.color))
self.setAll(self.color)
if(self.progname == self.proglist['fadeupdown']):
#self.setAll(self.color)
#uses the previous pattern to fade instead of just one color
#to avoid really bad rounding closer to the bottom of cycles we duplicate the original pattern:
self.pixelsCopy = list(self.pixels)
elif(self.progname == self.proglist['flyingcolor']):
self.setAll(self.color)
elif(self.progname == self.proglist['throb']):
pass
elif(self.progname == self.proglist['randomstatic']):
self.randomstatic()
elif(self.progname == self.proglist['spin']):
pass
self.update()
def increment(self):
#if(self.progname == self.proglist['static']):
# do nothing!
# logging.debug("inc:"+str(self.stepnum))
if(self.progname == self.proglist['fadeupdown']):
if(self.stepnum > self.nLeds*2):
self.resetStepNum()
if(self.stepnum < self.nLeds):
#get darker
for n in range(self.nLeds):
nColor = self.getPixelCopyColor(n)
newR = nColor[0]*(float(self.nLeds-self.stepnum)/float(self.nLeds))
newG = nColor[1]*(float(self.nLeds-self.stepnum)/float(self.nLeds))
newB = nColor[2]*(float(self.nLeds-self.stepnum)/float(self.nLeds))
nextColor = ( int(round(newR,0)), int(round(newG,0)), int(round(newB,0)) )
self.setPixel(n, nextColor)
else:
#gets brighter
for n in range(self.nLeds):
nColor = self.getPixelCopyColor(n)
newR = nColor[0]*(float(self.stepnum % self.nLeds)/float(self.nLeds))
newG = nColor[1]*(float(self.stepnum % self.nLeds)/float(self.nLeds))
newB = nColor[2]*(float(self.stepnum % self.nLeds)/float(self.nLeds))
nextColor = ( int(round(newR,0)), int(round(newG,0)), int(round(newB,0)) )
self.setPixel(n, nextColor)
elif(self.progname == self.proglist['flyingcolor']):
# NOT IMPLEMENTED YET
logging.error("flyingcolor not implemented"+str(self.stepnum))
elif(self.progname == self.proglist['throb']):
# NOT IMPLEMENTED YET
logging.error("throb not implemented"+str(self.stepnum))
elif(self.progname == self.proglist['randomstatic']):
self.randomstatic()
elif(self.progname == self.proglist['spin']):
            tempcolor = self.getPixelColor(self.nLeds - 1)  # last pixel rather than hard-coded index 49
for p in reversed(range(1,self.nLeds)):
self.setPixel(p, self.getPixelColor(p-1))
self.setPixel(0,tempcolor)
elif(self.progname == self.proglist['static']):
# logging.debug('static prog')
pass
time.sleep(self.progsleep)
self.stepnum = self.stepnum + 1
self.update()
|
python
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import math
from typing import Dict, Optional
from tabulate import tabulate
class StatsItem:
def __init__(self, key: Optional[str] = None) -> None:
self._key = key
self.reset()
@property
def key(self) -> str:
return self._key
def reset(self):
self._m0 = 0
self._m1 = 0.0
self._m2 = 0.0
self._min_val = float("inf")
self._max_val = float("-inf")
def add(self, v: float) -> None:
# Welford algorithm.
self._m0 += 1
delta = v - self._m1
self._m1 += delta / self._m0
self._m2 += delta * (v - self._m1)
self._min_val = min(self._min_val, v)
self._max_val = max(self._max_val, v)
def count(self) -> int:
return self._m0
def mean(self) -> float:
return self._m1
def var(self, ddof: int = 0) -> float:
return self._m2 / (self._m0 - ddof) if self._m0 > 1 else float("nan")
def std(self, ddof: int = 0) -> float:
return math.sqrt(self.var(ddof))
def min(self) -> float:
return self._min_val
def max(self) -> float:
return self._max_val
class StatsDict:
def __init__(self) -> None:
self._dict = {}
def __getitem__(self, key: str) -> StatsItem:
return self._dict[key]
def reset(self):
self._dict.clear()
def add(self, k: str, v: float) -> None:
if k in self._dict:
self._dict[k].add(v)
else:
item = StatsItem(k)
item.add(v)
self._dict[k] = item
def add_dict(self, d: Dict[str, float]) -> None:
for k, v in d.items():
self.add(k, v)
def update(self, stats: StatsDict) -> None:
self._dict.update(stats._dict)
def table(self, info: Optional[str] = None) -> str:
h = ["info"] if info is not None else []
h += ["key", "mean", "std", "min", "max", "count"]
t = []
for k, v in self._dict.items():
row = [info] if info is not None else []
row += [k, v.mean(), v.std(), v.min(), v.max(), v.count()]
t.append(row)
return tabulate(t,
h,
numalign="right",
stralign="right",
floatfmt=".8f")
|
python
|
# encoding: utf-8
import copy
import datetime
from secrets import token_urlsafe
from sqlalchemy import types, Column, Table, ForeignKey, orm
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.ext.mutable import MutableDict
import ckan.plugins.toolkit as tk
from ckan.model import meta, User, DomainObject
__all__ = [u"ApiToken", u"api_token_table"]
def _make_token():
nbytes = tk.asint(tk.config.get(u"api_token.nbytes", 32))
return token_urlsafe(nbytes)
api_token_table = Table(
u"api_token",
meta.metadata,
Column(u"id", types.UnicodeText, primary_key=True, default=_make_token),
Column(u"name", types.UnicodeText),
Column(u"user_id", types.UnicodeText, ForeignKey(u"user.id")),
Column(u"created_at", types.DateTime, default=datetime.datetime.utcnow),
Column(u"last_access", types.DateTime, nullable=True),
Column(u"plugin_extras", MutableDict.as_mutable(JSONB)),
)
class ApiToken(DomainObject):
def __init__(self, user_id=None, name=None):
self.id = _make_token()
self.user_id = user_id
self.name = name
@classmethod
def get(cls, id):
if not id:
return None
return meta.Session.query(cls).get(id)
@classmethod
def revoke(cls, id):
token = cls.get(id)
if token:
meta.Session.delete(token)
meta.Session.commit()
return True
return False
def touch(self, commit=False):
self.last_access = datetime.datetime.utcnow()
if commit:
meta.Session.commit()
def set_extra(self, key, value, commit=False):
extras = self.plugin_extras or {}
extras[key] = value
self.plugin_extras = copy.deepcopy(extras)
if commit:
meta.Session.commit()
meta.mapper(
ApiToken,
api_token_table,
properties={
u"owner": orm.relation(
User, backref=orm.backref(u"api_tokens", cascade=u"all, delete")
)
},
)
|
python
|
# Copyright (c) 2021, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cugraph.structure import graph_primtypes_wrapper
from cugraph.structure.number_map import NumberMap
import cudf
import dask_cudf
class simpleDistributedGraphImpl:
class EdgeList:
def __init__(self, ddf):
self.edgelist_df = ddf
self.weights = False
# FIXME: Edge Attribute not handled
# class AdjList:
# Not Supported
# class transposedAdjList:
# Not Supported
class Properties:
def __init__(self, properties):
self.multi_edge = getattr(properties, 'multi_edge', False)
self.directed = properties.directed
self.renumbered = False
self.store_transposed = False
self.self_loop = None
self.isolated_vertices = None
self.node_count = None
self.edge_count = None
self.weighted = False
def __init__(self, properties):
# Structure
self.edgelist = None
self.renumber_map = None
self.properties = simpleDistributedGraphImpl.Properties(properties)
self.source_columns = None
self.destination_columns = None
# Functions
def __from_edgelist(
self,
input_ddf,
source="source",
destination="destination",
edge_attr=None,
renumber=True,
store_transposed=False,
):
if not isinstance(input_ddf, dask_cudf.DataFrame):
raise Exception("input should be a dask_cudf dataFrame")
if self.properties.directed is False:
raise Exception("Undirected distributed graph not supported")
s_col = source
d_col = destination
if not isinstance(s_col, list):
s_col = [s_col]
if not isinstance(d_col, list):
d_col = [d_col]
if not (
set(s_col).issubset(set(input_ddf.columns))
and set(d_col).issubset(set(input_ddf.columns))
):
raise Exception(
"source column names and/or destination column "
"names not found in input. Recheck the source "
"and destination parameters"
)
ddf_columns = s_col + d_col
if edge_attr is not None:
if not (set([edge_attr]).issubset(set(input_ddf.columns))):
raise Exception(
"edge_attr column name not found in input."
"Recheck the edge_attr parameter")
            self.properties.weighted = True  # the weighted flag is defined on Properties
ddf_columns = ddf_columns + [edge_attr]
input_ddf = input_ddf[ddf_columns]
if edge_attr is not None:
input_ddf = input_ddf.rename(columns={edge_attr: 'value'})
#
# Keep all of the original parameters so we can lazily
# evaluate this function
#
# FIXME: Edge Attribute not handled
self.properties.renumbered = renumber
self.input_df = input_ddf
self.source_columns = source
self.destination_columns = destination
def view_edge_list(self):
"""
Display the edge list. Compute it if needed.
NOTE: If the graph is of type Graph() then the displayed undirected
edges are the same as displayed by networkx Graph(), but the direction
could be different i.e. an edge displayed by cugraph as (src, dst)
could be displayed as (dst, src) by networkx.
cugraph.Graph stores symmetrized edgelist internally. For displaying
        undirected edgelist for a Graph the upper triangular matrix of the
        symmetrized edgelist is returned.
        networkx.Graph renumbers the input and stores the upper triangle of
        this renumbered input. Since the internal renumbering of networkx and
        cugraph is different, the upper triangular matrix of networkx
        renumbered input may not be the same as cugraph's upper triangular
matrix of the symmetrized edgelist. Hence the displayed source and
destination pairs in both will represent the same edge but node values
could be swapped.
Returns
-------
df : cudf.DataFrame
This cudf.DataFrame wraps source, destination and weight
df[src] : cudf.Series
contains the source index for each edge
df[dst] : cudf.Series
contains the destination index for each edge
        df[weight] : cudf.Series
            Column is only present for a weighted Graph, and contains the
            weight value for each edge
"""
if self.edgelist is None:
raise Exception("Graph has no Edgelist.")
return self.edgelist.edgelist_df
def delete_edge_list(self):
"""
Delete the edge list.
"""
# decrease reference count to free memory if the referenced objects are
# no longer used.
self.edgelist = None
def clear(self):
"""
Empty this graph. This function is added for NetworkX compatibility.
"""
self.edgelist = None
def number_of_vertices(self):
"""
Get the number of nodes in the graph.
"""
if self.properties.node_count is None:
if self.edgelist is not None:
ddf = self.edgelist.edgelist_df[["src", "dst"]]
self.properties.node_count = ddf.max().max().compute() + 1
else:
raise Exception("Graph is Empty")
return self.properties.node_count
def number_of_nodes(self):
"""
An alias of number_of_vertices(). This function is added for NetworkX
compatibility.
"""
return self.number_of_vertices()
def number_of_edges(self, directed_edges=False):
"""
Get the number of edges in the graph.
"""
if self.edgelist is not None:
return len(self.edgelist.edgelist_df)
else:
raise Exception("Graph is Empty")
def in_degree(self, vertex_subset=None):
"""
Compute vertex in-degree. Vertex in-degree is the number of edges
pointing into the vertex. By default, this method computes vertex
degrees for the entire set of vertices. If vertex_subset is provided,
this method optionally filters out all but those listed in
vertex_subset.
Parameters
----------
vertex_subset : cudf.Series or iterable container, optional
A container of vertices for displaying corresponding in-degree.
If not set, degrees are computed for the entire set of vertices.
Returns
-------
df : cudf.DataFrame
GPU DataFrame of size N (the default) or the size of the given
vertices (vertex_subset) containing the in_degree. The ordering is
relative to the adjacency list, or that given by the specified
vertex_subset.
df[vertex] : cudf.Series
The vertex IDs (will be identical to vertex_subset if
specified).
df[degree] : cudf.Series
The computed in-degree of the corresponding vertex.
Examples
--------
>>> M = cudf.read_csv('datasets/karate.csv', delimiter=' ',
>>> dtype=['int32', 'int32', 'float32'], header=None)
>>> G = cugraph.Graph()
>>> G.from_cudf_edgelist(M, '0', '1')
>>> df = G.in_degree([0,9,12])
"""
return self._degree(vertex_subset, x=1)
def out_degree(self, vertex_subset=None):
"""
Compute vertex out-degree. Vertex out-degree is the number of edges
pointing out from the vertex. By default, this method computes vertex
degrees for the entire set of vertices. If vertex_subset is provided,
this method optionally filters out all but those listed in
vertex_subset.
Parameters
----------
vertex_subset : cudf.Series or iterable container, optional
A container of vertices for displaying corresponding out-degree.
If not set, degrees are computed for the entire set of vertices.
Returns
-------
df : cudf.DataFrame
GPU DataFrame of size N (the default) or the size of the given
vertices (vertex_subset) containing the out_degree. The ordering is
relative to the adjacency list, or that given by the specified
vertex_subset.
df[vertex] : cudf.Series
The vertex IDs (will be identical to vertex_subset if
specified).
df[degree] : cudf.Series
The computed out-degree of the corresponding vertex.
Examples
--------
>>> M = cudf.read_csv('datasets/karate.csv', delimiter=' ',
>>> dtype=['int32', 'int32', 'float32'], header=None)
>>> G = cugraph.Graph()
>>> G.from_cudf_edgelist(M, '0', '1')
>>> df = G.out_degree([0,9,12])
"""
# TODO: Add support
raise Exception("Not supported for distributed graph")
def degree(self, vertex_subset=None):
"""
Compute vertex degree, which is the total number of edges incident
to a vertex (both in and out edges). By default, this method computes
degrees for the entire set of vertices. If vertex_subset is provided,
then this method optionally filters out all but those listed in
vertex_subset.
Parameters
----------
vertex_subset : cudf.Series or iterable container, optional
a container of vertices for displaying corresponding degree. If not
set, degrees are computed for the entire set of vertices.
Returns
-------
df : cudf.DataFrame
GPU DataFrame of size N (the default) or the size of the given
vertices (vertex_subset) containing the degree. The ordering is
relative to the adjacency list, or that given by the specified
vertex_subset.
df['vertex'] : cudf.Series
The vertex IDs (will be identical to vertex_subset if
specified).
df['degree'] : cudf.Series
The computed degree of the corresponding vertex.
Examples
--------
>>> M = cudf.read_csv('datasets/karate.csv', delimiter=' ',
>>> dtype=['int32', 'int32', 'float32'], header=None)
>>> G = cugraph.Graph()
>>> G.from_cudf_edgelist(M, '0', '1')
>>> all_df = G.degree()
>>> subset_df = G.degree([0,9,12])
"""
raise Exception("Not supported for distributed graph")
# FIXME: vertex_subset could be a DataFrame for multi-column vertices
def degrees(self, vertex_subset=None):
"""
Compute vertex in-degree and out-degree. By default, this method
computes vertex degrees for the entire set of vertices. If
vertex_subset is provided, this method optionally filters out all but
those listed in vertex_subset.
Parameters
----------
vertex_subset : cudf.Series or iterable container, optional
A container of vertices for displaying corresponding degree. If not
set, degrees are computed for the entire set of vertices.
Returns
-------
df : cudf.DataFrame
GPU DataFrame of size N (the default) or the size of the given
vertices (vertex_subset) containing the degrees. The ordering is
relative to the adjacency list, or that given by the specified
vertex_subset.
df['vertex'] : cudf.Series
The vertex IDs (will be identical to vertex_subset if
specified).
df['in_degree'] : cudf.Series
The in-degree of the vertex.
df['out_degree'] : cudf.Series
The out-degree of the vertex.
Examples
--------
>>> M = cudf.read_csv('datasets/karate.csv', delimiter=' ',
>>> dtype=['int32', 'int32', 'float32'], header=None)
>>> G = cugraph.Graph()
>>> G.from_cudf_edgelist(M, '0', '1')
>>> df = G.degrees([0,9,12])
"""
raise Exception("Not supported for distributed graph")
def _degree(self, vertex_subset, x=0):
vertex_col, degree_col = graph_primtypes_wrapper._degree(self, x)
df = cudf.DataFrame()
df["vertex"] = vertex_col
df["degree"] = degree_col
        if self.properties.renumbered is True:
df = self.unrenumber(df, "vertex")
if vertex_subset is not None:
df = df[df['vertex'].isin(vertex_subset)]
return df
def to_directed(self, DiG):
"""
Return a directed representation of the graph.
This function sets the type of graph as DiGraph() and returns the
directed view.
Returns
-------
G : DiGraph
A directed graph with the same nodes, and each edge (u,v,weights)
replaced by two directed edges (u,v,weights) and (v,u,weights).
Examples
--------
>>> M = cudf.read_csv('datasets/karate.csv', delimiter=' ',
>>> dtype=['int32', 'int32', 'float32'], header=None)
>>> G = cugraph.Graph()
>>> G.from_cudf_edgelist(M, '0', '1')
>>> DiG = G.to_directed()
"""
# TODO: Add support
raise Exception("Not supported for distributed graph")
def to_undirected(self, G):
"""
Return an undirected copy of the graph.
Returns
-------
G : Graph
A undirected graph with the same nodes, and each directed edge
(u,v,weights) replaced by an undirected edge (u,v,weights).
Examples
--------
>>> M = cudf.read_csv('datasets/karate.csv', delimiter=' ',
>>> dtype=['int32', 'int32', 'float32'], header=None)
>>> DiG = cugraph.DiGraph()
>>> DiG.from_cudf_edgelist(M, '0', '1')
>>> G = DiG.to_undirected()
"""
# TODO: Add support
raise Exception("Not supported for distributed graph")
def has_node(self, n):
"""
Returns True if the graph contains the node n.
"""
if self.edgelist is None:
raise Exception("Graph has no Edgelist.")
# FIXME: Check renumber map
ddf = self.edgelist.edgelist_df[["src", "dst"]]
return (ddf == n).any().any().compute()
def has_edge(self, u, v):
"""
Returns True if the graph contains the edge (u,v).
"""
# TODO: Verify Correctness
if self.properties.renumbered:
tmp = cudf.DataFrame({"src": [u, v]})
tmp = tmp.astype({"src": "int"})
tmp = self.add_internal_vertex_id(
tmp, "id", "src", preserve_order=True
)
u = tmp["id"][0]
v = tmp["id"][1]
df = self.edgelist.edgelist_df
return ((df["src"] == u) & (df["dst"] == v)).any().compute()
def edges(self):
"""
Returns all the edges in the graph as a cudf.DataFrame containing
sources and destinations. It does not return the edge weights.
For viewing edges with weights use view_edge_list()
"""
return self.view_edge_list()[["src", "dst"]]
def nodes(self):
"""
Returns all the nodes in the graph as a cudf.Series
"""
# FIXME: Return renumber map nodes
raise Exception("Not supported for distributed graph")
def neighbors(self, n):
if self.edgelist is None:
raise Exception("Graph has no Edgelist.")
# FIXME: Add renumbering of node n
ddf = self.edgelist.edgelist_df
return ddf[ddf["src"] == n]["dst"].reset_index(drop=True)
def compute_renumber_edge_list(self, transposed=False):
"""
Compute a renumbered edge list
This function works in the MNMG pipeline and will transform
the input dask_cudf.DataFrame into a renumbered edge list
in the prescribed direction.
This function will be called by the algorithms to ensure
that the graph is renumbered properly. The graph object will
cache the most recent renumbering attempt. For benchmarking
purposes, this function can be called prior to calling a
graph algorithm so we can measure the cost of computing
the renumbering separately from the cost of executing the
algorithm.
When creating a CSR-like structure, set transposed to False.
When creating a CSC-like structure, set transposed to True.
Parameters
----------
transposed : (optional) bool
If True, renumber with the intent to make a CSC-like
structure. If False, renumber with the intent to make
a CSR-like structure. Defaults to False.
"""
# FIXME: What to do about edge_attr???
# currently ignored for MNMG
if not self.properties.renumbered:
self.edgelist = self.EdgeList(self.input_df)
self.renumber_map = None
else:
if self.edgelist is not None:
if self.properties.directed is False:
return
if self.properties.store_transposed == transposed:
return
del self.edgelist
renumbered_ddf, number_map = NumberMap.renumber(
self.input_df,
self.source_columns,
self.destination_columns,
store_transposed=transposed,
)
self.edgelist = self.EdgeList(renumbered_ddf)
self.renumber_map = number_map
self.properties.store_transposed = transposed
|
python
|
#!/usr/bin/env python
import os
import re
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def read(*names, **kwargs):
with open(os.path.join(os.path.dirname(__file__), *names), 'r') as fp:
return fp.read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
classifiers = [
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Topic :: Software Development"
]
readme = os.path.join(os.path.dirname(__file__), 'README.md')
try:
import pypandoc
long_description = pypandoc.convert(readme, 'rst')
except ImportError:
long_description = open(readme).read()
setup(
name='berryditos',
version=find_version('berryditos', 'base.py'),
description='Berryditos is a tool that can edit and burn Raspbian images.',
long_description=long_description,
keywords=['raspberry','raspbian'],
author='tovam',
author_email='[email protected]',
url='https://github.com/tovam/berryditos',
license='MIT',
classifiers=classifiers,
install_requires=['requests'],
packages=['berryditos'],
scripts=['bin/berryditos'],
zip_safe=True
)
|
python
|
import bcrypt
from app.database import BaseMixin, db
from app.serializer import ma
class User(db.Model, BaseMixin):
__tablename__ = 'user_table'
userID = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String, nullable=False)
_password = db.Column(db.Binary(60))
email = db.Column(db.String, nullable=False)
guest = db.Column(db.Boolean, default=False)
user = db.Column(db.Boolean, default=True)
admin = db.Column(db.Boolean, default=False)
def __init__(self, username, password, email):
self.username = username
        self._password = self.hash_pw(password)  # bcrypt.hashpw already returns bytes
self.email = email
    def check_pw(self, hashed_pw, password):
        return bcrypt.checkpw(password.encode('utf-8'), hashed_pw)
    def hash_pw(self, password):
        return bcrypt.hashpw(password.encode('utf-8'), bcrypt.gensalt(12))
@classmethod
def find_by_username(cls, username):
return cls.query.filter_by(username=username).first()
class UserSchema(ma.ModelSchema):
class Meta:
model = User
fields = (
"id",
"username",
"email",
"guest",
"user",
"admin"
)
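# --- Usage sketch (added for illustration; bypasses the database layer) ---
# Demonstrates the bcrypt hash/check round trip the model relies on: bcrypt
# expects bytes for both the candidate password and the stored hash.
def _bcrypt_demo():
    hashed = bcrypt.hashpw("s3cret".encode("utf-8"), bcrypt.gensalt(12))
    assert bcrypt.checkpw("s3cret".encode("utf-8"), hashed)
    assert not bcrypt.checkpw("wrong".encode("utf-8"), hashed)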
|
python
|
import boto3
from .. import config
# Module API
def change_acl_on_s3(bucket, acl, path='', endpoint_url=None):
def func(package):
# Prepare client
s3_url = endpoint_url or config.S3_ENDPOINT_URL
s3_client = boto3.client('s3', endpoint_url=s3_url)
# Change ACL
# list_objects returns max 1000 keys (even if MaxKeys is >1000)
marker = ''
is_truncated = True
while is_truncated:
objs = s3_client.list_objects(Bucket=bucket, Prefix=path, Marker=marker)
is_truncated = objs.get('IsTruncated')
for obj in objs.get('Contents', []):
s3_client.put_object_acl(Bucket=bucket, Key=obj['Key'], ACL=acl)
marker = obj['Key']
# Return to flow
yield package.pkg
yield from package
return func
|
python
|
# Description: Count number of *.log files in current directory.
# Source: placeHolder
"""
cmd.do('print("Count the number of log image files in current directory.");')
cmd.do('print("Usage: cntlogs");')
cmd.do('myPath = os.getcwd();')
cmd.do('logCounter = len(glob.glob1(myPath,"*.log"));')
cmd.do('print("Number of number of log image files in the current directory: ", logCounter);')
"""
cmd.do('print("Count the number of log image files in current directory.");')
cmd.do('print("Usage: cntlogs");')
cmd.do('myPath = os.getcwd();')
cmd.do('logCounter = len(glob.glob1(myPath,"*.log"));')
cmd.do('print("Number of number of log image files in the current directory: ", logCounter);')
|
python
|
"""
A terminal based ray-casting engine.
'esc' to exit
't' to turn off textures
'wasdqe' or arrow-keys to move
'space' to jump
Depending on your terminal font, Renderer.ascii_map may need to be adjusted.
If you'd like to make an ascii map more suitable to your terminal's font,
check my Snippets repository for a script that grabs mean brightness of
unicode characters.
Values stored in textures should range from 0-9. Values below 6 are
subtractive and above 6 are additive.
"""
import curses
from .map_loader import Map
from .player import Player
from .renderer import Renderer
from .controller import Controller
def init_curses(screen):
curses.curs_set(0)
curses.init_pair(1, curses.COLOR_GREEN, curses.COLOR_BLACK)
screen.attron(curses.color_pair(1))
screen.nodelay(True)
@curses.wrapper
def main(screen):
init_curses(screen)
game_map = Map("map_1")
player = Player(game_map)
wall_textures = "wall_1", "wall_2"
sprite_textures = "dragon", "tree"
Controller(Renderer(screen, player, wall_textures, sprite_textures)).start()
curses.flushinp()
curses.endwin()
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: edwardahn
Evaluate a policy and publish metrics.
"""
import argparse
import cProfile
import pstats
import sys
import joblib
import matplotlib.pyplot as plt
import numpy as np
from rllab.sampler.utils import rollout
def profile_code(profiler):
"""
Use cProfile to profile code, listing functions with most
cumulative time spent.
"""
print('\n')
ps = pstats.Stats(profiler).strip_dirs().sort_stats('cumulative')
ps.print_stats(10)
def plot_curve(error, name, units):
"""
Plot error over time.
"""
title = '%s Error over Time in Final Policy' % name
plt.figure()
t = np.arange(error.size)
plt.plot(t, error)
plt.title(title)
plt.xlabel('Time steps')
plt.ylabel('Error (%s)' % units)
if name == 'Distance':
plt.gca().set_ylim((-0.01, 0.01))
else:
plt.gca().set_ylim((-0.7, 0.7))
plt.show()
def plot_distribution(error, name, units):
"""
Plot histogram showing distribution of error.
"""
mean = error.mean()
std = error.std()
maximum = error.max()
minimum = error.min()
stats = 'Mean = %.5f\nStd = %.5f\nMax = %.5f\nMin = %.5f' % \
(mean, std, maximum, minimum)
title = 'Distribution of %s Errors in Final Policy' % name
plt.figure()
plt.hist(error)
plt.title(title)
plt.xlabel('Error (%s)' % units)
plt.ylabel('Number of Time Steps')
plt.axvline(mean, color='k', linestyle='dashed', linewidth=1)
plt.axvline(mean+std, color='r', linestyle='dashed', linewidth=1)
plt.axvline(mean-std, color='r', linestyle='dashed', linewidth=1)
plt.text(0.87, 0.9, stats, ha='center', va='center',
transform=plt.gca().transAxes)
plt.show()
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('file', type=str,
help='path to the snapshot file')
parser.add_argument('--max_path_length', type=int, default=100,
help='Max length of rollout')
parser.add_argument('--speedup', type=float, default=100000,
help='Speedup')
parser.add_argument('--render', dest='render',
action='store_true', help='Rendering')
parser.add_argument('--no-render', dest='render',
action='store_false', help='Rendering')
parser.set_defaults(render=False)
args = parser.parse_args()
return args
def main():
args = parse_arguments()
profiler = cProfile.Profile()
data = joblib.load(args.file)
policy = data['policy']
env = data['env']
plt.ion()
# Set fixed random seed
np.random.seed(9)
# Sample one rollout
profiler.enable()
path = rollout(env, policy, max_path_length=args.max_path_length,
animated=args.render, speedup=args.speedup,
always_return_paths=True)
profiler.disable()
# Policy analysis
profile_code(profiler)
plot_curve(path['env_infos']['dist'], 'Distance', 'm')
plot_curve(path['env_infos']['vel'], 'Velocity', 'm/s')
plot_distribution(path['env_infos']['dist'], 'Distance', 'm')
plot_distribution(path['env_infos']['vel'], 'Velocity', 'm/s')
# Block until key is pressed
sys.stdout.write("Press <enter> to continue: ")
input()
if __name__ == "__main__":
main()
|
python
|
#!/usr/bin/env python3
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import tensorflow as tf
from tensorflow.contrib import antares
if tf.version.VERSION.startswith('2.'):
tf = tf.compat.v1
tf.disable_eager_execution()
from _common import *
x = create_variable([64, 224, 224, 3], dtype=tf.float32)
compare_ops(
tf.transpose(x, [0, 3, 1, 2]),
antares.make_op('output0[N, C, H, W] = input0[N, H, W, C]', [x]),
)
|