from django.shortcuts import render
# Create your views here.
def about_view(request):
return render(request, 'about/about.html')
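# A minimal routing sketch for this view (assuming a conventional Django
# URLconf; the module location and route name below are hypothetical):
#
#     from django.urls import path
#     from .views import about_view
#
#     urlpatterns = [
#         path('about/', about_view, name='about'),
#     ]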
|
python
|
# -*- coding: utf-8 -*-
"""
v13 model
* Input: v12_im
Author: Kohei <[email protected]>
"""
from logging import getLogger, Formatter, StreamHandler, INFO, FileHandler
from pathlib import Path
import subprocess
import glob
import math
import sys
import json
import re
import warnings
import scipy
import tqdm
import click
import tables as tb
import pandas as pd
import numpy as np
from keras.models import Model
from keras.engine.topology import merge as merge_l
from keras.layers import (
Input, Convolution2D, MaxPooling2D, UpSampling2D,
Reshape, core, Dropout,
Activation, BatchNormalization)
from keras.optimizers import Adam, SGD
from keras.callbacks import ModelCheckpoint, EarlyStopping, History
from keras import backend as K
import skimage.draw
import rasterio
import rasterio.features
import shapely.wkt
import shapely.ops
import shapely.geometry
MODEL_NAME = 'v13'
ORIGINAL_SIZE = 650
INPUT_SIZE = 256
STRIDE_SZ = 197
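# Tiling note: each 650x650 scene is covered by a 3x3 grid of 256x256 slices
# with stride 197 (2 * 197 + 256 = 650). slice_pos in range(9) maps to
# (pos_i, pos_j) = (slice_pos % 3, slice_pos // 3), and a slice origin is
# (STRIDE_SZ * pos_i, STRIDE_SZ * pos_j).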
BASE_DIR = "/data/train"
BASE_TEST_DIR = "/data/test"
WORKING_DIR = "/data/working"
IMAGE_DIR = "/data/working/images/{}".format('v12')
V5_IMAGE_DIR = "/data/working/images/{}".format('v5')
# ---------------------------------------------------------
# Parameters
MIN_POLYGON_AREA = 30 # 30
# ---------------------------------------------------------
# Input files
FMT_TRAIN_SUMMARY_PATH = str(
Path(BASE_DIR) /
Path("{prefix:s}_Train/") /
Path("summaryData/{prefix:s}_Train_Building_Solutions.csv"))
FMT_TRAIN_RGB_IMAGE_PATH = str(
Path(BASE_DIR) /
Path("{prefix:s}_Train/") /
Path("RGB-PanSharpen/RGB-PanSharpen_{image_id:s}.tif"))
FMT_TEST_RGB_IMAGE_PATH = str(
Path(BASE_TEST_DIR) /
Path("{prefix:s}_Test/") /
Path("RGB-PanSharpen/RGB-PanSharpen_{image_id:s}.tif"))
FMT_TRAIN_MSPEC_IMAGE_PATH = str(
Path(BASE_DIR) /
Path("{prefix:s}_Train/") /
Path("MUL-PanSharpen/MUL-PanSharpen_{image_id:s}.tif"))
FMT_TEST_MSPEC_IMAGE_PATH = str(
Path(BASE_TEST_DIR) /
Path("{prefix:s}_Test/") /
Path("MUL-PanSharpen/MUL-PanSharpen_{image_id:s}.tif"))
# ---------------------------------------------------------
# Preprocessing result
FMT_RGB_BANDCUT_TH_PATH = IMAGE_DIR + "/rgb_bandcut.csv"
FMT_MUL_BANDCUT_TH_PATH = IMAGE_DIR + "/mul_bandcut.csv"
# ---------------------------------------------------------
# Image list, Image container and mask container
FMT_VALTRAIN_IMAGELIST_PATH = V5_IMAGE_DIR + "/{prefix:s}_valtrain_ImageId.csv"
FMT_VALTEST_IMAGELIST_PATH = V5_IMAGE_DIR + "/{prefix:s}_valtest_ImageId.csv"
FMT_VALTRAIN_IM_STORE = IMAGE_DIR + "/valtrain_{}_im.h5"
FMT_VALTEST_IM_STORE = IMAGE_DIR + "/valtest_{}_im.h5"
FMT_VALTRAIN_MASK_STORE = IMAGE_DIR + "/valtrain_{}_mask.h5"
FMT_VALTEST_MASK_STORE = IMAGE_DIR + "/valtest_{}_mask.h5"
FMT_VALTRAIN_MUL_STORE = IMAGE_DIR + "/valtrain_{}_mul.h5"
FMT_VALTEST_MUL_STORE = IMAGE_DIR + "/valtest_{}_mul.h5"
FMT_TRAIN_IMAGELIST_PATH = V5_IMAGE_DIR + "/{prefix:s}_train_ImageId.csv"
FMT_TEST_IMAGELIST_PATH = V5_IMAGE_DIR + "/{prefix:s}_test_ImageId.csv"
FMT_TRAIN_IM_STORE = IMAGE_DIR + "/train_{}_im.h5"
FMT_TEST_IM_STORE = IMAGE_DIR + "/test_{}_im.h5"
FMT_TRAIN_MASK_STORE = IMAGE_DIR + "/train_{}_mask.h5"
FMT_TRAIN_MUL_STORE = IMAGE_DIR + "/train_{}_mul.h5"
FMT_TEST_MUL_STORE = IMAGE_DIR + "/test_{}_mul.h5"
FMT_MULMEAN = IMAGE_DIR + "/{}_mulmean.h5"
# Assumed counterpart for the RGB mean image (HDF5 node '/immean'); referenced by get_mean_image()
FMT_IMMEAN = IMAGE_DIR + "/{}_immean.h5"
# ---------------------------------------------------------
# Model files
MODEL_DIR = "/data/working/models/{}".format(MODEL_NAME)
FMT_VALMODEL_PATH = MODEL_DIR + "/{}_val_weights.h5"
FMT_FULLMODEL_PATH = MODEL_DIR + "/{}_full_weights.h5"
FMT_VALMODEL_HIST = MODEL_DIR + "/{}_val_hist.csv"
FMT_VALMODEL_EVALHIST = MODEL_DIR + "/{}_val_evalhist.csv"
FMT_VALMODEL_EVALTHHIST = MODEL_DIR + "/{}_val_evalhist_th.csv"
# ---------------------------------------------------------
# Prediction & polygon result
FMT_TESTPRED_PATH = MODEL_DIR + "/{}_pred.h5"
FMT_VALTESTPRED_PATH = MODEL_DIR + "/{}_eval_pred.h5"
FMT_VALTESTPOLY_PATH = MODEL_DIR + "/{}_eval_poly.csv"
FMT_VALTESTTRUTH_PATH = MODEL_DIR + "/{}_eval_poly_truth.csv"
FMT_VALTESTPOLY_OVALL_PATH = MODEL_DIR + "/eval_poly.csv"
FMT_VALTESTTRUTH_OVALL_PATH = MODEL_DIR + "/eval_poly_truth.csv"
FMT_TESTPOLY_PATH = MODEL_DIR + "/{}_poly.csv"
FN_SOLUTION_CSV = "data/output/{}.csv".format(MODEL_NAME)
# ---------------------------------------------------------
# Model related files (others)
FMT_VALMODEL_LAST_PATH = MODEL_DIR + "/{}_val_weights_last.h5"
FMT_FULLMODEL_LAST_PATH = MODEL_DIR + "/{}_full_weights_last.h5"
# ---------------------------------------------------------
# warnings and logging
warnings.simplefilter("ignore", UserWarning)
handler = StreamHandler()
handler.setLevel(INFO)
handler.setFormatter(Formatter('%(asctime)s %(levelname)s %(message)s'))
fh_handler = FileHandler(".{}.log".format(MODEL_NAME))
fh_handler.setFormatter(Formatter('%(asctime)s %(levelname)s %(message)s'))
logger = getLogger(__name__)
logger.setLevel(INFO)
if __name__ == '__main__':
logger.addHandler(handler)
logger.addHandler(fh_handler)
# Fix seed for reproducibility
np.random.seed(1145141919)
def directory_name_to_area_id(datapath):
"""
Directory name to AOI number
Usage:
>>> directory_name_to_area_id("/data/test/AOI_2_Vegas")
2
"""
dir_name = Path(datapath).name
if dir_name.startswith('AOI_2_Vegas'):
return 2
elif dir_name.startswith('AOI_3_Paris'):
return 3
elif dir_name.startswith('AOI_4_Shanghai'):
return 4
elif dir_name.startswith('AOI_5_Khartoum'):
return 5
else:
raise RuntimeError("Unsupported city id is given.")
def _remove_interiors(line):
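    """Keep only the exterior ring of the WKT polygon in a solution CSV line,
    dropping any interior rings (holes)."""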
if "), (" in line:
line_prefix = line.split('), (')[0]
line_terminate = line.split('))",')[-1]
line = (
line_prefix +
'))",' +
line_terminate
)
return line
def _calc_fscore_per_aoi(area_id):
prefix = area_id_to_prefix(area_id)
truth_file = FMT_VALTESTTRUTH_PATH.format(prefix)
poly_file = FMT_VALTESTPOLY_PATH.format(prefix)
cmd = [
'java',
'-jar',
'/root/visualizer-2.0/visualizer.jar',
'-truth',
truth_file,
'-solution',
poly_file,
'-no-gui',
'-band-triplets',
'/root/visualizer-2.0/data/band-triplets.txt',
'-image-dir',
'pass',
]
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout_data, stderr_data = proc.communicate()
    lines = stdout_data.decode('utf8').split('\n')[-10:]
"""
Overall F-score : 0.85029
AOI_2_Vegas:
TP : 27827
FP : 4999
FN : 4800
Precision: 0.847712
Recall : 0.852883
F-score : 0.85029
"""
if stdout_data.decode('utf8').strip().endswith("Overall F-score : 0"):
overall_fscore = 0
tp = 0
fp = 0
fn = 0
precision = 0
recall = 0
fscore = 0
elif len(lines) > 0 and lines[0].startswith("Overall F-score : "):
assert lines[0].startswith("Overall F-score : ")
assert lines[2].startswith("AOI_")
assert lines[3].strip().startswith("TP")
assert lines[4].strip().startswith("FP")
assert lines[5].strip().startswith("FN")
assert lines[6].strip().startswith("Precision")
assert lines[7].strip().startswith("Recall")
assert lines[8].strip().startswith("F-score")
        overall_fscore = float(re.findall(r"([\d\.]+)", lines[0])[0])
        tp = int(re.findall(r"(\d+)", lines[3])[0])
        fp = int(re.findall(r"(\d+)", lines[4])[0])
        fn = int(re.findall(r"(\d+)", lines[5])[0])
        precision = float(re.findall(r"([\d\.]+)", lines[6])[0])
        recall = float(re.findall(r"([\d\.]+)", lines[7])[0])
        fscore = float(re.findall(r"([\d\.]+)", lines[8])[0])
else:
        logger.warning("Unexpected data >>> " + stdout_data.decode('utf8'))
raise RuntimeError("Unsupported format")
return {
'overall_fscore': overall_fscore,
'tp': tp,
'fp': fp,
'fn': fn,
'precision': precision,
'recall': recall,
'fscore': fscore,
}
def prefix_to_area_id(prefix):
area_dict = {
'AOI_1_Rio': 1,
'AOI_2_Vegas': 2,
'AOI_3_Paris': 3,
'AOI_4_Shanghai': 4,
'AOI_5_Khartoum': 5,
}
    return area_dict[prefix]
def area_id_to_prefix(area_id):
"""
    Return the AOI prefix (e.g. 'AOI_2_Vegas') for the given area_id.
"""
area_dict = {
1: 'AOI_1_Rio',
2: 'AOI_2_Vegas',
3: 'AOI_3_Paris',
4: 'AOI_4_Shanghai',
5: 'AOI_5_Khartoum',
}
return area_dict[area_id]
# ---------------------------------------------------------
# main
def _get_model_parameter(area_id):
prefix = area_id_to_prefix(area_id)
fn_hist = FMT_VALMODEL_EVALTHHIST.format(prefix)
best_row = pd.read_csv(fn_hist).sort_values(
by='fscore',
ascending=False,
).iloc[0]
param = dict(
fn_epoch=int(best_row['zero_base_epoch']),
min_poly_area=int(best_row['min_area_th']),
)
return param
def _internal_test_predict_best_param(area_id,
save_pred=True):
prefix = area_id_to_prefix(area_id)
param = _get_model_parameter(area_id)
epoch = param['fn_epoch']
min_th = param['min_poly_area']
# Prediction phase
logger.info("Prediction phase: {}".format(prefix))
X_mean = get_mul_mean_image(area_id)
# Load model weights
# Predict and Save prediction result
fn = FMT_TESTPRED_PATH.format(prefix)
fn_model = FMT_VALMODEL_PATH.format(prefix + '_{epoch:02d}')
fn_model = fn_model.format(epoch=epoch)
model = get_unet()
model.load_weights(fn_model)
fn_test = FMT_TEST_IMAGELIST_PATH.format(prefix=prefix)
df_test = pd.read_csv(fn_test, index_col='ImageId')
y_pred = model.predict_generator(
generate_test_batch(
area_id,
batch_size=64,
immean=X_mean,
enable_tqdm=True,
),
val_samples=len(df_test) * 9,
)
del model
# Save prediction result
if save_pred:
with tb.open_file(fn, 'w') as f:
atom = tb.Atom.from_dtype(y_pred.dtype)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root, 'pred', atom, y_pred.shape,
filters=filters)
ds[:] = y_pred
return y_pred
def _internal_test(area_id):
    prefix = area_id_to_prefix(area_id)
    param = _get_model_parameter(area_id)
    min_th = param['min_poly_area']
    # Save the prediction so it can be re-read from FMT_TESTPRED_PATH below
    y_pred = _internal_test_predict_best_param(area_id, save_pred=True)
# Postprocessing phase
logger.info("Postprocessing phase")
# if not Path(FMT_VALTESTPOLY_PATH.format(prefix)).exists():
fn_test = FMT_TEST_IMAGELIST_PATH.format(prefix=prefix)
df_test = pd.read_csv(fn_test, index_col='ImageId')
fn = FMT_TESTPRED_PATH.format(prefix)
with tb.open_file(fn, 'r') as f:
y_pred = np.array(f.get_node('/pred'))
fn_out = FMT_TESTPOLY_PATH.format(prefix)
with open(fn_out, 'w') as f:
f.write("ImageId,BuildingId,PolygonWKT_Pix,Confidence\n")
for idx, image_id in enumerate(df_test.index.tolist()):
pred_values = np.zeros((650, 650))
pred_count = np.zeros((650, 650))
for slice_pos in range(9):
slice_idx = idx * 9 + slice_pos
pos_j = int(math.floor(slice_pos / 3.0))
pos_i = int(slice_pos % 3)
x0 = STRIDE_SZ * pos_i
y0 = STRIDE_SZ * pos_j
pred_values[x0:x0+INPUT_SIZE, y0:y0+INPUT_SIZE] += (
y_pred[slice_idx][0]
)
pred_count[x0:x0+INPUT_SIZE, y0:y0+INPUT_SIZE] += 1
pred_values = pred_values / pred_count
df_poly = mask_to_poly(pred_values, min_polygon_area_th=min_th)
if len(df_poly) > 0:
for i, row in df_poly.iterrows():
line = "{},{},\"{}\",{:.6f}\n".format(
image_id,
row.bid,
row.wkt,
row.area_ratio)
line = _remove_interiors(line)
f.write(line)
else:
f.write("{},{},{},0\n".format(
image_id,
-1,
"POLYGON EMPTY"))
def _internal_validate_predict_best_param(area_id,
enable_tqdm=False):
"""
best param で valtest の prediction proba を return する
y_pred は保存しない
(used from ensemble model)
"""
param = _get_model_parameter(area_id)
epoch = param['fn_epoch']
y_pred = _internal_validate_predict(
area_id,
epoch=epoch,
save_pred=False,
enable_tqdm=enable_tqdm)
return y_pred
def _internal_validate_predict(area_id,
epoch=3,
save_pred=True,
enable_tqdm=False):
prefix = area_id_to_prefix(area_id)
X_mean = get_mul_mean_image(area_id)
# Load model weights
# Predict and Save prediction result
fn_model = FMT_VALMODEL_PATH.format(prefix + '_{epoch:02d}')
fn_model = fn_model.format(epoch=epoch)
model = get_unet()
model.load_weights(fn_model)
fn_test = FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix)
df_test = pd.read_csv(fn_test, index_col='ImageId')
y_pred = model.predict_generator(
generate_valtest_batch(
area_id,
batch_size=64,
immean=X_mean,
enable_tqdm=enable_tqdm,
),
val_samples=len(df_test) * 9,
)
del model
# Save prediction result
if save_pred:
fn = FMT_VALTESTPRED_PATH.format(prefix)
with tb.open_file(fn, 'w') as f:
atom = tb.Atom.from_dtype(y_pred.dtype)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root,
'pred',
atom,
y_pred.shape,
filters=filters)
ds[:] = y_pred
return y_pred
def _internal_validate_fscore_wo_pred_file(area_id,
epoch=3,
min_th=MIN_POLYGON_AREA,
enable_tqdm=False):
prefix = area_id_to_prefix(area_id)
# Prediction phase
logger.info("Prediction phase")
y_pred = _internal_validate_predict(
area_id,
save_pred=False,
epoch=epoch,
enable_tqdm=enable_tqdm)
# Postprocessing phase
logger.info("Postprocessing phase")
fn_test = FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix)
df_test = pd.read_csv(fn_test, index_col='ImageId')
fn_out = FMT_VALTESTPOLY_PATH.format(prefix)
with open(fn_out, 'w') as f:
f.write("ImageId,BuildingId,PolygonWKT_Pix,Confidence\n")
test_list = df_test.index.tolist()
iterator = enumerate(test_list)
for idx, image_id in tqdm.tqdm(iterator, total=len(test_list)):
pred_values = np.zeros((650, 650))
pred_count = np.zeros((650, 650))
for slice_pos in range(9):
slice_idx = idx * 9 + slice_pos
pos_j = int(math.floor(slice_pos / 3.0))
pos_i = int(slice_pos % 3)
x0 = STRIDE_SZ * pos_i
y0 = STRIDE_SZ * pos_j
pred_values[x0:x0+INPUT_SIZE, y0:y0+INPUT_SIZE] += (
y_pred[slice_idx][0]
)
pred_count[x0:x0+INPUT_SIZE, y0:y0+INPUT_SIZE] += 1
pred_values = pred_values / pred_count
df_poly = mask_to_poly(pred_values, min_polygon_area_th=min_th)
if len(df_poly) > 0:
for i, row in df_poly.iterrows():
line = "{},{},\"{}\",{:.6f}\n".format(
image_id,
row.bid,
row.wkt,
row.area_ratio)
line = _remove_interiors(line)
f.write(line)
else:
f.write("{},{},{},0\n".format(
image_id,
-1,
"POLYGON EMPTY"))
# ------------------------
# Validation solution file
logger.info("Validation solution file")
fn_true = FMT_TRAIN_SUMMARY_PATH.format(prefix=prefix)
df_true = pd.read_csv(fn_true)
# # Remove prefix "PAN_"
# df_true.loc[:, 'ImageId'] = df_true.ImageId.str[4:]
fn_test = FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix)
df_test = pd.read_csv(fn_test)
df_test_image_ids = df_test.ImageId.unique()
fn_out = FMT_VALTESTTRUTH_PATH.format(prefix)
with open(fn_out, 'w') as f:
f.write("ImageId,BuildingId,PolygonWKT_Pix,Confidence\n")
df_true = df_true[df_true.ImageId.isin(df_test_image_ids)]
for idx, r in df_true.iterrows():
f.write("{},{},\"{}\",{:.6f}\n".format(
r.ImageId,
r.BuildingId,
r.PolygonWKT_Pix,
1.0))
def _internal_validate_fscore(area_id,
epoch=3,
predict=True,
min_th=MIN_POLYGON_AREA,
enable_tqdm=False):
prefix = area_id_to_prefix(area_id)
# Prediction phase
logger.info("Prediction phase")
if predict:
_internal_validate_predict(
area_id,
epoch=epoch,
enable_tqdm=enable_tqdm)
# Postprocessing phase
logger.info("Postprocessing phase")
fn_test = FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix)
df_test = pd.read_csv(fn_test, index_col='ImageId')
fn = FMT_VALTESTPRED_PATH.format(prefix)
with tb.open_file(fn, 'r') as f:
y_pred = np.array(f.get_node('/pred'))
fn_out = FMT_VALTESTPOLY_PATH.format(prefix)
with open(fn_out, 'w') as f:
f.write("ImageId,BuildingId,PolygonWKT_Pix,Confidence\n")
test_list = df_test.index.tolist()
iterator = enumerate(test_list)
for idx, image_id in tqdm.tqdm(iterator, total=len(test_list)):
pred_values = np.zeros((650, 650))
pred_count = np.zeros((650, 650))
for slice_pos in range(9):
slice_idx = idx * 9 + slice_pos
pos_j = int(math.floor(slice_pos / 3.0))
pos_i = int(slice_pos % 3)
x0 = STRIDE_SZ * pos_i
y0 = STRIDE_SZ * pos_j
pred_values[x0:x0+INPUT_SIZE, y0:y0+INPUT_SIZE] += (
y_pred[slice_idx][0]
)
pred_count[x0:x0+INPUT_SIZE, y0:y0+INPUT_SIZE] += 1
pred_values = pred_values / pred_count
df_poly = mask_to_poly(pred_values, min_polygon_area_th=min_th)
if len(df_poly) > 0:
for i, row in df_poly.iterrows():
line = "{},{},\"{}\",{:.6f}\n".format(
image_id,
row.bid,
row.wkt,
row.area_ratio)
line = _remove_interiors(line)
f.write(line)
else:
f.write("{},{},{},0\n".format(
image_id,
-1,
"POLYGON EMPTY"))
# ------------------------
# Validation solution file
logger.info("Validation solution file")
# if not Path(FMT_VALTESTTRUTH_PATH.format(prefix)).exists():
if True:
fn_true = FMT_TRAIN_SUMMARY_PATH.format(prefix=prefix)
df_true = pd.read_csv(fn_true)
# # Remove prefix "PAN_"
# df_true.loc[:, 'ImageId'] = df_true.ImageId.str[4:]
fn_test = FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix)
df_test = pd.read_csv(fn_test)
df_test_image_ids = df_test.ImageId.unique()
fn_out = FMT_VALTESTTRUTH_PATH.format(prefix)
with open(fn_out, 'w') as f:
f.write("ImageId,BuildingId,PolygonWKT_Pix,Confidence\n")
df_true = df_true[df_true.ImageId.isin(df_test_image_ids)]
for idx, r in df_true.iterrows():
f.write("{},{},\"{}\",{:.6f}\n".format(
r.ImageId,
r.BuildingId,
r.PolygonWKT_Pix,
1.0))
def mask_to_poly(mask, min_polygon_area_th=MIN_POLYGON_AREA):
mask = (mask > 0.5).astype(np.uint8)
shapes = rasterio.features.shapes(mask.astype(np.int16), mask > 0)
poly_list = []
mp = shapely.ops.cascaded_union(
shapely.geometry.MultiPolygon([
shapely.geometry.shape(shape)
for shape, value in shapes
]))
if isinstance(mp, shapely.geometry.Polygon):
df = pd.DataFrame({
'area_size': [mp.area],
'poly': [mp],
})
else:
df = pd.DataFrame({
'area_size': [p.area for p in mp],
'poly': [p for p in mp],
})
df = df[df.area_size > min_polygon_area_th].sort_values(
by='area_size', ascending=False)
df.loc[:, 'wkt'] = df.poly.apply(lambda x: shapely.wkt.dumps(
x, rounding_precision=0))
df.loc[:, 'bid'] = list(range(1, len(df) + 1))
df.loc[:, 'area_ratio'] = df.area_size / df.area_size.max()
return df
def jaccard_coef(y_true, y_pred):
smooth = 1e-12
intersection = K.sum(y_true * y_pred, axis=[0, -1, -2])
sum_ = K.sum(y_true + y_pred, axis=[0, -1, -2])
jac = (intersection + smooth) / (sum_ - intersection + smooth)
return K.mean(jac)
def jaccard_coef_int(y_true, y_pred):
smooth = 1e-12
y_pred_pos = K.round(K.clip(y_pred, 0, 1))
intersection = K.sum(y_true * y_pred_pos, axis=[0, -1, -2])
sum_ = K.sum(y_true + y_pred_pos, axis=[0, -1, -2])
jac = (intersection + smooth) / (sum_ - intersection + smooth)
return K.mean(jac)
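# Note: jaccard_coef_int binarizes the predictions (rounded to {0, 1}) before
# computing the intersection-over-union, so the 'val_jaccard_coef_int' metric
# monitored by the training callbacks later in this file measures the Jaccard
# index of the thresholded masks rather than of the raw probabilities.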
def generate_test_batch(area_id,
batch_size=64,
immean=None,
enable_tqdm=False):
prefix = area_id_to_prefix(area_id)
df_test = pd.read_csv(FMT_TEST_IMAGELIST_PATH.format(prefix=prefix))
fn_im = FMT_TEST_MUL_STORE.format(prefix)
slice_id_list = []
for idx, row in df_test.iterrows():
for slice_pos in range(9):
slice_id = row.ImageId + '_' + str(slice_pos)
slice_id_list.append(slice_id)
if enable_tqdm:
pbar = tqdm.tqdm(total=len(slice_id_list))
while 1:
total_sz = len(slice_id_list)
n_batch = int(math.floor(total_sz / batch_size) + 1)
with tb.open_file(fn_im, 'r') as f_im:
for i_batch in range(n_batch):
target_slice_ids = slice_id_list[
i_batch*batch_size:(i_batch+1)*batch_size
]
if len(target_slice_ids) == 0:
continue
X_test = []
y_test = []
for slice_id in target_slice_ids:
im = np.array(f_im.get_node('/' + slice_id))
im = np.swapaxes(im, 0, 2)
im = np.swapaxes(im, 1, 2)
X_test.append(im)
mask = np.zeros((INPUT_SIZE, INPUT_SIZE)).astype(np.uint8)
y_test.append(mask)
X_test = np.array(X_test)
y_test = np.array(y_test)
y_test = y_test.reshape((-1, 1, INPUT_SIZE, INPUT_SIZE))
if immean is not None:
X_test = X_test - immean
if enable_tqdm:
pbar.update(y_test.shape[0])
yield (X_test, y_test)
if enable_tqdm:
pbar.close()
def generate_valtest_batch(area_id,
batch_size=8,
immean=None,
enable_tqdm=False):
prefix = area_id_to_prefix(area_id)
df_train = pd.read_csv(FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix))
fn_im = FMT_VALTEST_MUL_STORE.format(prefix)
fn_mask = FMT_VALTEST_MASK_STORE.format(prefix)
slice_id_list = []
for idx, row in df_train.iterrows():
for slice_pos in range(9):
slice_id = row.ImageId + '_' + str(slice_pos)
slice_id_list.append(slice_id)
if enable_tqdm:
pbar = tqdm.tqdm(total=len(slice_id_list))
while 1:
total_sz = len(slice_id_list)
n_batch = int(math.floor(total_sz / batch_size) + 1)
with tb.open_file(fn_im, 'r') as f_im,\
tb.open_file(fn_mask, 'r') as f_mask:
for i_batch in range(n_batch):
target_slice_ids = slice_id_list[
i_batch*batch_size:(i_batch+1)*batch_size
]
if len(target_slice_ids) == 0:
continue
X_train = []
y_train = []
for slice_id in target_slice_ids:
im = np.array(f_im.get_node('/' + slice_id))
im = np.swapaxes(im, 0, 2)
im = np.swapaxes(im, 1, 2)
X_train.append(im)
mask = np.array(f_mask.get_node('/' + slice_id))
mask = (mask > 0).astype(np.uint8)
y_train.append(mask)
X_train = np.array(X_train)
y_train = np.array(y_train)
y_train = y_train.reshape((-1, 1, INPUT_SIZE, INPUT_SIZE))
if immean is not None:
X_train = X_train - immean
if enable_tqdm:
pbar.update(y_train.shape[0])
yield (X_train, y_train)
if enable_tqdm:
pbar.close()
def generate_valtrain_batch(area_id, batch_size=8, immean=None):
prefix = area_id_to_prefix(area_id)
df_train = pd.read_csv(FMT_VALTRAIN_IMAGELIST_PATH.format(prefix=prefix))
fn_im = FMT_VALTRAIN_MUL_STORE.format(prefix)
fn_mask = FMT_VALTRAIN_MASK_STORE.format(prefix)
slice_id_list = []
for idx, row in df_train.iterrows():
for slice_pos in range(9):
slice_id = row.ImageId + '_' + str(slice_pos)
slice_id_list.append(slice_id)
np.random.shuffle(slice_id_list)
while 1:
total_sz = len(slice_id_list)
n_batch = int(math.floor(total_sz / batch_size) + 1)
with tb.open_file(fn_im, 'r') as f_im,\
tb.open_file(fn_mask, 'r') as f_mask:
for i_batch in range(n_batch):
target_slice_ids = slice_id_list[
i_batch*batch_size:(i_batch+1)*batch_size
]
if len(target_slice_ids) == 0:
continue
X_train = []
y_train = []
for slice_id in target_slice_ids:
im = np.array(f_im.get_node('/' + slice_id))
im = np.swapaxes(im, 0, 2)
im = np.swapaxes(im, 1, 2)
X_train.append(im)
mask = np.array(f_mask.get_node('/' + slice_id))
mask = (mask > 0).astype(np.uint8)
y_train.append(mask)
X_train = np.array(X_train)
y_train = np.array(y_train)
y_train = y_train.reshape((-1, 1, INPUT_SIZE, INPUT_SIZE))
if immean is not None:
X_train = X_train - immean
yield (X_train, y_train)
def get_unet():
conv_params = dict(activation='relu', border_mode='same')
merge_params = dict(mode='concat', concat_axis=1)
inputs = Input((8, 256, 256))
conv1 = Convolution2D(32, 3, 3, **conv_params)(inputs)
conv1 = Convolution2D(32, 3, 3, **conv_params)(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Convolution2D(64, 3, 3, **conv_params)(pool1)
conv2 = Convolution2D(64, 3, 3, **conv_params)(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Convolution2D(128, 3, 3, **conv_params)(pool2)
conv3 = Convolution2D(128, 3, 3, **conv_params)(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Convolution2D(256, 3, 3, **conv_params)(pool3)
conv4 = Convolution2D(256, 3, 3, **conv_params)(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
conv5 = Convolution2D(512, 3, 3, **conv_params)(pool4)
conv5 = Convolution2D(512, 3, 3, **conv_params)(conv5)
up6 = merge_l([UpSampling2D(size=(2, 2))(conv5), conv4], **merge_params)
conv6 = Convolution2D(256, 3, 3, **conv_params)(up6)
conv6 = Convolution2D(256, 3, 3, **conv_params)(conv6)
up7 = merge_l([UpSampling2D(size=(2, 2))(conv6), conv3], **merge_params)
conv7 = Convolution2D(128, 3, 3, **conv_params)(up7)
conv7 = Convolution2D(128, 3, 3, **conv_params)(conv7)
up8 = merge_l([UpSampling2D(size=(2, 2))(conv7), conv2], **merge_params)
conv8 = Convolution2D(64, 3, 3, **conv_params)(up8)
conv8 = Convolution2D(64, 3, 3, **conv_params)(conv8)
up9 = merge_l([UpSampling2D(size=(2, 2))(conv8), conv1], **merge_params)
conv9 = Convolution2D(32, 3, 3, **conv_params)(up9)
conv9 = Convolution2D(32, 3, 3, **conv_params)(conv9)
conv10 = Convolution2D(1, 1, 1, activation='sigmoid')(conv9)
optimizer = SGD(lr=0.01, momentum=0.9, nesterov=True)
model = Model(input=inputs, output=conv10)
model.compile(optimizer=optimizer,
loss='binary_crossentropy',
metrics=['accuracy', jaccard_coef, jaccard_coef_int])
return model
def get_mean_image(area_id):
prefix = area_id_to_prefix(area_id)
with tb.open_file(FMT_IMMEAN.format(prefix), 'r') as f:
im_mean = np.array(f.get_node('/immean'))
return im_mean
def get_mul_mean_image(area_id):
prefix = area_id_to_prefix(area_id)
with tb.open_file(FMT_MULMEAN.format(prefix), 'r') as f:
im_mean = np.array(f.get_node('/mulmean'))
return im_mean
def get_train_data(area_id):
prefix = area_id_to_prefix(area_id)
fn_train = FMT_TRAIN_IMAGELIST_PATH.format(prefix=prefix)
df_train = pd.read_csv(fn_train)
X_train = []
fn_im = FMT_TRAIN_MUL_STORE.format(prefix)
with tb.open_file(fn_im, 'r') as f:
for idx, image_id in enumerate(df_train.ImageId.tolist()):
for slice_pos in range(9):
slice_id = image_id + '_' + str(slice_pos)
im = np.array(f.get_node('/' + slice_id))
im = np.swapaxes(im, 0, 2)
im = np.swapaxes(im, 1, 2)
X_train.append(im)
X_train = np.array(X_train)
y_train = []
fn_mask = FMT_TRAIN_MASK_STORE.format(prefix)
with tb.open_file(fn_mask, 'r') as f:
for idx, image_id in enumerate(df_train.ImageId.tolist()):
for slice_pos in range(9):
slice_id = image_id + '_' + str(slice_pos)
mask = np.array(f.get_node('/' + slice_id))
mask = (mask > 0.5).astype(np.uint8)
y_train.append(mask)
y_train = np.array(y_train)
y_train = y_train.reshape((-1, 1, INPUT_SIZE, INPUT_SIZE))
return X_train, y_train
def get_test_data(area_id):
prefix = area_id_to_prefix(area_id)
fn_test = FMT_TEST_IMAGELIST_PATH.format(prefix=prefix)
df_test = pd.read_csv(fn_test)
X_test = []
fn_im = FMT_TEST_MUL_STORE.format(prefix)
with tb.open_file(fn_im, 'r') as f:
for idx, image_id in enumerate(df_test.ImageId.tolist()):
for slice_pos in range(9):
slice_id = image_id + '_' + str(slice_pos)
im = np.array(f.get_node('/' + slice_id))
im = np.swapaxes(im, 0, 2)
im = np.swapaxes(im, 1, 2)
X_test.append(im)
X_test = np.array(X_test)
return X_test
def get_valtest_data(area_id):
prefix = area_id_to_prefix(area_id)
fn_test = FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix)
df_test = pd.read_csv(fn_test)
X_val = []
fn_im = FMT_VALTEST_MUL_STORE.format(prefix)
with tb.open_file(fn_im, 'r') as f:
for idx, image_id in enumerate(df_test.ImageId.tolist()):
for slice_pos in range(9):
slice_id = image_id + '_' + str(slice_pos)
im = np.array(f.get_node('/' + slice_id))
im = np.swapaxes(im, 0, 2)
im = np.swapaxes(im, 1, 2)
X_val.append(im)
X_val = np.array(X_val)
y_val = []
fn_mask = FMT_VALTEST_MASK_STORE.format(prefix)
with tb.open_file(fn_mask, 'r') as f:
for idx, image_id in enumerate(df_test.ImageId.tolist()):
for slice_pos in range(9):
slice_id = image_id + '_' + str(slice_pos)
mask = np.array(f.get_node('/' + slice_id))
mask = (mask > 0.5).astype(np.uint8)
y_val.append(mask)
y_val = np.array(y_val)
y_val = y_val.reshape((-1, 1, INPUT_SIZE, INPUT_SIZE))
return X_val, y_val
def _get_valtrain_data_head(area_id):
prefix = area_id_to_prefix(area_id)
fn_train = FMT_VALTRAIN_IMAGELIST_PATH.format(prefix=prefix)
df_train = pd.read_csv(fn_train)
X_val = []
fn_im = FMT_VALTRAIN_MUL_STORE.format(prefix)
with tb.open_file(fn_im, 'r') as f:
for idx, image_id in enumerate(df_train.ImageId.tolist()):
slice_pos = 5
slice_id = image_id + '_' + str(slice_pos)
im = np.array(f.get_node('/' + slice_id))
im = np.swapaxes(im, 0, 2)
im = np.swapaxes(im, 1, 2)
X_val.append(im)
X_val = np.array(X_val)
y_val = []
fn_mask = FMT_VALTRAIN_MASK_STORE.format(prefix)
with tb.open_file(fn_mask, 'r') as f:
for idx, image_id in enumerate(df_train.ImageId.tolist()):
slice_pos = 5
slice_id = image_id + '_' + str(slice_pos)
mask = np.array(f.get_node('/' + slice_id))
mask = (mask > 0.5).astype(np.uint8)
y_val.append(mask)
y_val = np.array(y_val)
y_val = y_val.reshape((-1, 1, INPUT_SIZE, INPUT_SIZE))
return X_val, y_val
def get_valtrain_data(area_id):
prefix = area_id_to_prefix(area_id)
fn_train = FMT_VALTRAIN_IMAGELIST_PATH.format(prefix=prefix)
df_train = pd.read_csv(fn_train)
X_val = []
fn_im = FMT_VALTRAIN_MUL_STORE.format(prefix)
with tb.open_file(fn_im, 'r') as f:
for idx, image_id in enumerate(df_train.ImageId.tolist()):
for slice_pos in range(9):
slice_id = image_id + '_' + str(slice_pos)
im = np.array(f.get_node('/' + slice_id))
im = np.swapaxes(im, 0, 2)
im = np.swapaxes(im, 1, 2)
X_val.append(im)
X_val = np.array(X_val)
y_val = []
fn_mask = FMT_VALTRAIN_MASK_STORE.format(prefix)
with tb.open_file(fn_mask, 'r') as f:
for idx, image_id in enumerate(df_train.ImageId.tolist()):
for slice_pos in range(9):
slice_id = image_id + '_' + str(slice_pos)
mask = np.array(f.get_node('/' + slice_id))
mask = (mask > 0.5).astype(np.uint8)
y_val.append(mask)
y_val = np.array(y_val)
y_val = y_val.reshape((-1, 1, INPUT_SIZE, INPUT_SIZE))
return X_val, y_val
def __load_band_cut_th(band_fn, bandsz=3):
df = pd.read_csv(band_fn, index_col='area_id')
all_band_cut_th = {area_id: {} for area_id in range(2, 6)}
for area_id, row in df.iterrows():
for chan_i in range(bandsz):
all_band_cut_th[area_id][chan_i] = dict(
min=row['chan{}_min'.format(chan_i)],
max=row['chan{}_max'.format(chan_i)],
)
return all_band_cut_th
def get_slice_3chan_test_im(image_id, band_cut_th):
fn = test_image_id_to_path(image_id)
with rasterio.open(fn, 'r') as f:
values = f.read().astype(np.float32)
for chan_i in range(3):
min_val = band_cut_th[chan_i]['min']
max_val = band_cut_th[chan_i]['max']
values[chan_i] = np.clip(values[chan_i], min_val, max_val)
values[chan_i] = (values[chan_i] - min_val) / (max_val - min_val)
values = np.swapaxes(values, 0, 2)
values = np.swapaxes(values, 0, 1)
assert values.shape == (650, 650, 3)
for slice_pos in range(9):
pos_j = int(math.floor(slice_pos / 3.0))
pos_i = int(slice_pos % 3)
x0 = STRIDE_SZ * pos_i
y0 = STRIDE_SZ * pos_j
im = values[x0:x0+INPUT_SIZE, y0:y0+INPUT_SIZE]
assert im.shape == (256, 256, 3)
yield slice_pos, im
def get_slice_3chan_im(image_id, band_cut_th):
fn = train_image_id_to_path(image_id)
with rasterio.open(fn, 'r') as f:
values = f.read().astype(np.float32)
for chan_i in range(3):
min_val = band_cut_th[chan_i]['min']
max_val = band_cut_th[chan_i]['max']
values[chan_i] = np.clip(values[chan_i], min_val, max_val)
values[chan_i] = (values[chan_i] - min_val) / (max_val - min_val)
values = np.swapaxes(values, 0, 2)
values = np.swapaxes(values, 0, 1)
assert values.shape == (650, 650, 3)
for slice_pos in range(9):
pos_j = int(math.floor(slice_pos / 3.0))
pos_i = int(slice_pos % 3)
x0 = STRIDE_SZ * pos_i
y0 = STRIDE_SZ * pos_j
im = values[x0:x0+INPUT_SIZE, y0:y0+INPUT_SIZE]
assert im.shape == (256, 256, 3)
yield slice_pos, im
def get_slice_8chan_test_im(image_id, band_cut_th):
fn = test_image_id_to_mspec_path(image_id)
with rasterio.open(fn, 'r') as f:
values = f.read().astype(np.float32)
for chan_i in range(8):
min_val = band_cut_th[chan_i]['min']
max_val = band_cut_th[chan_i]['max']
values[chan_i] = np.clip(values[chan_i], min_val, max_val)
values[chan_i] = (values[chan_i] - min_val) / (max_val - min_val)
values = np.swapaxes(values, 0, 2)
values = np.swapaxes(values, 0, 1)
assert values.shape == (650, 650, 8)
for slice_pos in range(9):
pos_j = int(math.floor(slice_pos / 3.0))
pos_i = int(slice_pos % 3)
x0 = STRIDE_SZ * pos_i
y0 = STRIDE_SZ * pos_j
im = values[x0:x0+INPUT_SIZE, y0:y0+INPUT_SIZE]
assert im.shape == (256, 256, 8)
yield slice_pos, im
def get_slice_8chan_im(image_id, band_cut_th):
fn = train_image_id_to_mspec_path(image_id)
with rasterio.open(fn, 'r') as f:
values = f.read().astype(np.float32)
for chan_i in range(8):
min_val = band_cut_th[chan_i]['min']
max_val = band_cut_th[chan_i]['max']
values[chan_i] = np.clip(values[chan_i], min_val, max_val)
values[chan_i] = (values[chan_i] - min_val) / (max_val - min_val)
values = np.swapaxes(values, 0, 2)
values = np.swapaxes(values, 0, 1)
assert values.shape == (650, 650, 8)
for slice_pos in range(9):
pos_j = int(math.floor(slice_pos / 3.0))
pos_i = int(slice_pos % 3)
x0 = STRIDE_SZ * pos_i
y0 = STRIDE_SZ * pos_j
im = values[x0:x0+INPUT_SIZE, y0:y0+INPUT_SIZE]
assert im.shape == (256, 256, 8)
yield slice_pos, im
def get_mask_im(df, image_id):
im_mask = np.zeros((650, 650))
for idx, row in df[df.ImageId == image_id].iterrows():
shape_obj = shapely.wkt.loads(row.PolygonWKT_Pix)
if shape_obj.exterior is not None:
coords = list(shape_obj.exterior.coords)
x = [round(float(pp[0])) for pp in coords]
y = [round(float(pp[1])) for pp in coords]
yy, xx = skimage.draw.polygon(y, x, (650, 650))
im_mask[yy, xx] = 1
interiors = shape_obj.interiors
for interior in interiors:
coords = list(interior.coords)
x = [round(float(pp[0])) for pp in coords]
y = [round(float(pp[1])) for pp in coords]
yy, xx = skimage.draw.polygon(y, x, (650, 650))
im_mask[yy, xx] = 0
im_mask = (im_mask > 0.5).astype(np.uint8)
return im_mask
def get_slice_mask_im(df, image_id):
im_mask = np.zeros((650, 650))
for idx, row in df[df.ImageId == image_id].iterrows():
shape_obj = shapely.wkt.loads(row.PolygonWKT_Pix)
if shape_obj.exterior is not None:
coords = list(shape_obj.exterior.coords)
x = [round(float(pp[0])) for pp in coords]
y = [round(float(pp[1])) for pp in coords]
yy, xx = skimage.draw.polygon(y, x, (650, 650))
im_mask[yy, xx] = 1
interiors = shape_obj.interiors
for interior in interiors:
coords = list(interior.coords)
x = [round(float(pp[0])) for pp in coords]
y = [round(float(pp[1])) for pp in coords]
yy, xx = skimage.draw.polygon(y, x, (650, 650))
im_mask[yy, xx] = 0
im_mask = (im_mask > 0.5).astype(np.uint8)
for slice_pos in range(9):
pos_j = int(math.floor(slice_pos / 3.0))
pos_i = int(slice_pos % 3)
x0 = STRIDE_SZ * pos_i
y0 = STRIDE_SZ * pos_j
im_mask_part = im_mask[x0:x0+INPUT_SIZE, y0:y0+INPUT_SIZE]
assert im_mask_part.shape == (256, 256)
yield slice_pos, im_mask_part
def prep_valtrain_test_slice_image(area_id):
prefix = area_id_to_prefix(area_id)
logger.info("prep_valtrain_test_slice_image for {}".format(prefix))
df_train = pd.read_csv(
FMT_VALTRAIN_IMAGELIST_PATH.format(prefix=prefix),
index_col='ImageId')
df_test = pd.read_csv(
FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix),
index_col='ImageId')
df_summary = load_train_summary_data(area_id)
# MUL
band_cut_th = __load_band_cut_th(
FMT_MUL_BANDCUT_TH_PATH, bandsz=8)[area_id]
fn = FMT_VALTRAIN_MUL_STORE.format(prefix)
logger.info("Prepare image container: {}".format(fn))
if not Path(fn).exists():
with tb.open_file(fn, 'w') as f:
for image_id in tqdm.tqdm(df_train.index, total=len(df_train)):
for slice_pos, im in get_slice_8chan_im(image_id, band_cut_th):
slice_id = image_id + "_{}".format(slice_pos)
atom = tb.Atom.from_dtype(im.dtype)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root, slice_id, atom, im.shape,
filters=filters)
ds[:] = im
fn = FMT_VALTEST_MUL_STORE.format(prefix)
logger.info("Prepare image container: {}".format(fn))
if not Path(fn).exists():
with tb.open_file(fn, 'w') as f:
for image_id in tqdm.tqdm(df_test.index, total=len(df_test)):
for slice_pos, im in get_slice_8chan_im(image_id, band_cut_th):
slice_id = image_id + "_{}".format(slice_pos)
atom = tb.Atom.from_dtype(im.dtype)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root, slice_id, atom, im.shape,
filters=filters)
ds[:] = im
# RGB
band_cut_th = __load_band_cut_th(FMT_RGB_BANDCUT_TH_PATH)[area_id]
fn = FMT_VALTRAIN_IM_STORE.format(prefix)
logger.info("Prepare image container: {}".format(fn))
if not Path(fn).exists():
with tb.open_file(fn, 'w') as f:
for image_id in tqdm.tqdm(df_train.index, total=len(df_train)):
for slice_pos, im in get_slice_3chan_im(image_id, band_cut_th):
slice_id = image_id + "_{}".format(slice_pos)
atom = tb.Atom.from_dtype(im.dtype)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root, slice_id, atom, im.shape,
filters=filters)
ds[:] = im
fn = FMT_VALTEST_IM_STORE.format(prefix)
logger.info("Prepare image container: {}".format(fn))
if not Path(fn).exists():
with tb.open_file(fn, 'w') as f:
for image_id in tqdm.tqdm(df_test.index, total=len(df_test)):
for slice_pos, im in get_slice_3chan_im(image_id, band_cut_th):
slice_id = image_id + "_{}".format(slice_pos)
atom = tb.Atom.from_dtype(im.dtype)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root, slice_id, atom, im.shape,
filters=filters)
ds[:] = im
fn = FMT_VALTRAIN_MASK_STORE.format(prefix)
logger.info("Prepare image container: {}".format(fn))
if not Path(fn).exists():
with tb.open_file(fn, 'w') as f:
for image_id in tqdm.tqdm(df_train.index, total=len(df_train)):
for pos, im_mask in get_slice_mask_im(df_summary, image_id):
atom = tb.Atom.from_dtype(im_mask.dtype)
slice_id = image_id + "_" + str(pos)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root, slice_id, atom, im_mask.shape,
filters=filters)
ds[:] = im_mask
fn = FMT_VALTEST_MASK_STORE.format(prefix)
logger.info("Prepare image container: {}".format(fn))
if not Path(fn).exists():
with tb.open_file(fn, 'w') as f:
for image_id in tqdm.tqdm(df_test.index, total=len(df_test)):
for pos, im_mask in get_slice_mask_im(df_summary, image_id):
atom = tb.Atom.from_dtype(im_mask.dtype)
slice_id = image_id + "_" + str(pos)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root, slice_id, atom, im_mask.shape,
filters=filters)
ds[:] = im_mask
def prep_train_test_slice_image(area_id):
prefix = area_id_to_prefix(area_id)
logger.info("prep_train_test_slice_images for {}".format(prefix))
df_train = pd.read_csv(
FMT_TRAIN_IMAGELIST_PATH.format(prefix=prefix),
index_col='ImageId')
df_test = pd.read_csv(
FMT_TEST_IMAGELIST_PATH.format(prefix=prefix),
index_col='ImageId')
df_summary = load_train_summary_data(area_id)
# MUL
band_cut_th = __load_band_cut_th(
FMT_MUL_BANDCUT_TH_PATH, bandsz=8)[area_id]
fn = FMT_TRAIN_MUL_STORE.format(prefix)
logger.info("Prepare image container: {}".format(fn))
if not Path(fn).exists():
with tb.open_file(fn, 'w') as f:
for image_id in tqdm.tqdm(df_train.index, total=len(df_train)):
for slice_pos, im in get_slice_8chan_im(image_id, band_cut_th):
slice_id = image_id + "_{}".format(slice_pos)
atom = tb.Atom.from_dtype(im.dtype)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root, slice_id, atom, im.shape,
filters=filters)
ds[:] = im
fn = FMT_TEST_MUL_STORE.format(prefix)
logger.info("Prepare image container: {}".format(fn))
if not Path(fn).exists():
with tb.open_file(fn, 'w') as f:
for image_id in tqdm.tqdm(df_test.index, total=len(df_test)):
for slice_pos, im in get_slice_8chan_test_im(
image_id,
band_cut_th):
slice_id = image_id + "_{}".format(slice_pos)
atom = tb.Atom.from_dtype(im.dtype)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root, slice_id, atom, im.shape,
filters=filters)
ds[:] = im
# RGB
band_cut_th = __load_band_cut_th(FMT_RGB_BANDCUT_TH_PATH)[area_id]
fn = FMT_TRAIN_IM_STORE.format(prefix)
logger.info("Prepare image container: {}".format(fn))
if not Path(fn).exists():
with tb.open_file(fn, 'w') as f:
for image_id in tqdm.tqdm(df_train.index, total=len(df_train)):
for slice_pos, im in get_slice_3chan_im(image_id, band_cut_th):
slice_id = image_id + "_{}".format(slice_pos)
atom = tb.Atom.from_dtype(im.dtype)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root, slice_id, atom, im.shape,
filters=filters)
ds[:] = im
fn = FMT_TEST_IM_STORE.format(prefix)
logger.info("Prepare image container: {}".format(fn))
if not Path(fn).exists():
with tb.open_file(fn, 'w') as f:
for image_id in tqdm.tqdm(df_test.index, total=len(df_test)):
for slice_pos, im in get_slice_3chan_test_im(image_id,
band_cut_th):
slice_id = image_id + "_{}".format(slice_pos)
atom = tb.Atom.from_dtype(im.dtype)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root, slice_id, atom, im.shape,
filters=filters)
ds[:] = im
fn = FMT_TRAIN_MASK_STORE.format(prefix)
logger.info("Prepare image container: {}".format(fn))
if not Path(fn).exists():
with tb.open_file(fn, 'w') as f:
for image_id in tqdm.tqdm(df_train.index, total=len(df_train)):
for pos, im_mask in get_slice_mask_im(df_summary, image_id):
atom = tb.Atom.from_dtype(im_mask.dtype)
slice_id = image_id + "_" + str(pos)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root, slice_id, atom, im_mask.shape,
filters=filters)
ds[:] = im_mask
def calc_bandvalues_cut_threshold():
rows = []
for area_id in range(2, 6):
band_cut_th = __calc_mul_multiband_cut_threshold(area_id)
prefix = area_id_to_prefix(area_id)
row = dict(prefix=area_id_to_prefix(area_id))
row['area_id'] = area_id
for chan_i in band_cut_th.keys():
row['chan{}_max'.format(chan_i)] = band_cut_th[chan_i]['max']
row['chan{}_min'.format(chan_i)] = band_cut_th[chan_i]['min']
rows.append(row)
pd.DataFrame(rows).to_csv(FMT_MUL_BANDCUT_TH_PATH, index=False)
rows = []
for area_id in range(2, 6):
band_cut_th = __calc_rgb_multiband_cut_threshold(area_id)
prefix = area_id_to_prefix(area_id)
row = dict(prefix=area_id_to_prefix(area_id))
row['area_id'] = area_id
for chan_i in band_cut_th.keys():
row['chan{}_max'.format(chan_i)] = band_cut_th[chan_i]['max']
row['chan{}_min'.format(chan_i)] = band_cut_th[chan_i]['min']
rows.append(row)
pd.DataFrame(rows).to_csv(FMT_RGB_BANDCUT_TH_PATH, index=False)
def __calc_rgb_multiband_cut_threshold(area_id):
prefix = area_id_to_prefix(area_id)
band_values = {k: [] for k in range(3)}
band_cut_th = {k: dict(max=0, min=0) for k in range(3)}
image_id_list = pd.read_csv(FMT_VALTRAIN_IMAGELIST_PATH.format(
prefix=prefix)).ImageId.tolist()
for image_id in tqdm.tqdm(image_id_list[:500]):
image_fn = train_image_id_to_path(image_id)
with rasterio.open(image_fn, 'r') as f:
values = f.read().astype(np.float32)
for i_chan in range(3):
values_ = values[i_chan].ravel().tolist()
values_ = np.array(
[v for v in values_ if v != 0]
                )  # Remove censored mask
band_values[i_chan].append(values_)
image_id_list = pd.read_csv(FMT_VALTEST_IMAGELIST_PATH.format(
prefix=prefix)).ImageId.tolist()
for image_id in tqdm.tqdm(image_id_list[:500]):
image_fn = train_image_id_to_path(image_id)
with rasterio.open(image_fn, 'r') as f:
values = f.read().astype(np.float32)
for i_chan in range(3):
values_ = values[i_chan].ravel().tolist()
values_ = np.array(
[v for v in values_ if v != 0]
                )  # Remove censored mask
band_values[i_chan].append(values_)
for i_chan in range(3):
band_values[i_chan] = np.concatenate(
band_values[i_chan]).ravel()
        band_cut_th[i_chan]['max'] = np.percentile(
            band_values[i_chan], 98)
        band_cut_th[i_chan]['min'] = np.percentile(
            band_values[i_chan], 2)
return band_cut_th
def __calc_mul_multiband_cut_threshold(area_id):
prefix = area_id_to_prefix(area_id)
band_values = {k: [] for k in range(8)}
band_cut_th = {k: dict(max=0, min=0) for k in range(8)}
image_id_list = pd.read_csv(FMT_VALTRAIN_IMAGELIST_PATH.format(
prefix=prefix)).ImageId.tolist()
for image_id in tqdm.tqdm(image_id_list[:500]):
image_fn = train_image_id_to_mspec_path(image_id)
with rasterio.open(image_fn, 'r') as f:
values = f.read().astype(np.float32)
for i_chan in range(8):
values_ = values[i_chan].ravel().tolist()
values_ = np.array(
[v for v in values_ if v != 0]
) # Remove censored mask
band_values[i_chan].append(values_)
image_id_list = pd.read_csv(FMT_VALTEST_IMAGELIST_PATH.format(
prefix=prefix)).ImageId.tolist()
for image_id in tqdm.tqdm(image_id_list[:500]):
image_fn = train_image_id_to_mspec_path(image_id)
with rasterio.open(image_fn, 'r') as f:
values = f.read().astype(np.float32)
for i_chan in range(8):
values_ = values[i_chan].ravel().tolist()
values_ = np.array(
[v for v in values_ if v != 0]
) # Remove censored mask
band_values[i_chan].append(values_)
for i_chan in range(8):
band_values[i_chan] = np.concatenate(
band_values[i_chan]).ravel()
        band_cut_th[i_chan]['max'] = np.percentile(
            band_values[i_chan], 98)
        band_cut_th[i_chan]['min'] = np.percentile(
            band_values[i_chan], 2)
return band_cut_th
def train_image_id_to_mspec_path(image_id):
"""
"""
prefix = image_id_to_prefix(image_id)
fn = FMT_TRAIN_MSPEC_IMAGE_PATH.format(
prefix=prefix,
image_id=image_id)
return fn
def test_image_id_to_mspec_path(image_id):
"""
"""
prefix = image_id_to_prefix(image_id)
fn = FMT_TEST_MSPEC_IMAGE_PATH.format(
prefix=prefix,
image_id=image_id)
return fn
def train_image_id_to_path(image_id):
prefix = image_id_to_prefix(image_id)
fn = FMT_TRAIN_RGB_IMAGE_PATH.format(
prefix=prefix,
image_id=image_id)
return fn
def test_image_id_to_path(image_id):
prefix = image_id_to_prefix(image_id)
fn = FMT_TEST_RGB_IMAGE_PATH.format(
prefix=prefix,
image_id=image_id)
return fn
def image_id_to_prefix(image_id):
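    """Extract the AOI prefix (e.g. 'AOI_2_Vegas') from an image id such as
    'AOI_2_Vegas_img123' (assumed SpaceNet image-id format)."""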
prefix = image_id.split('img')[0][:-1]
return prefix
def load_train_summary_data(area_id):
prefix = area_id_to_prefix(area_id)
fn = FMT_TRAIN_SUMMARY_PATH.format(prefix=prefix)
df = pd.read_csv(fn)
# df.loc[:, 'ImageId'] = df.ImageId.str[4:]
return df
def split_val_train_test(area_id):
prefix = area_id_to_prefix(area_id)
df = load_train_summary_data(area_id)
df_agg = df.groupby('ImageId').agg('first')
image_id_list = df_agg.index.tolist()
np.random.shuffle(image_id_list)
sz_valtrain = int(len(image_id_list) * 0.7)
sz_valtest = len(image_id_list) - sz_valtrain
# Parent directory
parent_dir = Path(FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix)).parent
if not parent_dir.exists():
parent_dir.mkdir(parents=True)
pd.DataFrame({'ImageId': image_id_list[:sz_valtrain]}).to_csv(
FMT_VALTRAIN_IMAGELIST_PATH.format(prefix=prefix),
index=False)
pd.DataFrame({'ImageId': image_id_list[sz_valtrain:]}).to_csv(
FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix),
index=False)
def get_image_mask_from_dataframe(df, image_id):
im_mask = np.zeros((650, 650))
for idx, row in df[df.ImageId == image_id].iterrows():
shape_obj = shapely.wkt.loads(row.PolygonWKT_Pix)
if shape_obj.exterior is not None:
coords = list(shape_obj.exterior.coords)
x = [round(float(pp[0])) for pp in coords]
y = [round(float(pp[1])) for pp in coords]
yy, xx = skimage.draw.polygon(y, x, (650, 650))
im_mask[yy, xx] = 1
interiors = shape_obj.interiors
for interior in interiors:
coords = list(interior.coords)
x = [round(float(pp[0])) for pp in coords]
y = [round(float(pp[1])) for pp in coords]
yy, xx = skimage.draw.polygon(y, x, (650, 650))
im_mask[yy, xx] = 0
im_mask = (im_mask > 0.5).astype(np.uint8)
return im_mask
@click.group()
def cli():
pass
@cli.command()
def testmerge():
# file check
for area_id in range(2, 6):
prefix = area_id_to_prefix(area_id)
fn_out = FMT_TESTPOLY_PATH.format(prefix)
if not Path(fn_out).exists():
logger.info("Required file not found: {}".format(fn_out))
sys.exit(1)
# file check
for area_id in range(2, 6):
prefix = area_id_to_prefix(area_id)
fn_out = FMT_VALTESTPOLY_PATH.format(prefix)
if not Path(fn_out).exists():
logger.info("Required file not found: {}".format(fn_out))
sys.exit(1)
# merge files
rows = []
for area_id in range(2, 6):
prefix = area_id_to_prefix(area_id)
fn_out = FMT_VALTESTPOLY_PATH.format(prefix)
with open(fn_out, 'r') as f:
line = f.readline()
if area_id == 2:
rows.append(line)
for line in f:
# remove interiors
line = _remove_interiors(line)
rows.append(line)
fn_out = FMT_VALTESTPOLY_OVALL_PATH
with open(fn_out, 'w') as f:
for line in rows:
f.write(line)
# merge files
rows = []
for area_id in range(2, 6):
prefix = area_id_to_prefix(area_id)
fn_out = FMT_VALTESTTRUTH_PATH.format(prefix)
with open(fn_out, 'r') as f:
line = f.readline()
if area_id == 2:
rows.append(line)
for line in f:
rows.append(line)
fn_out = FMT_VALTESTTRUTH_OVALL_PATH
with open(fn_out, 'w') as f:
for line in rows:
f.write(line)
# merge files
rows = []
for area_id in range(2, 6):
prefix = area_id_to_prefix(area_id)
fn_out = FMT_TESTPOLY_PATH.format(prefix)
with open(fn_out, 'r') as f:
line = f.readline()
if area_id == 2:
rows.append(line)
for line in f:
# remove interiors
line = _remove_interiors(line)
rows.append(line)
with open(FN_SOLUTION_CSV, 'w') as f:
for line in rows:
f.write(line)
@cli.command()
@click.argument('area_id', type=int)
def testproc(area_id):
prefix = area_id_to_prefix(area_id)
logger.info(">>>> Test proc for {}".format(prefix))
_internal_test(area_id)
logger.info(">>>> Test proc for {} ... done".format(prefix))
@cli.command()
@click.argument('area_id', type=int)
@click.option('--epoch', type=int, default=0)
@click.option('--th', type=int, default=MIN_POLYGON_AREA)
@click.option('--predict/--no-predict', default=False)
def validate_city_fscore(area_id, epoch, th, predict):
_internal_validate_fscore(
area_id,
epoch=epoch,
enable_tqdm=True,
min_th=th,
predict=predict)
evaluate_record = _calc_fscore_per_aoi(area_id)
evaluate_record['epoch'] = epoch
evaluate_record['min_area_th'] = th
evaluate_record['area_id'] = area_id
logger.info("\n" + json.dumps(evaluate_record, indent=4))
@cli.command()
@click.argument('datapath', type=str)
def evalfscore(datapath):
area_id = directory_name_to_area_id(datapath)
prefix = area_id_to_prefix(area_id)
logger.info("Evaluate fscore on validation set: {}".format(prefix))
# for each epoch
# if not Path(FMT_VALMODEL_EVALHIST.format(prefix)).exists():
if True:
df_hist = pd.read_csv(FMT_VALMODEL_HIST.format(prefix))
df_hist.loc[:, 'epoch'] = list(range(1, len(df_hist) + 1))
rows = []
for zero_base_epoch in range(0, len(df_hist)):
logger.info(">>> Epoch: {}".format(zero_base_epoch))
_internal_validate_fscore_wo_pred_file(
area_id,
epoch=zero_base_epoch,
enable_tqdm=True,
min_th=MIN_POLYGON_AREA)
evaluate_record = _calc_fscore_per_aoi(area_id)
evaluate_record['zero_base_epoch'] = zero_base_epoch
evaluate_record['min_area_th'] = MIN_POLYGON_AREA
evaluate_record['area_id'] = area_id
logger.info("\n" + json.dumps(evaluate_record, indent=4))
rows.append(evaluate_record)
pd.DataFrame(rows).to_csv(
FMT_VALMODEL_EVALHIST.format(prefix),
index=False)
# find best min-poly-threshold
df_evalhist = pd.read_csv(FMT_VALMODEL_EVALHIST.format(prefix))
best_row = df_evalhist.sort_values(by='fscore', ascending=False).iloc[0]
best_epoch = int(best_row.zero_base_epoch)
best_fscore = best_row.fscore
# optimize min area th
rows = []
for th in [30, 60, 90, 120, 150, 180, 210, 240]:
logger.info(">>> TH: {}".format(th))
predict_flag = False
if th == 30:
predict_flag = True
_internal_validate_fscore(
area_id,
epoch=best_epoch,
enable_tqdm=True,
min_th=th,
predict=predict_flag)
evaluate_record = _calc_fscore_per_aoi(area_id)
evaluate_record['zero_base_epoch'] = best_epoch
evaluate_record['min_area_th'] = th
evaluate_record['area_id'] = area_id
logger.info("\n" + json.dumps(evaluate_record, indent=4))
rows.append(evaluate_record)
pd.DataFrame(rows).to_csv(
FMT_VALMODEL_EVALTHHIST.format(prefix),
index=False)
logger.info("Evaluate fscore on validation set: {} .. done".format(prefix))
@cli.command()
@click.argument('datapath', type=str)
def validate(datapath):
area_id = directory_name_to_area_id(datapath)
prefix = area_id_to_prefix(area_id)
logger.info(">> validate sub-command: {}".format(prefix))
prefix = area_id_to_prefix(area_id)
logger.info("Loading valtest and mulmean ...")
X_mean = get_mul_mean_image(area_id)
X_val, y_val = get_valtest_data(area_id)
X_val = X_val - X_mean
if not Path(MODEL_DIR).exists():
Path(MODEL_DIR).mkdir(parents=True)
logger.info("Instantiate U-Net model")
model = get_unet()
model_checkpoint = ModelCheckpoint(
FMT_VALMODEL_PATH.format(prefix + "_{epoch:02d}"),
monitor='val_jaccard_coef_int',
save_best_only=False)
model_earlystop = EarlyStopping(
monitor='val_jaccard_coef_int',
patience=10,
verbose=0,
mode='max')
model_history = History()
df_train = pd.read_csv(FMT_VALTRAIN_IMAGELIST_PATH.format(
prefix=prefix))
logger.info("Fit")
model.fit_generator(
generate_valtrain_batch(area_id, batch_size=2, immean=X_mean),
samples_per_epoch=len(df_train) * 9,
nb_epoch=35,
verbose=1,
validation_data=(X_val, y_val),
callbacks=[model_checkpoint, model_earlystop, model_history])
model.save_weights(FMT_VALMODEL_LAST_PATH.format(prefix))
# Save evaluation history
pd.DataFrame(model_history.history).to_csv(
FMT_VALMODEL_HIST.format(prefix), index=False)
logger.info(">> validate sub-command: {} ... Done".format(prefix))
if __name__ == '__main__':
cli()
|
python
|
from __future__ import absolute_import, unicode_literals
import json
from mopidy.models import immutable
class ModelJSONEncoder(json.JSONEncoder):
"""
Automatically serialize Mopidy models to JSON.
Usage::
>>> import json
>>> json.dumps({'a_track': Track(name='name')}, cls=ModelJSONEncoder)
'{"a_track": {"__model__": "Track", "name": "name"}}'
"""
def default(self, obj):
if isinstance(obj, immutable.ImmutableObject):
return obj.serialize()
return json.JSONEncoder.default(self, obj)
def model_json_decoder(dct):
"""
Automatically deserialize Mopidy models from JSON.
Usage::
>>> import json
>>> json.loads(
... '{"a_track": {"__model__": "Track", "name": "name"}}',
... object_hook=model_json_decoder)
{u'a_track': Track(artists=[], name=u'name')}
"""
if '__model__' in dct:
model_name = dct.pop('__model__')
if model_name in immutable._models:
cls = immutable._models[model_name]
return cls(**dct)
return dct
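# Round-trip sketch combining the encoder and decoder above (assuming
# mopidy.models.Track is importable, as in the docstring examples):
#
#     from mopidy.models import Track
#     encoded = json.dumps({'a_track': Track(name='name')}, cls=ModelJSONEncoder)
#     decoded = json.loads(encoded, object_hook=model_json_decoder)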
|
python
|
"""Generate a plot to visualize revision impact inequality based on data-flow
interactions."""
import typing as tp
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib import axes, style
from varats.data.databases.blame_interaction_database import (
BlameInteractionDatabase,
)
from varats.data.metrics import gini_coefficient, lorenz_curve
from varats.mapping.commit_map import CommitMap, get_commit_map
from varats.paper.case_study import CaseStudy
from varats.plot.plot import Plot, PlotDataEmpty
from varats.plot.plots import PlotGenerator
from varats.plots.repository_churn import (
build_repo_churn_table,
draw_code_churn,
)
from varats.project.project_util import get_local_project_git
from varats.ts_utils.click_param_types import REQUIRE_MULTI_CASE_STUDY
from varats.utils.git_util import (
ChurnConfig,
calc_repo_code_churn,
ShortCommitHash,
FullCommitHash,
)
def draw_interaction_lorenz_curve(
axis: axes.SubplotBase, data: pd.DataFrame, unique_rev_strs: tp.List[str],
consider_in_interactions: bool, consider_out_interactions: bool,
line_width: float
) -> None:
"""
    Draws a lorenz curve onto the given axis.

    Args:
        axis: matplotlib axis to draw on
        data: plotting data
        unique_rev_strs: revision labels for the x-axis
        consider_in_interactions: whether incoming HEAD interactions are included
        consider_out_interactions: whether outgoing HEAD interactions are included
        line_width: line width of the plot line
"""
if consider_in_interactions and consider_out_interactions:
data_selector = 'HEAD_Interactions'
elif consider_in_interactions:
data_selector = 'IN_HEAD_Interactions'
elif consider_out_interactions:
data_selector = 'OUT_HEAD_Interactions'
else:
raise AssertionError(
"At least one of the in/out interaction needs to be selected"
)
data.sort_values(by=[data_selector, 'time_id'], inplace=True)
lor = lorenz_curve(data[data_selector])
axis.plot(unique_rev_strs, lor, color='#cc0099', linewidth=line_width)
def draw_perfect_lorenz_curve(
axis: axes.SubplotBase, unique_rev_strs: tp.List[str], line_width: float
) -> None:
"""
Draws a perfect lorenz curve onto the given axis, i.e., a straight line from
the point of origin to the right upper corner.
Args:
        axis: axis to draw to
        unique_rev_strs: revision labels for the x-axis
        line_width: line width of the plot line
"""
axis.plot(
unique_rev_strs,
np.linspace(0.0, 1.0, len(unique_rev_strs)),
color='black',
linestyle='--',
linewidth=line_width
)
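# For reference: the dashed diagonal drawn above corresponds to a perfectly
# equal interaction distribution (Gini coefficient of 0); the further the
# measured lorenz curve bows away from it, the more unequal the distribution.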
def draw_interaction_code_churn(
axis: axes.SubplotBase, data: pd.DataFrame, project_name: str,
commit_map: CommitMap
) -> None:
"""
Helper function to draw parts of the code churn that are related to our
data.
Args:
axis: to draw on
data: plotting data
project_name: name of the project
        commit_map: CommitMap for the given project (by project_name)
"""
unique_revs = data['revision'].unique()
def remove_revisions_without_data(revision: ShortCommitHash) -> bool:
"""Removes all churn data where this plot has no data."""
return revision.hash in unique_revs
def apply_sorting(churn_data: pd.DataFrame) -> pd.DataFrame:
churn_data.set_index('time_id', inplace=True)
churn_data = churn_data.reindex(index=data['time_id'])
return churn_data.reset_index()
draw_code_churn(
axis, project_name, commit_map, remove_revisions_without_data,
apply_sorting
)
def filter_non_code_changes(
blame_data: pd.DataFrame, project_name: str
) -> pd.DataFrame:
"""
    Filter all revisions from the data frame that are not related to code changes.
Args:
blame_data: data to filter
project_name: name of the project
Returns:
filtered data frame without rows related to non code changes
"""
repo = get_local_project_git(project_name)
code_related_changes = [
x.hash for x in calc_repo_code_churn(
repo, ChurnConfig.create_c_style_languages_config()
)
]
return blame_data[blame_data.apply(
lambda x: x['revision'] in code_related_changes, axis=1
)]
class BlameLorenzCurve(Plot, plot_name="b_lorenz_curve"):
"""Plots the lorenz curve for IN/OUT interactions for a given project."""
NAME = 'b_lorenz_curve'
def plot(self, view_mode: bool) -> None:
style.use(self.plot_config.style())
case_study: CaseStudy = self.plot_kwargs['case_study']
project_name: str = case_study.project_name
commit_map = get_commit_map(project_name)
fig = plt.figure()
fig.subplots_adjust(top=0.95, hspace=0.05, right=0.95, left=0.07)
grid_spec = fig.add_gridspec(3, 2)
main_axis = fig.add_subplot(grid_spec[:-1, :1])
main_axis.set_title("Lorenz curve for incoming commit interactions")
main_axis.get_xaxis().set_visible(False)
main_axis_r = fig.add_subplot(grid_spec[:-1, -1])
main_axis_r.set_title("Lorenz curve for outgoing commit interactions")
main_axis_r.get_xaxis().set_visible(False)
churn_axis = fig.add_subplot(grid_spec[2, :1], sharex=main_axis)
churn_axis_r = fig.add_subplot(grid_spec[2, -1], sharex=main_axis_r)
data = BlameInteractionDatabase.get_data_for_project(
project_name, [
"revision", "time_id", "IN_HEAD_Interactions",
"OUT_HEAD_Interactions", "HEAD_Interactions"
], commit_map, case_study
)
data = filter_non_code_changes(data, project_name)
if data.empty:
raise PlotDataEmpty
unique_rev_strs: tp.List[str] = [rev.hash for rev in data['revision']]
# Draw left side of the plot
draw_interaction_lorenz_curve(
main_axis, data, unique_rev_strs, True, False,
self.plot_config.line_width()
)
draw_perfect_lorenz_curve(
main_axis, unique_rev_strs, self.plot_config.line_width()
)
draw_interaction_code_churn(churn_axis, data, project_name, commit_map)
# Draw right side of the plot
draw_interaction_lorenz_curve(
main_axis_r, data, unique_rev_strs, False, True,
self.plot_config.line_width()
)
draw_perfect_lorenz_curve(
main_axis_r, unique_rev_strs, self.plot_config.line_width()
)
draw_interaction_code_churn(
churn_axis_r, data, project_name, commit_map
)
# Adapt axis to draw nicer plots
for x_label in churn_axis.get_xticklabels():
x_label.set_fontsize(self.plot_config.x_tick_size())
x_label.set_rotation(270)
x_label.set_fontfamily('monospace')
for x_label in churn_axis_r.get_xticklabels():
x_label.set_fontsize(self.plot_config.x_tick_size())
x_label.set_rotation(270)
x_label.set_fontfamily('monospace')
def calc_missing_revisions(
self, boundary_gradient: float
) -> tp.Set[FullCommitHash]:
raise NotImplementedError
class BlameLorenzCurveGenerator(
PlotGenerator,
generator_name="lorenz-curve-plot",
options=[REQUIRE_MULTI_CASE_STUDY]
):
"""Generates lorenz-curve plot(s) for the selected case study(ies)."""
def generate(self) -> tp.List[Plot]:
case_studies: tp.List[CaseStudy] = self.plot_kwargs.pop("case_study")
return [
BlameLorenzCurve(
self.plot_config, case_study=cs, **self.plot_kwargs
) for cs in case_studies
]
def draw_gini_churn_over_time(
axis: axes.SubplotBase, blame_data: pd.DataFrame,
unique_rev_strs: tp.List[str], project_name: str, commit_map: CommitMap,
consider_insertions: bool, consider_deletions: bool, line_width: float
) -> None:
"""
Draws the gini of the churn distribution over time.
Args:
axis: axis to draw to
blame_data: blame data of the base plot
project_name: name of the project
commit_map: CommitMap for the given project(by project_name)
consider_insertions: True, insertions should be included
consider_deletions: True, deletions should be included
line_width: line width of the plot lines
"""
churn_data = build_repo_churn_table(project_name, commit_map)
# clean data
unique_revs = blame_data['revision'].unique()
def remove_revisions_without_data(revision: ShortCommitHash) -> bool:
"""Removes all churn data where this plot has no data."""
return revision.hash[:10] in unique_revs
churn_data = churn_data[churn_data.apply(
lambda x: remove_revisions_without_data(x['revision']), axis=1
)]
# reorder churn data to match blame_data
churn_data.set_index('time_id', inplace=True)
churn_data = churn_data.reindex(index=blame_data['time_id'])
churn_data = churn_data.reset_index()
gini_churn = []
for time_id in blame_data['time_id']:
if consider_insertions and consider_deletions:
distribution = (
churn_data[churn_data.time_id <= time_id].insertions +
churn_data[churn_data.time_id <= time_id].deletions
).sort_values(ascending=True)
elif consider_insertions:
distribution = churn_data[churn_data.time_id <= time_id
].insertions.sort_values(ascending=True)
elif consider_deletions:
distribution = churn_data[churn_data.time_id <= time_id
].deletions.sort_values(ascending=True)
else:
            raise AssertionError(
                "At least one of insertions/deletions needs to be selected"
            )
gini_churn.append(gini_coefficient(distribution))
if consider_insertions and consider_deletions:
linestyle = '-'
label = 'Insertions + Deletions'
elif consider_insertions:
linestyle = '--'
label = 'Insertions'
else:
linestyle = ':'
label = 'Deletions'
axis.plot(
unique_rev_strs,
gini_churn,
linestyle=linestyle,
linewidth=line_width,
label=label,
color='orange'
)
def draw_gini_blame_over_time(
axis: axes.SubplotBase, blame_data: pd.DataFrame,
unique_rev_strs: tp.List[str], consider_in_interactions: bool,
consider_out_interactions: bool, line_width: float
) -> None:
"""
Draws the gini coefficients of the blame interactions over time.
Args:
axis: axis to draw to
blame_data: blame data of the base plot
consider_in_interactions: True, IN interactions should be included
consider_out_interactions: True, OUT interactions should be included
line_width: line width of the plot lines
"""
if consider_in_interactions and consider_out_interactions:
data_selector = 'HEAD_Interactions'
linestyle = '-'
label = "Interactions"
elif consider_in_interactions:
data_selector = 'IN_HEAD_Interactions'
linestyle = '--'
label = "IN Interactions"
elif consider_out_interactions:
data_selector = 'OUT_HEAD_Interactions'
linestyle = ':'
label = "OUT Interactions"
else:
        raise AssertionError(
            "At least one of the in/out interactions needs to be selected"
        )
gini_coefficients = []
for time_id in blame_data.time_id:
distribution = blame_data[blame_data.time_id <= time_id
][data_selector].sort_values(ascending=True)
gini_coefficients.append(gini_coefficient(distribution))
axis.plot(
unique_rev_strs,
gini_coefficients,
linestyle=linestyle,
linewidth=line_width,
label=label,
color='#cc0099'
)
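# `gini_coefficient` used above is provided elsewhere in this module. The
# helper below is only an illustrative sketch (an assumption, not the
# project's implementation) of the usual formula for an ascending-sorted
# distribution: G = 2 * sum(i * x_i) / (n * sum(x_i)) - (n + 1) / n.
def _gini_coefficient_sketch(sorted_values: pd.Series) -> float:
    """Sketch: Gini coefficient of an ascending-sorted distribution."""
    values = sorted_values.to_numpy(dtype=float)
    n = len(values)
    if n == 0 or values.sum() == 0:
        return 0.0
    ranks = np.arange(1, n + 1)
    return float(2 * (ranks * values).sum() / (n * values.sum()) - (n + 1) / n)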
class BlameGiniOverTime(Plot, plot_name="b_gini_overtime"):
"""
    Plots the Gini coefficient over time for a project.
    This shows how the distribution of the interactions/churn changes over time.
"""
NAME = 'b_gini_overtime'
def plot(self, view_mode: bool) -> None:
style.use(self.plot_config.style())
case_study: CaseStudy = self.plot_kwargs["case_study"]
project_name = case_study.project_name
commit_map: CommitMap = get_commit_map(project_name)
data = BlameInteractionDatabase.get_data_for_project(
project_name, [
"revision", "time_id", "IN_HEAD_Interactions",
"OUT_HEAD_Interactions", "HEAD_Interactions"
], commit_map, case_study
)
data = filter_non_code_changes(data, project_name)
if data.empty:
raise PlotDataEmpty
data.sort_values(by=['time_id'], inplace=True)
fig = plt.figure()
fig.subplots_adjust(top=0.95, hspace=0.05, right=0.95, left=0.07)
grid_spec = fig.add_gridspec(3, 1)
main_axis = fig.add_subplot(grid_spec[:-1, :])
main_axis.set_title("Gini coefficient over the project lifetime")
main_axis.get_xaxis().set_visible(False)
churn_axis = fig.add_subplot(grid_spec[2, :], sharex=main_axis)
unique_rev_strs: tp.List[str] = [rev.hash for rev in data['revision']]
draw_gini_blame_over_time(
main_axis, data, unique_rev_strs, True, True,
self.plot_config.line_width()
)
draw_gini_blame_over_time(
main_axis, data, unique_rev_strs, True, False,
self.plot_config.line_width()
)
draw_gini_blame_over_time(
main_axis, data, unique_rev_strs, False, True,
self.plot_config.line_width()
)
draw_gini_churn_over_time(
main_axis, data, unique_rev_strs, project_name, commit_map, True,
True, self.plot_config.line_width()
)
draw_gini_churn_over_time(
main_axis, data, unique_rev_strs, project_name, commit_map, True,
False, self.plot_config.line_width()
)
draw_gini_churn_over_time(
main_axis, data, unique_rev_strs, project_name, commit_map, False,
True, self.plot_config.line_width()
)
main_axis.legend()
main_axis.set_ylim((0., 1.))
draw_interaction_code_churn(churn_axis, data, project_name, commit_map)
# Adapt axis to draw nicer plots
for x_label in churn_axis.get_xticklabels():
x_label.set_fontsize(self.plot_config.x_tick_size())
x_label.set_rotation(270)
x_label.set_fontfamily('monospace')
def calc_missing_revisions(
self, boundary_gradient: float
) -> tp.Set[FullCommitHash]:
raise NotImplementedError
class BlameGiniOverTimeGenerator(
PlotGenerator,
generator_name="gini-overtime-plot",
options=[REQUIRE_MULTI_CASE_STUDY]
):
"""Generates gini-overtime plot(s) for the selected case study(ies)."""
def generate(self) -> tp.List[Plot]:
case_studies: tp.List[CaseStudy] = self.plot_kwargs.pop("case_study")
return [
BlameGiniOverTime(
self.plot_config, case_study=cs, **self.plot_kwargs
) for cs in case_studies
]
|
python
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow import conf
from airflow.upgrade.rules.base_rule import BaseRule
from airflow.utils.module_loading import import_string
LOGS = [
(
"airflow.providers.amazon.aws.log.s3_task_handler.S3TaskHandler",
"airflow.utils.log.s3_task_handler.S3TaskHandler"
),
(
'airflow.providers.amazon.aws.log.cloudwatch_task_handler.CloudwatchTaskHandler',
'airflow.utils.log.cloudwatch_task_handler.CloudwatchTaskHandler'
),
(
'airflow.providers.elasticsearch.log.es_task_handler.ElasticsearchTaskHandler',
'airflow.utils.log.es_task_handler.ElasticsearchTaskHandler'
),
(
"airflow.providers.google.cloud.log.stackdriver_task_handler.StackdriverTaskHandler",
"airflow.utils.log.stackdriver_task_handler.StackdriverTaskHandler"
),
(
"airflow.providers.google.cloud.log.gcs_task_handler.GCSTaskHandler",
"airflow.utils.log.gcs_task_handler.GCSTaskHandler"
),
(
"airflow.providers.microsoft.azure.log.wasb_task_handler.WasbTaskHandler",
"airflow.utils.log.wasb_task_handler.WasbTaskHandler"
)
]
class TaskHandlersMovedRule(BaseRule):
title = "Changes in import path of remote task handlers"
description = (
"The remote log task handlers have been moved to the providers "
"directory and into their respective providers packages."
)
def check(self):
logging_class = conf.get("core", "logging_config_class", fallback=None)
if logging_class:
config = import_string(logging_class)
configured_path = config['handlers']['task']['class']
for new_path, old_path in LOGS:
if configured_path == old_path:
return [
"This path : `{old}` should be updated to this path: `{new}`".format(old=old_path,
new=new_path)
]
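# Illustrative note (an assumption about the configured object, not taken from
# this file): `logging_config_class` is expected to resolve to a standard
# logging dict whose task handler is configured roughly like
#     LOGGING_CONFIG = {
#         "handlers": {
#             "task": {"class": "airflow.utils.log.s3_task_handler.S3TaskHandler"},
#         },
#     }
# If that class path still matches one of the old `airflow.utils.log` entries
# in LOGS, the rule reports the new providers path to migrate to.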
|
python
|
from InsertionSort import insertionSort
import math
def bucketSort(customList):
numBuckets = round(math.sqrt(len(customList)))
maxValue = max(customList)
arr = []
# Creating buckets
for i in range(numBuckets):
arr.append([])
    # Distributing elements into buckets: a value maps to bucket
    # ceil(value * numBuckets / maxValue), clamped to at least 1 so that 0
    # does not wrap around into the last bucket.
    for j in customList:
        index_b = max(1, math.ceil(j * numBuckets / maxValue))
        arr[index_b - 1].append(j)
    # Sorting the elements in each bucket
for i in range(numBuckets):
arr[i] = insertionSort(arr[i])
    # Finally bring the elements from the buckets back into the list
k = 0
for i in range(numBuckets):
for j in range(len(arr[i])):
customList[k] = arr[i][j]
k += 1
print(customList)
bucketSort([11, 98, 23, 78, 0, 22, 14, 7, 61, 43, 86, 65])
|
python
|
# -*- coding: utf-8 -*-
import re
import requests
from datetime import datetime, timedelta
from jobs import AbstractJob
class Vaernesekspressen(AbstractJob):
def __init__(self, conf):
        self.airport_id = 113  # Vaernes is the only supported destination
self.from_stop = conf["from_stop"]
self.interval = conf["interval"]
self.timeout = conf.get("timeout")
self.base_url = conf.get("base_url", "https://www.vaernesekspressen.no")
self.now = datetime.now
def _find_stop_id(self):
url = "{}/Umbraco/Api/TicketOrderApi/GetStops".format(self.base_url)
params = {"routeId": 31} # There is only one route
r = requests.get(url, params=params, timeout=self.timeout)
r.raise_for_status()
for stop in r.json():
if stop["Name"].lower() == self.from_stop.lower():
return stop["Id"]
raise ValueError('Could not find ID for stop "{}"'.format(self.from_stop))
def _timestamp(self, dt, tz):
# I hate Python.
utc_offset = timedelta(0)
if tz == "CET":
utc_offset = timedelta(hours=1)
elif tz == "CEST":
utc_offset = timedelta(hours=2)
else:
raise ValueError('Unexpected time zone "{}"'.format(tz))
epoch = datetime(1970, 1, 1)
return (dt - utc_offset - epoch).total_seconds()
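    # Worked example (illustrative): "2021-06-01 12:00:00 CEST" is two hours
    # ahead of UTC, so it corresponds to 2021-06-01 10:00:00 UTC, which
    # _timestamp returns as epoch 1622541600.0 seconds.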
def _parse_time(self, date):
parts = date.rsplit(" ", 1)
tz = parts[1]
dt = datetime.strptime(parts[0], "%Y-%m-%d %H:%M:%S.0")
return int(self._timestamp(dt, tz))
def _departures(self, stop_id, dt):
url = "{}/Umbraco/Api/TicketOrderApi/GetJourneys".format(self.base_url)
data = {
"From": str(stop_id),
"To": str(self.airport_id),
"Route": "31",
"Date": dt.strftime("%d.%m.%Y"),
"Adult": "1",
"Student": "0",
"Child": "0",
"Baby": "0",
"Senior": "0",
"isRoundTrip": False,
}
r = requests.post(url, json=data, timeout=self.timeout)
r.raise_for_status()
return [
{
"stop_name": self._trim_name(d["Start"]["Name"]),
"destination_name": self._trim_name(d["End"]["Name"]),
"departure_time": str(self._parse_time(d["DepartureTime"])),
}
for d in r.json()
]
def _trim_name(self, name):
return re.sub(r"^FB \d+ ", "", name)
def get(self):
stop_id = self._find_stop_id()
now = self.now()
departures = self._departures(stop_id, now)
if len(departures) < 2:
# Few departures today, include tomorrow's departures
tomorrow = (now + timedelta(days=1)).date()
departures += self._departures(stop_id, tomorrow)
from_ = "N/A"
to = "N/A"
if len(departures) > 0:
from_ = departures[0]["stop_name"]
to = departures[0]["destination_name"]
return {"from": from_, "to": to, "departures": departures}
|
python
|
import jax.numpy as jnp
from jax import vmap, grad, nn, tree_util, jit, ops, custom_vjp
from functools import partial
from jax.experimental import ode
from collections import namedtuple
GradientFlowState = namedtuple('GradientFlowState', ['B', 's', 'z'])
def gradient_flow(loss_fn, init_params, inputs, labels, t_final,
rtol=1.4e-8, atol=1.4e-8, mxstep=jnp.inf):
return _gradient_flow(loss_fn, rtol, atol, mxstep, init_params,
inputs, labels, t_final)
@partial(custom_vjp, nondiff_argnums=(0, 1, 2, 3))
def _gradient_flow(loss_fn, rtol, atol, mxstep, init_params, inputs, labels, t_final):
def _dynamics(params, _):
grads, _ = grad(loss_fn, has_aux=True)(params, inputs, labels)
return -grads
trajectory = ode.odeint(
jit(_dynamics), init_params,
jnp.asarray([0., t_final], dtype=jnp.float32),
rtol=rtol, atol=atol, mxstep=mxstep
)
return trajectory[-1]
def _gradient_flow_fwd(loss_fn, rtol, atol, mxstep, init_params, inputs, labels, t_final):
M, N = inputs.shape[0], init_params.shape[0]
gram = jnp.dot(inputs, inputs.T)
init_logits = jnp.matmul(inputs, init_params.T)
diag_indices = jnp.diag_indices(M)
diag_indices_interlaced = (diag_indices[0], slice(None), diag_indices[1])
def _dynamics(state, _):
preds = nn.softmax(init_logits - jnp.matmul(gram, state.s), axis=-1)
A = (vmap(jnp.diag)(preds) - vmap(jnp.outer)(preds, preds)) / M
# Update of B
cross_prod = jnp.einsum('ikn,im,mjnl->ijkl', A, gram, state.B)
dB = ops.index_add(-cross_prod, diag_indices, A,
indices_are_sorted=True, unique_indices=True)
# Update of s
ds = (preds - labels) / M
# Update of z
cross_prod = jnp.einsum('iln,ik,kmjn->imjl', A, gram, state.z)
As = jnp.einsum('ikl,ml->imk', A, state.s)
dz = ops.index_add(cross_prod, diag_indices, As,
indices_are_sorted=True, unique_indices=True)
dz = ops.index_add(dz, diag_indices_interlaced, As,
indices_are_sorted=True, unique_indices=True)
return GradientFlowState(B=dB, s=ds, z=-dz)
init_state = GradientFlowState(
B=jnp.zeros((M, M, N, N)),
s=jnp.zeros((M, N)),
z=jnp.zeros((M, M, M, N))
)
trajectory = ode.odeint(
jit(_dynamics), init_state,
jnp.asarray([0., t_final], dtype=jnp.float32),
rtol=rtol, atol=atol, mxstep=mxstep
)
final_state = tree_util.tree_map(lambda x: x[-1], trajectory)
final_params = init_params - jnp.matmul(final_state.s.T, inputs)
return final_params, (init_params, inputs, labels, final_state, final_params)
def _gradient_flow_bwd(loss_fn, rtol, atol, mxstep, res, grads_test):
init_params, inputs, labels, state, params = res
grads_train, _ = grad(loss_fn, has_aux=True)(params, inputs, labels)
# Projections
inputs_grads_test = jnp.matmul(inputs, grads_test.T)
C = jnp.einsum('ik,ijkl->jl', inputs_grads_test, state.B)
grads_params = grads_test - jnp.matmul(C.T, inputs)
D = jnp.einsum('ik,imjk->jm', inputs_grads_test, state.z)
grads_inputs = -(jnp.matmul(state.s, grads_test)
+ jnp.matmul(C, init_params) + jnp.matmul(D, inputs))
grads_t_final = -jnp.vdot(grads_train, grads_test)
return (grads_params, grads_inputs, None, grads_t_final)
_gradient_flow.defvjp(_gradient_flow_fwd, _gradient_flow_bwd)
|
python
|
"""
Crie um programa que aprove um emprestimo bancário, onde o programa leia:
Valor da Casa / salário da pessoa / quantos anos será o pagamento
Calcule o valor da prestação mensal, sabendo que ela não pode ser superior a 30% da renda da pessoa, se passar o
emprestimo será negado
"""
import time
valor_casa = float(input('Valor do imóvel que deseja comprar: '))
salario = float(input('Qual o salário do pagador: '))
anos_pagamento = int(input('Quantos anos para pagar: '))
meses_pagamento = int(input('Quantos meses para pagamento: '))
tempo_pagamento = anos_pagamento * 12 + meses_pagamento
prestacao = valor_casa / tempo_pagamento
print('\nValor do imóvel de R$ {:.2f}, salário R$ {:.2f}, tempo do emprestimo de {} meses.\n'.format(valor_casa, salario, tempo_pagamento))
time.sleep(3)
if prestacao > salario * 0.3:
print('Infelizmente o empréstimo não pode ser concedido, a prestação supera {}{}{} da renda mensal.'.format('\033[36m', '30%', '\033[m'))
else:
print('Podemos conceder o empréstimo para o senhor!!!')
print('A parte da renda que será comprometida é de {}{:.1%}{}.'.format('\033[31m', (prestacao/salario), '\033[m'))
|
python
|
"""Core module for own metrics implementation"""
from sklearn.metrics import mean_squared_error
import numpy as np
def rmse(y, y_pred):
return np.sqrt(mean_squared_error(y, y_pred))
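# Minimal usage sketch (illustrative values only): the squared errors below are
# 0.25, 0.0 and 0.25, their mean is 1/6, so the RMSE is roughly 0.408.
if __name__ == "__main__":
    print(rmse([3.0, 2.0, 1.0], [2.5, 2.0, 1.5]))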
|
python
|
from django.contrib import admin
from .models import Ballot, Candidate, SubElection, Election, Image, ElectionUser
class CandidateAdmin(admin.StackedInline):
model = Candidate
extra = 0
class SubElectionAdmin(admin.ModelAdmin):
model = SubElection
inlines = [
CandidateAdmin,
]
list_filter = ('election',)
admin.site.register(Ballot)
admin.site.register(SubElection, SubElectionAdmin)
admin.site.register(Election)
admin.site.register(Image)
admin.site.register(ElectionUser)
|
python
|
""" Defines the Note repository """
from models import Note
class NoteRepository:
""" The repository for the note model """
@staticmethod
def get(user_first_name, user_last_name, movie):
""" Query a note by last and first name of the user and the movie's title"""
return Note.query.filter_by(user_first_name=user_first_name, user_last_name=user_last_name, movie=movie).one()
def update(self, user_first_name, user_last_name, movie, note):
""" Update a note """
notation = self.get(user_first_name, user_last_name, movie)
notation.note = note
return notation.save()
@staticmethod
def create(user_first_name, user_last_name, movie, note):
""" Create a new note """
notation = Note(user_first_name=user_first_name, user_last_name=user_last_name, movie=movie, note=note)
return notation.save()
class NoteAllRepository:
@staticmethod
def get(movie):
return Note.query.filter_by(movie=movie).all()
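# Hypothetical usage sketch (assumes the `Note` model imported above exposes a
# `save()` helper, as the repositories imply; names are illustrative):
#     NoteRepository.create("Ada", "Lovelace", "Metropolis", 4)
#     NoteRepository().update("Ada", "Lovelace", "Metropolis", 5)
#     NoteAllRepository.get("Metropolis")  # -> all Note rows for the movie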
|
python
|
prefix = '14IDA:shutter_auto_enable2'
description = 'Shutter 14IDC auto'
target = 0.0
|
python
|
"""Pipeline subclass for all multiclass classification pipelines."""
from evalml.pipelines.classification_pipeline import ClassificationPipeline
from evalml.problem_types import ProblemTypes
class MulticlassClassificationPipeline(ClassificationPipeline):
"""Pipeline subclass for all multiclass classification pipelines.
Args:
component_graph (ComponentGraph, list, dict): ComponentGraph instance, list of components in order, or dictionary of components.
Accepts strings or ComponentBase subclasses in the list.
Note that when duplicate components are specified in a list, the duplicate component names will be modified with the
component's index in the list. For example, the component graph
[Imputer, One Hot Encoder, Imputer, Logistic Regression Classifier] will have names
["Imputer", "One Hot Encoder", "Imputer_2", "Logistic Regression Classifier"]
parameters (dict): Dictionary with component names as keys and dictionary of that component's parameters as values.
An empty dictionary or None implies using all default values for component parameters. Defaults to None.
custom_name (str): Custom name for the pipeline. Defaults to None.
random_seed (int): Seed for the random number generator. Defaults to 0.
Example:
>>> pipeline = MulticlassClassificationPipeline(component_graph=["Simple Imputer", "Logistic Regression Classifier"],
... parameters={"Logistic Regression Classifier": {"penalty": "elasticnet",
... "solver": "liblinear"}},
... custom_name="My Multiclass Pipeline")
...
>>> assert pipeline.custom_name == "My Multiclass Pipeline"
>>> assert pipeline.component_graph.component_dict.keys() == {'Simple Imputer', 'Logistic Regression Classifier'}
The pipeline parameters will be chosen from the default parameters for every component, unless specific parameters
were passed in as they were above.
>>> assert pipeline.parameters == {
... 'Simple Imputer': {'impute_strategy': 'most_frequent', 'fill_value': None},
... 'Logistic Regression Classifier': {'penalty': 'elasticnet',
... 'C': 1.0,
... 'n_jobs': -1,
... 'multi_class': 'auto',
... 'solver': 'liblinear'}}
"""
problem_type = ProblemTypes.MULTICLASS
"""ProblemTypes.MULTICLASS"""
|
python
|
import os
import sys
import time
import random
import string
import datetime
import concurrent.futures
# Import function from module
from .program_supplementals import enter_key_only, exception_translator
# Import function from 3rd party module
from netmiko import ConnectHandler
def file_output(ssh_results, ssh_success, ssh_failed):
# Get the current path of the running Python file
current_path = os.path.dirname(os.path.realpath(__file__))
    # Prompt the user for the target output path
target_path = input("\nEnter the target path or leave it blank to set the default path [" + current_path + "]: ")
# If target_path is blank, fill it with a default directory name
if bool(target_path == ""):
target_path = "Malas_SSH_outputs"
try:
# Create a new directory if not exists yet on the target path to contains all SSH output file(s)
if bool(os.path.exists(target_path)) == False:
os.makedirs(target_path)
# Loop for every result in the list
for ssh_result in ssh_results:
# Give a unique key for the output file
unique_key = "".join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6))
# Get the current date and time
present = datetime.datetime.now().strftime("_on_%Y-%m-%d_at_%H.%M")
# Merge target path with the file name and its extension
complete_path = os.path.join(target_path, ssh_result[0] + present + "_[" + unique_key + "].txt")
# Open the file with write permission
with open(complete_path, "w") as file:
# Write the SSH outputs to the file
file.write("%s" % ssh_result[1])
# SSH attempt results
print("\nSSH remote configuration success: " + str(ssh_success) + " host(s)")
print("SSH remote configuration failed: " + str(ssh_failed) + " host(s)")
# target_path is the default directory name
if bool(target_path == "Malas_SSH_outputs"):
print("\nPASS: The SSH output file(s) are stored in the path \'" + current_path + "\' inside the directory \'" + target_path + "\' successfully")
# target_path is user-defined
else:
print("\nPASS: The SSH output file(s) are stored in the path \'" + target_path + "\' successfully")
print("EXIT: Please review the SSH output file(s) to confirm the configured configuration, thank you!")
except:
# Execute exception_translator
exception_explained = exception_translator()
# Print the raised exception error messages values
print("\nFAIL: " + exception_explained[0] + ":\n" + exception_explained[1])
# Repeat execute file_output and then pass these values
file_output(ssh_results, ssh_success, ssh_failed)
def thread_processor(threads):
# Initial variables
ssh_results = []
ssh_success = 0
ssh_failed = 0
# Loop for every result from ssh-threading process
for thread in threads:
# Store the thread results values
ssh_result = thread.result()
# Failed SSH attempts contain 2 values in tuple formats
if isinstance(ssh_result[1], tuple):
# Merge raised exception error name and explanation
result_concatenated = "FAIL: " + ssh_result[1][0] + "\n\n" + ssh_result[1][1]
# Store the raised exception error messages values in the same index
ssh_results.append((ssh_result[0], result_concatenated))
# Increment of failed SSH attempts
ssh_failed += 1
else:
# Store the raised exception error messages values
ssh_results.append(ssh_result)
# Increment of success SSH attempts
ssh_success += 1
try:
# Execute user confirmation to create output file(s)
print("\nPress \'Enter\' to create the SSH output file(s) or \'CTRL+C\' to end the program", end = "", flush = True)
# Expect the user to press Enter key
enter_key_only()
# Execute file_output
file_output(ssh_results, ssh_success, ssh_failed)
# Stop process by keyboard (e.g. CTRL+C)
except KeyboardInterrupt:
# SSH attempt results
print("\n\nSSH remote configuration success: " + str(ssh_success) + " host(s)")
print("SSH remote configuration failed: " + str(ssh_failed) + " host(s)")
print("\nEXIT: Please review the SSH outputs to confirm the configured configuration, thank you!")
# Exit program
sys.exit()
def output_processor(output, command, stopwatch):
# Remote configuration stopwatch end
ssh_processed = "\'%.2f\'" % (time.time() - stopwatch) + " secs"
# Process the output according to its command type
if command == "send_command":
# No output process
final_output = output
elif command == "send_config_set":
# Split output into a list
disintegrate_output = output.split("\n")
# Remove the unnecessary lines
final_output = "\n".join(disintegrate_output[1:-1])
# Pass these values
return final_output, ssh_processed
def connection_ssh(dev, cmd, gdf, ip, usr, pwd, cfg):
# Strip newline at the end of device type, command type, IP address, username, and password
device = dev.rstrip("\n")
command = cmd.rstrip("\n")
ip_addr = ip.rstrip("\n")
username = usr.rstrip("\n")
password = pwd.rstrip("\n")
try:
# Remote configuration stopwatch start
stopwatch = time.time()
# Define the device type, the credential information, and the delay value to log in to the remote host
session = {
"device_type": device,
"host": ip_addr,
"username": username,
"password": password,
"global_delay_factor": gdf
}
# SSH to the remote host
remote = ConnectHandler(**session)
# Execute every command in the configuration file according to its command type
if command == "send_command":
output = remote.send_command(cfg)
            # Execute output_processor and retrieve values
final_output, ssh_processed = output_processor(output, command, stopwatch)
elif command == "send_config_set":
output = remote.send_config_set(cfg)
            # Execute output_processor and retrieve values
final_output, ssh_processed = output_processor(output, command, stopwatch)
# Output's bracket and print the output
print("\n\n \ Remote host \'" + ip_addr + "\' processed for " + ssh_processed + "\n \___________________________________________________________________\n\n" + final_output, end="")
# Pass values to threading result
return ip_addr, final_output
except:
# Execute exception_translator
exception_explained = exception_translator()
# Output's bracket and print the output
print("\n\n \ Remote host \'" + ip_addr + "\' failed to configure\n \___________________________________________________________________\n\nFAIL: " + exception_explained[0] + "\n\n" + exception_explained[1], end = "")
# Pass values to threading result
return ip_addr, exception_explained
def connection_futures(device, command, delay, ip_addr_list, username_list, password_list, command_list):
    # Execute connection_ssh concurrently for every host; progress dots are printed as threads are submitted
print("\nConcurrently configuring per", min(32, os.cpu_count() + 4), "hosts. Please wait", end = "", flush = True)
# SSH-threading stopwatch start
threading_start = time.time()
# Suppress raised exception error messages outputs
sys.stderr = os.devnull
# SSH-threading process
with concurrent.futures.ThreadPoolExecutor() as executor:
# Initial variables
threads = []
ssh_attempts = 0
# Loop for every IP address, username, and password in the list
for ip_addr, username, password in zip(ip_addr_list, username_list, password_list):
# Increment of SSH attempts
ssh_attempts += 1
# Execute configuration over SSH for every IP address, username, and password in the list concurrently
threads.append(executor.submit(connection_ssh, dev = device, cmd = command, gdf = delay, ip = ip_addr, usr = username, pwd = password, cfg = command_list))
# Progress dot
print(".", end = "", flush = True)
# Unsuppress raised exception error messages outputs
sys.stderr = sys.__stderr__
print("\n\n \ Completed")
print(" \___________________________________________________________________\n")
    # SSH attempt results and SSH-threading stopwatch end
print("SSH-threading for " + str(ssh_attempts) + " host(s) processed for:", "%.2f" % (time.time() - threading_start), "secs")
# Execute thread_processor
thread_processor(threads)
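# Hypothetical call sketch (all argument values are illustrative; the real
# values are collected elsewhere in the program):
#     connection_futures(
#         "cisco_ios", "send_command", 1,
#         ["192.0.2.10\n"], ["admin\n"], ["secret\n"],
#         "show version",
#     )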
|
python
|
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import os
import json
import random
try:
# python <= 2.7
TYPE_TEXT_STRING = (str, unicode)
except NameError:
TYPE_TEXT_STRING = (str, )
try:
from unittest import mock
from unittest.mock import Mock
except ImportError:
# python < 3.3
import mock
from mock import Mock
from azure.core.exceptions import (
HttpResponseError,
ResourceNotFoundError,
ClientAuthenticationError,
ServiceResponseError
)
from azure.cognitiveservices.inkrecognizer import (
InkStrokeKind,
InkRecognitionUnitKind,
ShapeKind,
InkPointUnit,
ApplicationKind,
ServiceVersion
)
from azure.cognitiveservices.inkrecognizer import InkRecognizerClient
from azure.cognitiveservices.inkrecognizer import (
Point,
Rectangle,
InkRecognitionUnit,
InkBullet,
InkDrawing,
Line,
Paragraph,
InkWord,
WritingRegion,
ListItem,
InkRecognitionRoot
)
RAISE_ONLINE_TEST_ERRORS = False
URL = ""
CREDENTIAL = Mock(name="FakeCredential", get_token="token")
def online_test(func):
def wrapper(*args, **kw):
if URL == "" or isinstance(CREDENTIAL, Mock):
if RAISE_ONLINE_TEST_ERRORS:
raise ValueError("Please fill URL and CREDENTIAL before running online tests.")
else:
return
return func(*args, **kw)
return wrapper
def fake_run(self, request, **kwargs):
return Mock(http_response=(json.loads(request.data), kwargs["headers"], kwargs))
def pass_response(response, config):
return response
def parse_result(result_filename):
json_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "test_data", result_filename)
client = InkRecognizerClient(URL, CREDENTIAL)
with open(json_path, "r") as f:
raw_recognition_result = f.read()
response = Mock(status_code=200, headers={}, body=lambda: raw_recognition_result.encode("utf-8"))
with mock.patch.object(client, "_send_request", lambda *args, **kw: response):
root = client.recognize_ink([])
return root
class TestClient:
def test_set_azure_general_arguments(self):
def pipeline_client_checker(base_url, transport, config):
assert base_url == URL
assert config.logging_policy.enable_http_logger is True
assert config.retry_policy.total_retries == 3
from azure.core.pipeline.transport import HttpTransport
assert isinstance(transport, HttpTransport)
def fake_pipeline_client_constructor(*args, **kw):
pipeline_client_checker(kw["base_url"], kw["transport"], kw["config"])
with mock.patch("azure.core.PipelineClient.__init__", fake_pipeline_client_constructor):
InkRecognizerClient(URL,
CREDENTIAL,
logging_enable=True,
retry_total=3)
def test_set_ink_recognizer_arguments(self):
client = InkRecognizerClient(URL,
CREDENTIAL,
application_kind=ApplicationKind.DRAWING,
ink_point_unit=InkPointUnit.INCH,
language="zh-cn",
unit_multiple=2.5)
with mock.patch.object(client, "_parse_result", pass_response):
with mock.patch("azure.core.pipeline.Pipeline.run", fake_run):
request_json, headers, kwargs = client.recognize_ink([])
# check ink recognizer arguments
assert request_json["applicationType"] == ApplicationKind.DRAWING.value
assert request_json["unit"] == InkPointUnit.INCH.value
assert request_json["language"] == "zh-cn"
assert request_json["unitMultiple"] == 2.5
def test_set_arguments_in_request(self):
client = InkRecognizerClient(URL,
CREDENTIAL,
application_kind=ApplicationKind.DRAWING,
language="zh-cn")
with mock.patch.object(client, "_parse_result", pass_response):
with mock.patch("azure.core.pipeline.Pipeline.run", fake_run):
request_json, headers, kwargs = client.recognize_ink(
[],
application_kind=ApplicationKind.WRITING,
language = "en-gb",
client_request_id="random_id",
headers={"test_header": "test_header_result"},
timeout=10,
total_retries=5)
# check ink recognizer arguments
assert request_json["applicationType"] == ApplicationKind.WRITING.value
assert request_json["language"] == "en-gb"
# check azure general arguments
assert headers["test_header"] == "test_header_result"
assert headers["x-ms-client-request-id"] == "random_id"
assert kwargs["connection_timeout"] == 10
assert kwargs["total_retries"] == 5
def test_consume_ink_stroke_list(self):
point = Mock(x=0, y=0)
stroke = Mock(id=0, points=[point], language="python", kind=InkStrokeKind.DRAWING)
ink_stroke_list = [stroke] * 3
client = InkRecognizerClient(URL, CREDENTIAL)
with mock.patch.object(client, "_parse_result", pass_response):
with mock.patch("azure.core.pipeline.Pipeline.run", fake_run):
request_json, headers, kwargs = client.recognize_ink(ink_stroke_list)
# check number of strokes, point values and other features
assert len(request_json["strokes"]) == 3
for s in request_json["strokes"]:
assert len(s["points"]) == 1
assert s["points"][0]["x"] == 0
assert s["points"][0]["y"] == 0
assert s["id"] == 0
assert s["language"] == "python"
assert s["kind"] == InkStrokeKind.DRAWING.value
def test_parse_http_response(self):
client = InkRecognizerClient(URL, CREDENTIAL)
# 401: ClientAuthenticationError
response = Mock(status_code=401, headers={}, body=lambda: "HTTP STATUS: 401".encode("utf-8"))
with mock.patch.object(client, "_send_request", lambda *args, **kw: response):
try:
root = client.recognize_ink([])
except ClientAuthenticationError:
pass # expected
else:
raise AssertionError("Should raise ClientAuthenticationError here")
# 404: ResourceNotFoundError
response = Mock(status_code=404, headers={}, body=lambda: "HTTP STATUS: 404".encode("utf-8"))
with mock.patch.object(client, "_send_request", lambda *args, **kw: response):
try:
root = client.recognize_ink([])
except ResourceNotFoundError:
pass # expected
else:
raise AssertionError("Should raise ResourceNotFoundError here")
# valid response from server
json_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "test_data", "hello_world_result.json")
with open(json_path, "r") as f:
recognition_json = f.read()
response = Mock(status_code=200, headers={}, body=lambda: recognition_json.encode("utf-8"))
with mock.patch.object(client, "_send_request", lambda *args, **kw: response):
root = client.recognize_ink([]) # should pass. No need to check result.
# invalid response from server
jobj = json.loads(recognition_json)
jobj["recognitionUnits"].append("random_string")
invalid_recognition_json = json.dumps(jobj)
response = Mock(status_code=200, headers={}, body=lambda: invalid_recognition_json.encode("utf-8"))
with mock.patch.object(client, "_send_request", lambda *args, **kw: response):
try:
root = client.recognize_ink([])
except ServiceResponseError:
pass # expected
else:
raise AssertionError("Should raise ServiceResponseError here")
class TestModels:
def test_unit_ink_recognition_unit(self):
root = parse_result("hello_world_result.json")
units = root._units
assert len(units) > 0
for unit in units:
assert isinstance(unit.id, int)
assert isinstance(unit.bounding_box, Rectangle)
assert isinstance(unit.rotated_bounding_box, list)
assert isinstance(unit.stroke_ids, list)
assert isinstance(unit.children, list)
assert isinstance(unit.parent, (InkRecognitionUnit, InkRecognitionRoot))
for point in unit.rotated_bounding_box:
assert isinstance(point, Point)
for stroke_id in unit.stroke_ids:
assert isinstance(stroke_id, int)
for child in unit.children:
assert isinstance(child, InkRecognitionUnit)
def test_unit_ink_bullet(self):
root = parse_result("list_result.json")
bullets = root.ink_bullets
assert len(bullets) > 0
for bullet in bullets:
assert bullet.kind == InkRecognitionUnitKind.INK_BULLET
assert isinstance(bullet.recognized_text, TYPE_TEXT_STRING)
assert isinstance(bullet.parent, Line)
assert len(bullet.children) == 0
def test_unit_ink_drawing(self):
root = parse_result("drawings_result.json")
drawings = root.ink_drawings
assert len(drawings) > 0
for drawing in drawings:
assert drawing.kind == InkRecognitionUnitKind.INK_DRAWING
assert isinstance(drawing.center, Point)
assert isinstance(drawing.confidence, (int, float))
assert isinstance(drawing.recognized_shape, ShapeKind)
assert isinstance(drawing.rotated_angle, (int, float))
assert isinstance(drawing.points, list)
assert isinstance(drawing.alternates, list)
for point in drawing.points:
assert isinstance(point, Point)
for alt in drawing.alternates:
assert isinstance(alt, InkDrawing)
assert alt.alternates == []
assert isinstance(drawing.parent, InkRecognitionRoot)
assert len(drawing.children) == 0
def test_unit_line(self):
root = parse_result("hello_world_result.json")
lines = root.lines
assert len(lines) > 0
for line in lines:
assert line.kind == InkRecognitionUnitKind.LINE
assert isinstance(line.recognized_text, TYPE_TEXT_STRING)
assert isinstance(line.alternates, list)
for alt in line.alternates:
assert isinstance(alt, TYPE_TEXT_STRING)
assert isinstance(line.parent, (Paragraph, ListItem))
for child in line.children:
assert isinstance(child, (InkBullet, InkWord))
def test_unit_paragraph(self):
root = parse_result("list_result.json")
paragraphs = root.paragraphs
assert len(paragraphs) > 0
for paragraph in paragraphs:
assert paragraph.kind == InkRecognitionUnitKind.PARAGRAPH
assert isinstance(paragraph.recognized_text, TYPE_TEXT_STRING)
assert isinstance(paragraph.parent, WritingRegion)
for child in paragraph.children:
assert isinstance(child, (Line, ListItem))
def test_unit_ink_word(self):
root = parse_result("hello_world_result.json")
words = root.ink_words
assert len(words) > 0
for word in words:
assert word.kind == InkRecognitionUnitKind.INK_WORD
assert isinstance(word.recognized_text, TYPE_TEXT_STRING)
assert isinstance(word.alternates, list)
for alt in word.alternates:
assert isinstance(alt, TYPE_TEXT_STRING)
assert isinstance(word.parent, Line)
assert len(word.children) == 0
def test_unit_writing_region(self):
root = parse_result("list_result.json")
writing_regions = root.writing_regions
assert len(writing_regions) > 0
for writing_region in writing_regions:
assert writing_region.kind == InkRecognitionUnitKind.WRITING_REGION
assert isinstance(writing_region.recognized_text, TYPE_TEXT_STRING)
assert isinstance(writing_region.parent, InkRecognitionRoot)
for child in writing_region.children:
assert isinstance(child, Paragraph)
def test_unit_list_item(self):
root = parse_result("list_result.json")
list_items = root.list_items
assert len(list_items) > 0
for list_item in list_items:
assert list_item.kind == InkRecognitionUnitKind.LIST_ITEM
assert isinstance(list_item.recognized_text, TYPE_TEXT_STRING)
assert isinstance(list_item.parent, Paragraph)
for child in list_item.children:
assert isinstance(child, Line)
class TestSendRequests:
@online_test
def test_recognize_ink_with_empty_ink_stroke_list(self):
client = InkRecognizerClient(URL, CREDENTIAL)
root = client.recognize_ink([])
words = root.ink_words
assert not words
drawings = root.ink_drawings
assert not drawings
bullets = root.ink_bullets
assert not bullets
@online_test
def test_recognize_ink(self):
points = []
for i in range(10):
points.append(Mock(x=i, y=i))
stroke = Mock(id=i, points=points, language="en-US")
ink_stroke_list = [stroke]
client = InkRecognizerClient(URL, CREDENTIAL)
root = client.recognize_ink(ink_stroke_list)
words = root.ink_words
drawings = root.ink_drawings
bullets = root.ink_bullets
assert len(words) + len(drawings) + len(bullets) > 0
|
python
|
"""
Module containing character class for use
within world.
"""
from abc import ABC
from .. import entity
class Character(entity.Entity):
"""
Abstract class representing a character within a world.
"""
pass
if __name__ == "__main__":
pass
|
python
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Convert raw PASCAL dataset to TFRecord for object_detection.
Example usage:
python object_detection/dataset_tools/create_pascal_tf_record.py \
--data_dir=/home/user/VOCdevkit \
--output_dir=/home/user
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import io
import logging
import os
from lxml import etree
import PIL.Image
import tensorflow as tf
import glob
import random
import dataset_util
import xml.etree.ElementTree as ET
flags = tf.app.flags
flags.DEFINE_string(
'data_dir', '', 'Root directory to raw PASCAL VOC dataset.')
flags.DEFINE_string('images_dir', 'images',
'Name of images directory.')
flags.DEFINE_string('annotations_dir', 'xml',
'Name of annotations directory.')
flags.DEFINE_string('output_dir', '', 'Path to output TFRecord')
# flags.DEFINE_integer(
# 'ratio', '7', 'Ratio to split data to train set and val set. Default is train 7/ val 3')
flags.DEFINE_boolean('ignore_difficult_instances', False, 'Whether to ignore '
'difficult instances')
FLAGS = flags.FLAGS
def dict_to_tf_example(data,
image_path,
label_map_dict,
ignore_difficult_instances=False,
image_subdirectory='images'):
"""Convert XML derived dict to tf.Example proto.
Notice that this function normalizes the bounding box coordinates provided
by the raw data.
Args:
data: dict holding PASCAL XML fields for a single image (obtained by
running dataset_util.recursive_parse_xml_to_dict)
image_path: Full path to image file
label_map_dict: A map from string label names to integers ids.
ignore_difficult_instances: Whether to skip difficult instances in the
dataset (default: False).
image_subdirectory: String specifying subdirectory within the
PASCAL dataset directory holding the actual image data.
Returns:
example: The converted tf.Example.
Raises:
ValueError: if the image pointed to by data['filename'] is not a valid JPEG
"""
# img_path = os.path.join(
# data['folder'], image_subdirectory, data['filename'])
# full_path = os.path.join(dataset_directory, img_path)
full_path = image_path
with tf.gfile.GFile(full_path, 'rb') as fid:
encoded_jpg = fid.read()
encoded_jpg_io = io.BytesIO(encoded_jpg)
image = PIL.Image.open(encoded_jpg_io)
if image.format != 'JPEG':
raise ValueError('Image format not JPEG')
key = hashlib.sha256(encoded_jpg).hexdigest()
width = int(data['size']['width'])
height = int(data['size']['height'])
filename = full_path.split('/')[-1]
xmin = []
ymin = []
xmax = []
ymax = []
classes = []
classes_text = []
truncated = []
poses = []
difficult_obj = []
if 'object' in data:
for obj in data['object']:
difficult = False # bool(int(obj['difficult']))
if ignore_difficult_instances and difficult:
continue
if obj['name'] not in label_map_dict:
continue
difficult_obj.append(int(difficult))
xmin.append(float(obj['bndbox']['xmin']) / width)
ymin.append(float(obj['bndbox']['ymin']) / height)
xmax.append(float(obj['bndbox']['xmax']) / width)
ymax.append(float(obj['bndbox']['ymax']) / height)
classes_text.append(obj['name'].encode('utf8'))
classes.append(label_map_dict[obj['name']])
# truncated.append(int(obj['truncated']))
truncated.append(0)
# poses.append(obj['pose'].encode('utf8'))
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': dataset_util.int64_feature(height),
'image/width': dataset_util.int64_feature(width),
'image/filename': dataset_util.bytes_feature(
filename.encode('utf8')),
'image/source_id': dataset_util.bytes_feature(
filename.encode('utf8')),
'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),
'image/encoded': dataset_util.bytes_feature(encoded_jpg),
'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),
'image/object/bbox/xmin': dataset_util.float_list_feature(xmin),
'image/object/bbox/xmax': dataset_util.float_list_feature(xmax),
'image/object/bbox/ymin': dataset_util.float_list_feature(ymin),
'image/object/bbox/ymax': dataset_util.float_list_feature(ymax),
'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
'image/object/class/label': dataset_util.int64_list_feature(classes),
'image/object/difficult': dataset_util.int64_list_feature(difficult_obj),
'image/object/truncated': dataset_util.int64_list_feature(truncated),
'image/object/view': dataset_util.bytes_list_feature(poses),
}))
return example
def background_tf_example(
image_path,
):
"""
Args:
image_path: Full path to image file
Returns:
example: The converted tf.Example.
"""
full_path = image_path
with tf.gfile.GFile(full_path, 'rb') as fid:
encoded_jpg = fid.read()
encoded_jpg_io = io.BytesIO(encoded_jpg)
image = PIL.Image.open(encoded_jpg_io)
if image.format != 'JPEG':
raise ValueError('Image format not JPEG')
key = hashlib.sha256(encoded_jpg).hexdigest()
filename = full_path.split('/')[-1]
width = image.width
height = image.height
xmin = []
ymin = []
xmax = []
ymax = []
classes = []
classes_text = []
truncated = []
poses = []
difficult_obj = []
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': dataset_util.int64_feature(height),
'image/width': dataset_util.int64_feature(width),
'image/filename': dataset_util.bytes_feature(
filename.encode('utf8')),
'image/source_id': dataset_util.bytes_feature(
filename.encode('utf8')),
'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),
'image/encoded': dataset_util.bytes_feature(encoded_jpg),
'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),
'image/object/bbox/xmin': dataset_util.float_list_feature(xmin),
'image/object/bbox/xmax': dataset_util.float_list_feature(xmax),
'image/object/bbox/ymin': dataset_util.float_list_feature(ymin),
'image/object/bbox/ymax': dataset_util.float_list_feature(ymax),
'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
'image/object/class/label': dataset_util.int64_list_feature(classes),
'image/object/difficult': dataset_util.int64_list_feature(difficult_obj),
'image/object/truncated': dataset_util.int64_list_feature(truncated),
'image/object/view': dataset_util.bytes_list_feature(poses),
}))
return example
def create_tf_record(images_path, output_path, images_dir_name='images', annotation_dir_name='xml'):
# label_map_dict = {
# "person": 1,
# "face": 2
# }
label_map_dict = {'person': 1, 'face': 2, 'potted plant': 3, 'tvmonitor': 4, 'chair': 5, 'microwave': 6, 'refrigerator': 7, 'book': 8, 'clock': 9, 'vase': 10, 'dining table': 11, 'bear': 12, 'bed': 13, 'stop sign': 14, 'truck': 15, 'car': 16, 'teddy bear': 17, 'skis': 18, 'oven': 19, 'sports ball': 20, 'baseball glove': 21, 'tennis racket': 22, 'handbag': 23, 'backpack': 24, 'bird': 25, 'boat': 26, 'cell phone': 27, 'train': 28, 'sandwich': 29, 'bowl': 30, 'surfboard': 31, 'laptop': 32, 'mouse': 33, 'keyboard': 34, 'bus': 35, 'cat': 36, 'airplane': 37, 'zebra': 38, 'tie': 39, 'traffic light': 40, 'apple': 41, 'baseball bat': 42, 'knife': 43, 'cake': 44, 'wine glass': 45, 'cup': 46, 'spoon': 47, 'banana': 48, 'donut': 49, 'sink': 50, 'toilet': 51, 'broccoli': 52, 'skateboard': 53, 'fork': 54, 'carrot': 55, 'couch': 56, 'remote': 57, 'scissors': 58, 'bicycle': 59, 'sheep': 60, 'bench': 61, 'bottle': 62, 'orange': 63, 'elephant': 64, 'motorcycle': 65, 'horse': 66, 'hot dog': 67, 'frisbee': 68, 'umbrella': 69, 'dog': 70, 'kite': 71, 'pizza': 72, 'fire hydrant': 73, 'suitcase': 74, 'cow': 75, 'giraffe': 76, 'snowboard': 77, 'parking meter': 78, 'toothbrush': 79, 'toaster': 80, 'hair drier': 81, 'pottedplant': 82, 'sofa': 83, 'diningtable': 84, 'motorbike': 85, 'aeroplane': 86}
logging.info('Creating {}'.format(output_path))
writer = tf.python_io.TFRecordWriter(output_path)
for idx in range(len(images_path)):
if idx % 100 == 0:
logging.info('On image %d of %d', idx, len(images_path))
# xml_path = xmls_path[idx]
image_path = images_path[idx]
xml_path = image_path.replace(
'/{}/'.format(images_dir_name), '/{}/'.format(annotation_dir_name))
xml_path = xml_path.replace('.jpg', '.xml')
if os.path.exists(xml_path):
# print(xml_path)
tree = ET.parse(xml_path)
xml = tree.getroot()
data = dataset_util.recursive_parse_xml_to_dict(xml)['annotation']
tf_example = dict_to_tf_example(data, image_path, label_map_dict)
writer.write(tf_example.SerializeToString())
else:
continue
tf_example = background_tf_example(image_path)
writer.write(tf_example.SerializeToString())
writer.close()
def main(_):
data_dir = FLAGS.data_dir
# load list image files and xml files
images_dir = os.path.join(data_dir, FLAGS.images_dir)
print(data_dir)
print(images_dir)
images_path = glob.glob(os.path.join(images_dir, '*.jpg'))
random.seed(42)
random.shuffle(images_path)
# set_name = data_dir.split(os.sep)[-1]
if str(data_dir).endswith(os.sep):
set_name = os.path.split(data_dir)[-2]
else:
set_name = os.path.split(data_dir)[-1]
print("dataset contain: {} images".format(len(images_path)))
tfrecord_path = os.path.join(FLAGS.output_dir, '{}.record'.format(set_name))
print('saved data at: ', tfrecord_path)
create_tf_record(images_path, tfrecord_path, images_dir_name=FLAGS.images_dir, annotation_dir_name=FLAGS.annotations_dir)
if __name__ == '__main__':
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
tf.app.run()
|
python
|
import unittest
from pygments import lexers, token
from gviewer.util import pygmentize, _join
class TestUtil(unittest.TestCase):
def test_pygmentize(self):
python_content = """
import unittest
class Pygmentize(object):
pass"""
result = pygmentize(python_content, lexers.PythonLexer())
self.assertEqual(len(result), 4)
self.assertIn(
(token.Token.Keyword.Namespace, u'import'),
result[0])
self.assertIn(
(token.Token.Name.Namespace, u'unittest'),
result[0])
self.assertEqual(result[1], u"")
self.assertIn(
(token.Token.Keyword, u'class'),
result[2])
self.assertIn(
(token.Token.Name.Class, u'Pygmentize'),
result[2])
self.assertIn(
(token.Token.Keyword, u'pass'),
result[3])
def test_join(self):
result = _join([("aaa", "bbb"), ("ccc", "ddd")], "\n")
self.assertEqual(len(result), 1)
self.assertEqual(
result[0], [("aaa", "bbb"), ("ccc", "ddd")])
|
python
|
import json
import unittest
from contextlib import contextmanager
@contextmanager
def mock_stderr():
from cStringIO import StringIO
import sys
_stderr = sys.stderr
sys.stderr = StringIO()
try:
yield sys.stderr
finally:
sys.stderr = _stderr
class RegressionIssue109(unittest.TestCase):
"""
    Logging prints text and a traceback to stderr. The code in `utils.py` then
    cannot parse the output from daemon.py, and there are a lot of messages in
    the ST console saying `Non JSON data from daemon`.
    Should be tested:
    1. content in stderr should be valid JSON
    2. content should contain correct data
"""
def test_json_formatter_works_on_jedi_expections(self):
with mock_stderr() as stderr_mock:
from daemon import JediFacade # load class here to mock stderr
JediFacade('print "hello"', 1, 1).get('some')
stderr_content = json.loads(stderr_mock.getvalue())
self.assertEqual(stderr_content['logging'], 'error')
self.assertIn('Traceback (most recent call last):',
stderr_content['content'])
self.assertIn('JediFacade instance has no attribute \'get_some\'',
stderr_content['content'])
if __name__ == '__main__':
unittest.main()
|
python
|
'''
Skip-thought vectors
'''
from __future__ import print_function
from __future__ import division
from future import standard_library
standard_library.install_aliases()
from builtins import zip
from builtins import range
from past.utils import old_div
import os
import warnings
import theano
import theano.tensor as tensor
import pickle as pkl
import numpy
import copy
import nltk
from collections import OrderedDict, defaultdict
from scipy.linalg import norm
from nltk.tokenize import word_tokenize
profile = False
#-----------------------------------------------------------------------------#
# Specify model and table locations here
#-----------------------------------------------------------------------------#
path_to_models = 'models/'
path_to_tables = 'models/'
#-----------------------------------------------------------------------------#
path_to_umodel = path_to_models + 'uni_skip.npz'
path_to_bmodel = path_to_models + 'bi_skip.npz'
def load_model():
"""
Load the model with saved tables
"""
# Load model options
print('Loading model parameters...')
with open('%s.pkl'%path_to_umodel, 'rb') as f:
uoptions = pkl.load(f)
with open('%s.pkl'%path_to_bmodel, 'rb') as f:
boptions = pkl.load(f)
# Load parameters
uparams = init_params(uoptions)
uparams = load_params(path_to_umodel, uparams)
utparams = init_tparams(uparams)
bparams = init_params_bi(boptions)
bparams = load_params(path_to_bmodel, bparams)
btparams = init_tparams(bparams)
# Extractor functions
print('Compiling encoders...')
embedding, x_mask, ctxw2v = build_encoder(utparams, uoptions)
f_w2v = theano.function([embedding, x_mask], ctxw2v, name='f_w2v')
embedding, x_mask, ctxw2v = build_encoder_bi(btparams, boptions)
f_w2v2 = theano.function([embedding, x_mask], ctxw2v, name='f_w2v2')
# Tables
print('Loading tables...')
utable, btable = load_tables()
# Store everything we need in a dictionary
print('Packing up...')
model = {}
model['uoptions'] = uoptions
model['boptions'] = boptions
model['utable'] = utable
model['btable'] = btable
model['f_w2v'] = f_w2v
model['f_w2v2'] = f_w2v2
return model
def load_tables():
"""
Load the tables
"""
words = []
utable = numpy.load(path_to_tables + 'utable.npy', fix_imports=True, encoding='bytes')
btable = numpy.load(path_to_tables + 'btable.npy', fix_imports=True, encoding='bytes')
f = open(path_to_tables + 'dictionary.txt', 'rb')
for line in f:
words.append(line.decode('utf-8').strip())
f.close()
utable = OrderedDict(list(zip(words, utable)))
btable = OrderedDict(list(zip(words, btable)))
return utable, btable
def encode(model, X, use_norm=True, verbose=True, batch_size=128, use_eos=False):
"""
Encode sentences in the list X. Each entry will return a vector
"""
# first, do preprocessing
X = preprocess(X)
# word dictionary and init
d = defaultdict(lambda : 0)
for w in list(model['utable'].keys()):
d[w] = 1
ufeatures = numpy.zeros((len(X), model['uoptions']['dim']), dtype='float32')
bfeatures = numpy.zeros((len(X), 2 * model['boptions']['dim']), dtype='float32')
# length dictionary
ds = defaultdict(list)
captions = [s.split() for s in X]
for i,s in enumerate(captions):
ds[len(s)].append(i)
# Get features. This encodes by length, in order to avoid wasting computation
for k in list(ds.keys()):
if verbose:
print(k)
numbatches = old_div(len(ds[k]), batch_size) + 1
for minibatch in range(numbatches):
caps = ds[k][minibatch::numbatches]
if use_eos:
uembedding = numpy.zeros((k+1, len(caps), model['uoptions']['dim_word']), dtype='float32')
bembedding = numpy.zeros((k+1, len(caps), model['boptions']['dim_word']), dtype='float32')
else:
uembedding = numpy.zeros((k, len(caps), model['uoptions']['dim_word']), dtype='float32')
bembedding = numpy.zeros((k, len(caps), model['boptions']['dim_word']), dtype='float32')
for ind, c in enumerate(caps):
caption = captions[c]
for j in range(len(caption)):
if d[caption[j]] > 0:
uembedding[j,ind] = model['utable'][caption[j]]
bembedding[j,ind] = model['btable'][caption[j]]
else:
uembedding[j,ind] = model['utable']['UNK']
bembedding[j,ind] = model['btable']['UNK']
if use_eos:
uembedding[-1,ind] = model['utable']['<eos>']
bembedding[-1,ind] = model['btable']['<eos>']
if use_eos:
uff = model['f_w2v'](uembedding, numpy.ones((len(caption)+1,len(caps)), dtype='float32'))
bff = model['f_w2v2'](bembedding, numpy.ones((len(caption)+1,len(caps)), dtype='float32'))
else:
uff = model['f_w2v'](uembedding, numpy.ones((len(caption),len(caps)), dtype='float32'))
bff = model['f_w2v2'](bembedding, numpy.ones((len(caption),len(caps)), dtype='float32'))
if use_norm:
for j in range(len(uff)):
uff[j] /= norm(uff[j])
bff[j] /= norm(bff[j])
for ind, c in enumerate(caps):
ufeatures[c] = uff[ind]
bfeatures[c] = bff[ind]
features = numpy.c_[ufeatures, bfeatures]
return features
def preprocess(text):
"""
Preprocess text for encoder
"""
X = []
sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
for t in text:
sents = sent_detector.tokenize(t)
result = ''
for s in sents:
tokens = word_tokenize(s)
result += ' ' + ' '.join(tokens)
X.append(result)
return X
def nn(model, text, vectors, query, k=5):
"""
Return the nearest neighbour sentences to query
text: list of sentences
vectors: the corresponding representations for text
query: a string to search
"""
qf = encode(model, [query])
qf /= norm(qf)
scores = numpy.dot(qf, vectors.T).flatten()
sorted_args = numpy.argsort(scores)[::-1]
sentences = [text[a] for a in sorted_args[:k]]
print('QUERY: ' + query)
print('NEAREST: ')
for i, s in enumerate(sentences):
print(s, sorted_args[i])
def word_features(table):
"""
Extract word features into a normalized matrix
"""
features = numpy.zeros((len(table), 620), dtype='float32')
keys = list(table.keys())
for i in range(len(table)):
f = table[keys[i]]
features[i] = old_div(f, norm(f))
return features
def nn_words(table, wordvecs, query, k=10):
"""
Get the nearest neighbour words
"""
keys = list(table.keys())
qf = table[query]
scores = numpy.dot(qf, wordvecs.T).flatten()
sorted_args = numpy.argsort(scores)[::-1]
words = [keys[a] for a in sorted_args[:k]]
print('QUERY: ' + query)
print('NEAREST: ')
for i, w in enumerate(words):
print(w)
def _p(pp, name):
"""
make prefix-appended name
"""
return '%s_%s'%(pp, name)
def init_tparams(params):
"""
initialize Theano shared variables according to the initial parameters
"""
tparams = OrderedDict()
for kk, pp in params.items():
tparams[kk] = theano.shared(params[kk], name=kk)
return tparams
def load_params(path, params):
"""
load parameters
"""
pp = numpy.load(path)
for kk, vv in params.items():
if kk not in pp:
warnings.warn('%s is not in the archive'%kk)
continue
params[kk] = pp[kk]
return params
# layers: 'name': ('parameter initializer', 'feedforward')
layers = {'gru': ('param_init_gru', 'gru_layer')}
def get_layer(name):
fns = layers[name]
return (eval(fns[0]), eval(fns[1]))
def init_params(options):
"""
initialize all parameters needed for the encoder
"""
params = OrderedDict()
# embedding
params['Wemb'] = norm_weight(options['n_words_src'], options['dim_word'])
# encoder: GRU
params = get_layer(options['encoder'])[0](options, params, prefix='encoder',
nin=options['dim_word'], dim=options['dim'])
return params
def init_params_bi(options):
"""
    initialize all parameters needed for the bidirectional encoder
"""
params = OrderedDict()
# embedding
params['Wemb'] = norm_weight(options['n_words_src'], options['dim_word'])
# encoder: GRU
params = get_layer(options['encoder'])[0](options, params, prefix='encoder',
nin=options['dim_word'], dim=options['dim'])
params = get_layer(options['encoder'])[0](options, params, prefix='encoder_r',
nin=options['dim_word'], dim=options['dim'])
return params
def build_encoder(tparams, options):
"""
build an encoder, given pre-computed word embeddings
"""
# word embedding (source)
embedding = tensor.tensor3('embedding', dtype='float32')
x_mask = tensor.matrix('x_mask', dtype='float32')
# encoder
proj = get_layer(options['encoder'])[1](tparams, embedding, options,
prefix='encoder',
mask=x_mask)
ctx = proj[0][-1]
return embedding, x_mask, ctx
def build_encoder_bi(tparams, options):
"""
build bidirectional encoder, given pre-computed word embeddings
"""
# word embedding (source)
embedding = tensor.tensor3('embedding', dtype='float32')
embeddingr = embedding[::-1]
x_mask = tensor.matrix('x_mask', dtype='float32')
xr_mask = x_mask[::-1]
# encoder
proj = get_layer(options['encoder'])[1](tparams, embedding, options,
prefix='encoder',
mask=x_mask)
projr = get_layer(options['encoder'])[1](tparams, embeddingr, options,
prefix='encoder_r',
mask=xr_mask)
ctx = tensor.concatenate([proj[0][-1], projr[0][-1]], axis=1)
return embedding, x_mask, ctx
# some utilities
def ortho_weight(ndim):
W = numpy.random.randn(ndim, ndim)
u, s, v = numpy.linalg.svd(W)
return u.astype('float32')
def norm_weight(nin,nout=None, scale=0.1, ortho=True):
    if nout is None:
nout = nin
if nout == nin and ortho:
W = ortho_weight(nin)
else:
W = numpy.random.uniform(low=-scale, high=scale, size=(nin, nout))
return W.astype('float32')
def param_init_gru(options, params, prefix='gru', nin=None, dim=None):
"""
parameter init for GRU
"""
    if nin is None:
        nin = options['dim_proj']
    if dim is None:
        dim = options['dim_proj']
W = numpy.concatenate([norm_weight(nin,dim),
norm_weight(nin,dim)], axis=1)
params[_p(prefix,'W')] = W
params[_p(prefix,'b')] = numpy.zeros((2 * dim,)).astype('float32')
U = numpy.concatenate([ortho_weight(dim),
ortho_weight(dim)], axis=1)
params[_p(prefix,'U')] = U
Wx = norm_weight(nin, dim)
params[_p(prefix,'Wx')] = Wx
Ux = ortho_weight(dim)
params[_p(prefix,'Ux')] = Ux
params[_p(prefix,'bx')] = numpy.zeros((dim,)).astype('float32')
return params
def gru_layer(tparams, state_below, options, prefix='gru', mask=None, **kwargs):
"""
Forward pass through GRU layer
"""
nsteps = state_below.shape[0]
if state_below.ndim == 3:
n_samples = state_below.shape[1]
else:
n_samples = 1
dim = tparams[_p(prefix,'Ux')].shape[1]
    if mask is None:
mask = tensor.alloc(1., state_below.shape[0], 1)
def _slice(_x, n, dim):
if _x.ndim == 3:
return _x[:, :, n*dim:(n+1)*dim]
return _x[:, n*dim:(n+1)*dim]
state_below_ = tensor.dot(state_below, tparams[_p(prefix, 'W')]) + tparams[_p(prefix, 'b')]
state_belowx = tensor.dot(state_below, tparams[_p(prefix, 'Wx')]) + tparams[_p(prefix, 'bx')]
U = tparams[_p(prefix, 'U')]
Ux = tparams[_p(prefix, 'Ux')]
def _step_slice(m_, x_, xx_, h_, U, Ux):
preact = tensor.dot(h_, U)
preact += x_
r = tensor.nnet.sigmoid(_slice(preact, 0, dim))
u = tensor.nnet.sigmoid(_slice(preact, 1, dim))
preactx = tensor.dot(h_, Ux)
preactx = preactx * r
preactx = preactx + xx_
h = tensor.tanh(preactx)
h = u * h_ + (1. - u) * h
h = m_[:,None] * h + (1. - m_)[:,None] * h_
return h
seqs = [mask, state_below_, state_belowx]
_step = _step_slice
rval, updates = theano.scan(_step,
sequences=seqs,
outputs_info = [tensor.alloc(0., n_samples, dim)],
non_sequences = [tparams[_p(prefix, 'U')],
tparams[_p(prefix, 'Ux')]],
name=_p(prefix, '_layers'),
n_steps=nsteps,
profile=profile,
strict=True)
rval = [rval]
return rval
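
# --- Hedged usage sketch (not part of the original module) ---
# Assumes the model and table files configured at the top of this module are
# present on disk and that the theano/nltk dependencies are installed.
if __name__ == '__main__':
    skip_model = load_model()
    sentences = ['A man is playing a guitar.', 'A woman is slicing vegetables.']
    # Each sentence becomes a concatenated uni-skip + bi-skip vector.
    vectors = encode(skip_model, sentences, verbose=False)
    nn(skip_model, sentences, vectors, 'Someone is playing music.', k=2)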
|
python
|
#!/bin/env python
##
# @file This file is part of the ExaHyPE project.
# @author ExaHyPE Group ([email protected])
#
# @section LICENSE
#
# Copyright (c) 2016 http://exahype.eu
# All rights reserved.
#
# The project has received funding from the European Union's Horizon
# 2020 research and innovation programme under grant agreement
# No 671698. For copyrights and licensing, please consult the webpage.
#
# Released under the BSD 3 Open Source License.
# For the full license text, see LICENSE.txt
#
#
# @section DESCRIPTION
#
# Controller of the code generator
#
# @note
# requires python3
import os
import copy
import subprocess
import errno
import time
from .configuration import Configuration
from .argumentParser import ArgumentParser
from .models import *
class Controller:
"""Main Controller
    Read the input from the public API, validate it and generate a base
    context for the models.
    Use generateCode() to run the models with the base context.
    Gemms can be generated with generateGemms(outputFile, matmulconfig); this is done
    automatically when using generateCode().
"""
def __init__(self, inputConfig = None):
"""Initialize the base config from the command line inputs"""
Configuration.checkPythonVersion()
        if inputConfig is None:
args = ArgumentParser.parseArgs()
else:
ArgumentParser.validateInputConfig(inputConfig)
args = inputConfig
self.commandLine = ArgumentParser.buildCommandLineFromConfig(args)
# Generate the base config from the args input
self.config = {
"numerics" : args["numerics"],
"pathToOptKernel" : args["pathToOptKernel"],
"solverName" : args["solverName"],
"nVar" : args["numberOfVariables"],
"nPar" : args["numberOfParameters"],
"nData" : args["numberOfVariables"] + args["numberOfParameters"],
"nDof" : (args["order"])+1,
"nDim" : args["dimension"],
"useFlux" : (args["useFlux"] or args["useFluxVect"]),
"useFluxVect" : args["useFluxVect"],
"useNCP" : (args["useNCP"] or args["useNCPVect"]),
"useNCPVect" : args["useNCPVect"],
"useSource" : (args["useSource"] or args["useSourceVect"] or args["useFusedSource"] or args["useFusedSourceVect"]),
"useSourceVect" : args["useSourceVect"],
"useFusedSource" : (args["useFusedSource"] or args["useFusedSourceVect"]),
"useFusedSourceVect" : args["useFusedSourceVect"],
"nPointSources" : args["usePointSources"],
"usePointSources" : args["usePointSources"] >= 0,
"useMaterialParam" : (args["useMaterialParam"] or args["useMaterialParamVect"]),
"useMaterialParamVect" : args["useMaterialParamVect"],
"codeNamespace" : args["namespace"],
"pathToOutputDirectory" : os.path.join(Configuration.pathToExaHyPERoot, args["pathToApplication"], args["pathToOptKernel"]),
"architecture" : args["architecture"],
"useLimiter" : args["useLimiter"] >= 0,
"nObs" : args["useLimiter"],
"ghostLayerWidth" : args["ghostLayerWidth"],
"pathToLibxsmmGemmGenerator" : Configuration.pathToLibxsmmGemmGenerator,
"quadratureType" : ("Gauss-Lobatto" if args["useGaussLobatto"] else "Gauss-Legendre"),
"useCERKGuess" : args["useCERKGuess"],
"useSplitCKScalar" : args["useSplitCKScalar"],
"useSplitCKVect" : args["useSplitCKVect"],
"tempVarsOnStack" : args["tempVarsOnStack"],
"useLibxsmm" : Configuration.useLibxsmm,
"runtimeDebug" : Configuration.runtimeDebug #for debug
}
self.config["useSourceOrNCP"] = self.config["useSource"] or self.config["useNCP"]
self.validateConfig(Configuration.simdWidth.keys())
self.config["vectSize"] = Configuration.simdWidth[self.config["architecture"]] #only initialize once architecture has been validated
self.baseContext = self.generateBaseContext() # default context build from config
self.gemmList = [] #list to store the name of all generated gemms (used for gemmsCPPModel)
def validateConfig(self, validArchitectures):
"""Ensure the configuration fit some constraint, raise errors if not"""
if not (self.config["architecture"] in validArchitectures):
raise ValueError("Architecture not recognized. Available architecture: "+str(validArchitectures))
if not (self.config["numerics"] == "linear" or self.config["numerics"] == "nonlinear"):
raise ValueError("numerics has to be linear or nonlinear")
if self.config["nVar"] < 0:
raise ValueError("Number of variables must be >=0 ")
if self.config["nPar"] < 0:
raise ValueError("Number of parameters must be >= 0")
if self.config["nDim"] < 2 or self.config["nDim"] > 3:
raise ValueError("Number of dimensions must be 2 or 3")
if self.config["nDof"] < 1 or self.config["nDof"] > 10: #nDof = order+1
raise ValueError("Order has to be between 0 and 9")
#if (self.config["useSource"] and not self.config["useSourceVect"] and self.config["useNCPVect"]) or (self.config["useNCP"] and not self.config["useNCPVect"] and self.config["useSourceVect"]) :
# raise ValueError("If using source and NCP, both or neither must be vectorized")
def printConfig(self):
print(self.config)
def generateBaseContext(self):
"""Generate a base context for the models from the config (use hard copy)"""
context = copy.copy(self.config)
context["nVarPad"] = self.getSizeWithPadding(context["nVar"])
context["nParPad"] = self.getSizeWithPadding(context["nPar"])
context["nDataPad"] = self.getSizeWithPadding(context["nData"])
context["nDofPad"] = self.getSizeWithPadding(context["nDof"])
context["nDof3D"] = 1 if context["nDim"] == 2 else context["nDof"]
context["isLinear"] = context["numerics"] == "linear"
context["solverHeader"] = context["solverName"].split("::")[1] + ".h"
context["codeNamespaceList"] = context["codeNamespace"].split("::")
context["guardNamespace"] = "_".join(context["codeNamespaceList"]).upper()
context["nDofLim"] = 2*context["nDof"]-1 #for limiter
context["nDofLimPad"] = self.getSizeWithPadding(context["nDofLim"])
context["nDofLim3D"] = 1 if context["nDim"] == 2 else context["nDofLim"]
context["ghostLayerWidth3D"] = 0 if context["nDim"] == 2 else context["ghostLayerWidth"]
context["useVectPDEs"] = context["useFluxVect"] or True #TODO JMG add other vect
return context
def getSizeWithPadding(self, sizeWithoutPadding):
"""Return the size of the input with the architecture specific padding added"""
return self.config["vectSize"] * int((sizeWithoutPadding+(self.config["vectSize"]-1))/self.config["vectSize"])
def getPadSize(self, sizeWithoutPadding):
"""Return the size of padding required for its input"""
return self.getSizeWithPadding(sizeWithoutPadding) - sizeWithoutPadding
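    # Worked example (illustrative): for an architecture whose SIMD width is 4
    # doubles, getSizeWithPadding(10) = 4 * int((10 + 3) / 4) = 12 and
    # getPadSize(10) = 12 - 10 = 2.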
def generateCode(self):
"""Main method: call the models to generate the code"""
# create directory for output files if not existing
try:
os.makedirs(self.config['pathToOutputDirectory'])
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
# remove all .cpp, .cpph, .c and .h files (we are in append mode!)
for fileName in os.listdir(self.config['pathToOutputDirectory']):
_ , ext = os.path.splitext(fileName)
if(ext in [".cpp", ".cpph", ".c", ".h"]):
os.remove(self.config['pathToOutputDirectory'] + "/" + fileName)
# generate new files
runtimes = {}
start = time.perf_counter()
adjustSolution = adjustSolutionModel.AdjustSolutionModel(self.baseContext)
adjustSolution.generateCode()
runtimes["adjustSolution"] = time.perf_counter() - start
start = time.perf_counter()
amrRoutines = amrRoutinesModel.AMRRoutinesModel(self.baseContext, self)
amrRoutines.generateCode()
runtimes["amrRoutines"] = time.perf_counter() - start
start = time.perf_counter()
boundaryConditions = boundaryConditionsModel.BoundaryConditionsModel(self.baseContext)
boundaryConditions.generateCode()
runtimes["boundaryConditions"] = time.perf_counter() - start
start = time.perf_counter()
configurationParameters = configurationParametersModel.ConfigurationParametersModel(self.baseContext)
configurationParameters.generateCode()
runtimes["configurationParameters"] = time.perf_counter() - start
start = time.perf_counter()
converter = converterModel.ConverterModel(self.baseContext)
converter.generateCode()
runtimes["converter"] = time.perf_counter() - start
start = time.perf_counter()
deltaDistribution = deltaDistributionModel.DeltaDistributionModel(self.baseContext)
deltaDistribution.generateCode()
runtimes["deltaDistribution"] = time.perf_counter() - start
start = time.perf_counter()
dgMatrix = dgMatrixModel.DGMatrixModel(self.baseContext)
dgMatrix.generateCode()
runtimes["dgMatrix"] = time.perf_counter() - start
start = time.perf_counter()
faceIntegral = faceIntegralModel.FaceIntegralModel(self.baseContext)
faceIntegral.generateCode()
runtimes["faceIntegral"] = time.perf_counter() - start
start = time.perf_counter()
fusedSpaceTimePredictorVolumeIntegral = fusedSpaceTimePredictorVolumeIntegralModel.FusedSpaceTimePredictorVolumeIntegralModel(self.baseContext, self)
fusedSpaceTimePredictorVolumeIntegral.generateCode()
runtimes["fusedSpaceTimePredictorVolumeIntegral"] = time.perf_counter() - start
start = time.perf_counter()
kernelsHeader = kernelsHeaderModel.KernelsHeaderModel(self.baseContext)
kernelsHeader.generateCode()
runtimes["kernelsHeader"] = time.perf_counter() - start
start = time.perf_counter()
limiter = limiterModel.LimiterModel(self.baseContext, self)
limiter.generateCode()
runtimes["limiter"] = time.perf_counter() - start
start = time.perf_counter()
matrixUtils = matrixUtilsModel.MatrixUtilsModel(self.baseContext)
matrixUtils.generateCode()
runtimes["matrixUtils"] = time.perf_counter() - start
start = time.perf_counter()
quadrature = quadratureModel.QuadratureModel(self.baseContext, self)
quadrature.generateCode()
runtimes["quadrature"] = time.perf_counter() - start
start = time.perf_counter()
riemann = riemannModel.RiemannModel(self.baseContext)
riemann.generateCode()
runtimes["riemann"] = time.perf_counter() - start
start = time.perf_counter()
solutionUpdate = solutionUpdateModel.SolutionUpdateModel(self.baseContext)
solutionUpdate.generateCode()
runtimes["solutionUpdate"] = time.perf_counter() - start
start = time.perf_counter()
stableTimeStepSize = stableTimeStepSizeModel.StableTimeStepSizeModel(self.baseContext)
stableTimeStepSize.generateCode()
runtimes["stableTimeStepSize"] = time.perf_counter() - start
start = time.perf_counter()
surfaceIntegral = surfaceIntegralModel.SurfaceIntegralModel(self.baseContext)
surfaceIntegral.generateCode()
runtimes["surfaceIntegral"] = time.perf_counter() - start
# must be run only after all gemm have been generated
start = time.perf_counter()
gemmsContext = copy.copy(self.baseContext)
gemmsContext["gemmList"] = self.gemmList
gemmsCPP = gemmsCPPModel.GemmsCPPModel(gemmsContext)
gemmsCPP.generateCode()
runtimes["gemmsCPP"] = time.perf_counter() - start
if self.config['runtimeDebug']:
for key, value in runtimes.items():
print(key+": "+str(value))
def generateGemms(self, outputFileName, matmulConfigList):
"""Generate the gemms with the given config list using LIBXSMM"""
for matmul in matmulConfigList:
# add the gemm name to the list of generated gemm
self.gemmList.append(matmul.baseroutinename)
# for plain assembly code (rather than inline assembly) choose dense_asm
commandLineArguments = " " + "dense" + \
" " + os.path.join(self.config["pathToOutputDirectory"], outputFileName) + \
" " + self.config["codeNamespace"] + "::" + matmul.baseroutinename + \
" " + str(matmul.M) + \
" " + str(matmul.N) + \
" " + str(matmul.K) + \
" " + str(matmul.LDA) + \
" " + str(matmul.LDB) + \
" " + str(matmul.LDC) + \
" " + str(matmul.alpha) + \
" " + str(matmul.beta) + \
" " + str(matmul.alignment_A) + \
" " + str(matmul.alignment_C) + \
" " + self.config["architecture"] + \
" " + matmul.prefetchStrategy + \
" " + "DP" #always use double precision, "SP" for single
bashCommand = self.config["pathToLibxsmmGemmGenerator"] + commandLineArguments
subprocess.call(bashCommand.split())
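        # Illustrative resulting call (all values assumed, not taken from a real
        # config): for M=N=K=8, leading dimensions 8, alpha=1, beta=0,
        # alignments 1/1 and prefetch "nopf" on architecture "hsw", this spawns
        # roughly:
        #   <pathToLibxsmmGemmGenerator> dense <outDir>/gemms.cpp <namespace>::<routine> \
        #       8 8 8 8 8 8 1 0 1 1 hsw nopf DP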
|
python
|
from sklearn.compose import ColumnTransformer
from sklearn.decomposition import PCA
from sklearn.impute import KNNImputer
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import RobustScaler
class TrainModel():
@classmethod
def transformerFor(cls, cat_cols, num_cols):
"""Construct a column transformer for the named columns
Please see https://jaketae.github.io/study/sklearn-pipeline/ on
which this implementation is based.
Args:
cat_cols (List): Categorical column names
num_cols (List): Numerical column names
Returns:
ColumnTransformer: a column transformer
"""
# Categorical column transformer
cat_transformer = Pipeline(steps=[
('imputer', SimpleImputer(strategy='most_frequent')),
('onehot', OneHotEncoder(handle_unknown='ignore', sparse=False)),
('pca', PCA(n_components=10))
])
# Numerical column transformer
num_transformer = Pipeline(steps=[
('imputer', KNNImputer(n_neighbors=5)),
('scaler', RobustScaler())
])
return ColumnTransformer(
transformers=[
('num', num_transformer, num_cols),
('cat', cat_transformer, cat_cols)
])
@classmethod
def pipelineFor(cls, preprocessor, classifier):
"""Construct a pipeline for the specified preprocessor and classifier
Args:
preprocessor (ColumnTransformer): A column transformer
classifier (Classifier): A model classifier
Returns:
Pipeline: A Pipeline suitable for classification use
"""
return Pipeline(steps=[('preprocessor', preprocessor),
('classifier', classifier)])
@classmethod
def tunedParameters(cls):
"""Define search parameters
Returns:
Dictionary: A dictionary of key-value search parameters
"""
num_transformer_dist = {'preprocessor__num__imputer__n_neighbors': list(range(2, 15)),
'preprocessor__num__imputer__add_indicator': [True, False]}
cat_transformer_dist = {'preprocessor__cat__imputer__strategy': ['most_frequent', 'constant'],
'preprocessor__cat__imputer__add_indicator': [True, False],
'preprocessor__cat__pca__n_components': list(range(2, 15))}
random_forest_dist = {'classifier__n_estimators': list(range(50, 500)),
'classifier__max_depth': list(range(2, 20)),
'classifier__bootstrap': [True, False]}
return {**num_transformer_dist, **cat_transformer_dist, **random_forest_dist}
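
# --- Hedged usage sketch (not part of the original class) ---
# Ties the helpers together with a RandomForestClassifier and a randomized
# search over TrainModel.tunedParameters(); the column names and classifier
# choice are illustrative assumptions.
if __name__ == '__main__':
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.model_selection import RandomizedSearchCV

    preprocessor = TrainModel.transformerFor(cat_cols=['city'], num_cols=['age', 'income'])
    pipeline = TrainModel.pipelineFor(preprocessor, RandomForestClassifier())
    search = RandomizedSearchCV(pipeline, TrainModel.tunedParameters(), n_iter=10, cv=3)
    # search.fit(X, y)  # fit on a real dataframe with 'city', 'age', 'income' columns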
|
python
|
import cv2
from .drawBoxes import drawBoxes
def addPedestriansToTrack(image, tracker, trackers, trackedObjectsNum):
    if trackers is None:
trackers = cv2.MultiTracker_create()
markedObjects = trackedObjectsNum
while True:
manualMarking = cv2.selectROI("Mark pedestrian to track", image)
if manualMarking != (0, 0, 0, 0):
markedObjects = markedObjects + 1
trackers.add(tracker(), image, manualMarking)
drawBoxes(image, [manualMarking])
print("Hit Enter to continue")
print("Hit backspace to clear all tracked objects")
print("Hit any other key to add next object")
key = cv2.waitKey(0)
cv2.destroyWindow("Mark pedestrian to track")
if key == ord("\r"):
return [trackers, markedObjects]
if key == 8:
trackers = cv2.MultiTracker_create()
markedObjects = 0
print("!! You clear all tracked objects !!")
|
python
|
import argparse
import io
import csv
import scipy
from scipy.sparse import csr_matrix
import numpy as np
import tensorflow as tf
def add_data(r, indptr, indices, data, vocab):
if len(r) > 1:
label = r[0]
for f in r[1:]:
if f:
k, v = f.split(':')
idx = vocab.setdefault(k, len(vocab))
indices.append(idx)
data.append(float(v))
indptr.append(len(indices))
return label, indptr, indices, data, vocab
    return None, indptr, indices, data, vocab  # no label for empty/short rows
def process_file(fn, indptr, indices, data, vocab):
y = []
with io.open(fn) as fh:
csvr = csv.reader(fh, delimiter = ' ')
for r in csvr:
label, indptr, indices, data, vocab = add_data(r, indptr, indices, data, vocab)
if label is not None:
y.append(label)
return y, indptr, indices, data, vocab
def parse(data_fn):
indptr = [0]
indices, data, vocab = [], [], dict()
y, indptr, indices, data, vocab = process_file(data_fn, indptr, indices, data, vocab)
x = csr_matrix((data, indices, indptr), dtype=np.float32)
x.sort_indices()
return x, y
def compress(x, y, model, out_fn):
x_new = model.predict(x)
with io.open(out_fn, 'w') as fh:
for i, x in enumerate(x_new):
fh.write('{} {}\n'.format(y[i], ' '.join('{}:{}'.format(j, v) for j, v in enumerate(x))))
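# Illustrative data layout (assumed, matching the parsing above): each input
# line looks like "1 f3:0.5 f7:1.25", i.e. a label followed by sparse
# "feature:value" pairs; feature ids are remapped to dense column indices in
# order of first appearance, and compress() writes the reduced rows back out
# in the same "label index:value ..." shape.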
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Parses a libSVM-formatted dataset.')
parser.add_argument('-d', '--dataset', required=True, help='Input dataset for reduction.')
parser.add_argument('-m', '--model', required=False, help='Trained compressor model file.')
parser.add_argument('-o', '--output', required=True, help='Output file with reduced data in libSVM format.')
args = parser.parse_args()
x, y = parse(args.dataset)
model = tf.keras.models.load_model(args.model)
compress(x, y, model, args.output)
|
python
|
import importlib
import xarray as xr
import numpy as np
import pandas as pd
import sys
import os
from CASutils import filter_utils as filt
from CASutils import calendar_utils as cal
importlib.reload(filt)
importlib.reload(cal)
def calcdeseas(da):
datseas = da.groupby('time.dayofyear').mean('time', skipna=True)
dat4harm = filt.calc_season_nharm(datseas, 4, dimtime=0)
anoms = da.groupby('time.dayofyear') - dat4harm
datdeseas = cal.group_season_daily(anoms, 'DJF')
seasmean = datdeseas.mean('day', skipna=True)
datdeseas = datdeseas - seasmean
#datdeseas = np.array(datdeseas).flatten()
return datdeseas
basepath="/project/cas/islas/python_savs/snowpaper/DATA_SORT/3cities/CAM/"
trefht_clm5 = xr.open_dataset(basepath+"TREFHT_Isla_CAM6_CLM5_002.nc")
trefht_clm5_deseas = calcdeseas(trefht_clm5.trefht)
cities = trefht_clm5.city
ncities = trefht_clm5.city.size
for icity in range(0,ncities,1):
trefht_clm5 = np.array(trefht_clm5_deseas[:,:,icity]).flatten()
# calculate the ptile bin ranges
nblocks = 10
binmin = np.empty([nblocks]) ; binmax = np.empty([nblocks])
for iblock in np.arange(0,nblocks,1):
binmin[iblock] = np.percentile(trefht_clm5,iblock*10)
binmax[iblock] = np.percentile(trefht_clm5,iblock*10+10)
if (iblock == 0):
binmin[iblock] = np.percentile(trefht_clm5,1)
if (iblock == (nblocks-1)):
binmax[iblock] = np.percentile(trefht_clm5,99)
outpath="/project/cas/islas/python_savs/snowpaper/DATA_SORT/trefhtptile_composites/3cities/"
basepath="/project/cas/islas/python_savs/snowpaper/DATA_SORT/3cities/OBS/"
trefht = xr.open_dataset(basepath+"ERA5_TREFHT.nc")
basepath="/project/cas/islas/python_savs/snowpaper/DATA_SORT/3cities/ERA5/"
dat = xr.open_dataset(basepath+"ERA5_increments.nc")
increments_deseas = calcdeseas(dat.increments)
forecast_deseas = calcdeseas(dat.forecast)
analysis_deseas = calcdeseas(dat.analysis)
trefht_deseas = calcdeseas(trefht.era5)
cities=dat.city
ncities = dat.city.size
for icity in range(0,ncities,1):
trefht = np.array(trefht_deseas[:,:,icity]).flatten()
increments = np.array(increments_deseas[:,:,icity]).flatten()
forecast = np.array(forecast_deseas[:,:,icity]).flatten()
analysis = np.array(analysis_deseas[:,:,icity]).flatten()
if (icity == 0):
incrementcomp = np.zeros([nblocks, ncities])
forecastcomp = np.zeros([nblocks, ncities])
analysiscomp = np.zeros([nblocks, ncities])
for iblock in np.arange(0,nblocks,1):
incrementcomp[iblock, icity] = \
(increments[(analysis >= binmin[iblock]) & (analysis < binmax[iblock])]).mean()
forecastcomp[iblock, icity] = \
(forecast[(analysis >= binmin[iblock]) & (analysis < binmax[iblock])]).mean()
analysiscomp[iblock, icity] = \
(analysis[(analysis >= binmin[iblock]) & (analysis < binmax[iblock])]).mean()
increment_xr = xr.DataArray(incrementcomp,
coords=[np.arange(0,nblocks,1),cities], dims=['ptile','city'], name='increment')
forecast_xr = xr.DataArray(forecastcomp,
coords=[np.arange(0,nblocks,1),cities], dims=['ptile','city'], name='forecast')
analysis_xr = xr.DataArray(analysiscomp,
coords=[np.arange(0,nblocks,1),cities], dims=['ptile','city'], name='analysis')
increment_xr.to_netcdf(path=outpath+'trefhtptilecomposites_3cities_ERA5increments.nc')
forecast_xr.to_netcdf(path=outpath+'trefhtptilecomposites_3cities_ERA5increments.nc', mode='a')
analysis_xr.to_netcdf(path=outpath+'trefhtptilecomposites_3cities_ERA5increments.nc', mode='a')
|
python
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import torch
import spdnn
torch.manual_seed(7)
a = torch.rand(6, 6).cuda()
a[a<0.6] = 0.0
at = a.t()
print('at: ', at)
b = torch.rand(6, 6).cuda()
print('b: ', b)
#c = spdnn.spmm(a, b)
print('at shape: ', at.shape)
torch.cuda.synchronize()
c = spdnn.sparse_t_x_dense(a, b)
print('c=axb: ', c)
c_true = at.mm(b)
print('c_true=axb: ', c_true)
print('norm: ', float((c-c_true).norm()))
|
python
|
# -*- coding: utf-8 -*-
"""
Created on 16 June 2021
Created by J Botha
This script attempts to join the provided file city-hex-polygons-8.geojson to the service request dataset.
When using the first 10 000 records from the service request dataset I seem to get no matches on the
Latitude and Longitude variables.
I do, however, set the index value to 0 for any requests where the Latitude and Longitude fields are empty.
How to use:
Modules Needed:
-pip install boto3
-pip install pandas
Files needed:
Input file: "sr.csv" file that has been provided should be in the same directory.
Output file: "sr_updated.csv" file gets generated by this application.
"aws_credentials.py" file has been uploaded to the root directory.
Run: python initial_data_transformation.py
"""
from aws_credentials import access_key, secret_key, aws_region
from boto3.session import Session
from io import StringIO
from csv import reader
import boto3
import pandas as pd
import os
import itertools
import csv
import datetime
# Tracking Time taken for application to run
application_start_time = datetime.datetime.now()
list_of_rows = []
for each_file in sorted(os.listdir('.')):
#open input file provided
if each_file.endswith("sr.csv"):
#read csv file as a list of lists
with open(each_file, 'r') as read_obj:
# pass the file object to reader() to get the reader object
csv_reader = reader(read_obj)
# reading in the first 10 000 records as a sample set
for row in itertools.islice(csv_reader, 10000):
# Pass reader object to list() to get a list of lists
list_of_rows.append(row)
# create boto session
session = Session(
aws_access_key_id="AKIAYH57YDEWMHW2ESH2",
aws_secret_access_key=secret_key,
region_name=aws_region
)
# make connection
client = session.client('s3')
# query and create response
base_resp_standard = client.select_object_content(
Bucket = "cct-ds-code-challenge-input-data",
Key = "city-hex-polygons-8.geojson",
Expression = "SELECT d.properties FROM S3Object[*].features[*] d",
ExpressionType = "SQL",
InputSerialization = {"JSON": {"Type": "DOCUMENT"}},
OutputSerialization = {"JSON": {'RecordDelimiter': "\n"}}
)
# upack query response
records = []
enhanced_list = []
for event in base_resp_standard["Payload"]:
if "Records" in event:
records.append(event["Records"]["Payload"])
# store unpacked data as a CSV format
file_str = ''.join(req.decode('utf-8') for req in records)
# read CSV to dataframe
df = pd.read_csv(StringIO(file_str))
for index, row in df.iterrows():
tmp_list = []
# h3_level8_index
tmp_list.append(row[0].split(":")[2].strip('"'))
# db_latitude
tmp_list.append(row[1].split(":")[1])
# db_longitude
tmp_list.append(row[2].split(":")[1].split("}")[0])
enhanced_list.append(tmp_list)
# open output file
with open('sr_updated.csv', 'w', encoding='UTF8', newline='') as f:
writer = csv.writer(f)
header = ['', 'NotificationNumber', 'NotificationType', 'CreationDate', 'CompletionDate', 'Duration', 'CodeGroup', 'Code', 'Open', 'Latitude', 'Longitude', 'SubCouncil2016', 'Wards2016', 'OfficialSuburbs', 'directorate', 'department', 'ModificationTimestamp', 'CompletionTimestamp', 'CreationTimestamp', 'h3_level8_index']
# write the header to output file
writer.writerow(header)
# Loop through input data set and
for row1 in list_of_rows:
if row1[10] == 'nan':
existing_row = row1
existing_row.append(0)
writer.writerow(existing_row)
for row2 in enhanced_list:
if row1[10] == row2[2] and row1[9] == row2[1]:
                # list.append returns None, so extend the row first and then write it
                row1.append(row2[0])
                writer.writerow(row1)
application_end_time = datetime.datetime.now()
application_time_taken = application_end_time - application_start_time
# Process time stats
print("application_start_time = ", application_start_time)
print("application_end_time = ", application_end_time)
print("application_time_taken = ", application_time_taken)
|
python
|
from collections import OrderedDict
from Jumpscale import j
JSBASE = j.baseclasses.object
class ModelBase(j.baseclasses.object):
def __init__(self, key="", new=False, collection=None):
self._propnames = []
self.collection = collection
self._key = ""
self.dbobj = None
self.changed = False
self._subobjects = {}
if j.data.types.bytes.check(key):
key = key.decode()
# if key != "":
# if len(key) != 16 and len(key) != 32 and len(key) != 64:
# raise j.exceptions.Input("Key needs to be length 16,32,64")
if new:
self.dbobj = self.collection._capnp_schema.new_message()
self._post_init()
if key != "":
self._key = key
elif key != "":
# will get from db
if self.collection._db.exists(key):
self.load(key=key)
self._key = key
else:
raise j.exceptions.Input(message="Cannot find object:%s!%s" % (self.collection.category, key))
else:
raise j.exceptions.Input(
message="key cannot be empty when no new obj is asked for.", level=1, source="", tags="", msgpub=""
)
@property
def key(self):
if self._key is None or self._key == "":
self._key = self._generate_key()
return self._key
@key.setter
def key(self, value):
if j.data.types.bytes.check(value):
value = value.decode()
self._key = value
def _post_init(self, **kwargs):
pass
def _pre_save(self):
# needs to be implemented see e.g. ActorModel
pass
def _generate_key(self):
# return a unique key to be used in db (std the key but can be overriden)
return j.data.hash.md5_string(j.data.idgenerator.generateGUID())
def index(self):
# put indexes in db as specified
if self.collection != None:
self.collection._index.index({self.dbobj.name: self.key})
def load(self, key):
if self.collection._db.inMem:
self.dbobj = self.collection._db.get(key)
else:
buff = self.collection._db.get(key)
self.dbobj = self.collection._capnp_schema.from_bytes(buff, builder=True)
# TODO: *2 would be nice that this works, but can't get it to work, something recursive
# def __setattr__(self, attr, val):
# if attr in ["_propnames", "_subobjects", "dbobj", "_capnp_schema"]:
# self.__dict__[attr] = val
# print("SETATTRBASE:%s" % attr)
# # return ModelBase.__setattr__(self, attr, val)
#
# print("SETATTR:%s" % attr)
# if attr in self._propnames:
# print("1%s" % attr)
# # TODO: is there no more clean way?
# dbobj = self._subobjects
# print(2)
# exec("dbobj.%s=%s" % (attr, val))
# print(3)
# #
# else:
# raise j.exceptions.Input(message="Cannot set attr:%s in %s" %
# (attr, self))
# def __dir__(self):
# propnames = ["key", "index", "load", "_post_init", "_pre_save", "_generate_key", "save", "logger",
# "dictFiltered", "reSerialize", "dictJson", "raiseError", "addSubItem", "_listAddRemoveItem",
# "logger", "_capnp_schema", "_category", "_db", "_index", "_key", "dbobj", "changed", "_subobjects"]
# return propnames + self._propnames
def reSerialize(self):
for key in list(self._subobjects.keys()):
prop = self.__dict__["list_%s" % key]
dbobjprop = eval("self.dbobj.%s" % key)
if len(dbobjprop) != 0:
raise j.exceptions.Base("bug, dbobj prop should be empty, means we didn't reserialize properly")
if prop is not None and len(prop) > 0:
# init the subobj, iterate over all the items we have & insert them
subobj = self.dbobj.init(key, len(prop))
for x in range(0, len(prop)):
subobj[x] = prop[x]
self._subobjects.pop(key)
self.__dict__.pop("list_%s" % key)
def save(self):
self.reSerialize()
self._pre_save()
if self.collection._db.inMem:
self.collection._db.db[self.key] = self.dbobj
else:
# no need to store when in mem because we are the object which does not have to be serialized
# so this one stores when not mem
buff = self.dbobj.to_bytes()
if hasattr(self.dbobj, "clear_write_flag"):
self.dbobj.clear_write_flag()
self.collection._db.set(self.key, buff)
self.index()
def to_dict(self):
self.reSerialize()
d = self.dbobj.to_dict()
d["key"] = self.key
return d
@property
def dictFiltered(self):
"""
        remove items from obj which cannot be serialized to json or are not relevant in the dict
"""
# made to be overruled
return self.to_dict()
@dictFiltered.setter
def dictFiltered(self, ddict):
"""
"""
if "key" in ddict:
            self.key = ddict["key"]
self.dbobj = self.collection._capnp_schema.new_message(**ddict)
@property
def dictJson(self):
ddict2 = OrderedDict(self.dictFiltered)
return j.data.serializers.json.dumps(ddict2, sort_keys=True, indent=True)
def raiseError(self, msg):
msg = "Error in dbobj:%s (%s)\n%s" % (self._category, self.key, msg)
raise j.exceptions.Input(message=msg)
def updateSubItem(self, name, keys, data):
keys = keys or []
if not isinstance(keys, list):
keys = [keys]
self._listAddRemoveItem(name)
existing = self.__dict__["list_%s" % name]
for idx, item in enumerate(existing):
match = True
for key in keys:
if item.to_dict()[key] != data.to_dict()[key]:
match = False
if keys and match:
existing.pop(idx)
break
self.addSubItem(name, data)
def addDistinctSubItem(self, name, data):
self._listAddRemoveItem(name=name)
for item in self.__dict__["list_%s" % name]:
if item.to_dict() == data.to_dict():
return
self.__dict__["list_%s" % name].append(data)
def addSubItem(self, name, data):
"""
        @param data is a string or an object first retrieved from self.collection.list_$name_constructor(**args)
        items can also be appended directly via self.list_$name.append(self.collection.list_$name_constructor(**args)) if the list already exists
"""
self._listAddRemoveItem(name=name)
self.__dict__["list_%s" % name].append(data)
def initSubItem(self, name):
self._listAddRemoveItem(name=name)
def deleteSubItem(self, name, pos):
"""
@param pos is the position in the list
"""
self._listAddRemoveItem(name=name)
self.__dict__["list_%s" % name].pop(pos)
self.reSerialize()
def _listAddRemoveItem(self, name):
"""
        if you want to change the size of a list on the obj, use this method
        capnp doesn't allow modification of lists, so when we want to change the size of a list we need to reserialize
        and put the content of the list in a python list of dicts
        the capnp subobject is then left empty until we know we are at the point where we need to save the object
        when we save, we populate the subobject again so we get a nicely created capnp message
"""
if name in self._subobjects:
# means we are already prepared
return
prop = eval("self.dbobj.%s" % name)
if len(prop) == 0:
self.__dict__["list_%s" % name] = []
else:
try:
self.__dict__["list_%s" % name] = [item.copy() for item in prop]
except BaseException: # means is not an object can be e.g. a string
self.__dict__["list_%s" % name] = [item for item in prop]
# empty the dbobj list
exec("self.dbobj.%s=[]" % name)
self._subobjects[name] = True
self.changed = True
def __repr__(self):
out = "key:%s\n" % self.key
out += self.dictJson
return out
__str__ = __repr__
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Sep 6, 2020
@author: eljeffe
Copyright 2020 Root the Box
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from os import urandom
from hashlib import sha256
from sqlalchemy import Column, ForeignKey
from sqlalchemy.types import String, Boolean, Integer
from models import dbsession
from models.BaseModels import DatabaseObject
from libs.StringCoding import encode
from datetime import datetime, timedelta
class PasswordToken(DatabaseObject):
""" Password token definition """
user_id = Column(Integer, ForeignKey("user.id", ondelete="CASCADE"), nullable=False)
value = Column(String(32), unique=True, nullable=False)
used = Column(Boolean, nullable=False, default=False)
@classmethod
def all(cls):
""" Returns a list of all objects in the database """
return dbsession.query(cls).all()
@classmethod
def by_id(cls, _id):
""" Returns a the object with id of _id """
return dbsession.query(cls).filter_by(id=_id).first()
@classmethod
def by_user_id(cls, user_id):
""" Returns a the object with id of user_id """
return dbsession.query(cls).filter_by(user_id=user_id).first()
@classmethod
def count(cls):
""" Returns a list of all objects in the database """
return dbsession.query(cls).count()
@classmethod
def by_value(cls, value):
""" Returns a the object with value of value """
return dbsession.query(cls).filter_by(value=value).first()
def is_expired(self, hours=3):
""" Check if the token is expired """
now = datetime.now()
expired = self.created + timedelta(hours=hours)
return now > expired
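    # Example (illustrative): a token whose 'created' timestamp is 10:00 with
    # the default hours=3 window reports is_expired() == False until 13:00 and
    # True afterwards.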
|
python
|
# Copyright (C) 2021 Nippon Telegraph and Telephone Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import inspect
from oslo_log import log as logging
from tacker.common import coordination
from tacker.sol_refactored.common import exceptions as sol_ex
LOG = logging.getLogger(__name__)
# NOTE: It is used to prevent operations on the same vnf instance
# from being processed at the same time. It can be applied between
# threads of a process and different processes (e.g. tacker-server
# and tacker-conductor) on a same host.
# Note that race condition of very short time is not considered.
def lock_vnf_instance(inst_arg, delay=False):
# NOTE: tacker-server issues RPC call to tacker-conductor
# (just) before the lock released. 'delay' is for tacker-conductor
# to be able to wait if it receives RPC call before tacker-server
# releases the lock.
def operation_lock(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
coord = coordination.COORDINATOR
# ensure coordination start
# NOTE: it is noop if already started.
coord.start()
sig = inspect.signature(func)
call_args = sig.bind(*args, **kwargs).arguments
inst_id = inst_arg.format(**call_args)
lock = coord.get_lock(inst_id)
blocking = False if not delay else 10
# NOTE: 'with lock' is not used since it can't handle
# lock failed exception well.
if not lock.acquire(blocking=blocking):
LOG.debug("Locking vnfInstance %s failed.", inst_id)
raise sol_ex.OtherOperationInProgress(inst_id=inst_id)
try:
LOG.debug("vnfInstance %s locked.", inst_id)
return func(*args, **kwargs)
finally:
lock.release()
return wrapper
return operation_lock
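# --- Hedged usage sketch (not part of the original module) ---
# 'inst_arg' is a format string resolved against the decorated function's bound
# arguments, so the lock key below becomes the vnf instance id. The method name
# and signature are illustrative only.
#
#   class VnfLcmDriver:
#       @lock_vnf_instance('{inst.id}', delay=True)
#       def instantiate(self, context, lcmocc, inst, grant_req, grant, vnfd):
#           ...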
|
python
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""This script demonstrates how the Python example service without needing
to use the bazel build system. Usage:
$ python example_compiler_gym_service/demo_without_bazel.py
It is equivalent in behavior to the demo.py script in this directory.
"""
import logging
from pathlib import Path
from typing import Iterable
import gym
from compiler_gym.datasets import Benchmark, Dataset
from compiler_gym.datasets.uri import BenchmarkUri
from compiler_gym.spaces import Reward
from compiler_gym.util.logging import init_logging
from compiler_gym.util.registration import register
EXAMPLE_PY_SERVICE_BINARY: Path = Path(
"example_compiler_gym_service/service_py/example_service.py"
)
assert EXAMPLE_PY_SERVICE_BINARY.is_file(), "Service script not found"
class RuntimeReward(Reward):
"""An example reward that uses changes in the "runtime" observation value
to compute incremental reward.
"""
def __init__(self):
super().__init__(
id="runtime",
observation_spaces=["runtime"],
default_value=0,
default_negates_returns=True,
deterministic=False,
platform_dependent=True,
)
self.previous_runtime = None
def reset(self, benchmark: str, observation_view):
del benchmark # unused
self.previous_runtime = None
def update(self, action, observations, observation_view):
del action
del observation_view
if self.previous_runtime is None:
self.previous_runtime = observations[0]
reward = float(self.previous_runtime - observations[0])
self.previous_runtime = observations[0]
return reward
class ExampleDataset(Dataset):
def __init__(self, *args, **kwargs):
super().__init__(
name="benchmark://example-v0",
license="MIT",
description="An example dataset",
)
self._benchmarks = {
"/foo": Benchmark.from_file_contents(
"benchmark://example-v0/foo", "Ir data".encode("utf-8")
),
"/bar": Benchmark.from_file_contents(
"benchmark://example-v0/bar", "Ir data".encode("utf-8")
),
}
def benchmark_uris(self) -> Iterable[str]:
yield from (f"benchmark://example-v0{k}" for k in self._benchmarks.keys())
def benchmark_from_parsed_uri(self, uri: BenchmarkUri) -> Benchmark:
if uri.path in self._benchmarks:
return self._benchmarks[uri.path]
else:
raise LookupError("Unknown program name")
# Register the environment for use with gym.make(...).
register(
id="example-v0",
entry_point="compiler_gym.envs:CompilerEnv",
kwargs={
"service": EXAMPLE_PY_SERVICE_BINARY,
"rewards": [RuntimeReward()],
"datasets": [ExampleDataset()],
},
)
def main():
# Use debug verbosity to print out extra logging information.
init_logging(level=logging.DEBUG)
# Create the environment using the regular gym.make(...) interface.
with gym.make("example-v0") as env:
env.reset()
for _ in range(20):
observation, reward, done, info = env.step(env.action_space.sample())
if done:
env.reset()
if __name__ == "__main__":
main()
|
python
|
__all__ = ['Mode', 'Format']
from dataclasses import dataclass
from enum import Enum
from typing import Tuple
class Mode(Enum):
# Manually map these to the entries in .taco_compile.taco_type_header.taco_mode_t
dense = (0, 'd')
compressed = (1, 's')
def __init__(self, c_int: int, character: 'str'):
self.c_int = c_int
self.character = character
@staticmethod
def from_c_int(value: int) -> 'Mode':
for member in Mode:
if member.value[0] == value:
return member
raise ValueError(f'No member of DimensionalMode has the integer value {value}')
@dataclass(frozen=True)
class Format:
modes: Tuple[Mode, ...]
ordering: Tuple[int, ...]
def __post_init__(self):
if len(self.modes) != len(self.ordering):
raise ValueError(f'Length of modes ({len(self.modes)}) must be equal to length of ordering '
f'({len(self.ordering)})')
@property
def order(self):
return len(self.modes)
def deparse(self):
if self.ordering == tuple(range(self.order)):
return ''.join(mode.character for mode in self.modes)
else:
return ''.join(mode.character + str(ordering) for mode, ordering in zip(self.modes, self.ordering))
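
# --- Hedged usage sketch (not part of the original module) ---
# A CSR-like 2-tensor format: dense rows, compressed columns, identity ordering.
if __name__ == '__main__':
    csr = Format(modes=(Mode.dense, Mode.compressed), ordering=(0, 1))
    assert csr.order == 2
    assert csr.deparse() == 'ds'  # identity ordering omits the mode numbers
    assert Mode.from_c_int(1) is Mode.compressed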
|
python
|
from matplotlib import pyplot,gridspec,colors,patches
from matplotlib.collections import LineCollection
import numpy
import os
from diatom import Calculate
import warnings
from scipy import constants
h = constants.h
cwd = os.path.dirname(os.path.abspath(__file__))
def make_segments(x, y):
''' segment x and y points
Create list of line segments from x and y coordinates, in the correct format for LineCollection:
an array of the form numlines x (points per line) x 2 (x and y) array
Args:
x,y (numpy.ndarray -like ) - points on lines
Returns:
segments (numpy.ndarray) - array of numlines by points per line by 2
'''
points = numpy.array([x, y]).T.reshape(-1, 1, 2)
segments = numpy.concatenate([points[:-1], points[1:]], axis=1)
return segments
def colorline(x, y, z=None, cmap=pyplot.get_cmap('copper'),
norm=pyplot.Normalize(0.0, 1.0), linewidth=3, alpha=1.0,
legend=False,ax=None):
'''Plot a line shaded by an extra value.
Plot a colored line with coordinates x and y
Optionally specify colors in the array z
Optionally specify a colormap, a norm function and a line width
Args:
x,y (list-like): x and y coordinates to plot
kwargs:
z (list): Optional third parameter to colour lines by
cmap (matplotlib.cmap): colour mapping for z
norm (): Normalisation function for mapping z values to colours
linewidth (float): width of plotted lines (default =3)
alpha (float): value of alpha channel (default = 1)
legend (Bool): display a legend (default = False)
ax (matplotlib.pyplot.axes): axis object to plot on
Returns:
lc (Collection) - collection of lines
'''
    if ax is None:
ax = pyplot.gca()
# Default colors equally spaced on [0,1]:
if z is None:
z = numpy.linspace(0.0, 1.0, len(x))
# Special case if a single number:
if not hasattr(z, "__iter__"): # to check for numerical input -- this is a hack
z = numpy.array([z])
z = numpy.asarray(z)
segments = make_segments(x, y)
lc = LineCollection(segments, array=z, cmap=cmap, norm=norm,
linewidth=linewidth,zorder=1.25)
ax.add_collection(lc)
return lc
def TDM_plot(energies,States,gs,Nmax,I1,I2,TDMs=None,
pm = +1, Offset=0,fig=pyplot.gcf(),
log=False,minf=None,maxf=None,prefactor=1e-3,col=None):
''' Create a TDM plot
this function plots a series of energy levels and their transition dipole
moments from a given ground state. In this version a lot of the plotting style
is fixed.
Args:
energies (numpy.ndarray) - array of energy levels
states (numpy.ndarray) - array of states corresponding to energies such that E[i] -> States[:,i]
gs (int) - index for ground state of interest
Nmax (int) - maximum rotational quantum number to include
I1, I2 (float) - nuclear spins of nuclei 1 and 2
Kwargs:
TDMs (list of numpy.ndarray) - optional precomputed transition dipole moments in [sigma-,pi,sigma+] order
pm (float) - flag for if the transition increases or decreases N (default = 1)
Offset (float) - yaxis offset (default = 0)
fig (matplotlib.pyplot.figure) - figure object to draw on
log (bool) - use logarithmic scaling for TDM plots
minf (float) - minimum frequency to show
maxf (float) - maximum frequency to show
prefactor (float) - scaling factor for all energies
col (list) - list of colours for lines (must be at least length 3 )
'''
gray ='xkcd:grey'
    if col is None:
        green ='xkcd:darkgreen'
        red ='xkcd:maroon'
        blue ='xkcd:azure'
        col=[red,blue,green]
    else:
        # unpack user-supplied colours so the red/blue/green names used below still resolve
        red,blue,green = col[0],col[1],col[2]
    if TDMs is None and (Nmax is None or I1 is None or I2 is None):
        raise RuntimeError("TDMs or Quantum numbers must be supplied")
    elif (Nmax is None or I1 is None or I2 is None):
        TDMs = numpy.array(TDMs)
        dm = TDMs[0,:]
        dz = TDMs[1,:]
        dp = TDMs[2,:]
    else:
        dm = numpy.round(Calculate.TDM(Nmax,I1,I2,+1,States,gs),6)
        dz = numpy.round(Calculate.TDM(Nmax,I1,I2,0,States,gs),6)
        dp = numpy.round(Calculate.TDM(Nmax,I1,I2,-1,States,gs),6)
if abs(pm)>1:
pm = int(pm/abs(pm))
widths = numpy.zeros(4)+1
widths[-1] = 1.4
fig.set_figheight(8)
fig.set_figwidth(6)
grid= gridspec.GridSpec(2,4,width_ratios=widths)
N,MN = Calculate.LabelStates_N_MN(States,Nmax,I1,I2)
#find the ground state that the user has put in
N0 = N[gs]
gs_E = energies[gs]
lim =10
l1 = numpy.where(N==N0)[0]
min_gs = prefactor*numpy.amin(energies[l1]-gs_E)/h
max_gs = prefactor*numpy.amax(energies[l1]-gs_E)/h
l2 = numpy.where(N==N0+pm)[0]
    if minf is None:
emin = numpy.amin(energies[l2])
minf = 10e4
f = prefactor*(emin-gs_E)/h - Offset
minf = min([minf,f])
    if maxf is None:
emax = numpy.amax(energies[l2])
maxf = 0
f = prefactor*(emax-gs_E)/h - Offset
maxf = max([maxf,f])
if pm == 1:
ax0 = fig.add_subplot(grid[1,:-1])
ax = []
for j in range(3):
if j ==0:
ax.append(fig.add_subplot(grid[0,j],zorder=1))
else:
ax.append(fig.add_subplot(grid[0,j],sharey=ax[0],zorder=1))
elif pm == -1:
ax0 = fig.add_subplot(grid[0,:-1])
ax = []
for j in range(3):
if j ==0:
ax.append(fig.add_subplot(grid[1,j],zorder=1))
else:
ax.append(fig.add_subplot(grid[1,j],sharey=ax[0],zorder=1))
#plotting the energy levels for ground state
for l in l1:
f =prefactor*(energies[l]-gs_E)/h #- Offset
if l ==gs:
ax0.plot([-lim,lim],[f,f],color='k',zorder=1.2)
else:
ax0.plot([-lim,lim],[f,f],color=gray,zorder=0.8)
    lbl = [r'$\sigma_-$',r"$\pi$",r"$\sigma_+$"]
for j,axis in enumerate(ax):
#plotting for excited state
for l in l2:
f = prefactor*(energies[l]-gs_E)/h - Offset
if dz[l]!=0 and j==1:
axis.plot([-lim,lim],[f,f],color=blue,zorder=1.2)
elif dp[l] !=0 and j ==2:
axis.plot([-lim,lim],[f,f],color=green,zorder=1.2)
elif dm[l] !=0 and j ==0:
axis.plot([-lim,lim],[f,f],color=red,zorder=1.2)
else:
axis.plot([-lim,lim],[f,f],color=gray,zorder=0.8)
if j ==0 :
axis.tick_params(labelbottom=False,bottom=False,which='both')
else:
axis.tick_params(labelleft=False,left=False,labelbottom=False,
bottom=False,which='both')
axis.set_xlim(-lim,lim)
axis.set_title(lbl[j],color=col[j])
# set the ticks so that only the left most has a frequency/energy axis
# and none have an x axis
ax0.tick_params(labelbottom=False,bottom=False,which='both')
ax0.set_xlim(-lim,lim)
#add the bar plot axis
ax_bar = fig.add_subplot(grid[0,-1],sharey = ax[0])
ax_bar.tick_params(labelleft=False,left=False, which='both')
#fix the ROI to be 300 kHz around the state the user has chosen
ax0.set_ylim(min_gs,max_gs)
f = prefactor*(energies-gs_E)/h-Offset
#normalise function, returns a number between 0 and 1
Norm = colors.LogNorm(vmin=1e-3,vmax=1,clip=True)
#how thick should a line be?
max_width = 2
#setting where and how far apart the lines should all be in data coords
ax1 = ax[0]
ax2 = ax[1]
ax3 = ax[2]
disp = ax2.transData.transform((-lim,0))
x1a = ax0.transData.inverted().transform(disp)[0]
disp = ax2.transData.transform((lim,0))
x1b = ax0.transData.inverted().transform(disp)[0]
Nz = len(numpy.where(dz!=0)[0])
iz = 0
deltax = (x1b-x1a)/(Nz+1)
x0 = x1a+deltax
disp = ax3.transData.transform((-lim,0))
y1a = ax0.transData.inverted().transform(disp)[0]
disp = ax3.transData.transform((lim,0))
y1b = ax0.transData.inverted().transform(disp)[0]
Np = len(numpy.where(dp!=0)[0])
ip =0
deltay = (y1b-y1a)/(Np+1)
y0 = y1a+deltay
disp = ax1.transData.transform((-lim,0))
z1a = ax0.transData.inverted().transform(disp)[0]
disp = ax1.transData.transform((lim,0))
z1b = ax0.transData.inverted().transform(disp)[0]
Nm = len(numpy.where(dm!=0)[0])
im = 0
deltaz = (z1b-z1a)/(Nm+1)
z0 = z1a+deltaz
for j,d in enumerate(dz):
#this block of code plots the dipole moments (or transition strengths)
if abs(d)>0:
width = max_width*Norm(3*numpy.abs(d)**2)
x = x0 +iz*deltax
# makes sure that the line is perfectly vertical in display coords
disp = ax0.transData.transform((x,0))
x2 = ax2.transData.inverted().transform(disp)[0]
p = patches.ConnectionPatch((x,0),(x2,f[j]),coordsA='data',coordsB='data',
axesA=ax0,axesB=ax2,zorder=5,color='k',
lw=width) #line object
ax2.add_artist(p) # add line to axes
iz+=1
#bar plot for transition strengths. Relative to spin-stretched TDM
ax_bar.barh(f[j],numpy.abs(d),color=blue,height=5)
d=dp[j]
if abs(d)>0:
width = max_width*Norm(3*numpy.abs(d)**2)
y= y0 +ip*deltay
# makes sure that the line is perfectly vertical in display coords
disp = ax0.transData.transform((y,0))
y2 = ax3.transData.inverted().transform(disp)[0]
p = patches.ConnectionPatch((y,0),(y2,f[j]),coordsA='data',coordsB='data',
axesA=ax0,axesB=ax3,zorder=5,color='k',
lw=width) #line object
ax3.add_artist(p)
ip+=1
#bar plot for transition strengths. Relative to spin-stretched TDM
ax_bar.barh(f[j],numpy.abs(d),color=green,height=5)
d=dm[j]
if abs(d)>0:
width = max_width*Norm(3*numpy.abs(d)**2)
z = z0 +im*deltaz
# makes sure that the line is perfectly vertical in display coords
disp = ax0.transData.transform((z,0))
z2 = ax1.transData.inverted().transform(disp)[0]
p = patches.ConnectionPatch((z,0),(z2,f[j]),coordsA='data',coordsB='data',
axesA=ax0,axesB=ax1,zorder=5,color='k',
lw=width)#line object
ax1.add_artist(p)
im +=1
#bar plot for transition strengths. Relative to spin-stretched TDM
ax_bar.barh(f[j],numpy.abs(d),color=red,height = 5)
#setup log axes for axis 4 (bar plots)
if log:
ax_bar.set_xscale('log')
ax_bar.set_xticks([1e-6,1e-3,1])
ax_bar.set_xticks([1e-5,1e-4,1e-2,1e-1],minor=True)
ax_bar.set_xticklabels(["10$^{-6}$","10$^{-3}$","1"])
ax_bar.set_xticklabels(["","","",""],minor=True)
# now to rescale the other axes so that they have the same y scale
ax1.set_ylim(minf-20,maxf+20)
grid.set_height_ratios([(maxf-minf)+40,300])
pyplot.subplots_adjust(hspace=0.1)
grid.update()
#add some axis labels
ax0.set_ylabel("Energy/$h$ (kHz)")
if Offset != 0:
ax[0].set_ylabel("Energy/$h$ (kHz) - {:.1f} MHz".format(Offset))
else:
ax[0].set_ylabel("Energy/$h$ (Hz)")
ax_bar.set_xlabel("TDM ($d_0$)")
if __name__ == '__main__':
from diatom import Hamiltonian,Calculate
H0,Hz,HDC,HAC = Hamiltonian.Build_Hamiltonians(3,Hamiltonian.RbCs,zeeman=True)
eigvals,eigstate = numpy.linalg.eigh(H0+181.5e-4*Hz)
TDM_plot(eigvals,eigstate,1,
Nmax = 3,I1 = Hamiltonian.RbCs['I1'], I2 = Hamiltonian.RbCs['I2'],
Offset=980e3,prefactor=1e-3)
fig = pyplot.figure(2)
loc = 0
TDM_pi = Calculate.TDM(3,Hamiltonian.RbCs['I1'],Hamiltonian.RbCs['I2'],0,eigstate,loc)
TDM_Sigma_plus = Calculate.TDM(3,Hamiltonian.RbCs['I1'],Hamiltonian.RbCs['I2'],-1,eigstate,loc)
TDM_Sigma_minus = Calculate.TDM(3,Hamiltonian.RbCs['I1'],Hamiltonian.RbCs['I2'],+1,eigstate,loc)
TDMs =[TDM_Sigma_minus,TDM_pi,TDM_Sigma_plus]
TDM_plot(eigvals,eigstate,loc,3,Hamiltonian.RbCs['I1'],Hamiltonian.RbCs['I2'],Offset=980e3,fig=fig)
pyplot.show()
|
python
|
from __future__ import print_function
import os, sys
from chainer.links.caffe import CaffeFunction
from chainer import serializers
print('load VGG16 caffemodel')
vgg = CaffeFunction('pretrained_model/VGG_ILSVRC_16_layers.caffemodel')
print('save "vgg16.npz"')
serializers.save_npz('pretrained_model/vgg16.npz', vgg)
|
python
|
from flask import Blueprint, request, jsonify, make_response
from core import config
import requests
console = Blueprint('console', __name__)
@console.route('/jobs', methods=['GET', 'POST', 'DELETE'])
def jobs():
url = 'http://' + config['zmapd'] + '/api/jobs/'
if request.method == 'GET':
resp = requests.get(url)
return jsonify({
'code': 20000,
'jobs': resp.json()
})
elif request.method == 'POST':
job = request.json['job']
resp = requests.post(url, data=job)
if resp.status_code == 201:
return jsonify({
'code': 20000
})
elif request.method == 'DELETE':
id = request.json['id']
resp = requests.delete(url+id+'/')
if resp.status_code == 204:
return jsonify({
'code': 20000
})
return jsonify({
'code': 20000,
'error': resp.status_code
})
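# Usage sketch (illustrative, not part of the original project): the blueprint
# above only serves requests once it is registered on a Flask application.
# The url_prefix and port below are assumptions.
if __name__ == '__main__':
    from flask import Flask
    app = Flask(__name__)
    app.register_blueprint(console, url_prefix='/api/console')
    app.run(port=5000)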
|
python
|
import hashlib
from requests import post
from observer_hub.util import logger
PRIORITY_MAPPING = {"Critical": 1, "High": 1, "Medium": 2, "Low": 3, "Info": 4}
class AdoClient(object):
def __init__(self, organization, project, personal_access_token,
team=None, issue_type="issue", rules="false", notify="false"):
self.auth = ('', personal_access_token)
self.team = f"{project}"
if team:
self.team = f"{project}\\{team}"
self.url = f'https://dev.azure.com/{organization}/{project}/_apis/wit/workitems/' \
f'${issue_type}?bypassRules={rules}&suppressNotifications={notify}&api-version=5.1'
self.query_url = f'https://dev.azure.com/{organization}/{project}/_apis/wit/wiql?api-version=5.1'
def get_issues(self, issue_hash=None):
q = f"SELECT [System.Id] From WorkItems Where [System.Description] Contains \"{issue_hash}\""
data = post(self.query_url, auth=self.auth, json={"query": q},
headers={'content-type': 'application/json'}).json()
return data["workItems"]
def create_issues(self, test_name, data):
for d in data:
if d['status'] == 'passed':
continue
issue_hash = hashlib.sha256(
f"{d['scope']} {d['name']} {d['aggregation']} {d['raw_result'].page_identifier}".encode(
'utf-8')).hexdigest()
if len(self.get_issues(issue_hash)) > 0:
continue
logger.info(f"=====> About to crate Azure DevOps issues")
steps = []
for i, cmd in enumerate(d['raw_result'].commands, 1):
command = cmd['command']
value = cmd["value"]
target = cmd['target']
action = "to" if value != "" else "on"
text = f"*{command}* {value} {action} *{target}*"
if command == "open":
text = f"*{command}* {action} {target}"
steps.append(f"{i}. {text}")
steps = "\n".join(steps)
summary = f"{d['scope'].capitalize()} [{d['name']}] {d['aggregation']} value violates threshold rule for {test_name}"
description = f"""Value {d['actual']} violates threshold rule: {d['scope']} [{d['name']}] {d['aggregation']}
{d['rule']} {d['expected']} for {test_name}
Steps:\n {steps}
*Issue Hash:* {issue_hash}
"""
fields_mapping = {
"/fields/System.Title": summary,
"/fields/Microsoft.VSTS.Common.Priority": PRIORITY_MAPPING['High'],
"/fields/System.Description": description,
"/fields/System.AreaPath": self.team,
"/fields/System.IterationPath": self.team
}
body = []
for key, value in fields_mapping.items():
if value:
_piece = {"op": "add", "path": key, "value": value}
body.append(_piece)
res = post(self.url, auth=self.auth, json=body,
headers={'content-type': 'application/json-patch+json'})
logger.info(f"Azure DevOps issue {res.json()['id']} has been created")
def notify_azure_devops(test_name, threshold_results, args):
caps = args['desired_capabilities']
ado_organization = caps.get('ado_organization', '')
ado_project = caps.get('ado_project', '')
ado_token = caps.get('ado_token', '')
ado_team = caps.get('ado_team', '')
if ado_organization and ado_project and ado_token:
try:
client = AdoClient(ado_organization, ado_project, ado_token, ado_team)
client.create_issues(test_name, threshold_results["details"])
except Exception as e:
logger.error(f"Error during Azure DevOps ticket creation {e}")
|
python
|
from terrascript import _resource
class ignition_config(_resource): pass
config = ignition_config
class ignition_disk(_resource): pass
disk = ignition_disk
class ignition_raid(_resource): pass
raid = ignition_raid
class ignition_filesystem(_resource): pass
filesystem = ignition_filesystem
class ignition_file(_resource): pass
file = ignition_file
class ignition_directory(_resource): pass
directory = ignition_directory
class ignition_link(_resource): pass
link = ignition_link
class ignition_systemd_unit(_resource): pass
systemd_unit = ignition_systemd_unit
class ignition_networkd_unit(_resource): pass
networkd_unit = ignition_networkd_unit
class ignition_user(_resource): pass
user = ignition_user
class ignition_group(_resource): pass
group = ignition_group
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-06 20:39
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('Nomina', '0004_auto_20170406_2015'),
]
operations = [
migrations.RemoveField(
model_name='entradacontable',
name='asiento',
),
migrations.DeleteModel(
name='EntradaContable',
),
]
|
python
|
# -*- coding: utf-8 -*-
from django.conf.urls import url
from . import views
urlpatterns = [
# Index Page
url(r'^$', views.index, name='index'),
url(r'^registBankAccount$', views.registBankAccount, name='RegistBankAccount'),
url(r'^updateBankAccount$', views.updateBankAccount, name='UpdateBankAccount'),
url(r'^closeBankAccount$', views.closeBankAccount, name='CloseBankAccount'),
url(r'^revokeCloseBankAccount$', views.revokeCloseBankAccount, name='RevokeCloseBankAccount'),
url(r'^deleteBankAccount$', views.deleteBankAccount, name='DeleteBankAccount'),
url(r'^getBankAccountInfo$', views.getBankAccountInfo, name='GetBankAccountInfo'),
url(r'^getBankAccountMgtURL$', views.getBankAccountMgtURL, name='GetBankAccountMgtURL'),
url(r'^listBankAccount$', views.listBankAccount, name='ListBankAccount'),
url(r'^requestJob$', views.requestJob, name='RequestJob'),
url(r'^getJobState$', views.getJobState, name='GetJobState'),
url(r'^listActiveJob$', views.listActiveJob, name='ListActiveJob'),
url(r'^search$', views.search, name='Search'),
url(r'^summary$', views.summary, name='Summary'),
url(r'^saveMemo$', views.saveMemo, name='SaveMemo'),
url(r'^getFlatRatePopUpURL$', views.getFlatRatePopUpURL, name='GetFlatRatePopUpURL'),
url(r'^getFlatRateState$', views.getFlatRateState, name='GetFlatRateState'),
url(r'^getBalance$', views.getBalance, name='GetBalance'),
url(r'^getChargeURL$', views.getChargeURL, name='GetChargeURL'),
url(r'^GetPaymentURL', views.getPaymentURL, name='GetPaymentURL'),
url(r'^GetUseHistoryURL', views.getUseHistoryURL, name='GetUseHistoryURL'),
url(r'^getPartnerBalance$', views.getPartnerBalance, name='GetPartnerBalance'),
url(r'^getPartnerURL$', views.getPartnerURL, name='GetPartnerURL'),
url(r'^getChargeInfo$', views.getChargeInfo, name='GetChargeInfo'),
url(r'^getAccessURL', views.getAccessURL, name='GetAccessURL'),
url(r'^checkIsMember$', views.checkIsMember, name='CheckIsMember'),
url(r'^checkID$', views.checkID, name='CheckID'),
url(r'^joinMember$', views.joinMember, name='JoinMember'),
url(r'^getCorpInfo$', views.getCorpInfo, name='GetCorpInfo'),
url(r'^updateCorpInfo$', views.updateCorpInfo, name='UpdateCorpInfo'),
url(r'^registContact$', views.registContact, name='RegistContact'),
url(r'^GetContactInfo$', views.getContactInfo, name='GetContactInfo'),
url(r'^listContact$', views.listContact, name='ListContact'),
url(r'^updateContact$', views.updateContact, name='UpdateContact'),
]
|
python
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 KuraLabs S.R.L
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Utilities for filtering data.
"""
from fnmatch import fnmatch
def included_in(value, patterns):
"""
Check if the given value is included in the given list of patterns.
:param str value: The value to check for.
:param list patterns: List of patterns to check for.
:return: True if the value is included, False otherwise.
:rtype: bool
"""
return any(fnmatch(value, pattern) for pattern in patterns)
def is_wanted(value, include, exclude):
"""
Check that the given value is included in the include list and not included
in the exclude list.
:param str value: The value to check for.
:param list include: List of patterns of values to include.
:param list exclude: List of patterns of values to exclude.
:return: True if the value is wanted, False otherwise.
:rtype: bool
"""
return included_in(value, include) and not included_in(value, exclude)
def filter_dict(data, include, exclude, joinchar='.'):
"""
Filter a dictionary using the provided include and exclude patterns.
:param dict data: The data to filter
(dict or OrderedDict, type is respected).
:param list include: List of patterns of key paths to include.
:param list exclude: List of patterns of key paths to exclude.
:param str joinchar: String used to join the keys to form the path.
:return: The filtered dictionary.
:rtype: dict or OrderedDict
"""
assert isinstance(data, dict)
def filter_dict_recursive(breadcrumbs, element):
if not isinstance(element, dict):
return element
return element.__class__(
(key, filter_dict_recursive(breadcrumbs + [key], value))
for key, value in element.items()
if is_wanted(joinchar.join(breadcrumbs + [key]), include, exclude)
)
return filter_dict_recursive([], data)
__all__ = [
'included_in',
'is_wanted',
'filter_dict',
]
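# Minimal usage example (added for illustration): include patterns are matched
# against the dotted key path at every nesting level, so a nested key survives
# only if its parent path and its own path (or a wildcard) are both included.
if __name__ == '__main__':
    data = {'a': {'b': 1, 'c': 2}, 'd': 3}
    assert filter_dict(data, include=['a', 'a.b'], exclude=[]) == {'a': {'b': 1}}
    # Wildcards work too, since matching is done with fnmatch:
    assert filter_dict(data, include=['*'], exclude=['a.c']) == {'a': {'b': 1}, 'd': 3}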
|
python
|
import os
import re
import torch
# Formatting strings (constant)
save_format_str = "checkpoint{:08d}.pth"
save_re_string = r"checkpoint(\d{8}).pth"
assert re.match(save_re_string, save_format_str.format(0)) is not None
def save_checkpoint(model_list, save_dir, epoch, optimizer=None, lr_scheduler=None):
checkpoint = {
'model_states': [model.state_dict() for model in model_list],
'optimizer_state': optimizer.state_dict() if optimizer is not None else None,
'epoch': epoch
}
if lr_scheduler is not None:
checkpoint['lr_scheduler'] = lr_scheduler.state_dict()
torch.save(checkpoint, os.path.join(save_dir, save_format_str.format(epoch)))
def load_checkpoint(model_list, save_dir, epoch=-1, load_to_device_name=None,
optimizer=None, lr_scheduler=None):
# Search for last checkpoint if no epoch given
if epoch < 0:
files = os.listdir(save_dir)
checkpoint_files = \
list(filter(lambda s: re.match(save_re_string, s) is not None, files))
if len(checkpoint_files) == 0:
print("No save files found to load! Proceding with no loading")
return 0
last_file = sorted(checkpoint_files)[-1]
load_epoch = int(re.match(save_re_string, last_file).group(1))
full_path = os.path.join(save_dir, last_file)
else:
full_path = os.path.join(save_dir, save_format_str.format(epoch))
load_epoch = epoch
print("Loading checkpoint from: {}".format(full_path), flush=True)
checkpoint = torch.load(full_path, map_location=load_to_device_name)
model_states = checkpoint['model_states']
assert len(model_states) == len(model_list), (len(model_states), len(model_list))
for model, state in zip(model_list, model_states):
model.load_state_dict(state)
if optimizer is not None:
optimizer.load_state_dict(checkpoint['optimizer_state'])
if lr_scheduler is not None:
lr_scheduler.load_state_dict(checkpoint["lr_scheduler"])
return load_epoch + 1
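# Quick usage sketch (added for illustration; the tiny model and optimizer are
# placeholders, any list of torch.nn.Module objects works the same way).
if __name__ == '__main__':
    import tempfile
    import torch.nn as nn
    import torch.optim as optim

    save_dir = tempfile.mkdtemp()
    model = nn.Linear(4, 2)
    optimizer = optim.SGD(model.parameters(), lr=0.1)

    # Save epoch 0, then resume; load_checkpoint returns the epoch to continue from.
    save_checkpoint([model], save_dir, epoch=0, optimizer=optimizer)
    next_epoch = load_checkpoint([model], save_dir, optimizer=optimizer)
    assert next_epoch == 1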
|
python
|
# Purpose: Extract frames from video
import cv2
import os
import progressbar
import threading
class ExtractFrames:
def __init__(self, video_path, person_name):
self.video_path = video_path
self.person_name = person_name
if not os.path.isdir(f"Images/Known/{str(person_name)}"):
os.makedirs(f'Images/Known/{str(person_name)}')
def extract(self):
video = cv2.VideoCapture(self.video_path)
frame_count = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
print(f"Frame Count: {str(frame_count)}")
bar = progressbar.ProgressBar(maxval=frame_count,
widgets=[progressbar.Bar('⬛', '[', ']', '⬜'), ' ',
progressbar.Percentage()]).start()
index = 0
while video.isOpened():
ret, frame = video.read()
if not ret:
break
cv2.imwrite(
f"Images/Known/{self.person_name}/{os.path.basename(self.video_path).split('.')[0] + '_' + str(index)}.jpg", frame)
index += 1
bar.update(bar.currval + 1)
bar.finish()
video.release()
cv2.destroyAllWindows()
# Example: extract frames from every video in "Videos", one worker thread per video.
if __name__ == "__main__":
    videos = os.listdir("Videos")
    threads = [threading.Thread(
        target=ExtractFrames(f"Videos/{video}", "Olivia Rodrigo").extract)
        for video in videos]
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()
|
python
|
from pysnooper import snoop
from tools import *
import datetime
import binascii
import time
import nfc
# Database operations performed when someone enters or leaves a room
def IO(ID: str, STATUS: str) -> None:
    conn = sql()
    cursor = conn.cursor()
    # Count how many people are "IN" before this entry/exit is processed -----------------
    cursor.execute(f"select count(*) from student_tb where {STATUS}='IN'")
    _num = cursor.fetchone()
    num_before = _num['count(*)']
    # Fetch everything registered for this ID ---------------------------------------------
    cursor.execute(f"select * from student_tb where ID='{str(ID)}'")
    io = cursor.fetchone()
    # Toggle this person's entry/exit status ----------------------------------------------
    if str(io[STATUS]) == "OUT":  # if "OUT", switch to "IN"
        color, status_now = "good", "入室"
        cursor.execute(f"update student_tb set {STATUS}='IN' where ID='{str(ID)}'")
        conn.commit()
        cursor.close()
        conn.close()
        # Handle the case where the person came here without checking out of the other room
        ANOTHER_STATUS = 'STATUS_B' if STATUS == 'STATUS_A' else 'STATUS_A'
        # If the other room is still "IN", call IO recursively to set it to "OUT".
        # "Recursively" here means a single extra call, not a loop.
        if str(io[ANOTHER_STATUS]) == "IN":  # if the other room's status is still "IN"
            IO(ID, ANOTHER_STATUS)
    else:  # if "IN", switch to "OUT"
        color, status_now = "danger", "退室"
        cursor.execute(f"update student_tb set {STATUS}='OUT' where ID='{str(ID)}'")
        conn.commit()
        cursor.close()
        conn.close()
    # conn was closed above (and IO may have been called recursively), so open a fresh connection here
    conn = sql()
    cursor = conn.cursor()
    # Look up the NICKNAME bound to this ID ------------------------------------------------
    cursor.execute(f"select NICKNAME from student_tb where ID='{str(ID)}'")
    nickname = cursor.fetchone()['NICKNAME']
    # Count how many people are "IN" after this entry/exit ---------------------------------
    cursor.execute(f"select count(*) from student_tb where {STATUS}='IN'")
    _num_after = cursor.fetchone()
    num_after = _num_after['count(*)']
    print(nickname)
    cursor.close()
    conn.close()
    # ======================================================================================
    # The room was empty and one person entered: announce OPEN
    if num_before == 0 and num_after == 1: message(None, STATUS, status_now, dics[status_now])
    # Announce the current state
    message(color, STATUS, status_now, f"<{status_now}>: {nickname}\n現在 {num_after} 人です")
    # The room became empty: announce CLOSE
    if num_after == 0: message(None, STATUS, status_now, dics[status_now])
# Read the holder's name and the card ID from a student ID card
def scan_UNIV(target_res: nfc, clf: nfc) -> str:
    tag = nfc.tag.activate_tt3(clf, target_res)
    service_code = [nfc.tag.tt3.ServiceCode(0x100B >> 6, 0x100B & 0x3f)]
    bc_univ_id = [nfc.tag.tt3.BlockCode(0)]
    bc_name = [nfc.tag.tt3.BlockCode(1)]
    name = tag.read_without_encryption(service_code, bc_name).decode()  # read the name from the student card
    univ_id = tag.read_without_encryption(service_code, bc_univ_id).decode()  # read the card's own ID from the student card
    return name, univ_id
# Look up the internal ID from a student-card ID
def connected_UNIV(univ_id: str) -> str:
    ID = update_sql(f"select ID from student_tb where UNIV_ID='{univ_id}'")['ID']
    return ID
# Read the IDm from a transit IC card
def scan_transport(target_res: nfc, clf: nfc) -> str:
    tag = nfc.tag.activate_tt3(clf, target_res)
    _idm = binascii.hexlify(tag.idm)
    idm = _idm.decode()  # extract the IDm
    return idm
# Look up the internal ID from a transit IC card's IDm
def connected_transport(idm: str) -> str:
    try: return update_sql(f"select ID from student_tb where TRANSPORTATION_ID1='{idm}'")['ID']
    except: pass
    try: return update_sql(f"select ID from student_tb where TRANSPORTATION_ID2='{idm}'")['ID']
    except: return
# Decide whether this ID was already detected very recently
def process(ID: str, STATUS: str, latestID: str, latestTIME: datetime) -> str and datetime:
    lag = datetime.datetime.now() - latestTIME
    # The same ID was detected within the last WAIT_TIME seconds
    if ID == latestID and lag.total_seconds() < WAIT_TIME:
        # Show how many seconds remain until the next scan will be accepted
        print("Please wait "+str(int(WAIT_TIME-lag.total_seconds())+1)+" seconds")
        time.sleep(0.5)
        return latestID, latestTIME
    else:  # the ID was not detected recently
        IO(ID, STATUS)  # perform the entry/exit update
        return ID, datetime.datetime.now()
# Register a nickname with a student ID card
def regist_UNIV(name: str, univ_id: str) -> None:
    result = "NULL"
    try:
        nickname = update_sql(f"select * from {DATA_TB}")['nickname']
        # Raise if that nickname is already registered in the database
        assert update_sql(f"select count(*) from student_tb where NICKNAME='{nickname}'")['count(*)'] == 0
        if update_sql(f"select count(*) from student_tb where UNIV_ID='{univ_id}'")['count(*)'] == 1:
            # The student card is already registered in the database:
            # just change the NICKNAME
            update_sql(f"update student_tb set NICKNAME='{nickname}' where UNIV_ID='{univ_id}'")
            result = 'success'
        else:
            # The student card is not registered yet
            number = update_sql("select max(ID) from student_tb")['max(ID)']+1  # a first-time registration gets the current maximum ID plus one
            update_sql(f"insert into student_tb values('{number}', '{univ_id}', NULL, NULL, '{name}', '{nickname}', 'OUT', 'OUT')")
            result = 'fir_suc'
    except: result = 'failure'
    finally:
        update_sql(f"update {DATA_TB} set result='{result}'")
        update_sql(f"update {DATA_TB} set flag='1'")
        print(result)
# Register a nickname with a transit IC card
def regist_transportation(idm: str) -> None:
    result = "NULL"
    # If this card (e.g. Suica) has never been registered, store its IDm under the entered nickname.
    # If it has been registered before, change the associated nickname to the entered one.
    try:
        nickname = update_sql(f"select * from {DATA_TB}")['nickname']
        # Count how many transit IC cards are registered under that nickname
        count0 = int(update_sql(f"select count(TRANSPORTATION_ID1) from student_tb where NICKNAME='{nickname}'")['count(TRANSPORTATION_ID1)'])+ \
            int(update_sql(f"select count(TRANSPORTATION_ID2) from student_tb where NICKNAME='{nickname}'")['count(TRANSPORTATION_ID2)'])
        # Check whether this IDm is already registered anywhere in the database
        count1 = update_sql(f"select count(*) from student_tb where TRANSPORTATION_ID1='{idm}'")['count(*)']
        count2 = update_sql(f"select count(*) from student_tb where TRANSPORTATION_ID2='{idm}'")['count(*)']
        if count0 == 0 and count1 == 0 and count2 == 0:
            # No transit IC card is registered under that nickname, and
            # this IDm is not registered anywhere in the database:
            # store the IDm as the first card for the entered nickname
            update_sql(f"update student_tb set TRANSPORTATION_ID1='{idm}' where NICKNAME='{nickname}'")
        elif count0 == 1 and count1 == 0 and count2 == 0:
            # One transit IC card is already registered under that nickname, and
            # this IDm is not registered anywhere in the database:
            # store the IDm as the second card for the entered nickname
            update_sql(f"update student_tb set TRANSPORTATION_ID2='{idm}' where NICKNAME='{nickname}'")
        else:  # otherwise change the nickname bound to this IDm to the entered one
            # Raise if that nickname is already registered in the database
            assert update_sql(f"select count(*) from student_tb where NICKNAME='{nickname}'")['count(*)'] == 0
            try: update_sql(f"update student_tb set NICKNAME='{nickname}' where TRANSPORTATION_ID1='{idm}'")
            except: pass
            try: update_sql(f"update student_tb set NICKNAME='{nickname}' where TRANSPORTATION_ID2='{idm}'")
            except: raise
        result = 'success'
    except: result = 'failure'
    finally:
        update_sql(f"update {DATA_TB} set result='{result}'")
        update_sql(f"update {DATA_TB} set flag='1'")
        print(result)
#@snoop()
def Read(clf: nfc, STATUS: str) -> None:
    latestID = "0"
    latestTIME = datetime.datetime.now()
    while True:
        # Try to read a student ID card
        target_req = nfc.clf.RemoteTarget("212F")
        target_res = clf.sense(target_req, iterations=1, interval=0.01)
        # Switch the request to transit-IC-card mode; this also lets iPhone Suica/Pasmo respond
        target_req.sensf_req = bytearray.fromhex("0000030000")
        if not target_res is None:  # a student card was read
            try:
                name, univ_id = scan_UNIV(target_res, clf)
                # Access-control (entry/exit) mode
                if update_sql(f'select * from {DATA_TB}')['flag'] == "1":
                    ID = connected_UNIV(univ_id)  # get the internal ID assigned to each member from the student-card ID
                    latestID, latestTIME = process(ID, STATUS, latestID, latestTIME)
                else:  # registration mode
                    regist_UNIV(name, univ_id)  # register the student-card ID and name, or change the nickname
                    time.sleep(2.0)
            #except Exception as e: print(e)
            except: pass
        else:  # a transit IC card was read, or nothing was read at all
            target_res = clf.sense(target_req, iterations=30, interval=0.01)
            try:
                # Read the transit IC card; if nothing can be read here, go back to the top of the loop
                idm = scan_transport(target_res, clf)
                # Access-control (entry/exit) mode
                if update_sql(f'select * from {DATA_TB}')['flag'] == "1":
                    ID = connected_transport(idm)  # get the internal ID assigned to each member from the card's IDm
                    latestID, latestTIME = process(ID, STATUS, latestID, latestTIME)
                else:  # registration mode
                    regist_transportation(idm)  # register the transit card's IDm, or change the nickname
                    time.sleep(2.0)
            # except Exception as e: print(e)
            except: pass
if __name__ == "__main__":
    # Run the card reader loop =============
    print('===== I\'M READY =====')
    with nfc.ContactlessFrontend(usb) as clf:
        Read(clf, STATUS)
|
python
|
import yaml
import os
import time
import re
from my_devices import nxos1, nxos2
from netmiko import ConnectHandler
from ciscoconfparse import CiscoConfParse
from jinja2 import FileSystemLoader, StrictUndefined, Template
from jinja2.environment import Environment
env = Environment(undefined=StrictUndefined)
#env.loader = FileSystemLoader("C://Users//John Celani//Documents//Scripts//Python Class//Week5")
env.loader = FileSystemLoader(".")
sw1_vars = {
"hostname" : "!_nxos1",
"int" : "Ethernet1/1",
"ip_add" : "10.1.100.1",
"ip_subnet" : "24",
"local_as" : 22,
"remote_ip" : "10.1.100.2",
"remote_as" : 22,
}
sw2_vars = {
"hostname" : "!_nxos2",
"int" : "Ethernet1/1",
"ip_add" : "10.1.100.2",
"ip_subnet" : "24",
"local_as" : 22,
"remote_ip" : "10.1.100.1",
"remote_as" : 22,
}
template_file = 'exercise_2_2.j2'
nxos1["j2_vars"] = sw1_vars
nxos2["j2_vars"] = sw2_vars
for device in (nxos1, nxos2):
temp_dict = device.copy()
j2_vars_temp = temp_dict.pop("j2_vars")
template = env.get_template(template_file)
temp_config = template.render(**j2_vars_temp)
configs = [temp_config.strip() for temp_config in temp_config.splitlines()]
netconnect = ConnectHandler(**temp_dict)
device["net_conn"] = netconnect
print(f"Sending Configurations to {netconnect.find_prompt()}")
output = netconnect.send_config_set(configs)
print("Completed")
print()
print("Waiting 15s for BGP to Converge")
print()
time.sleep(15)
print("Testing BGP and Connectivity")
print()
for device in (nxos1, nxos2):
remote_ip = device["j2_vars"]["remote_ip"]
netconnect = device["net_conn"]
local_ip = device["host"]
print(f"Checking BGP Connectivity on {local_ip} to {remote_ip}")
bgpoutput = netconnect.send_command(f"show ip bgp summary | include {remote_ip}")
match = re.search(r"\s+(\S+)\s*$", bgpoutput)
prefix_received = match.group(1)
try:
int(prefix_received)
print( f"{local_ip} BGP Reached Established state with {remote_ip}")
except ValueError:
print(f"{local_ip} BGP failed to reach established state with {remote_ip}")
print()
print(f"Testing connectivity from {local_ip} to {remote_ip}")
pingoutput = netconnect.send_command(f"ping {remote_ip}", delay_factor=5)
if "64 bytes from" not in pingoutput:
print(f"Failed ping test to {remote_ip}")
else:
print(f"Conenctivity between {local_ip} to {remote_ip} succesful")
print()
for device in (nxos1, nxos2):
netconnect = device["net_conn"]
netconnect.disconnect()
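# For reference, a hypothetical exercise_2_2.j2 of the shape this script assumes
# (the real template is not shown here; this only illustrates the variables it
# would consume):
#
#   hostname {{ hostname }}
#   interface {{ int }}
#     no switchport
#     ip address {{ ip_add }}/{{ ip_subnet }}
#     no shutdown
#   router bgp {{ local_as }}
#     neighbor {{ remote_ip }}
#       remote-as {{ remote_as }}
#       address-family ipv4 unicast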
|
python
|
# -*- coding: utf-8 -*-
import ipaddress
from dnsdb_common.library.exception import BadParam
from dnsdb_common.library.utils import format_ip
from . import commit_on_success
from . import db
from .models import DnsColo
from .models import DnsRecord
from .models import IpPool
from .models import Subnets
class SubnetIpDal(object):
@staticmethod
def get_colo_by_group(group):
return [record.colo_name
for record in
db.session.query(DnsColo.colo_name).filter_by(colo_group=group).order_by(DnsColo.colo_name)]
@staticmethod
def list_region(**condition):
q = Subnets.query
if condition:
q = q.filter_by(**condition)
return [item.json_serialize() for item in q.order_by(Subnets.region_name, Subnets.subnet)]
@staticmethod
def get_region_by_ip(ip):
ip, _ = format_ip(ip)
record = IpPool.query.filter_by(fixed_ip=ip).first()
if not record:
raise BadParam('no such ip: %s' % ip, msg_ch=u'没有对应的ip记录')
return SubnetIpDal.get_region_by_name(record.region)
@staticmethod
def get_region_by_name(region):
record = Subnets.query.filter_by(region_name=region).first()
if not record:
raise BadParam('no such subnet with region_name: %s' % region, msg_ch=u'没有对应的网段记录')
return record.json_serialize()
@staticmethod
def get_region_by_name_like(region):
region = '%{}%'.format(region)
records = Subnets.query.filter(Subnets.region_name.like(region))
return [record.json_serialize() for record in records]
@staticmethod
def is_intranet_region(region):
record = Subnets.query.filter_by(region_name=region).first()
if not record:
raise BadParam('no such subnet with region_name: %s' % region, msg_ch=u'没有对应的网段记录')
return record.intranet
@staticmethod
def is_ip_exist(record):
return IpPool.query.filter_by(fixed_ip=record).first() is not None
@staticmethod
def get_subnet_ip(region):
records = IpPool.query.outerjoin(DnsRecord, DnsRecord.record == IpPool.fixed_ip).add_columns(
IpPool.fixed_ip, IpPool.allocated,
DnsRecord.domain_name).filter(IpPool.region == region).order_by(IpPool.fixed_ip)
result = [{"ip": item.fixed_ip, "domain": item.domain_name} for item in records]
return result
@staticmethod
def add_subnet(subnet, region, colo, comment, username):
subnet = ipaddress.ip_network(subnet)
intranet = subnet.is_private
net_id = subnet.network_address
broadcast_ip = subnet.broadcast_address
is_ipv6 = (subnet.version == 6)
ips_dict_list = []
for i in subnet:
if i == net_id or i == broadcast_ip:
continue
ips_dict_list.append({
'region': region,
'fixed_ip': str(i),
'is_ipv6': is_ipv6
})
if Subnets.query.filter_by(region_name=region).first():
raise BadParam('region already exist', msg_ch='网段名已存在')
try:
with db.session.begin(subtransactions=True):
subnet_item = Subnets(
region_name=region,
subnet=str(subnet),
create_user=username,
intranet=intranet,
colo=colo,
is_ipv6=is_ipv6
)
if comment:
subnet_item.comment = comment
db.session.add(subnet_item)
db.session.bulk_insert_mappings(IpPool, ips_dict_list)
except Exception:
raise BadParam('Ip conflict with other regions', msg_ch=u'和已有的网段有交叉,请检查后重试')
@staticmethod
@commit_on_success
def delete_subnet(subnet, region):
record = Subnets.query.filter_by(region_name=region, subnet=subnet).first()
if not record:
raise BadParam('Region does not exist: %s' % region, msg_ch=u'网段不存在')
# delete an entire region
ip_records = SubnetIpDal.get_subnet_ip(region)
if list(filter(lambda x: x['domain'], ip_records)):
raise BadParam('Region %s has records,delete failed!' % region, msg_ch=u'网段正在使用中,不允许删除')
Subnets.query.filter_by(region_name=region, subnet=subnet).delete()
IpPool.query.filter_by(region=region).delete()
@staticmethod
@commit_on_success
def rename_subnet(old_region, new_region, username):
if Subnets.query.filter_by(region_name=new_region).first():
raise BadParam("Region %s existed, rename %s failed" % (new_region, old_region),
msg_ch=u'%s已经存在' % new_region)
if not Subnets.query.filter_by(region_name=old_region).first():
raise BadParam("Region %s does not existed, rename failed" % old_region,
msg_ch=u'%s不存在' % old_region)
Subnets.query.filter(Subnets.region_name == old_region).update({
"region_name": new_region
})
IpPool.query.filter(IpPool.region == old_region).update({
'region': new_region
})
@staticmethod
def get_subnets_by_condition(**kwargs):
session = db.session
query = session.query(Subnets)
if kwargs:
query = query.filter_by(**kwargs)
return query.order_by(Subnets.region_name, Subnets.subnet).all()
@staticmethod
def bulk_update_subnet(update_mapping):
session = db.session
with session.begin(subtransactions=True):
session.bulk_update_mappings(Subnets, update_mapping)
|
python
|
# activity/urls.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from django.conf.urls import url
from . import views_admin
urlpatterns = [
# url(r'^$', views_admin.batches_home_view, name='batches_home',),
# url(r'^batch_action_list/$', views_admin.batch_action_list_view, name='batch_action_list'),
# url(r'^batch_list/$', views_admin.batch_list_view, name='batch_list'),
# url(r'^batch_list_process/$', views_admin.batch_list_process_view, name='batch_list_process'),
]
|
python
|
# -*- coding: utf-8 -*-
"""
.. module:: openzwave.network
This file is part of **python-openzwave** project https://github.com/OpenZWave/python-openzwave.
:platform: Unix, Windows, MacOS X
:synopsis: openzwave API
.. moduleauthor: bibi21000 aka Sébastien GALLET <[email protected]>
License : GPL(v3)
**python-openzwave** is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
**python-openzwave** is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with python-openzwave. If not, see http://www.gnu.org/licenses.
"""
import os
#from collections import namedtuple
import time
import sys
import six
if six.PY3:
from pydispatch import dispatcher
else:
from louie import dispatcher
import threading
import libopenzwave
import openzwave
from openzwave.object import ZWaveException, ZWaveTypeException, ZWaveObject
from openzwave.controller import ZWaveController
from openzwave.node import ZWaveNode
from openzwave.option import ZWaveOption
from openzwave.scene import ZWaveScene
from openzwave.singleton import Singleton
# Set default logging handler to avoid "No handler found" warnings.
import logging
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
"""NullHandler logger for python 2.6"""
def emit(self, record):
pass
logger = logging.getLogger('openzwave')
logger.addHandler(NullHandler())
try:
import sqlite3 as lite
except ImportError:
logger.warning('pysqlite is not installed')
class ZWaveNetwork(ZWaveObject):
"""
The network object = homeid.
It contains a reference to the manager and the controller.
It dispatches the following louie signals :
* SIGNAL_NETWORK_FAILED = 'NetworkFailed'
* SIGNAL_NETWORK_STARTED = 'NetworkStarted'
* SIGNAL_NETWORK_READY = 'NetworkReady'
* SIGNAL_NETWORK_STOPPED = 'NetworkStopped'
* SIGNAL_NETWORK_RESETTED = 'DriverResetted'
* SIGNAL_NETWORK_AWAKED = 'DriverAwaked'
* SIGNAL_DRIVER_FAILED = 'DriverFailed'
* SIGNAL_DRIVER_READY = 'DriverReady'
* SIGNAL_DRIVER_RESET = 'DriverReset'
* SIGNAL_DRIVER_REMOVED = 'DriverRemoved'
* SIGNAL_NODE_ADDED = 'NodeAdded'
* SIGNAL_NODE_EVENT = 'NodeEvent'
* SIGNAL_NODE_NAMING = 'NodeNaming'
* SIGNAL_NODE_NEW = 'NodeNew'
* SIGNAL_NODE_PROTOCOL_INFO = 'NodeProtocolInfo'
* SIGNAL_NODE_READY = 'NodeReady'
* SIGNAL_NODE_REMOVED = 'NodeRemoved'
* SIGNAL_SCENE_EVENT = 'SceneEvent'
* SIGNAL_VALUE_ADDED = 'ValueAdded'
* SIGNAL_VALUE_CHANGED = 'ValueChanged'
* SIGNAL_VALUE_REFRESHED = 'ValueRefreshed'
* SIGNAL_VALUE_REMOVED = 'ValueRemoved'
* SIGNAL_POLLING_ENABLED = 'PollingEnabled'
* SIGNAL_POLLING_DISABLED = 'PollingDisabled'
* SIGNAL_CREATE_BUTTON = 'CreateButton'
* SIGNAL_DELETE_BUTTON = 'DeleteButton'
* SIGNAL_BUTTON_ON = 'ButtonOn'
* SIGNAL_BUTTON_OFF = 'ButtonOff'
* SIGNAL_ESSENTIAL_NODE_QUERIES_COMPLETE = 'EssentialNodeQueriesComplete'
* SIGNAL_NODE_QUERIES_COMPLETE = 'NodeQueriesComplete'
* SIGNAL_AWAKE_NODES_QUERIED = 'AwakeNodesQueried'
* SIGNAL_ALL_NODES_QUERIED = 'AllNodesQueried'
* SIGNAL_ALL_NODES_QUERIED_SOME_DEAD = 'AllNodesQueriedSomeDead'
* SIGNAL_MSG_COMPLETE = 'MsgComplete'
* SIGNAL_ERROR = 'Error'
* SIGNAL_NOTIFICATION = 'Notification'
* SIGNAL_CONTROLLER_COMMAND = 'ControllerCommand'
* SIGNAL_CONTROLLER_WAITING = 'ControllerWaiting'
The table presented below sets notifications in the order they might typically be received,
and grouped into a few logically related categories. Of course, given the variety
of ZWave controllers, devices and network configurations the actual sequence will vary (somewhat).
The descriptions below the notification name (in square brackets) identify whether the
notification is always sent (unless there’s a significant error in the network or software)
or potentially sent during the execution sequence.
Driver Initialization Notification
The notification below is sent when OpenZWave has successfully connected
to a physical ZWave controller.
* DriverReady
[always sent] Sent when the driver (representing a connection between OpenZWave
and a Z-Wave controller attached to the specified serial (or HID) port) has been initialized.
At the time this notification is sent, only certain information about the controller itself is known:
* Controller Z-Wave version
* Network HomeID
* Controller capabilities
* Controller Application Version & Manufacturer/Product ID
* Nodes included in the network
* DriverRemoved
[always sent (either due to Error or by request)] The Driver is being removed.
Do Not Call Any Driver Related Methods after receiving this
Node Initialization Notifications
As OpenZWave starts, it identifies and reads information about each node in the network.
The following notifications may be sent during the initialization process.
* NodeNew
[potentially sent] Sent when a new node has been identified as part of the Z-Wave network.
It is not sent if the node was identified in a prior execution of the OpenZWave library
and stored in the zwcfg*.xml file.
At the time this notification is sent, very little is known about the node itself...
only that it is new to OpenZWave. This message is sent once for each new node identified.
* NodeAdded
[always sent (for each node associated with the controller)]
Sent when a node has been added to OpenZWave’s set of nodes. It can be
triggered either as the zwcfg*.xml file is being read, when a new node
is found on startup (see NodeNew notification above), or if a new node
is included in the network while OpenZWave is running.
As with NodeNew, very little is known about the node at the time the
notification is sent…just the fact that a new node has been identified
and its assigned NodeID.
* NodeProtocolInfo
[potentially sent] Sent after a node’s protocol information has been
successfully read from the controller.
At the time this notification is sent, only certain information about the node is known:
* Whether it is a “listening” or “sleeping” device
* Whether the node is capable of routing messages
* Maximum baud rate for communication
* Version number
* Security byte
NodeNaming
[potentially sent] Sent when a node’s name has been set or changed
(although it may be “set” to “” or NULL).
* ValueAdded
[potentially sent] Sent when a new value has been associated with the node.
At the time this notification is sent, the new value may or may not
have “live” data associated with it. It may be populated, but it may
alternatively just be a placeholder for a value that has not been read
at the time the notification is sent.
* NodeQueriesComplete
[always sent (for each node associated with the controller that has been successfully queried)] Sent when a node’s values and attributes have been fully queried. At the time this notification is sent, the node’s information has been fully read at least once. So this notification might trigger “full” display of the node’s information, values, etc. If this notification is not sent, it indicates that there has been a problem initializing the device. The most common issue is that the node is a “sleeping” device. The NodeQueriesComplete notification will be sent when the node wakes up and the query process completes.
Initialization Complete Notifications
As indicated above, when OpenZWave starts it reads certain information
from a file, from the controller and from the network. The following
notifications identify when this initialization/querying process is complete.
* AwakeNodesQueried
[always sent] Sent when all “listening” always-on devices have been
queried successfully. It also indicates, by implication, that there
are some “sleeping” nodes that will not complete their queries until
they wake up. This notification should be sent relatively quickly
after start-up. (Of course, it depends on the number of devices on
the ZWave network and whether there are any messages that “time out”
without a proper response.)
* AllNodesQueried
[potentially sent] Sent when all nodes have been successfully queried.
This notification should be sent relatively quickly if there are
no “sleeping” nodes. But it might be sent quite a while after start-up
if there are sleeping nodes and at least one of these nodes has a long “wake-up” interval.
Other Notifications
In addition to the notifications described above, which are primarily
“initialization” notifications that are sent during program start-up,
the following notifications may be sent as a result of user actions,
external program control, etc.
* ValueChanged : Sent when a value associated with a node has changed. Receipt of this notification indicates that it may be a good time to read the new value and display or otherwise process it accordingly.
* ValueRemoved : Sent when a value associated with a node has been removed.
* Group : Sent when a node’s group association has changed.
* NodeRemoved : Sent when a node has been removed from the ZWave network.
* NodeEvent : Sent when a node sends a Basic_Set command to the controller. This notification can be generated by certain sensors, for example, motion detectors, to indicate that an event has been sensed.
* PollingEnabled : Sent when node/value polling has been enabled.
* PollingDisabled : Sent when node/value polling has been disabled.
* DriverReset : Sent to indicate when a controller has been reset. This notification is intended to replace the potentially hundreds of notifications representing each value and node removed from the network.
About the use of louie signals :
For the network, python-openzwave sends the following louie signals :
SIGNAL_NETWORK_FAILED : the driver has failed to start.
SIGNAL_NETWORK_STARTED : the driver is ready, but the network is not available.
SIGNAL_NETWORK_AWAKED : all awake nodes are queried. Some sleeping nodes may be missing.
SIGNAL_NETWORK_READY : all nodes are queried. The network is fully functional.
SIGNAL_NETWORK_RESETTED : the network has been reset. It will start again.
SIGNAL_NETWORK_STOPPED : the network has been stopped.
Deprecated : SIGNAL_DRIVER_* shouldn't be used anymore.
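A minimal listener sketch (illustration only, using the dispatcher module
imported above):
.. code-block:: python

    def louie_network_ready(network):
        print("Network ready: %d nodes." % network.nodes_count)

    dispatcher.connect(louie_network_ready, ZWaveNetwork.SIGNAL_NETWORK_READY)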
"""
SIGNAL_NETWORK_FAILED = 'NetworkFailed'
SIGNAL_NETWORK_STARTED = 'NetworkStarted'
SIGNAL_NETWORK_READY = 'NetworkReady'
SIGNAL_NETWORK_STOPPED = 'NetworkStopped'
SIGNAL_NETWORK_RESETTED = 'DriverResetted'
SIGNAL_NETWORK_AWAKED = 'DriverAwaked'
SIGNAL_DRIVER_FAILED = 'DriverFailed'
SIGNAL_DRIVER_READY = 'DriverReady'
SIGNAL_DRIVER_RESET = 'DriverReset'
SIGNAL_DRIVER_REMOVED = 'DriverRemoved'
SIGNAL_GROUP = 'Group'
SIGNAL_NODE = 'Node'
SIGNAL_NODE_ADDED = 'NodeAdded'
SIGNAL_NODE_EVENT = 'NodeEvent'
SIGNAL_NODE_NAMING = 'NodeNaming'
SIGNAL_NODE_NEW = 'NodeNew'
SIGNAL_NODE_PROTOCOL_INFO = 'NodeProtocolInfo'
SIGNAL_NODE_READY = 'NodeReady'
SIGNAL_NODE_REMOVED = 'NodeRemoved'
SIGNAL_SCENE_EVENT = 'SceneEvent'
SIGNAL_VALUE = 'Value'
SIGNAL_VALUE_ADDED = 'ValueAdded'
SIGNAL_VALUE_CHANGED = 'ValueChanged'
SIGNAL_VALUE_REFRESHED = 'ValueRefreshed'
SIGNAL_VALUE_REMOVED = 'ValueRemoved'
SIGNAL_POLLING_ENABLED = 'PollingEnabled'
SIGNAL_POLLING_DISABLED = 'PollingDisabled'
SIGNAL_CREATE_BUTTON = 'CreateButton'
SIGNAL_DELETE_BUTTON = 'DeleteButton'
SIGNAL_BUTTON_ON = 'ButtonOn'
SIGNAL_BUTTON_OFF = 'ButtonOff'
SIGNAL_ESSENTIAL_NODE_QUERIES_COMPLETE = 'EssentialNodeQueriesComplete'
SIGNAL_NODE_QUERIES_COMPLETE = 'NodeQueriesComplete'
SIGNAL_AWAKE_NODES_QUERIED = 'AwakeNodesQueried'
SIGNAL_ALL_NODES_QUERIED = 'AllNodesQueried'
SIGNAL_ALL_NODES_QUERIED_SOME_DEAD = 'AllNodesQueriedSomeDead'
SIGNAL_MSG_COMPLETE = 'MsgComplete'
SIGNAL_NOTIFICATION = 'Notification'
SIGNAL_CONTROLLER_COMMAND = 'ControllerCommand'
SIGNAL_CONTROLLER_WAITING = 'ControllerWaiting'
STATE_STOPPED = 0
STATE_FAILED = 1
STATE_RESETTED = 3
STATE_STARTED = 5
STATE_AWAKED = 7
STATE_READY = 10
ignoreSubsequent = True
def __init__(self, options, log=None, autostart=True, kvals=True):
"""
Initialize zwave network
:param options: Options to use with manager
:type options: ZWaveOption
:param log: A log file (not used, deprecated).
:type log:
:param autostart: should we start the network.
:type autostart: bool
:param kvals: Enable kvals (use pysqlite)
:type kvals: bool
"""
logger.debug("Create network object.")
self.log = log
self._options = options
ZWaveObject.__init__(self, None, self)
self._controller = ZWaveController(1, self, options)
self._manager = libopenzwave.PyManager()
self._manager.create()
self._state = self.STATE_STOPPED
self.nodes = None
self._semaphore_nodes = threading.Semaphore()
self._id_separator = '.'
self.network_event = threading.Event()
self.dbcon = None
if kvals == True:
try:
self.dbcon = lite.connect(os.path.join(self._options.user_path, 'pyozw.sqlite'), check_same_thread=False)
cur = self.dbcon.cursor()
version = cur.execute('SELECT SQLITE_VERSION()').fetchone()
logger.debug("Use sqlite version : %s", version)
self._check_db_tables()
except lite.Error as e:
logger.warning("Can't connect to sqlite database : kvals are disabled - %s", e.args[0])
self._started = False
if autostart:
self.start()
def __str__(self):
"""
The string representation of the node.
:rtype: str
"""
return u'home_id: [%s] controller: [%s]' % \
(self.home_id_str, self.controller)
def _check_db_tables(self):
"""
Check that the tables for "classes" are in database.
:returns: True if the operation succeeds. False otherwise.
:rtype: boolean
"""
if self.dbcon is None:
return False
cur = self.dbcon.cursor()
for mycls in ['ZWaveOption', 'ZWaveOptionSingleton', 'ZWaveNetwork', 'ZWaveNetworkSingleton', 'ZWaveNode', 'ZWaveController', 'ZWaveValue']:
cur.execute("SELECT name FROM sqlite_master WHERE type='table' AND name=?", (mycls,))
data = cur.fetchone()
if data is None:
cur.execute("CREATE TABLE %s(object_id INT, key TEXT, value TEXT)" % mycls)
return True
def start(self):
"""
Start the network object :
- add a watcher
- add a driver
"""
if self._started == True:
return
logger.info(u"Start Openzwave network.")
self._manager.addWatcher(self.zwcallback)
self._manager.addDriver(self._options.device)
self._started = True
def stop(self, fire=True):
"""
Stop the network object.
- remove the watcher
- remove the driver
- clear the nodes
.. code-block:: python
dispatcher.send(self.SIGNAL_NETWORK_STOPPED, **{'network': self})
"""
if self._started == False:
return
logger.info(u"Stop Openzwave network.")
if self.controller is not None:
self.controller.stop()
self.write_config()
try:
self._semaphore_nodes.acquire()
self._manager.removeWatcher(self.zwcallback)
try:
self.network_event.wait(1.0)
except AssertionError:
#For gevent AssertionError: Impossible to call blocking function in the event loop callback
pass
self._manager.removeDriver(self._options.device)
try:
self.network_event.wait(1.0)
except AssertionError:
#For gevent AssertionError: Impossible to call blocking function in the event loop callback
pass
for i in range(0, 60):
if self.controller.send_queue_count <= 0:
break
else:
try:
self.network_event.wait(1.0)
except AssertionError:
#For gevent AssertionError: Impossible to call blocking function in the event loop callback
pass
self.nodes = None
except Exception:
    logger.exception(u'Stop network')
finally:
self._semaphore_nodes.release()
self._started = False
self._state = self.STATE_STOPPED
try:
self.network_event.wait(1.0)
except AssertionError:
#For gevent AssertionError: Impossible to call blocking function in the event loop callback
pass
if fire:
dispatcher.send(self.SIGNAL_NETWORK_STOPPED, **{'network': self})
def destroy(self):
"""
Destroy the network and all related stuff.
"""
if self.dbcon is not None:
self.dbcon.commit()
self.dbcon.close()
self._manager.destroy()
self._options.destroy()
self._manager = None
self._options = None
@property
def home_id(self):
"""
The home_id of the network.
:rtype: int
"""
if self._object_id is None:
return 0
return self._object_id
@home_id.setter
def home_id(self, value):
"""
The home_id of the network.
:param value: new home_id
:type value: int
"""
self._object_id = value
@property
def home_id_str(self):
"""
The home_id of the network as string.
:rtype: str
"""
return "0x%0.8x" % self.home_id
@property
def is_ready(self):
"""
Says if the network is ready for operations.
:rtype: bool
"""
return self._state >= self.STATE_READY
@property
def state(self):
"""
The state of the network. Values may be changed in the future,
only order is important.
You can safely ask node information when state >= STATE_READY
* STATE_STOPPED = 0
* STATE_FAILED = 1
* STATE_RESETTED = 3
* STATE_STARTED = 5
* STATE_AWAKED = 7
* STATE_READY = 10
:rtype: int
"""
return self._state
@state.setter
def state(self, value):
"""
The state of the network. Values may be changed in the future,
only order is important.
* STATE_STOPPED = 0
* STATE_FAILED = 1
* STATE_RESETTED = 3
* STATE_STARTED = 5
* STATE_AWAKED = 7
* STATE_READY = 10
:param value: new state
:type value: int
"""
self._state = value
@property
def state_str(self):
"""
The state of the network. Values may be changed in the future,
only order is important.
You can safely ask for node information when state >= STATE_AWAKED
:rtype: int
"""
if self._state == self.STATE_STOPPED:
return "Network is stopped"
elif self._state == self.STATE_FAILED:
return "Driver failed"
elif self._state == self.STATE_STARTED:
return "Driver initialised"
elif self._state == self.STATE_RESETTED:
return "Driver is reset"
elif self._state == self.STATE_AWAKED:
return "Topology loaded"
elif self._state == self.STATE_READY:
return "Network ready"
else:
return "Unknown state"
@property
def manager(self):
"""
The manager to use to communicate with the lib c++.
:rtype: ZWaveManager
"""
if self._manager is not None:
return self._manager
else:
raise ZWaveException(u"Manager not initialised")
@property
def controller(self):
"""
The controller of the network.
:return: The controller of the network
:rtype: ZWaveController
"""
if self._controller is not None:
return self._controller
else:
raise ZWaveException(u"Controller not initialised")
@property
def nodes(self):
"""
The nodes of the network.
:rtype: dict()
"""
return self._nodes
def nodes_to_dict(self, extras=['all']):
"""
Return a dict representation of the network.
:param extras: The extra information to add
:type extras: []
:returns: A dict
:rtype: dict()
"""
ret = {}
for ndid in self._nodes.keys():
ret[ndid]=self._nodes[ndid].to_dict(extras=extras)
return ret
def to_dict(self, extras=['kvals']):
"""
Return a dict representation of the network.
:param extras: The extra information to add
:type extras: []
:returns: A dict
:rtype: dict()
"""
ret = {}
ret['state'] = self.state
ret['state_str'] = self.state_str
ret['home_id'] = self.home_id_str
ret['nodes_count'] = self.nodes_count
if 'kvals' in extras and self.network.dbcon is not None:
vals = self.kvals
for key in vals.keys():
ret[key]=vals[key]
return ret
@nodes.setter
def nodes(self, value):
"""
The nodes of the network.
:param value: The new value
:type value: dict() or None
"""
if type(value) == type(dict()):
self._nodes = value
else:
self._nodes = dict()
def switch_all(self, state):
"""
Method for switching all devices on or off together. The devices must support
the SwitchAll command class. The command is first broadcast to all nodes, and
then followed up with individual commands to each node (because broadcasts are
not routed, the message might not otherwise reach all the nodes).
:param state: True to turn on the switches, False to turn them off
:type state: bool
"""
if state:
self.manager.switchAllOn(self.home_id)
else:
self.manager.switchAllOff(self.home_id)
def test(self, count=1):
"""
Send a number of test messages to every node and record results.
:param count: The number of test messages to send.
:type count: int
"""
self.manager.testNetwork(self.home_id, count)
def heal(self, upNodeRoute=False):
"""
Heal network by requesting nodes rediscover their neighbors.
Sends a ControllerCommand_RequestNodeNeighborUpdate to every node.
Can take a while on larger networks.
:param upNodeRoute: Optional Whether to perform return routes initialization. (default = false).
:type upNodeRoute: bool
:return: True if the ControllerCommand is sent. False otherwise
:rtype: bool
"""
if self.network.state < self.network.STATE_AWAKED:
logger.warning(u'Network must be awake')
return False
self.manager.healNetwork(self.home_id, upNodeRoute)
return True
def get_value(self, value_id):
"""
Retrieve a value on the network.
Check every nodes to see if it holds the value
:param value_id: The id of the value to find
:type value_id: int
:return: The value or None
:rtype: ZWaveValue
"""
for node in self.nodes:
if value_id in self.nodes[node].values:
return self.nodes[node].values[value_id]
return None
@property
def id_separator(self):
"""
The separator in id representation.
:rtype: char
"""
return self._id_separator
@id_separator.setter
def id_separator(self, value):
"""
The nodes of the network.
:param value: The new separator
:type value: char
"""
self._id_separator = value
def get_value_from_id_on_network(self, id_on_network):
"""
Retrieve a value on the network from its id_on_network.
Check every nodes to see if it holds the value
:param id_on_network: The id_on_network of the value to find
:type id_on_network: str
:return: The value or None
:rtype: ZWaveValue
"""
for node in self.nodes.values():
    for val in node.values.values():
if val.id_on_network == id_on_network:
return val
return None
def get_scenes(self):
"""
The scenes of the network.
Scenes are generated directly from the lib. There is no notification
support to keep them up to date. So for a batch job, consider
storing them in a local variable.
:return: return a dict() (that can be empty) of scene objects. Return None if the network is not ready
:rtype: dict() or None
"""
if self.state < self.STATE_AWAKED:
return None
else:
return self._load_scenes()
def scenes_to_dict(self, extras=['all']):
"""
Return a JSONifiable dict representation of the scenes.
:param extras: The extra information to add
:type extras: []
:returns: A dict
:rtype: dict()
"""
ret={}
scenes = self.get_scenes()
for scnid in scenes.keys():
ret[scnid] = scenes[scnid].to_dict(extras=extras)
return ret
def _load_scenes(self):
"""
Load the scenes of the network.
:return: return a dict() (that can be empty) of scene object.
:rtype: dict()
"""
ret = {}
set_scenes = self._manager.getAllScenes()
logger.debug(u'Load Scenes: %s', set_scenes)
for scene_id in set_scenes:
scene = ZWaveScene(scene_id, network=self)
ret[scene_id] = scene
return ret
def create_scene(self, label=None):
"""
Create a new scene on the network.
If label is set, also change the label of the scene
If you store your scenes on a local variable, get a new one
to get the scene id
:param label: The new label
:type label: str or None
:return: return the id of scene on the network. Return 0 if fails
:rtype: int
"""
scene = ZWaveScene(None, network=self)
return scene.create(label)
def scene_exists(self, scene_id):
"""
Check that the scene exists
:param scene_id: The id of the scene to check
:type scene_id: int
:return: True if the scene exist. False in other cases
:rtype: bool
"""
return self._network.manager.sceneExists(scene_id)
@property
def scenes_count(self):
"""
Return the number of scenes
:return: The number of scenes
:rtype: int
"""
return self._network.manager.getNumScenes()
def remove_scene(self, scene_id):
"""
Delete the scene on the network.
:param scene_id: The id of the scene to check
:type scene_id: int
:return: True if the scene was removed. False in other cases
:rtype: bool
"""
return self._network.manager.removeScene(scene_id)
@property
def nodes_count(self):
"""
The nodes count of the network.
:rtype: int
"""
return len(self.nodes)
@property
def sleeping_nodes_count(self):
"""
The count of sleeping nodes on the network.
:rtype: int
"""
result = 0
for node in self.nodes.values():
if node.is_sleeping:
result += 1
return result
def get_poll_interval(self):
"""
Get the time period between polls of a nodes state
:return: The number of milliseconds between polls
:rtype: int
"""
return self.manager.getPollInterval()
def set_poll_interval(self, milliseconds=500, bIntervalBetweenPolls=True):
"""
Set the time period between polls of a nodes state.
Due to patent concerns, some devices do not report state changes automatically
to the controller. These devices need to have their state polled at regular
intervals. The length of the interval is the same for all devices. To even
out the Z-Wave network traffic generated by polling, OpenZWave divides the
polling interval by the number of devices that have polling enabled, and polls
each in turn. It is recommended that if possible, the interval should not be
set shorter than the number of polled devices in seconds (so that the network
does not have to cope with more than one poll per second).
:param milliseconds: The length of the polling interval in milliseconds.
:type milliseconds: int
:param bIntervalBetweenPolls: If set to true (via SetPollInterval), the pollInterval will be interspersed between each poll (so a much smaller m_pollInterval like 100, 500, or 1,000 may be appropriate). If false, the library attempts to complete all polls within m_pollInterval.
:type bIntervalBetweenPolls: bool
"""
self.manager.setPollInterval(milliseconds, bIntervalBetweenPolls)
def zwcallback(self, args):
"""
The Callback Handler used with the libopenzwave.
n['valueId'] = {
* 'home_id' : v.GetHomeId(),
* 'node_id' : v.GetNodeId(),
* 'commandClass' : PyManager.COMMAND_CLASS_DESC[v.GetCommandClassId()],
* 'instance' : v.GetInstance(),
* 'index' : v.GetIndex(),
* 'id' : v.GetId(),
* 'genre' : PyGenres[v.GetGenre()],
* 'type' : PyValueTypes[v.GetType()],
* #'value' : value.c_str(),
* 'value' : getValueFromType(manager,v.GetId()),
* 'label' : label.c_str(),
* 'units' : units.c_str(),
* 'readOnly': manager.IsValueReadOnly(v)
}
:param args: A dict containing informations about the state of the controller
:type args: dict()
"""
logger.debug('zwcallback args=[%s]', args)
try:
notify_type = args['notificationType']
if notify_type == self.SIGNAL_DRIVER_FAILED:
self._handle_driver_failed(args)
elif notify_type == self.SIGNAL_DRIVER_READY:
self._handle_driver_ready(args)
elif notify_type == self.SIGNAL_DRIVER_RESET:
self._handle_driver_reset(args)
elif notify_type == self.SIGNAL_NODE_ADDED:
self._handle_node_added(args)
elif notify_type == self.SIGNAL_NODE_EVENT:
self._handle_node_event(args)
elif notify_type == self.SIGNAL_NODE_NAMING:
self._handle_node_naming(args)
elif notify_type == self.SIGNAL_NODE_NEW:
self._handle_node_new(args)
elif notify_type == self.SIGNAL_NODE_PROTOCOL_INFO:
self._handle_node_protocol_info(args)
elif notify_type == self.SIGNAL_NODE_READY:
self._handleNodeReady(args)
elif notify_type == self.SIGNAL_NODE_REMOVED:
self._handle_node_removed(args)
elif notify_type == self.SIGNAL_GROUP:
self._handle_group(args)
elif notify_type == self.SIGNAL_SCENE_EVENT:
self._handle_scene_event(args)
elif notify_type == self.SIGNAL_VALUE_ADDED:
self._handle_value_added(args)
elif notify_type == self.SIGNAL_VALUE_CHANGED:
self._handle_value_changed(args)
elif notify_type == self.SIGNAL_VALUE_REFRESHED:
self._handle_value_refreshed(args)
elif notify_type == self.SIGNAL_VALUE_REMOVED:
self._handle_value_removed(args)
elif notify_type == self.SIGNAL_POLLING_DISABLED:
self._handle_polling_disabled(args)
elif notify_type == self.SIGNAL_POLLING_ENABLED:
self._handle_polling_enabled(args)
elif notify_type == self.SIGNAL_CREATE_BUTTON:
self._handle_create_button(args)
elif notify_type == self.SIGNAL_DELETE_BUTTON:
self._handle_delete_button(args)
elif notify_type == self.SIGNAL_BUTTON_ON:
self._handle_button_on(args)
elif notify_type == self.SIGNAL_BUTTON_OFF:
self._handle_button_off(args)
elif notify_type == self.SIGNAL_ALL_NODES_QUERIED:
self._handle_all_nodes_queried(args)
elif notify_type == self.SIGNAL_ALL_NODES_QUERIED_SOME_DEAD:
self._handle_all_nodes_queried_some_dead(args)
elif notify_type == self.SIGNAL_AWAKE_NODES_QUERIED:
self._handle_awake_nodes_queried(args)
elif notify_type == self.SIGNAL_ESSENTIAL_NODE_QUERIES_COMPLETE:
self._handle_essential_node_queries_complete(args)
elif notify_type == self.SIGNAL_NODE_QUERIES_COMPLETE:
self._handle_node_queries_complete(args)
elif notify_type == self.SIGNAL_MSG_COMPLETE:
self._handle_msg_complete(args)
elif notify_type == self.SIGNAL_NOTIFICATION:
self._handle_notification(args)
elif notify_type == self.SIGNAL_DRIVER_REMOVED:
self._handle_driver_removed(args)
elif notify_type == self.SIGNAL_CONTROLLER_COMMAND:
self._handle_controller_command(args)
else:
logger.warning(u'Skipping unhandled notification [%s]', args)
except:
import sys, traceback
logger.exception(u'Error in manager callback')
def _handle_driver_failed(self, args):
"""
Driver failed to load.
:param args: data sent by the notification
:type args: dict()
dispatcher.send(self.SIGNAL_NETWORK_FAILED, **{'network': self})
"""
logger.warning(u'Z-Wave Notification DriverFailed : %s', args)
self._manager = None
self._controller = None
self.nodes = None
self._state = self.STATE_FAILED
dispatcher.send(self.SIGNAL_DRIVER_FAILED, **{'network': self})
dispatcher.send(self.SIGNAL_NETWORK_FAILED, **{'network': self})
def _handle_driver_ready(self, args):
"""
A driver for a PC Z-Wave controller has been added and is ready to use.
The notification will contain the controller's Home ID,
which is needed to call most of the Manager methods.
dispatcher.send(self.SIGNAL_NETWORK_STARTED, **{'network': self, 'controller': self._controller})
:param args: data sent by the notification
:type args: dict()
"""
logger.debug(u'Z-Wave Notification DriverReady : %s', args)
self._object_id = args['homeId']
try:
controller_node = ZWaveNode(args['nodeId'], network=self)
self._semaphore_nodes.acquire()
self.nodes = None
self.nodes[args['nodeId']] = controller_node
self._controller.node = self.nodes[args['nodeId']]
logger.info(u'Driver ready using library %s', self._controller.library_description)
logger.info(u'home_id 0x%0.8x, controller node id is %d', self.home_id, self._controller.node_id)
logger.debug(u'Network %s', self)
#Not needed. Already sent by the lib
#~ dispatcher.send(self.SIGNAL_DRIVER_READY, \
#~ **{'network': self, 'controller': self._controller})
self._state = self.STATE_STARTED
dispatcher.send(self.SIGNAL_NETWORK_STARTED, \
**{'network': self})
ctrl_state = libopenzwave.PyControllerState[0]
ctrl_message = libopenzwave.PyControllerState[0].doc
dispatcher.send(self.controller.SIGNAL_CONTROLLER, \
**{'state': ctrl_state, 'message': ctrl_message, 'network': self, 'controller': self.controller})
        except Exception:
            logger.exception(u'Z-Wave Notification DriverReady')
finally:
self._semaphore_nodes.release()
def _handle_driver_reset(self, args):
"""
This notification is never fired.
All nodes and values for this driver have been removed.
This is sent instead of potentially hundreds of individual node
and value notifications.
:param args: data sent by the notification
:type args: dict()
"""
logger.debug(u'Z-Wave Notification DriverReset : %s', args)
try:
self._semaphore_nodes.acquire()
logger.debug(u'DriverReset received. Remove all nodes')
self.nodes = None
self._state = self.STATE_RESETTED
dispatcher.send(self.SIGNAL_DRIVER_RESET, \
**{'network': self})
dispatcher.send(self.SIGNAL_NETWORK_RESETTED, \
**{'network': self})
finally:
self._semaphore_nodes.release()
def _handle_driver_removed(self, args):
"""
        The driver is being removed (either due to an error or by request).
        Do not call any driver-related methods after receiving this notification.
dispatcher.send(self.SIGNAL_DRIVER_REMOVED, **{'network': self})
:param args: data sent by the notification
:type args: dict()
"""
logger.debug(u'Z-Wave Notification DriverRemoved : %s', args)
try:
self._semaphore_nodes.acquire()
self._state = self.STATE_STOPPED
dispatcher.send(self.SIGNAL_DRIVER_REMOVED, \
**{'network': self})
finally:
self._semaphore_nodes.release()
def _handle_group(self, args):
"""
The associations for the node have changed.
The application should rebuild any group information
it holds about the node.
dispatcher.send(self.SIGNAL_GROUP, **{'network': self, 'node': self.nodes[args['nodeId']]})
:param args: data sent by the notification
:type args: dict()
"""
logger.debug(u'Z-Wave Notification Group : %s', args)
dispatcher.send(self.SIGNAL_GROUP, \
**{'network': self, 'node': self.nodes[args['nodeId']], 'groupidx': args['groupIdx']})
def _handle_node(self, node):
"""
Sent when a node is changed, added, removed, ...
        If you are not interested in the details of node events, you can
        listen to this signal only.
dispatcher.send(self.SIGNAL_NODE, **{'network': self, 'node':self.nodes[args['nodeId']]})
:param node: the node
:type node: ZWaveNode
"""
logger.debug(u'Z-Wave Notification Node : %s', node)
dispatcher.send(self.SIGNAL_NODE, \
**{'network': self, 'node':node})
def _handle_node_added(self, args):
"""
A new node has been added to OpenZWave's set.
This may be due to a device being added to the Z-Wave network,
or because the application is initializing itself.
dispatcher.send(self.SIGNAL_NODE_ADDED, **{'network': self, 'node': node})
dispatcher.send(self.SIGNAL_NODE, **{'network': self, 'node':self.nodes[args['nodeId']]})
:param args: data sent by the notification
:type args: dict()
"""
logger.debug(u'Z-Wave Notification NodeAdded : %s', args)
try:
node = ZWaveNode(args['nodeId'], network=self)
self._semaphore_nodes.acquire()
self.nodes[args['nodeId']] = node
dispatcher.send(self.SIGNAL_NODE_ADDED, \
**{'network': self, 'node': self.nodes[args['nodeId']]})
self._handle_node(self.nodes[args['nodeId']])
finally:
self._semaphore_nodes.release()
def _handle_scene_event(self, args):
"""
Scene Activation Set received
Not implemented
:param args: data sent by the notification
:type args: dict()
"""
logger.debug(u'Z-Wave Notification SceneEvent : %s', args)
dispatcher.send(self.SIGNAL_SCENE_EVENT, \
**{'network': self, 'node': self.nodes[args['nodeId']],
'scene_id': args['sceneId']})
def _handle_node_event(self, args):
"""
A node has triggered an event. This is commonly caused when a
node sends a Basic_Set command to the controller.
The event value is stored in the notification.
dispatcher.send(self.SIGNAL_NODE_EVENT, **{'network': self, 'node': self.nodes[args['nodeId']]})
:param args: data sent by the notification
:type args: dict()
"""
logger.debug(u'Z-Wave Notification NodeEvent : %s', args)
dispatcher.send(self.SIGNAL_NODE_EVENT,
**{'network': self, 'node': self.nodes[args['nodeId']], 'value': args['event']})
def _handle_node_naming(self, args):
"""
One of the node names has changed (name, manufacturer, product).
dispatcher.send(self.SIGNAL_NODE_NAMING, **{'network': self, 'node': self.nodes[args['nodeId']]})
dispatcher.send(self.SIGNAL_NODE, **{'network': self, 'node':self.nodes[args['nodeId']]})
:param args: data sent by the notification
:type args: dict()
"""
logger.debug(u'Z-Wave Notification NodeNaming : %s', args)
dispatcher.send(self.SIGNAL_NODE_NAMING, \
**{'network': self, 'node': self.nodes[args['nodeId']]})
self._handle_node(self.nodes[args['nodeId']])
def _handle_node_new(self, args):
"""
A new node has been found (not already stored in zwcfg*.xml file).
:param args: data sent by the notification
:type args: dict()
"""
logger.debug('Z-Wave Notification NodeNew : %s', args)
dispatcher.send(self.SIGNAL_NODE_NEW, \
**{'network': self, 'node_id': args['nodeId']})
def _handle_node_protocol_info(self, args):
"""
Basic node information has been received, such as whether
the node is a listening device, a routing device and its baud rate
and basic, generic and specific types.
It is after this notification that you can call Manager::GetNodeType
to obtain a label containing the device description.
:param args: data sent by the notification
:type args: dict()
"""
logger.debug(u'Z-Wave Notification NodeProtocolInfo : %s', args)
dispatcher.send(self.SIGNAL_NODE_PROTOCOL_INFO, \
**{'network': self, 'node': self.nodes[args['nodeId']]})
self._handle_node(self.nodes[args['nodeId']])
def _handle_node_removed(self, args):
"""
A node has been removed from OpenZWave's set.
This may be due to a device being removed from the Z-Wave network,
or because the application is closing.
dispatcher.send(self.SIGNAL_NODE_REMOVED, **{'network': self, 'node_id': args['nodeId']})
dispatcher.send(self.SIGNAL_NODE, **{'network': self, 'node':self.nodes[args['nodeId']]})
:param args: data sent by the notification
:type args: dict()
"""
logger.debug(u'Z-Wave Notification NodeRemoved : %s', args)
try:
self._semaphore_nodes.acquire()
if args['nodeId'] in self.nodes:
node = self.nodes[args['nodeId']]
del self.nodes[args['nodeId']]
dispatcher.send(self.SIGNAL_NODE_REMOVED, \
**{'network': self, 'node': node})
self._handle_node(node)
finally:
self._semaphore_nodes.release()
def _handle_essential_node_queries_complete(self, args):
"""
The queries on a node that are essential to its operation have
been completed. The node can now handle incoming messages.
dispatcher.send(self.SIGNAL_ESSENTIAL_NODE_QUERIES_COMPLETE, **{'network': self, 'node': self.nodes[args['nodeId']]})
:param args: data sent by the notification
:type args: dict()
"""
logger.debug(u'Z-Wave Notification EssentialNodeQueriesComplete : %s', args)
dispatcher.send(self.SIGNAL_ESSENTIAL_NODE_QUERIES_COMPLETE, \
**{'network': self, 'node': self.nodes[args['nodeId']]})
def _handle_node_queries_complete(self, args):
"""
All the initialisation queries on a node have been completed.
dispatcher.send(self.SIGNAL_NODE_QUERIES_COMPLETE, **{'network': self, 'node': self.nodes[args['nodeId']]})
dispatcher.send(self.SIGNAL_NODE, **{'network': self, 'node':self.nodes[args['nodeId']]})
When receiving this value, we consider that the node is ready.
:param args: data sent by the notification
:type args: dict()
"""
logger.debug(u'Z-Wave Notification NodeQueriesComplete : %s', args)
#the query stage are now completed, set the flag is ready to operate
self.nodes[args['nodeId']].is_ready = True
dispatcher.send(self.SIGNAL_NODE_QUERIES_COMPLETE, \
**{'network': self, 'node': self.nodes[args['nodeId']]})
self._handle_node(self.nodes[args['nodeId']])
def _handle_all_nodes_queried(self, args):
"""
        All nodes have been queried, so the client application can expect
        complete data.
:param args: data sent by the notification
:type args: dict()
dispatcher.send(self.SIGNAL_NETWORK_READY, **{'network': self})
dispatcher.send(self.SIGNAL_ALL_NODES_QUERIED, **{'network': self, 'controller': self._controller})
"""
logger.debug(u'Z-Wave Notification AllNodesQueried : %s', args)
self._state = self.STATE_READY
dispatcher.send(self.SIGNAL_NETWORK_READY, **{'network': self})
dispatcher.send(self.SIGNAL_ALL_NODES_QUERIED, \
**{'network': self, 'controller': self._controller})
def _handle_all_nodes_queried_some_dead(self, args):
"""
        All nodes have been queried, but some nodes are marked dead, so the
        client application can still expect complete data.
:param args: data sent by the notification
:type args: dict()
dispatcher.send(self.SIGNAL_NETWORK_READY, **{'network': self})
dispatcher.send(self.SIGNAL_ALL_NODES_QUERIED, **{'network': self, 'controller': self._controller})
"""
logger.debug(u'Z-Wave Notification AllNodesQueriedSomeDead : %s', args)
self._state = self.STATE_READY
dispatcher.send(self.SIGNAL_NETWORK_READY, **{'network': self})
dispatcher.send(self.SIGNAL_ALL_NODES_QUERIED_SOME_DEAD, \
**{'network': self, 'controller': self._controller})
def _handle_awake_nodes_queried(self, args):
"""
        All awake nodes have been queried, so the client application can
        expect complete data for these nodes.
dispatcher.send(self.SIGNAL_NETWORK_AWAKED, **{'network': self})
dispatcher.send(self.SIGNAL_AWAKE_NODES_QUERIED, **{'network': self, 'controller': self._controller})
:param args: data sent by the notification
:type args: dict()
"""
logger.debug(u'Z-Wave Notification AwakeNodesQueried : %s', args)
self._object_id = args['homeId']
try:
if self._state < self.STATE_AWAKED:
self._state = self.STATE_AWAKED
dispatcher.send(self.SIGNAL_NETWORK_AWAKED, **{'network': self})
dispatcher.send(self.SIGNAL_AWAKE_NODES_QUERIED, \
**{'network': self, 'controller': self._controller})
        except Exception:
            logger.exception(u'Z-Wave Notification AwakeNodesQueried')
def _handle_polling_disabled(self, args):
"""
Polling of a node has been successfully turned off by a call
to Manager::DisablePoll.
dispatcher.send(self.SIGNAL_POLLING_DISABLED, **{'network': self, 'node' : self.nodes[args['nodeId']]})
:param args: data sent by the notification
:type args: dict()
"""
logger.debug(u'Z-Wave Notification PollingDisabled : %s', args)
dispatcher.send(self.SIGNAL_POLLING_DISABLED, \
**{'network': self, 'node' : self.nodes[args['nodeId']]})
def _handle_polling_enabled(self, args):
"""
Polling of a node has been successfully turned on by a call
to Manager::EnablePoll.
dispatcher.send(self.SIGNAL_POLLING_ENABLED, **{'network': self, 'node' : self.nodes[args['nodeId']]})
:param args: data sent by the notification
:type args: dict()
"""
logger.debug(u'Z-Wave Notification PollingEnabled : %s', args)
dispatcher.send(self.SIGNAL_POLLING_ENABLED, \
**{'network': self, 'node' : self.nodes[args['nodeId']]})
def _handle_create_button(self, args):
"""
Handheld controller button event created.
dispatcher.send(self.SIGNAL_CREATE_BUTTON, **{'network': self, 'node' : self.nodes[args['nodeId']]})
:param args: data sent by the notification
:type args: dict()
"""
logger.debug(u'Z-Wave Notification CreateButton : %s', args)
dispatcher.send(self.SIGNAL_CREATE_BUTTON, \
**{'network': self, 'node' : self.nodes[args['nodeId']]})
def _handle_delete_button(self, args):
"""
Handheld controller button event deleted.
dispatcher.send(self.SIGNAL_DELETE_BUTTON, **{'network': self, 'node' : self.nodes[args['nodeId']]})
:param args: data sent by the notification
:type args: dict()
"""
logger.debug(u'Z-Wave Notification DeleteButton : %s', args)
dispatcher.send(self.SIGNAL_DELETE_BUTTON, \
**{'network': self, 'node' : self.nodes[args['nodeId']]})
def _handle_button_on(self, args):
"""
Handheld controller button on pressed event.
dispatcher.send(self.SIGNAL_BUTTON_ON, **{'network': self, 'node' : self.nodes[args['nodeId']]})
:param args: data sent by the notification
:type args: dict()
"""
logger.debug(u'Z-Wave Notification ButtonOn : %s', args)
dispatcher.send(self.SIGNAL_BUTTON_ON, \
**{'network': self, 'node' : self.nodes[args['nodeId']]})
def _handle_button_off(self, args):
"""
Handheld controller button off pressed event.
dispatcher.send(self.SIGNAL_BUTTON_OFF, **{'network': self, 'node' : self.nodes[args['nodeId']]})
:param args: data sent by the notification
:type args: dict()
"""
logger.debug(u'Z-Wave Notification ButtonOff : %s', args)
dispatcher.send(self.SIGNAL_BUTTON_OFF, \
**{'network': self, 'node' : self.nodes[args['nodeId']]})
def _handle_value(self, node=None, value=None):
"""
        Sent when a value is changed, added, removed, ...
        If you are not interested in the details of value events, you can
        listen to this signal only.
dispatcher.send(self.SIGNAL_VALUE, **{'network': self, 'node' : node, 'value' : value})
        :param node: the node that holds the value
        :type node: ZWaveNode
        :param value: the changed value
        :type value: ZWaveValue
"""
dispatcher.send(self.SIGNAL_VALUE, \
**{'network': self, 'node' : node, \
'value' : value})
def _handle_value_added(self, args):
"""
A new node value has been added to OpenZWave's set.
These notifications occur after a node has been discovered,
and details of its command classes have been received.
Each command class may generate one or more values depending
on the complexity of the item being represented.
dispatcher.send(self.SIGNAL_VALUE_ADDED, \
**{'network': self, 'node' : self.nodes[args['nodeId']], \
'value' : self.nodes[args['nodeId']].values[args['valueId']['id']]})
dispatcher.send(self.SIGNAL_VALUE, **{'network': self, 'node' : node, 'value' : value})
:param args: data sent by the notification
:type args: dict()
"""
logger.debug(u'Z-Wave Notification ValueAdded : %s', args)
self.nodes[args['nodeId']].add_value(args['valueId']['id'])
dispatcher.send(self.SIGNAL_VALUE_ADDED, \
**{'network': self, \
'node' : self.nodes[args['nodeId']], \
'value' : self.nodes[args['nodeId']].values[args['valueId']['id']]})
self._handle_value(node=self.nodes[args['nodeId']], value=self.nodes[args['nodeId']].values[args['valueId']['id']])
def _handle_value_changed(self, args):
"""
A node value has been updated from the Z-Wave network and it is
different from the previous value.
dispatcher.send(self.SIGNAL_VALUE_CHANGED, \
**{'network': self, 'node' : self.nodes[args['nodeId']], \
'value' : self.nodes[args['nodeId']].values[args['valueId']['id']]})
dispatcher.send(self.SIGNAL_VALUE, **{'network': self, 'node' : node, 'value' : value})
:param args: data sent by the notification
:type args: dict()
"""
logger.debug(u'Z-Wave Notification ValueChanged : %s', args)
if args['nodeId'] not in self.nodes:
logger.warning('Z-Wave Notification ValueChanged (%s) for an unknown node %s', args['valueId'], args['nodeId'])
return False
self.nodes[args['nodeId']].change_value(args['valueId']['id'])
dispatcher.send(self.SIGNAL_VALUE_CHANGED, \
**{'network': self, 'node' : self.nodes[args['nodeId']], \
'value' : self.nodes[args['nodeId']].values[args['valueId']['id']]})
self._handle_value(node=self.nodes[args['nodeId']], value=self.nodes[args['nodeId']].values[args['valueId']['id']])
def _handle_value_refreshed(self, args):
"""
A node value has been updated from the Z-Wave network.
dispatcher.send(self.SIGNAL_VALUE_REFRESHED, \
**{'network': self, 'node' : self.nodes[args['nodeId']], \
'value' : self.nodes[args['nodeId']].values[args['valueId']['id']]})
dispatcher.send(self.SIGNAL_VALUE, **{'network': self, 'node' : node, 'value' : value})
:param args: data sent by the notification
:type args: dict()
"""
logger.debug(u'Z-Wave Notification ValueRefreshed : %s', args)
if args['nodeId'] not in self.nodes:
logger.warning('Z-Wave Notification ValueRefreshed (%s) for an unknown node %s', args['valueId'], args['nodeId'])
return False
self.nodes[args['nodeId']].refresh_value(args['valueId']['id'])
dispatcher.send(self.SIGNAL_VALUE_REFRESHED, \
**{'network': self, 'node' : self.nodes[args['nodeId']], \
'value' : self.nodes[args['nodeId']].values[args['valueId']['id']]})
self._handle_value(node=self.nodes[args['nodeId']], value=self.nodes[args['nodeId']].values[args['valueId']['id']])
def _handle_value_removed(self, args):
"""
A node value has been removed from OpenZWave's set.
This only occurs when a node is removed.
dispatcher.send(self.SIGNAL_VALUE_REMOVED, \
**{'network': self, 'node' : self.nodes[args['nodeId']], \
'value' : val})
dispatcher.send(self.SIGNAL_VALUE, **{'network': self, 'node' : node, 'value' : value})
:param args: data sent by the notification
:type args: dict()
"""
logger.debug(u'Z-Wave Notification ValueRemoved : %s', args)
if args['nodeId'] not in self.nodes:
logger.warning(u'Z-Wave Notification ValueRemoved (%s) for an unknown node %s', args['valueId'], args['nodeId'])
return False
        if args['valueId']['id'] not in self.nodes[args['nodeId']].values:
logger.warning(u'Z-Wave Notification ValueRemoved for an unknown value (%s) on node %s', args['valueId'], args['nodeId'])
dispatcher.send(self.SIGNAL_VALUE_REMOVED, \
**{'network': self, 'node' : self.nodes[args['nodeId']], \
'value' : None, 'valueId' : args['valueId']['id']})
return False
val = self.nodes[args['nodeId']].values[args['valueId']['id']]
if self.nodes[args['nodeId']].remove_value(args['valueId']['id']):
dispatcher.send(self.SIGNAL_VALUE_REMOVED, \
**{'network': self, 'node' : self.nodes[args['nodeId']], \
'value' : val, 'valueId' : args['valueId']['id']})
#self._handle_value(node=self.nodes[args['nodeId']], value=val)
if args['nodeId'] in self.nodes and args['valueId']['id'] in self.nodes[args['nodeId']].values:
del self.nodes[args['nodeId']].values[args['valueId']['id']]
return True
def _handle_notification(self, args):
"""
Called when an error happened, or node changed (awake, sleep, death, no operation, timeout).
dispatcher.send(self.SIGNAL_NOTIFICATION, **{'network': self})
:param args: data sent by the notification
:type args: dict()
"""
logger.debug(u'Z-Wave Notification : %s', args)
dispatcher.send(self.SIGNAL_NOTIFICATION, \
**{'network': self, 'args': args})
def _handle_controller_command(self, args):
"""
Called when a message from controller is sent.
The state could be obtained here :
dispatcher.send(self.SIGNAL_CONTROLLER_WAITING, \
**{'network': self, 'controller': self.controller,
'state_int': args['controllerStateInt'], 'state': args['controllerState'], 'state_full': args['controllerStateDoc'],
})
And the full command here :
dispatcher.send(self.SIGNAL_CONTROLLER_COMMAND, \
**{'network': self, 'controller': self.controller,
'node':self.nodes[args['nodeId']] if args['nodeId'] in self.nodes else None, 'node_id' : args['nodeId'],
'state_int': args['controllerStateInt'], 'state': args['controllerState'], 'state_full': args['controllerStateDoc'],
'error_int': args['controllerErrorInt'], 'error': args['controllerError'], 'error_full': args['controllerErrorDoc'],
})
:param args: data sent by the notification
:type args: dict()
"""
self._controller._handle_controller_command(args)
def _handle_msg_complete(self, args):
"""
The last message that was sent is now complete.
dispatcher.send(self.SIGNAL_MSG_COMPLETE, **{'network': self})
:param args: data sent by the notification
:type args: dict()
"""
logger.debug(u'Z-Wave Notification MsgComplete : %s', args)
dispatcher.send(self.SIGNAL_MSG_COMPLETE, \
**{'network': self})
def write_config(self):
"""
The last message that was sent is now complete.
"""
self._manager.writeConfig(self.home_id)
logger.info(u'ZWave configuration written to user directory.')
"""
initialization callback sequence:
[driverReady]
[nodeAdded] <-------------------------+ This cycle is extremely quick, well under one second.
[nodeProtocolInfo] |
[nodeNaming] |
[valueAdded] <---------------+ |
| |
{REPEATS FOR EACH VALUE} ----+ |
|
[group] <--------------------+ |
| |
{REPEATS FOR EACH GROUP} ----+ |
|
{REPEATS FOR EACH NODE} --------------+
[? (no notification)] <---------------+ (no notification announces the beginning of this cycle)
|
[valueChanged] <-------------+ | This cycle can take some time, especially if some nodes
| | are sleeping or slow to respond.
{REPEATS FOR EACH VALUE} ----+ |
|
[group] <--------------------+ |
| |
{REPEATS FOR EACH GROUP} ----+ |
|
[nodeQueriesComplete] |
|
{REPEATS FOR EACH NODE} --------------+
[awakeNodesQueried] or [allNodesQueried] (with node_id 255)
[driverRemoved]
"""
class ZWaveNetworkSingleton(ZWaveNetwork):
"""
    Represents a singleton Z-Wave network.
"""
__metaclass__ = Singleton
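# Usage sketch (assumes the Singleton metaclass caches the first instance):
# every caller constructing ZWaveNetworkSingleton receives the same shared
# network object, e.g.
#
#   network_a = ZWaveNetworkSingleton(options)
#   network_b = ZWaveNetworkSingleton(options)
#   assert network_a is network_b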
|
python
|
# ---
# name: web-csv
# deployed: true
# title: CSV Reader
# description: Returns the data for the CSVs given by the URLs
# params:
# - name: url
# type: array
# description: Urls for which to get the info
# required: true
# examples:
# - '"https://raw.githubusercontent.com/flexiodata/data/master/sample/sample-contacts.csv"'
# notes:
# ---
import csv
import json
import tempfile
import io
import aiohttp
import asyncio
import itertools
from cerberus import Validator
from contextlib import closing
from collections import OrderedDict
def flexio_handler(flex):
# get the input
input = flex.input.read()
input = json.loads(input)
if not isinstance(input, list):
raise ValueError
# define the expected parameters and map the values to the parameter names
# based on the positions of the keys/values
params = OrderedDict()
params['urls'] = {'required': True, 'validator': validator_list, 'coerce': to_list}
#params['columns'] = {'required': True, 'validator': validator_list, 'coerce': to_list}
input = dict(zip(params.keys(), input))
# validate the mapped input against the validator
v = Validator(params, allow_unknown = True)
input = v.validated(input)
if input is None:
raise ValueError
urls = input['urls']
loop = asyncio.get_event_loop()
temp_fp_all = loop.run_until_complete(fetch_all(urls))
flex.output.content_type = 'application/json'
flex.output.write('[')
# get the columns for each of the input urls
properties = []
for temp_fp in temp_fp_all:
try:
fp = io.TextIOWrapper(temp_fp, encoding='utf-8-sig')
reader = csv.DictReader(fp, delimiter=',', quotechar='"')
for row in reader:
properties = list(row.keys())
break
finally:
fp.seek(0)
fp.detach()
flex.output.write(json.dumps(properties))
for temp_fp in temp_fp_all:
fp = io.TextIOWrapper(temp_fp, encoding='utf-8-sig')
reader = csv.DictReader(fp, delimiter=',', quotechar='"')
for row in reader:
row = ',' + json.dumps([(row.get(p) or '') for p in properties])
flex.output.write(row)
temp_fp.close()
flex.output.write(']')
async def fetch_all(urls):
tasks = []
async with aiohttp.ClientSession() as session:
for url in urls:
tasks.append(fetch(session, url))
temp_fp_all = await asyncio.gather(*tasks)
return temp_fp_all
async def fetch(session, url):
# stream the data from the url into a temporary file and return
# it for processing, after which it'll be closed and deleted
temp_fp = tempfile.TemporaryFile()
async with session.get(url) as response:
while True:
data = await response.content.read(1024)
if not data:
break
temp_fp.write(data)
temp_fp.seek(0) # rewind to the beginning
return temp_fp
def validator_list(field, value, error):
if isinstance(value, str):
return
if isinstance(value, list):
for item in value:
if not isinstance(item, str):
error(field, 'Must be a list with only string values')
return
error(field, 'Must be a string or a list of strings')
def to_list(value):
    # a comma-delimited string becomes a list of strings; a list of lists is
    # flattened one level; a flat list of strings is returned unchanged
    if isinstance(value, str):
        return value.split(",")
    if isinstance(value, list):
        if value and all(isinstance(item, list) for item in value):
            return list(itertools.chain.from_iterable(value))
        return value
    return None
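# Input-shape sketch (illustrative; the hosting service supplies `flex`, and
# the URL below is a hypothetical example): parameters arrive as a positional
# JSON array and are zipped onto the OrderedDict of parameter names in
# flexio_handler. An input of
#
#   ["https://example.com/a.csv,https://example.com/b.csv"]
#
# is coerced by to_list() into
#
#   {'urls': ['https://example.com/a.csv', 'https://example.com/b.csv']}
#
# after which fetch_all() downloads both files concurrently and the handler
# streams one JSON array (header row first, then data rows) to flex.output.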
|
python
|
# utilities for dealing with webtiles configuration. The actual configuration
# data does *not* go in here.
import collections
import os.path
import logging
from webtiles import load_games
server_config = {}
source_file = None
# light wrapper class that maps get/set/etc to getattr/setattr/etc
# doesn't bother to implement most of the dict interface...
class ConfigModuleWrapper(object):
def __init__(self, module):
self.module = module
def get(self, key, default):
return getattr(self.module, key, default)
def __setitem__(self, key, val):
setattr(self.module, key, val)
def pop(self, key):
r = getattr(self.module, key)
delattr(self.module, key)
return r
def __contains__(self, key):
return hasattr(self.module, key)
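# Minimal sketch of the wrapper in use (names are illustrative): any plain
# python module can act as the configuration store, and the wrapper gives it
# just enough of a dict interface for the accessors below.
#
#   import my_config_module            # e.g. a module defining bind_address = "0.0.0.0"
#   wrapped = ConfigModuleWrapper(my_config_module)
#   wrapped["bind_port"] = 8080        # setattr on the module
#   wrapped.get("bind_address", None)  # getattr with a default
#   "bind_port" in wrapped             # hasattr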
# temporary compatibility shim for config calls in templates
allow_password_reset = False
admin_password_reset = False
# classic config: everything is just done in a module
# (TODO: add some alternative)
def init_config_from_module(module):
global server_config, source_file
server_config = ConfigModuleWrapper(module)
source_file = os.path.abspath(module.__file__)
global allow_password_reset, admin_password_reset
allow_password_reset = get('allow_password_reset')
admin_password_reset = get('admin_password_reset')
server_path = None
games = collections.OrderedDict()
game_modes = {} # type: Dict[str, str]
# for values not in this dict, the default is None
defaults = {
'dgl_mode': True,
'logging_config': {
"level": logging.INFO,
"format": "%(asctime)s %(levelname)s: %(message)s"
},
'server_socket_path': None,
'watch_socket_dirs': False,
'use_game_yaml': True,
'milestone_file': [],
'status_file_update_rate': 5,
'lobby_update_rate': 2,
'recording_term_size': (80, 24),
'max_connections': 100,
'connection_timeout': 600,
'max_idle_time': 5 * 60 * 60,
'use_gzip': True,
'kill_timeout': 10,
'nick_regex': r"^[a-zA-Z0-9]{3,20}$",
'max_passwd_length': 20,
'allow_password_reset': False,
'admin_password_reset': False,
'crypt_algorithm': "broken", # should this be the default??
'crypt_salt_length': 16,
'login_token_lifetime': 7, # Days
'daemon': False,
'development_mode': False,
'no_cache': False,
'live_debug': False,
}
def get(key, default=None):
global server_config
return server_config.get(key, defaults.get(key, default))
def set(key, val):
global server_config
server_config[key] = val
def pop(key):
global server_config
return server_config.pop(key)
def has_key(key):
global server_config
return key in server_config
def check_keys_all(required, raise_on_missing=False):
# accept either a single str, or an iterable for `required`
if isinstance(required, str):
required = [required]
for k in required:
if not has_key(k) or get(k) is None:
if raise_on_missing:
raise ValueError("Webtiles config: Missing configuration key: %s" % k)
return False
return True
def check_keys_any(required, raise_on_missing=False):
    # uses check_keys_all: if any member of `required` is itself a list,
    # all keys in that sub-list are required together
if not any([check_keys_all(key) for key in required]):
if raise_on_missing:
raise ValueError("Webtiles config: Need at least one of %s!" %
", ".join([repr(r) for r in required]))
return False
return True
def check_game_config():
success = True
for (game_id, game_data) in get('games').items():
if not os.path.exists(game_data["crawl_binary"]):
logging.warning("Crawl executable for %s (%s) doesn't exist!",
game_id, game_data["crawl_binary"])
success = False
if ("client_path" in game_data and
not os.path.exists(game_data["client_path"])):
logging.warning("Client data path %s doesn't exist!", game_data["client_path"])
success = False
return success
def load_game_data():
# TODO: should the `load_games` module be refactored into config?
global games
games = get('games', collections.OrderedDict())
if get('use_game_yaml', False):
games = load_games.load_games(games)
# TODO: check_games here or in validate?
if len(games) == 0:
raise ValueError("No games defined!")
if not check_game_config():
raise ValueError("Errors in game data!")
global game_modes
game_modes = load_games.collect_game_modes()
def validate():
# TODO: some way of setting defaults in this module?
check_keys_any(['bind_nonsecure', 'ssl_options'], True)
if has_key('bind_nonsecure') and get('bind_nonsecure'):
check_keys_any(['bind_pairs', ['bind_address', 'bind_port']], True)
if has_key('ssl_options') and get('ssl_options'):
check_keys_any(['ssl_bind_pairs', ['ssl_address', 'ssl_port']], True)
required = ['static_path', 'template_path', 'server_id',
'dgl_status_file', 'init_player_program',]
if get('allow_password_reset') or get('admin_password_reset'):
        required.append('lobby_url')
check_keys_all(required, raise_on_missing=True)
    smtp_opts = ['smtp_host', 'smtp_port', 'smtp_from_addr']
    if check_keys_any(smtp_opts):
        check_keys_all(smtp_opts, True)
    if has_key('smtp_user'):
        check_keys_all('smtp_password', True)
# set up defaults that are conditioned on other values
if not has_key('settings_db'):
set('settings_db', os.path.join(os.path.dirname(get('password_db')),
"user_settings.db3"))
|
python
|
# black=\033[30m
# red=\033[31m
# green=\033[32m
# orange=\033[33m
# blue=\033[34m
# purple=\033[35m
# cyan=\033[36m
# lightgrey=\033[37m
# darkgrey=\033[90m
# lightred=\033[91m
# lightgreen=\033[92m
# yellow=\033[93m
# lightblue=\033[94m
# pink=\033[95m
# lightcyan=\033[96m
# BOLD = \033[1m
# FAINT = \033[2m
# ITALIC = \033[3m
# UNDERLINE = \033[4m
# BLINK = \033[5m
# NEGATIVE = \033[7m
# CROSSED = \033[9m
# END = \033[0m
from time import sleep
import sys
import os
from remove import remove
def del_lines(i, fname):
for _ in range(i):
sys.stdout.write('\x1b[1A')
remove(fname)
def delete_1_line():
sys.stdout.write('\x1b[1A')
sys.stdout.write('\x1b[2K')
def create():
    fname = input('\033[32mEnter filename (default: code.vypr):\033[0m') or ''
    if fname == '':
        # default to Testcases/code; keeping fname consistent ensures the
        # $RET/$REM branches below reopen the correct file
        fname = 'Testcases/code'
        file = open(f'{fname}.vypr', 'w', encoding='utf8')
        file.write("import modulename;\nint main()\n{\n return 0;\n}")
    else:
        fname = f'Testcases/{fname}'
        file = open(f'{fname}.vypr', "w", encoding='utf8')
print('''\033[32mWhat Do You Want To Write To Your File?
[Write "$EOF" (without quotes) to end]
[Write "$RET" (without quotes) to delete upper line]
[Write "$REM" (without quotes) to clear file]\033[0m''')
print('***START***')
print('> ', end='')
text = input()
x = 0
while text != '$EOF' and text != '\n$EOF':
if(text == '$RET' or text == '\n$RET'):
file.close()
delete_1_line()
del_lines(1, f'{fname}.vypr')
file = open(f'{fname}.vypr', "a+")
print('> ', end='')
text = input()
x = x-1
elif (text == '$REM' or text == '\n$REM'):
delete_1_line()
for _ in range(x):
delete_1_line()
file.close()
with open(f'{fname}.vypr', 'w') as f:
f.write('')
file = open(f'{fname}.vypr', "a+")
print('> ', end='')
text = input("\b ")
else:
file.write(text+'\n')
print('> ', end='')
text = input()
x = x+1
file.close()
print("\033[93mFile Created Successfully...\033[0m")
if __name__ == '__main__':
create()
|
python
|
import json
def save(name, csar):
# TODO(@tadeboro): Temporary placeholder
with open("{}.deploy".format(name), "w") as fd:
json.dump(dict(name=csar), fd)
def load(name):
# TODO(@tadeboro): Temporary placeholder
with open("{}.deploy".format(name)) as fd:
return json.load(fd)["name"]
|
python
|
import os
import bpy
from bStream import *
from itertools import chain
import math
def load_anim(pth):
stream = bStream(path=pth)
target_name = f"{os.path.basename(pth).split('.')[0]}_PTH"
target_action = bpy.data.actions.new(f"{target_name}_PTH_ACN")
target = bpy.data.objects.new(target_name, None)
    # Start loading animation
frame_count = stream.readUInt16()
print(frame_count)
stream.readUInt16() #Padding
frames = {
'x':[],
'y':[],
'z':[],
'rx':[],
'ry':[],
'rz':[]
}
XGroup = PTHLoadGroup(stream)
YGroup = PTHLoadGroup(stream)
ZGroup = PTHLoadGroup(stream)
RXGroup = PTHLoadGroup(stream)
RYGroup = PTHLoadGroup(stream)
RZGroup = PTHLoadGroup(stream)
key_data_offset = stream.readUInt32()
#Load Frame Data
PTHLoadGroupData(stream, key_data_offset, XGroup, 'x', frames)
PTHLoadGroupData(stream, key_data_offset, YGroup, 'y', frames)
PTHLoadGroupData(stream, key_data_offset, ZGroup, 'z', frames)
PTHLoadGroupData(stream, key_data_offset, RXGroup, 'rx', frames)
PTHLoadGroupData(stream, key_data_offset, RYGroup, 'ry', frames)
PTHLoadGroupData(stream, key_data_offset, RZGroup, 'rz', frames)
#Set Frame Data
bpy.context.scene.frame_end = frame_count
target.animation_data_clear()
target_anim_data = target.animation_data_create()
GenerateFCurves(target_action, "rotation_euler", 'x', 0, frames['rx'])
GenerateFCurves(target_action, "rotation_euler", 'y', 1, frames['rz'], invert=True)
GenerateFCurves(target_action, "rotation_euler", 'z', 2, frames['ry'])
GenerateFCurves(target_action, "location", 'x', 0, frames['x'])
GenerateFCurves(target_action, "location", 'y', 1, frames['z'], invert=True)
GenerateFCurves(target_action, "location", 'z', 2, frames['y'])
target_anim_data.action = target_action
bpy.context.scene.collection.objects.link(target)
def GenerateFCurves(action, curve, track, track_index, keyframes, invert=False):
curve = action.fcurves.new(curve, index=track_index, action_group=f"Loc{track.upper()}")
curve.keyframe_points.add(count=len(keyframes))
if(invert):
for f in range(len(keyframes)):
keyframes[f][1] = -keyframes[f][1]
curve.keyframe_points.foreach_set("co", list(chain.from_iterable(keyframes)))
curve.update()
def GenerateFCurvesRot(action, track, track_index, keyframes, invert=False):
curve = action.fcurves.new("rotation_euler", index=track_index, action_group=f"Loc{track.upper()}")
curve.keyframe_points.add(count=len(keyframes))
if(invert):
for f in range(len(keyframes)):
keyframes[f][1] = -keyframes[f][1]
for f in range(len(keyframes)):
keyframes[f][1] = math.degrees(keyframes[f][1] * 0.0001533981)
curve.keyframe_points.foreach_set("co", list(chain.from_iterable(keyframes)))
curve.update()
def GenerateKeyframes(obj, data_path, keyframes):
for keyframe in keyframes:
obj[data_path] = keyframe[1]
obj.keyframe_insert(data_path, frame=keyframe[0])
def PTHLoadGroup(stream):
return {'KeyCount':stream.readUInt16(),'BeginIndex':stream.readUInt16(),'ElementCount':stream.readUInt16()}
def PTHWriteGroup(stream, group):
stream.writeUInt16(group['KeyCount'])
stream.writeUInt16(group['BeginIndex'])
stream.writeUInt16(group['ElementCount']) # should always be 2 for now
def PTHLoadGroupData(stream, offset, group, out_pos, frames):
stream.seek(offset + (4 * group['BeginIndex']))
for frame in range(0,group['KeyCount']):
frame_data = [stream.readFloat() for x in range(0, group['ElementCount'])]
if(group['ElementCount'] == 1):
frames[out_pos].append([frame, frame_data[0]])
else:
frames[out_pos].append([int(frame_data[0]), frame_data[1]])
def PTHWriteGroupData(stream, curve, data_offset, dummy=None, invert=False):
begin_index = int((stream.fhandle.tell() - data_offset) / 4)
print(f'Writing Group with begin index {begin_index}')
if(dummy is not None):
stream.writeFloat(dummy)
return {'KeyCount': 1, 'BeginIndex':begin_index, 'ElementCount':1}
for keyframe in curve.keyframe_points:
stream.writeFloat(keyframe.co[0])
stream.writeFloat(keyframe.co[1] if not invert else -keyframe.co[1])
stream.writeFloat((keyframe.co[1] - keyframe.handle_right[1]) / (keyframe.co[0] - keyframe.handle_right[0]))
return {'KeyCount': len(curve.keyframe_points), 'BeginIndex':begin_index, 'ElementCount':3}
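# Layout sketch of the .pth keyframe file as implied by the reader/writer
# above (field widths come from the bStream calls; the byte order is whatever
# bStream defaults to, which is an assumption here):
#
#   u16   frame_count
#   u16   padding
#   6 x { u16 KeyCount; u16 BeginIndex; u16 ElementCount }   # X, Y, Z, RX, RY, RZ groups
#   u32   key_data_offset
#   f32[] key data; each key is either 1 float (constant track) or
#         3 floats: frame, value, out-tangent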
def save_anim(pth): #TODO
stream = bStream(path=pth)
obj = bpy.context.view_layer.objects.active
if(not(obj.type == 'EMPTY')):
return False
stream.writeUInt16(int(bpy.context.scene.frame_end))
stream.writeUInt16(0)
    groups_definitions = stream.fhandle.tell()
stream.pad(36)
keydata_offset = stream.fhandle.tell()
stream.writeUInt32(0)
target_curves = obj.animation_data.action.fcurves
data_offset = stream.fhandle.tell()
XGroup = PTHWriteGroupData(stream, target_curves[0], data_offset)
YGroup = PTHWriteGroupData(stream, target_curves[2], data_offset)
ZGroup = PTHWriteGroupData(stream, target_curves[1], data_offset, invert=True)
    # These groups are written manually because they are not animatable in Blender or are unknown
UnkGroup1 = PTHWriteGroupData(stream, None, data_offset, dummy=0.0)
UnkGroup2 = PTHWriteGroupData(stream, None, data_offset, dummy=0.0)
UnkGroup3 = PTHWriteGroupData(stream, None, data_offset, dummy=0.0)
    stream.seek(groups_definitions)
PTHWriteGroup(stream, XGroup)
PTHWriteGroup(stream, YGroup)
PTHWriteGroup(stream, ZGroup)
PTHWriteGroup(stream, UnkGroup1)
PTHWriteGroup(stream, UnkGroup2)
PTHWriteGroup(stream, UnkGroup3)
stream.seek(keydata_offset)
stream.writeUInt32(data_offset)
return True
|
python
|
# Generated by Django 3.2.5 on 2021-08-11 19:30
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Allergy',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30, unique=True)),
],
),
migrations.CreateModel(
name='Diagnosis',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30, unique=True)),
],
),
migrations.CreateModel(
name='InsuranceProvider',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, unique=True)),
],
),
migrations.CreateModel(
name='Medication',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30, unique=True)),
],
),
migrations.CreateModel(
name='Patient',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=50)),
('last_name', models.CharField(max_length=50)),
('email', models.EmailField(blank=True, max_length=254, null=True, unique=True)),
('phone', models.CharField(blank=True, max_length=25, null=True, unique=True)),
('dob', models.DateField(blank=True, null=True)),
('insurance_member_id', models.CharField(blank=True, max_length=254, null=True)),
('is_new', models.BooleanField(default=True)),
('sex', models.CharField(blank=True, choices=[('M', 'Male'), ('F', 'Female')], max_length=2, null=True)),
('insurance_provider', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='api.insuranceprovider')),
],
),
migrations.CreateModel(
name='ProgressNote',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('weight', models.DecimalField(decimal_places=2, max_digits=6)),
('height', models.DecimalField(decimal_places=2, max_digits=6)),
('blood_pressure_sys', models.IntegerField()),
('blood_pressure_dia', models.IntegerField()),
('chief_complaint', models.CharField(blank=True, max_length=254, null=True)),
('medical_history', models.TextField(blank=True, null=True)),
('treatment', models.CharField(blank=True, max_length=254, null=True)),
('doctors_orders', models.CharField(blank=True, max_length=254, null=True)),
('allergies', models.ManyToManyField(blank=True, to='api.Allergy')),
('diagnoses', models.ManyToManyField(blank=True, to='api.Diagnosis')),
('medication', models.ManyToManyField(blank=True, to='api.Medication')),
('patient', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.patient')),
],
),
migrations.CreateModel(
name='Appointment',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('start', models.DateTimeField()),
('end', models.DateTimeField()),
('status', models.CharField(choices=[('SC', 'Scheduled'), ('CI', 'Checked In'), ('DO', 'Done')], default='SC', max_length=2)),
('created_at', models.DateTimeField(auto_now_add=True)),
('notes', models.TextField(blank=True, null=True)),
('patient', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.patient')),
],
),
]
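# Sketch of the Patient model this initial migration implies (illustrative;
# the real models.py may differ in field ordering and Meta options):
#
#   class Patient(models.Model):
#       first_name = models.CharField(max_length=50)
#       last_name = models.CharField(max_length=50)
#       email = models.EmailField(blank=True, null=True, unique=True)
#       phone = models.CharField(blank=True, max_length=25, null=True, unique=True)
#       dob = models.DateField(blank=True, null=True)
#       insurance_member_id = models.CharField(blank=True, max_length=254, null=True)
#       is_new = models.BooleanField(default=True)
#       sex = models.CharField(blank=True, choices=[('M', 'Male'), ('F', 'Female')], max_length=2, null=True)
#       insurance_provider = models.ForeignKey('InsuranceProvider', blank=True, null=True,
#                                              on_delete=models.SET_NULL)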
|
python
|
from src.extract_old_site.modules import excavation_details_page as exc_det
import pathlib
import os
from unittest import mock
import pytest
# Structure 1, /dig/html/excavations/exc_is.html
exc_is_html_str = """
<html><head><title>Excavating Occaneechi Town - [Excavations]</title></head>
<frameset cols="408,*" border=1>
<frame name="image" src="slid_azt.html" marginwidth=1 marginheight=1>
<frame name="ctrl" src="ctrl_is.html" marginwidth=1 marginheight=1>
</frameset><noframes>you need frames</noframes></html>
"""
ctrl_is_html_str = """
<html><frameset rows="75%,25%" border=1>
<frame name="info" src="info_is.html" marginwidth=1 marginheight=1>
<frame name="zoom" src="zoom_is.html" marginwidth=1 marginheight=1>
</frameset><noframes>you need frames</noframes></html>
"""
info_is_html_str = """
<html><body>
<big><b>Structure 1</b></big><p>
<img align="right" src="../images/l/l240r60.gif">
Type: Structure<br>
Dimensions<br>
Length: 13.4 ft<br>
Width: 11.3 ft<br>
Depth: Unknown ft<br>
Volume: Unknown ft<sup><small>3</small></sup><br>
Area: 115.88 ft<sup><small>2</small></sup><p>
<table border=2 width="100%">
<tr><td rowspan=4>Image:<br>
<a href="slid_azt.html" target="image">1</a>
<a href="slid_bdo.html" target="image">2</a>
<a href="slid_bet.html" target="image">3</a>
</td>
<td align="center"><a href="../artifacts/art_is0.html" target="_top">Artifacts</a></td></tr>
<tr><td align="center">Description</td></tr>
<tr><td align="center"><a href="../maps/exc2.html" target="_top">Map</a></td></tr>
<tr><td align="center"><a href="../index.html" target="_top">Home</a></td></tr>
</table></body></html>
"""
zoom_is_html_str = """
<html><body><big>Zoom To:</big><p>
<a href="exc_cl.html" target="_top">Feature 9</a><br>
<a href="exc_fg.html" target="_top">Sq. 240R60</a><br>
<a href="exc_fh.html" target="_top">Sq. 240R70</a><br>
<a href="exc_ft.html" target="_top">Sq. 250R60</a><br>
<a href="exc_fu.html" target="_top">Sq. 250R70</a><br>
</body></html>
"""
slid_azt_html_str = """
<html><body><map name="hotlinks">
<area coords="144,140,224,214" target="_top" href="exc_cl.html">
<area coords="38,78,80,127" target="_top" href="exc_au.html">
<area coords="359,292,388,361" target="_top" href="exc_am.html">
<area coords="364,134,389,198" target="_top" href="exc_iy.html">
<area coords="326,155,363,190" target="_top" href="exc_iy.html">
<area coords="305,3,363,154" target="_top" href="exc_iy.html">
<area coords="364,90,388,133" target="_top" href="exc_ae.html">
<area coords="364,3,389,89" target="_top" href="exc_iy.html">
</map><center><img src="../images/s/str1.gif" usemap="#hotlinks" border=0><p>Figure 1039. Structure 1, plan view (view to north).</center></body></html>
"""
slid_bdo_html_str = """
<html><body><map name="hotlinks">
<area coords="43,102,193,152" target="_top" href="exc_is.html">
<area coords="22,151,113,219" target="_top" href="exc_is.html">
<area coords="194,118,243,220" target="_top" href="exc_is.html">
<area coords="16,220,237,298" target="_top" href="exc_is.html">
<area coords="114,152,196,223" target="_top" href="exc_cl.html">
</map><center><img src="../images/x16/x6801.jpeg" usemap="#hotlinks" border=0><p>Figure 1038. Structure 1 at top of subsoil (view to southwest).</center></body></html>
"""
slid_bet_html_str = """
<html><body><map name="hotlinks">
</map><center><img src="../images/x16/x6968.jpeg" usemap="#hotlinks" border=0><p>Figure 1037. Structure 1 after excavation (view to southwest).</center></body></html>
"""
# Sq. 240R60, /dig/html/excavations/exc_fg.html
exc_fg_html_str = """
<html><head><title>Excavating Occaneechi Town - [Excavations]</title></head>
<frameset cols="408,*" border=1>
<frame name="image" src="slid_ada.html" marginwidth=1 marginheight=1>
<frame name="ctrl" src="ctrl_fg.html" marginwidth=1 marginheight=1>
</frameset><noframes>you need frames</noframes></html>
"""
ctrl_fg_html_str = """
<html><frameset rows="75%,25%" border=1>
<frame name="info" src="info_fg.html" marginwidth=1 marginheight=1>
<frame name="zoom" src="zoom_fg.html" marginwidth=1 marginheight=1>
</frameset><noframes>you need frames</noframes></html>
"""
info_fg_html_str = """
<html><body>
<big><b>Sq. 240R60</b></big><p>
<img align="right" src="../images/l/l240r60.gif">
Type: Excavation Unit<br>
Dimensions<br>
Length: 10.0 ft<br>
Width: 10.0 ft<br>
Depth: 0.6 ft<br>
Volume: 61.06 ft<sup><small>3</small></sup><br>
Area: 100.00 ft<sup><small>2</small></sup><p>
<table border=2 width="100%">
<tr><td rowspan=4>Image:<br>
<a href="slid_ada.html" target="image">1</a>
<a href="slid_bde.html" target="image">2</a>
</td>
<td align="center"><a href="../artifacts/art_fg0.html" target="_top">Artifacts</a></td></tr>
<tr><td align="center">Description</td></tr>
<tr><td align="center"><a href="../maps/exc0.html" target="_top">Map</a></td></tr>
<tr><td align="center"><a href="../index.html" target="_top">Home</a></td></tr>
</table></body></html>
"""
zoom_fg_html_str = """
<html><body><big>Zoom To:</big><p>
<a href="exc_cl.html" target="_top">Feature 9</a><br>
<a href="exc_is.html" target="_top">Structure 1</a><br>
</body></html>
"""
slid_ada_html_str = """
<html><body><map name="hotlinks">
<area coords="70,283,388,389" target="_top" href="exc_is.html">
<area coords="149,197,386,282" target="_top" href="exc_is.html">
<area coords="343,1,388,197" target="_top" href="exc_is.html">
<area coords="14,1,148,282" target="_top" href="exc_is.html">
<area coords="149,0,342,196" target="_top" href="exc_cl.html">
</map><center><img src="../images/2/240r60.gif" usemap="#hotlinks" border=0><p>Figure 860. Sq. 240R60, top of subsoil (view to north).</center></body></html>
"""
slid_bde_html_str = """
<html><body><map name="hotlinks">
<area coords="175,100,312,160" target="_top" href="exc_cl.html">
<area coords="70,93,113,215" target="_top" href="exc_is.html">
</map><center><img src="../images/x16/x6730.jpeg" usemap="#hotlinks" border=0><p>Figure 859. Sq. 240R60 at top of subsoil (view to north).</center></body></html>
"""
# Extracted
slid_azt_extracted = {
"path": "/dig/html/images/s/str1.gif",
"htmlPagePath": "/dig/html/excavations/slid_azt.html",
"figureNum": "1039",
"caption": "Structure 1, plan view (view to north).",
"clickableAreas": [
{"x1": 144, "y1": 140, "x2": 224, "y2": 214,
"path": "/dig/html/excavations/exc_cl.html"},
{"x1": 38, "y1": 78, "x2": 80, "y2": 127,
"path": "/dig/html/excavations/exc_au.html"},
{"x1": 359, "y1": 292, "x2": 388, "y2": 361,
"path": "/dig/html/excavations/exc_am.html"},
{"x1": 364, "y1": 134, "x2": 389, "y2": 198,
"path": "/dig/html/excavations/exc_iy.html"},
{"x1": 326, "y1": 155, "x2": 363, "y2": 190,
"path": "/dig/html/excavations/exc_iy.html"},
{"x1": 305, "y1": 3, "x2": 363, "y2": 154,
"path": "/dig/html/excavations/exc_iy.html"},
{"x1": 364, "y1": 90, "x2": 388, "y2": 133,
"path": "/dig/html/excavations/exc_ae.html"},
{"x1": 364, "y1": 3, "x2": 389, "y2": 89,
"path": "/dig/html/excavations/exc_iy.html"}
],
"originalDimensions": {
"width": 390,
"height": 390
}
}
slid_bdo_extracted = {
"path": "/dig/html/images/x16/x6801.jpeg",
"htmlPagePath": "/dig/html/excavations/slid_bdo.html",
"figureNum": "1038",
"caption": "Structure 1 at top of subsoil (view to southwest).",
"clickableAreas": [
{"x1": 43, "y1": 102, "x2": 193, "y2": 152,
"path": "/dig/html/excavations/exc_is.html"},
{"x1": 22, "y1": 151, "x2": 113, "y2": 219,
"path": "/dig/html/excavations/exc_is.html"},
{"x1": 194, "y1": 118, "x2": 243, "y2": 220,
"path": "/dig/html/excavations/exc_is.html"},
{"x1": 16, "y1": 220, "x2": 237, "y2": 298,
"path": "/dig/html/excavations/exc_is.html"},
{"x1": 114, "y1": 152, "x2": 196, "y2": 223,
"path": "/dig/html/excavations/exc_cl.html"}
],
"originalDimensions": {
"width": 251,
"height": 390
}
}
slid_bet_extracted = {
"path": "/dig/html/images/x16/x6968.jpeg",
"htmlPagePath": "/dig/html/excavations/slid_bet.html",
"figureNum": "1037",
"caption": "Structure 1 after excavation (view to southwest).",
"clickableAreas": [],
"originalDimensions": {
"width": 390,
"height": 347
}
}
slid_ada_extracted = {
"path": "/dig/html/images/2/240r60.gif",
"htmlPagePath": "/dig/html/excavations/slid_ada.html",
"figureNum": "860",
"caption": "Sq. 240R60, top of subsoil (view to north).",
"clickableAreas": [
{"x1": 70, "y1": 283, "x2": 388, "y2": 389,
"path": "/dig/html/excavations/exc_is.html"},
{"x1": 149, "y1": 197, "x2": 386, "y2": 282,
"path": "/dig/html/excavations/exc_is.html"},
{"x1": 343, "y1": 1, "x2": 388, "y2": 197,
"path": "/dig/html/excavations/exc_is.html"},
{"x1": 14, "y1": 1, "x2": 148, "y2": 282,
"path": "/dig/html/excavations/exc_is.html"},
{"x1": 149, "y1": 0, "x2": 342, "y2": 196,
"path": "/dig/html/excavations/exc_cl.html"}
],
"originalDimensions": {
"width": 390,
"height": 390
}
}
slid_bde_extracted = {
"path": "/dig/html/images/x16/x6730.jpeg",
"htmlPagePath": "/dig/html/excavations/slid_bde.html",
"figureNum": "859",
"caption": "Sq. 240R60 at top of subsoil (view to north).",
"clickableAreas": [
{"x1": 175, "y1": 100, "x2": 312, "y2": 160,
"path": "/dig/html/excavations/exc_is.html"},
{"x1": 70, "y1": 93, "x2": 113, "y2": 215,
"path": "/dig/html/excavations/exc_is.html"}
],
"originalDimensions": {
"width": 390,
"height": 275
}
}
info_is_extracted = {
"name": "Structure 1",
"miniMapIcon": "/dig/html/images/l/l240r60.gif",
"info": {
"Dimensions": {
"Length": "13.4 ft",
"Width": "11.3 ft",
"Depth": "Unknown ft"
},
"Type": "Structure",
"Volume": "Unknown ft<sup>3</sup>",
"Area": "115.88 ft<sup>2</sup>"
},
"images": [slid_azt_extracted, slid_bdo_extracted, slid_bet_extracted],
"artifactsPath": "/dig/html/artifacts/art_is0.html",
"descriptionPath": None
}
info_fg_extracted = {
"name": "Sq. 240R60",
"miniMapIcon": "/dig/html/images/l/l240r60.gif",
"info": {
"Dimensions": {
"Length": "10.0 ft",
"Width": "10.0 ft",
"Depth": "0.6 ft"
},
"Type": "Excavation Unit",
"Volume": "61.06 ft<sup>3</sup>",
"Area": "100.00 ft<sup>2</sup>"
},
"images": [slid_ada_extracted, slid_bde_extracted],
"artifactsPath": "/dig/html/artifacts/art_fg0.html",
"descriptionPath": None
}
zoom_is_extracted = [{
"name": "Feature 9",
"path": "/dig/html/excavations/exc_cl.html"
}, {
"name": "Sq. 240R60",
"path": "/dig/html/excavations/exc_fg.html"
}, {
"name": "Sq. 240R70",
"path": "/dig/html/excavations/exc_fh.html"
}, {
"name": "Sq. 250R60",
"path": "/dig/html/excavations/exc_ft.html"
}, {
"name": "Sq. 250R70",
"path": "/dig/html/excavations/exc_fu.html"
}]
zoom_fg_extracted = [{
"name": "Feature 9",
"path": "/dig/html/excavations/exc_cl.html"
}, {
"name": "Structure 1",
"path": "/dig/html/excavations/exc_is.html"
}]
ctrl_is_fully_extracted = {
"name": "Structure 1",
"miniMapIcon": "/dig/html/images/l/l240r60.gif",
"info": {
"Dimensions": {
"Length": "13.4 ft",
"Width": "11.3 ft",
"Depth": "Unknown ft"
},
"Type": "Structure",
"Volume": "Unknown ft<sup>3</sup>",
"Area": "115.88 ft<sup>2</sup>"
},
"images": [slid_azt_extracted, slid_bdo_extracted, slid_bet_extracted],
"artifactsPath": "/dig/html/artifacts/art_is0.html",
"descriptionPath": None,
"relatedElements": zoom_is_extracted
}
ctrl_fg_fully_extracted = {
"name": "Sq. 240R60",
"miniMapIcon": "/dig/html/images/l/l240r60.gif",
"info": {
"Dimensions": {
"Length": "10.0 ft",
"Width": "10.0 ft",
"Depth": "0.6 ft"
},
"Type": "Excavation Unit",
"Volume": "61.06 ft<sup>3</sup>",
"Area": "100.00 ft<sup>2</sup>"
},
"images": [slid_ada_extracted, slid_bde_extracted],
"artifactsPath": "/dig/html/artifacts/art_fg0.html",
"descriptionPath": None,
"relatedElements": zoom_fg_extracted
}
# fg, then is according to how mock_iterdir is defined later on
exc_dir_fully_extracted = [{
"name": "Sq. 240R60",
"miniMapIcon": "/dig/html/images/l/l240r60.gif",
"info": {
"Dimensions": {
"Length": "10.0 ft",
"Width": "10.0 ft",
"Depth": "0.6 ft"
},
"Type": "Excavation Unit",
"Volume": "61.06 ft<sup>3</sup>",
"Area": "100.00 ft<sup>2</sup>"
},
"images": [slid_ada_extracted, slid_bde_extracted],
"artifactsPath": "/dig/html/artifacts/art_fg0.html",
"descriptionPath": None,
"relatedElements": zoom_fg_extracted,
"path": "/dig/html/excavations/exc_fg.html"
}, {
"name": "Structure 1",
"miniMapIcon": "/dig/html/images/l/l240r60.gif",
"info": {
"Dimensions": {
"Length": "13.4 ft",
"Width": "11.3 ft",
"Depth": "Unknown ft"
},
"Type": "Structure",
"Volume": "Unknown ft<sup>3</sup>",
"Area": "115.88 ft<sup>2</sup>"
},
"images": [slid_azt_extracted, slid_bdo_extracted, slid_bet_extracted],
"artifactsPath": "/dig/html/artifacts/art_is0.html",
"descriptionPath": None,
"relatedElements": zoom_is_extracted,
"path": "/dig/html/excavations/exc_is.html"
}]
def mock_extract_image_page(image_html_str, extra1, extra2, extra3):
if image_html_str == slid_ada_html_str:
return slid_ada_extracted
elif image_html_str == slid_azt_html_str:
return slid_azt_extracted
elif image_html_str == slid_bde_html_str:
return slid_bde_extracted
elif image_html_str == slid_bdo_html_str:
return slid_bdo_extracted
elif image_html_str == slid_bet_html_str:
return slid_bet_extracted
raise Exception("did not find details for this particular img string")
def mock_readfile(filename, parent_dir_path_obj):
resolved_path_obj = pathlib.Path(os.path.normpath(parent_dir_path_obj / filename))
filename = resolved_path_obj.name
parent_dir_str = resolved_path_obj.parent.as_posix()
if parent_dir_str == "C:/dig/html/excavations":
# Structure 1
if filename == "slid_azt.html":
return slid_azt_html_str
elif filename == "slid_bdo.html":
return slid_bdo_html_str
elif filename == "slid_bet.html":
return slid_bet_html_str
elif filename == "zoom_is.html":
return zoom_is_html_str
elif filename == "info_is.html":
return info_is_html_str
elif filename == "ctrl_is.html":
return ctrl_is_html_str
elif filename == "exc_is.html":
return exc_is_html_str
# Sq. 240R60, /dig/html/excavations/exc_fg.html
elif filename == "exc_fg.html":
return exc_fg_html_str
elif filename == "ctrl_fg.html":
return ctrl_fg_html_str
elif filename == "info_fg.html":
return info_fg_html_str
elif filename == "zoom_fg.html":
return zoom_fg_html_str
elif filename == "slid_ada.html":
return slid_ada_html_str
elif filename == "slid_bde.html":
return slid_bde_html_str
raise Exception("did not find file in mock_readfile")
@pytest.mark.parametrize("zoom_html_str,expected_result", [
(zoom_is_html_str, zoom_is_extracted),
(zoom_fg_html_str, zoom_fg_extracted),
("""
<html><body><big>Zoom To:</big><p>
<a href="exc_gw.html" target="_top">Sq. 270R90</a><br>
<a href="exc_gn.html" target="_top">Sq. 270R100</a><br>
</body></html>
""", [{
"name": "Sq. 270R90",
"path": "/dig/html/excavations/exc_gw.html"
}, {
"name": "Sq. 270R100",
"path": "/dig/html/excavations/exc_gn.html"
}])
])
def test_extract_zoom_to(zoom_html_str, expected_result):
assert exc_det.extract_zoom_to(zoom_html_str) == expected_result
@mock.patch("src.extract_old_site.modules.excavation_details_page.extract_image_page")
@pytest.mark.parametrize("info_html_str,expected_result", [
(info_fg_html_str, info_fg_extracted),
(info_is_html_str, info_is_extracted)
])
def test_extract_info_page(mock_ext_i_p, info_html_str, expected_result):
mock_ext_i_p.side_effect = mock_extract_image_page
assert exc_det.extract_info_page(
info_html_str, "/dig/html/excavations", "C:/", mock_readfile
) == expected_result
@mock.patch("src.extract_old_site.modules.excavation_details_page.extract_image_page")
@pytest.mark.parametrize("ctrl_html_str,expected_result", [
(ctrl_fg_html_str, ctrl_fg_fully_extracted),
(ctrl_is_html_str, ctrl_is_fully_extracted)
])
def test_get_ctrl_page_contents(mock_ext_i_p, ctrl_html_str, expected_result):
mock_ext_i_p.side_effect = mock_extract_image_page
assert exc_det.get_ctrl_page_contents(
ctrl_html_str, "/dig/html/excavations", "C:/", mock_readfile
) == expected_result
@mock.patch("src.extract_old_site.modules.excavation_details_page.extract_image_page")
@pytest.mark.parametrize("exc_html_str,expected_result", [
(exc_fg_html_str, ctrl_fg_fully_extracted),
(exc_is_html_str, ctrl_is_fully_extracted)
])
def test_get_exc_page_contents(mock_ext_i_p, exc_html_str, expected_result):
mock_ext_i_p.side_effect = mock_extract_image_page
assert exc_det.get_exc_page_contents(
exc_html_str, "/dig/html/excavations", "C:/", mock_readfile
) == expected_result
@mock.patch("src.extract_old_site.modules.excavation_details_page.extract_image_page")
def test_extract_all_exc_pages(mock_ext_i_p):
mock_ext_i_p.side_effect = mock_extract_image_page
with mock.patch.object(pathlib.Path, "iterdir") as mock_iterdir:
filenames_list = [
"exc_fg.html", "exc_is.html", "info_fg.html", "info_is.html",
"slid_ada.html", "slid_azt.html", "slid_bde.html", "slid_bdo.html", "slid_bet.html",
"zoom_fg.html", "zoom_is.html",
]
iterdir_path_objs = [(pathlib.Path("C:/dig/html/excavations") / filename)
for filename in filenames_list]
mock_iterdir.return_value = iterdir_path_objs
assert exc_det.extract_all_exc_pages("C:/", mock_readfile) == exc_dir_fully_extracted
|
python
|
import numpy as np
import sys
import os
from keras.models import load_model
sys.path.append("../utilities")
import constants
from data import get_train_test
from metrics import plot_n_roc_sic
datasets_c = ['h_qq_rot_charged', 'h_gg_rot_charged', 'cp_qq_rot_charged', 'qx_qg_rot_charged', 's8_gg_rot_charged', 'zp_qq_rot_charged']
datasets_s = ['h_qq', 'h_gg', 'cp_qq', 'qx_qg', 's8_gg', 'zp_qq']
def comp_all(i, datasets = datasets_s, n = 150000):
name = 'all_' + datasets[i] + '_comps'
X_tests = []
    y_tests = []
models = []
model_types = []
labels = []
sig = datasets[i]
for j in range(6):
if j == i:
continue
bg = datasets[j]
constants.SIG_H5 = os.path.join(constants.DATA_DIR, sig + '.h5')
constants.BG_H5 = os.path.join(constants.DATA_DIR, bg + '.h5')
X_train, X_test, y_train, y_test, \
_, _, sig_metadata, \
bg_metadata, _ = get_train_test(n=n)
if os.path.isfile('../best_model/' + sig + '_vs_' + bg + '_model'):
model_name = sig + '_vs_' + bg
else:
model_name = bg + '_vs_' + sig
model = load_model('../best_model/' + model_name + '_model')
X_tests.append(X_test)
        y_tests.append(y_test)
models.append(model)
model_types.append(True)
labels.append(model_name)
    plot_n_roc_sic(name, 'final_curves/sic_'+name, X_tests, y_tests, models, model_types, labels, True, fontfac=0.5)
    plot_n_roc_sic(name, 'final_curves/roc_'+name, X_tests, y_tests, models, model_types, labels, False, fontfac=0.5)
if __name__ == '__main__':
for i in range(len(datasets_s)):
comp_all(i)
|
python
|
DEFAULT_REGID = u'strongswan.org'
DEFAULT_ENTITY_NAME = u'strongSwan Project'
DEFAULT_HASH_ALGORITHM = u'sha256'
|
python
|
import sys
import os
import cv2  # requires the OpenCV (cv2) library
import numpy as np
def main( background, input_filename, output_filename ):
    # Read the background and input images
    bak = cv2.imread(background)
    img = cv2.imread(input_filename)
    # Per-pixel color distance; cast to signed ints first to avoid uint8 wrap-around
    dif = img.astype(np.int32) - bak.astype(np.int32)
    dif = np.sqrt( np.sum( dif * dif, axis=2 ) )
    msk = ( dif > 10 ).astype(np.uint8)*255
kernel = np.ones((3,3),np.uint8)
# opening
msk = cv2.erode(msk, kernel,iterations = 1)
msk = cv2.dilate(msk, kernel,iterations = 1)
# closing
msk = cv2.dilate(msk, kernel,iterations = 2)
msk = cv2.erode(msk, kernel,iterations = 2)
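    # The same opening/closing could also be written with OpenCV's built-in
    # morphology helper (an equivalent alternative, not used by this script):
    #   msk = cv2.morphologyEx(msk, cv2.MORPH_OPEN, kernel, iterations=1)
    #   msk = cv2.morphologyEx(msk, cv2.MORPH_CLOSE, kernel, iterations=2)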
cv2.imwrite( output_filename, msk )
if( __name__ == '__main__' ):
    if( len(sys.argv) >= 4 ):
        main( sys.argv[1], sys.argv[2], sys.argv[3] )
    else:
        print( 'usage: python '+sys.argv[0]+' background input_filename output_filename' )
|
python
|
from pycylon import Table
from pycylon import CylonContext
import numpy as np
ctx: CylonContext = CylonContext(config=None, distributed=False)
data_dictionary = {'col-1': [1, 2, 3, 4], 'col-2': [5, 6, 7, 8], 'col-3': [9, 10, 11, 12]}
tb: Table = Table.from_pydict(ctx, data_dictionary)
print("Convert to PyArrow Table")
print(tb.to_arrow())
print("Convert to Pandas")
print(tb.to_pandas())
print("Convert to Dictionar")
print(tb.to_pydict())
print("Convert to Numpy")
npy: np.ndarray = tb.to_numpy(order='F', zero_copy_only=True)
print(npy)
print(npy.flags)
npy: np.ndarray = tb.to_numpy(order='C', zero_copy_only=True)
print(npy)
print(npy.flags)
|
python
|
from django.forms import Form
def set_form_widgets_attrs(form: Form, attrs: dict):
"""Applies a given HTML attributes to each field widget of a given form.
Example:
set_form_widgets_attrs(my_form, {'class': 'clickable'})
"""
for _, field in form.fields.items():
attrs_ = dict(attrs)
for name, val in attrs.items():
if hasattr(val, '__call__'):
attrs_[name] = val(field)
field.widget.attrs = field.widget.build_attrs(attrs_)
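
# Hypothetical usage sketch (not part of the original module): attribute values may
# also be callables, which receive the field and return the attribute value.
#
#     set_form_widgets_attrs(my_form, {
#         'class': 'form-control',
#         'title': lambda field: field.label,
#     })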
|
python
|
# add_request_point.py
from arcgis.features import Feature, FeatureSet
from arcgis.geometry import Point
from copy import deepcopy
def add_request_point(gis, item_id, address_json, ip_address, user_agent, request_time):
# get feature layer to edit
layer_item = gis.content.get(item_id)
feature_layer = layer_item.layers[0]
# compose a Point object
pt = Point({'x':address_json['longitude'],
'y':address_json['latitude'],
'spatialReference':{'wkid':4326}
})
# compose a Feature object
request_attributes = {'ip_address':ip_address,
'user_agent':user_agent,
'request_address': f"{address_json['city']}, {address_json['region_name']}, {address_json['country_name']}, {address_json['zip']}",
'request_time2':request_time.timestamp()*1000
}
ft = Feature(geometry=pt, attributes=request_attributes)
# Edit the feature layer
edit_result = feature_layer.edit_features(adds=[ft])
return edit_result
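
# Hypothetical call (names and values below are illustrative, not from the original):
#
#     from arcgis.gis import GIS
#     import datetime
#     gis = GIS("https://www.arcgis.com", "user", "password")
#     add_request_point(
#         gis, "<feature-layer-item-id>",
#         {"longitude": -77.04, "latitude": 38.89, "city": "Washington",
#          "region_name": "District of Columbia", "country_name": "United States",
#          "zip": "20500"},
#         "203.0.113.7", "Mozilla/5.0", datetime.datetime.utcnow())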
|
python
|
from .utils import send_message
__version__ = '1.0.1'
__all__ = ['send_message']
|
python
|
# -*- coding: utf-8 -*-
from django.dispatch import Signal
validate_custom_order_field = Signal(
providing_args=[
'value',
]
)
order_paid = Signal(
providing_args=[
'invoice',
]
)
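
# Hypothetical receiver hookup (a sketch, not part of this module):
#
#     from django.dispatch import receiver
#
#     @receiver(order_paid)
#     def handle_order_paid(sender, invoice, **kwargs):
#         ...  # e.g. mark the related order as paid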
|
python
|
"""
utility functions
"""
import pandas as pd
import numpy as np
TEST_DF = pd.DataFrame([1,2,3,4,5,6])
def five_mult(x):
"""multiplying a number by 5 function"""
return 5 * x
def tri_recursion(k):
"""recursion of a value"""
if(k>0):
result = k + tri_recursion(k-1)
# print(result)
else:
result = 0
return result
def sum_two_numbers(a,b):
"""sum two numbers"""
return a + b
|
python
|
# -*- coding: utf-8 -*-
"""
Created at 2019-10-30
@author: dongwan.kim
Converting 'https://nlp.seas.harvard.edu/2018/04/03/attention.html'
which is pytorch implementation
to Keras implementation.
# ToDo: copy layer test with simple multi hidden layer regression.
"""
import copy
import numpy as np
import math
import matplotlib.pyplot as plt
from functools import partial
from tensorflow.keras.models import Model
from tensorflow.keras.layers import (
Dense, Flatten, Conv1D, Dropout, Embedding, Input, Lambda, Layer, Softmax
)
from tensorflow.keras import Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import backend as K
from tensorflow.keras.layers import InputSpec
from transformer.test_config import *
class PositionalEncodingK(Layer):
"""
>>> # test implementation
>>> pe = np.zeros([max_words_in_sentence, d_model]); print(pe, pe.shape)
>>> position = np.expand_dims(np.array(range(max_words_in_sentence)), 1); print(position, position.shape)
>>> div_term = np.exp(np.arange(start=0.0, stop=d_model, step=2) * -(math.log(10000.0) / d_model)); print(div_term, div_term.shape)
>>> pe[:, 0::2] = np.sin(position * div_term)
>>> pe[:, 1::2] = np.cos(position * div_term)
>>> pe = np.expand_dims(pe, 0); print(pe, pe.shape)
>>> # plotting
>>> d_model = 12
>>> num_sentences = 1
>>> num_tokens_in_sentence = 100
>>> plt.figure(figsize=(15, 5))
>>> pe = PositionalEncodingK(d_model=d_model, dropout_rate=0)
>>> y = pe(K.zeros((num_sentences, num_tokens_in_sentence, d_model)))
>>> plt.plot(np.arange(num_tokens_in_sentence), K.eval(y)[0, :, 4:8])
>>> plt.legend(["dim %d" % p for p in [4, 5, 6, 7]])
>>> plt.show()
"""
def __init__(self, d_model, dropout_rate, max_len=5000, **kwargs):
"""
Parameters
----------
max_len: max number of tokens in sentence.
d_model: embedding dim
kwargs
"""
super(PositionalEncodingK, self).__init__(**kwargs)
self.dropout = Dropout(rate=dropout_rate)
pe = np.zeros([max_len, d_model])
position = np.expand_dims(np.array(range(max_len)), 1)
div_term = np.exp(
np.arange(start=0.0, stop=d_model, step=2) * -(math.log(10000.0) / d_model)
)
pe[:, 0::2] = np.sin(position * div_term)
pe[:, 1::2] = np.cos(position * div_term)
self.pe = np.expand_dims(pe, 0)
def call(self, x):
# x = x + K.constant(self.pe[:, :x.shape[1].value])
x = x + K.constant(self.pe[:, :x.shape[1]])
return self.dropout(x)
def compute_output_shape(self, input_shape):
return input_shape
class EmbeddingsK(Layer):
"""
>>> x = K.constant([[0, 6, 1, 1, 1]]); print(x, x.shape) # one sentence with 5 token
>>> y = EmbeddingsK(d_model=12, vocab=7)(x) # embedding on 12 dim for 7 tokens total.
>>> out = K.eval(y)
>>> print(out, out.shape)
>>> np.random.seed(0)
>>> emb_weight = np.random.rand(7, 12) # total 7 tokens and hidden size is 12
    >>> x = K.constant([list(range(7))]); print(x, x.shape)  # one sentence with 7 tokens
>>> y = EmbeddingsK(d_model=12, vocab=7, weight=emb_weight)(x) # embedding on 12 dim for 7 tokens total.
>>> test_emb_keras = K.eval(y)
>>> print(test_emb_keras, test_emb_keras.shape)
>>> # np.equal(test_emb_pytorch, test_emb_keras)
>>> # np.array_equal(test_emb_pytorch, test_emb_keras)
"""
def __init__(self, d_model, vocab, weight=None):
"""
Parameters
----------
d_model : 512 or 1024 or ..
vocab : size of token dict
"""
super(EmbeddingsK, self).__init__()
self.d_model = d_model
if weight is None:
self.lut = Embedding(input_dim=vocab, output_dim=d_model)
elif isinstance(weight, np.ndarray):
self.lut = Embedding(input_dim=vocab, output_dim=d_model, weights=[weight],
trainable=False)
else:
raise ValueError('Invalid weight')
def call(self, x):
return self.lut(x) * math.sqrt(self.d_model)
class LayerNormK(Layer):
"""
btw in TF2.0, LayerNormalization functionality is provided.
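    (e.g., assuming TF >= 2.0, tf.keras.layers.LayerNormalization(epsilon=1e-6) could be used instead)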
>>> ln = LayerNormK(features=12)
>>> x = K.constant([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]); print(x, x.shape) # one token with d_model=12
>>> y = K.eval(ln(x))
>>>
"""
def __init__(self, features, eps=1e-6):
super(LayerNormK, self).__init__()
self.features = features # d_model
self.eps = eps
self.a_2 = None
self.b_2 = None
    def build(self, _):
        """
        Creates the scale (a_2) and bias (b_2) weights; they are shared for all
        layer normalizations, according to the description of add_weight:
        'Adds a new variable to the layer, or gets an existing one; returns it'
        """
self.a_2 = self.add_weight(
name='layer_norm_scale',
shape=(self.features,),
initializer='ones',
trainable=True
)
self.b_2 = self.add_weight(
name='layer_norm_bias',
shape=(self.features,),
initializer='zeros',
trainable=True
)
return super(LayerNormK, self).build(self.features)
def call(self, x):
mean = K.mean(x=x, axis=-1, keepdims=True)
std = K.std(x=x, axis=-1, keepdims=True)
return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
class GeneratorK(Layer):
"""
linear + softmax for final output layer.
>>> ge = GeneratorK(d_model=12, vocab=7)
>>> x = K.constant([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]); print(x, x.shape) # output of final layer
>>> y = ge(x)
>>> out = K.eval(y)
>>> print(out, out.shape, K.eval(K.argmax(out)))
"""
def __init__(self, d_model, vocab):
"""
Parameters
----------
d_model: hidden size
vocab: size of token dict
"""
super(GeneratorK, self).__init__()
self.proj = Dense(input_shape=(d_model,), units=vocab)
def call(self, x):
"""
softmax followed by log is not stable,
need to use log_softmax after upgrade to tf 2.0
"""
        return K.log(x=K.softmax(self.proj(x), axis=-1))
def subsequent_mask_k(size):
"""
Mask out subsequent positions.
>>> subsequent_mask(3)
tensor([
[
[1, 0, 0],
[1, 1, 0],
[1, 1, 1]
]], dtype=torch.uint8) # [1, 3, 3]
This function gives mask for a sentence with 'size' words.
"""
attn_shape = (1, size, size)
subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')
return K.equal(K.constant(subsequent_mask), 0)
class BatchK:
def __init__(self, src, trg=None, pad=0):
self.src = src
self.src_mask = K.expand_dims(K.not_equal(src, pad), axis=-2)
if trg is not None:
self.trg = trg[:, :-1] # without last token of sentence
self.trg_y = trg[:, 1:] # without first token of sentence
self.trg_mask = self.make_std_mask(self.trg, pad)
self.ntokens = K.sum(K.cast(K.not_equal(self.trg_y, pad), dtype='uint8'))
@staticmethod
def make_std_mask(trg, pad):
trg_mask = K.expand_dims(K.not_equal(trg, pad), axis=-2)
trg_mask = trg_mask & subsequent_mask_k(size=trg.shape.as_list()[-1])
return trg_mask
class EncoderLayerK(Layer):
"""
"""
def __init__(self):
super(EncoderLayerK, self).__init__()
# ToDo: implement
def clones_k(module, N):
"""
>>> d = Dense(input_shape=(d_model,), units=d_model)
>>> d_list = clones_k(d, 4)
Parameters
----------
module: layer to be copied
N: number of copy
Returns
-------
"""
    # return [copy.deepcopy(module) for _ in range(N)]  # probably not working
# reference: https://keras.io/layers/about-keras-layers/
config = module.get_config()
return [type(module).from_config(config) for _ in range(N)]
def attention_k(q_w_q, k_w_k, v_w_v, mask=None, dropout=None):
"""
Parameters
----------
q_w_q: (batch size, num heads, num tokens in sentence, d_model / d_k), (5, 2, 4, 6)
k_w_k
v_w_v
mask: (5, 1, 1, 4)
dropout: dropout layer, not dropout rate
Returns
-------
"""
    def masked_fill(x, mask, target_mask_val, filled_value=-1e9):
        # where mask == target_mask_val, replace the score with filled_value; keep it elsewhere
        keep = K.cast(K.not_equal(K.cast(mask, K.floatx()), target_mask_val), K.floatx())
        return x * keep + (1.0 - keep) * filled_value
d_k = q_w_q.shape.as_list()[-1]
scores = K.batch_dot(q_w_q, k_w_k, axes=[3, 3]) / math.sqrt(d_k) # (5, 2, 4, 4)
if mask is not None:
scores = masked_fill(scores, mask, 0, -1e9)
p_attn = K.softmax(scores)
if dropout is not None:
p_attn = dropout(p_attn)
return K.batch_dot(p_attn, v_w_v, axes=[3, 2]), p_attn
class MultiHeadedAttentionK(Layer):
"""
"""
def __init__(self, h, d_model, dropout=0.1, linears=None):
"""
Parameters
----------
h: number of heads
d_model:
"""
super(MultiHeadedAttentionK, self).__init__()
assert d_model % h == 0
self.d_k = d_model // h # d_k = d_v = d_model/h
self.h = h # number of heads
if linears:
assert len(linears) == 4
self.linears = linears
else:
self.linears = clones_k(Dense(input_shape=(d_model,), units=d_model), 4)
self.attn = None
self.dropout = Dropout(rate=dropout)
def call(self, query_key_value_mask):
query, key, value, mask = query_key_value_mask
if mask is not None:
mask = K.expand_dims(mask, 1) # (5, 1, 1, 4)
nbatches = query.shape.as_list()[0]
q_w_q, k_w_k, v_w_v = [
K.permute_dimensions(
x=K.reshape(
x=l(x),
shape=(nbatches, -1, self.h, self.d_k)
),
pattern=(0, 2, 1, 3))
for l, x in zip(self.linears, (query, key, value))
]
x, self.attn = attention_k(q_w_q, k_w_k, v_w_v, mask=mask, dropout=self.dropout)
        x = K.reshape(K.permute_dimensions(x, pattern=(0, 2, 1, 3)), shape=(nbatches, -1, self.h * self.d_k))
return self.linears[-1](x)
class SublayerConnectionK(Layer):
# def __init__(self, size, sublayer, dropout):
def __init__(self, size, dropout):
"""
Parameters
----------
size: features = d_model
dropout: dropout rate
"""
super(SublayerConnectionK, self).__init__()
self.norm = LayerNormK(features=size)
self.dropout = Dropout(rate=dropout)
# self.sublayer = sublayer
def call(self, x, sublayer):
return x + self.dropout(sublayer(self.norm(x)))
class PositionwiseFeedForwardK(Layer):
def __init__(self, d_model, d_ff, dropout=0.1):
super(PositionwiseFeedForwardK, self).__init__()
self.w_1 = Dense(input_shape=(d_model,), units=d_ff)
self.w_2 = Dense(input_shape=(d_ff,), units=d_model)
self.dropout = Dropout(rate=dropout)
def call(self, x):
return self.w_2(self.dropout(K.relu(self.w_1(x))))
class Transformer(Layer):
"""
>>> model = Transformer(
d_model=512,
src_vocab=100,
trg_vocab=100,
dropout_rate=0.1,
num_coder_blocks=2,
num_heads=4,
d_ff=1024
)
>>> model.build(input_shape=(None, 12))
    >>> model.compile(
            optimizer=Adam()
        )
"""
def __init__(self, d_model, src_vocab, trg_vocab, dropout_rate, num_coder_blocks, num_heads, d_ff):
super().__init__()
self.d_model = d_model
self.src_vocab = src_vocab
self.trg_vocab = trg_vocab
self.dropout_rate = dropout_rate
self.num_coder_blocks = num_coder_blocks
self.num_heads = num_heads
self.d_ff = d_ff
# noinspection PyAttributeOutsideInit
def build(self, input_shape):
print(input_shape)
# assert isinstance(input_shape, list) and len(input_shape) ==
assert len(input_shape) == 4
src_shape, trg_shape, src_mask_shape, trg_mask_shape = input_shape
self.input_spec = [
InputSpec(shape=(src_shape, None)),
InputSpec(shape=(trg_shape, None)),
InputSpec(shape=(src_mask_shape, None)),
InputSpec(shape=(trg_mask_shape, None))
]
self.src_emb_layer = EmbeddingsK(d_model=self.d_model, vocab=self.src_vocab)
self.src_pe = PositionalEncodingK(d_model=self.d_model, dropout_rate=self.dropout_rate)
self.encoder_mha_list = [
MultiHeadedAttentionK(h=self.num_heads, d_model=self.d_model, dropout=self.dropout_rate)
for _ in range(self.num_coder_blocks)
]
self.encoder_pff_list = [
PositionwiseFeedForwardK(d_model=self.d_model, d_ff=self.d_ff)
for _ in range(self.num_coder_blocks)
]
        self.encoder_slc_mha_list = [
            SublayerConnectionK(size=self.d_model, dropout=self.dropout_rate)
            for _ in self.encoder_mha_list
        ]
        self.encoder_slc_pff_list = [
            SublayerConnectionK(size=self.d_model, dropout=self.dropout_rate)
            for _ in self.encoder_pff_list
        ]
# self.encoder_slc_list = [
# SublayerConnectionK(size=self.d_model, sublayer=, dropout=self.dropout_rate)
# for _ in range(self.num_coder_blocks * 2)
# ]
        self.encoder_layer_norm = LayerNormK(features=self.d_model)
self.trg_emb_layer = EmbeddingsK(d_model=self.d_model, vocab=self.trg_vocab)
self.trg_pe = PositionalEncodingK(d_model=self.d_model, dropout_rate=self.dropout_rate)
self.decoder_mha_list = [
MultiHeadedAttentionK(h=self.num_heads, d_model=self.d_model, dropout=self.dropout_rate)
for _ in range(self.num_coder_blocks * 2)
]
self.decoder_pff_list = [
PositionwiseFeedForwardK(d_model=self.d_model, d_ff=self.d_ff)
for _ in range(self.num_coder_blocks)
]
        self.decoder_slc_mha_list = [
            SublayerConnectionK(size=self.d_model, dropout=self.dropout_rate)
            for _ in self.decoder_mha_list
        ]
        self.decoder_slc_pff_list = [
            SublayerConnectionK(size=self.d_model, dropout=self.dropout_rate)
            for _ in self.decoder_pff_list
        ]
        self.decoder_layer_norm = LayerNormK(features=self.d_model)
def call(self, src_trg_smask_tmask):
src, trg, src_mask, trg_mask = src_trg_smask_tmask
input_encoder = self.src_pe(self.src_emb_layer(src))
# encoder
for i in range(self.num_coder_blocks):
# multi headed attention and 1st sublayer connection
            self_attn = lambda x: self.encoder_mha_list[i]([x, x, x, src_mask])
out_slc1 = self.encoder_slc_mha_list[i](x=input_encoder, sublayer=self_attn)
# position wise feed forward and 2nd sublayer connection
input_encoder = self.encoder_slc_pff_list[i](x=out_slc1, sublayer=self.encoder_pff_list[i])
output_encoder = self.encoder_layer_norm(input_encoder)
# input to decoder (embedding and positional encoding)
input_decoder = self.trg_pe(self.trg_emb_layer(trg))
# decoder
        for j in range(self.num_coder_blocks):
            # sublayer 1 of decoder: masked self-attention
            self_attn1 = lambda x: self.decoder_mha_list[2 * j]([x, x, x, trg_mask])
            out_slc1 = self.decoder_slc_mha_list[2 * j](x=input_decoder, sublayer=self_attn1)
            # sublayer 2 of decoder: attention over the encoder output
            src_attn2 = lambda x: self.decoder_mha_list[2 * j + 1]([x, output_encoder, output_encoder, src_mask])
            out_slc2 = self.decoder_slc_mha_list[2 * j + 1](x=out_slc1, sublayer=src_attn2)
            # position-wise feed-forward and 3rd sublayer connection
            input_decoder = self.decoder_slc_pff_list[j](x=out_slc2, sublayer=self.decoder_pff_list[j])
        output_decoder = self.decoder_layer_norm(input_decoder)
return output_decoder
class TransformerSmall(Layer):
"""
>>> model = Sequential([TransformerSmall(
d_model=512,
src_vocab=100,
dropout_rate=0.1,
num_coder_blocks=2,
num_heads=4,
d_ff=1024
)])
>>> dummy_batch = K.constant(np.random.randint(low=0, high=max_words_in_sentence, size=(batch_size, max_words_in_sentence)))
>>> dummy_batch
>>> dummy_src_mask = subsequent_mask_k(max_words_in_sentence)
>>> dummy_src_mask
>>> model([dummy_batch, dummy_src_mask])
>>> model([12, 12])
>>> model.build([12, 12])
>>> model.compile(
optimizer=Adam(lr=0.002)
)
"""
def __init__(self, d_model, src_vocab, dropout_rate, num_coder_blocks, num_heads, d_ff):
super().__init__()
self.d_model = d_model
self.src_vocab = src_vocab
self.dropout_rate = dropout_rate
self.num_coder_blocks = num_coder_blocks
self.num_heads = num_heads
self.d_ff = d_ff
# noinspection PyAttributeOutsideInit
def build(self, input_shape):
print('input_shape:', input_shape)
# assert isinstance(input_shape, list) and len(input_shape) ==
assert len(input_shape) == 2
src_shape, src_mask_shape = input_shape
self.input_spec = [
InputSpec(shape=src_shape),
InputSpec(shape=src_mask_shape)
]
self.src_emb_layer = EmbeddingsK(d_model=self.d_model, vocab=self.src_vocab)
self.src_pe = PositionalEncodingK(d_model=self.d_model, dropout_rate=self.dropout_rate)
self.encoder_mha_list = [
MultiHeadedAttentionK(h=self.num_heads, d_model=self.d_model, dropout=self.dropout_rate)
for _ in range(self.num_coder_blocks)
]
self.encoder_pff_list = [
PositionwiseFeedForwardK(d_model=self.d_model, d_ff=self.d_ff)
for _ in range(self.num_coder_blocks)
]
self.encoder_slc_mha_list = [
SublayerConnectionK(size=self.d_model, dropout=self.dropout_rate)
for _ in self.encoder_mha_list
]
self.encoder_slc_pff_list = [
SublayerConnectionK(size=self.d_model, dropout=self.dropout_rate)
for _ in self.encoder_pff_list
]
        self.encoder_layer_norm = LayerNormK(features=self.d_model)
super().build(input_shape)
def call(self, src_smask):
src, src_mask = src_smask
input_encoder = self.src_pe(self.src_emb_layer(src))
# encoder
for i in range(self.num_coder_blocks):
# multi headed attention and 1st sublayer connection
self_attn = lambda x: self.encoder_mha_list[i]([x, x, x, src_mask])
out_slc1 = self.encoder_slc_mha_list[i](input_encoder, sublayer=self_attn)
# position wise feed forward and 2nd sublayer connection
input_encoder = self.encoder_slc_pff_list[i](x=out_slc1, sublayer=self.encoder_pff_list[i])
output_encoder = self.encoder_layer_norm(input_encoder)
return output_encoder
# if __name__ == '__test__':
# max_words_in_sentence = 4 # of words in each sentence
# batch_size = 5 # of sentences
# size_dict = 7 # size of word dictionary
# d_model = 12
# hidden_size_pff = 11
# num_head = 2
# dropout_rate = 0.1
# num_encoder_layer = 2
# learning_rate = 0.001
#
# x = Input(shape=(max_words_in_sentence,))
# src = K.constant([[0, 3, 0, 2],
# [1, 0, 3, 2],
# [0, 0, 0, 1],
# [1, 0, 0, 1],
# [3, 2, 2, 1]])
# print(src, src.shape)
# src_mask = K.constant([[[1, 1, 1, 1]],
# [[1, 1, 1, 1]],
# [[1, 1, 1, 1]],
# [[1, 1, 1, 1]],
# [[1, 1, 1, 1]]]);
# print(src_mask, src_mask.shape)
# x = EmbeddingsK(d_model=d_model, vocab=size_dict)(src) # embedding on 12 dim for 7 tokens total.
# x = PositionalEncodingK(d_model=d_model, dropout_rate=0.)(x)
#
|
python
|
#!/usr/bin/env python
import argparse
import os
parser = argparse.ArgumentParser(description='splits query name output by HAP.py and builds table required for ABCENTH')
parser.add_argument('--table',default = None, help = 'table output by HAP.py')
parser.add_argument('--hmm_dir',default = None, help = "directory with all cluster hmms")
args = parser.parse_args()
if args.table:
for line in open(args.table):
fields = line.replace('\n','').replace('\r','').split('\t')
cluster = fields[0].split('exon')[0]
exon_number = fields[0].split('exon')[1].split('of')[0]
number_of_exons = fields[0].split('of')[1].split('phases')[0]
start_phase = fields[0].split('phases')[1].split('and')[0]
end_phase = fields[0].split('and')[1].split('.')[0]
aa_len = fields[12]
print('\t'.join([cluster] + fields[1:12] + [start_phase,end_phase,aa_len,exon_number,number_of_exons]))
elif args.hmm_dir:
for hmm_file in os.listdir(args.hmm_dir):
if hmm_file[-4:] == ".hmm" and not "fullLenForHMM" in hmm_file:
cluster = hmm_file.split('exon')[0]
exon_number = hmm_file.split('exon')[1].split('of')[0]
number_of_exons = hmm_file.split('of')[1].split('phases')[0]
start_phase = hmm_file.split('phases')[1].split('and')[0]
end_phase = hmm_file.split('and')[1].split('.')[0]
aa_len = open(args.hmm_dir + "/" + hmm_file).read().split('\n')[2].split()[1].replace('\r','')
print('\t'.join([cluster,exon_number,number_of_exons,start_phase,end_phase,aa_len,os.path.abspath(args.hmm_dir) + '/' + hmm_file]))
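# Illustrative example (hypothetical file name; format inferred from the parsing above):
#   "OR123exon2of4phases1and2.hmm" -> cluster="OR123", exon_number="2",
#   number_of_exons="4", start_phase="1", end_phase="2"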
|
python
|
#!/usr/bin/env python3
"""Three philosophers thinking and eating dumplings - deadlock happens"""
import time
from threading import Thread, Lock
dumplings = 20
class Philosopher(Thread):
def __init__(self, name: str, left_chopstick: Lock, right_chopstick: Lock) -> None:
super().__init__()
self.name = name
self.left_chopstick = left_chopstick
self.right_chopstick = right_chopstick
def run(self) -> None:
# using globally shared variable
global dumplings
while dumplings > 0:
self.left_chopstick.acquire()
print(f"{id(self.left_chopstick)} chopstick grabbed by {self.name}")
self.right_chopstick.acquire()
print(f"{id(self.right_chopstick)} chopstick grabbed by {self.name}")
if dumplings > 0:
dumplings -= 1
print(f"{self.name} eat a dumpling. Dumplings left: {dumplings}")
self.right_chopstick.release()
print(f"{id(self.right_chopstick)} chopstick released by {self.name}")
self.left_chopstick.release()
print(f"{id(self.left_chopstick)} chopstick released by {self.name}")
time.sleep(0.00001)
if __name__ == "__main__":
chopstick_a = Lock()
chopstick_b = Lock()
philosopher_1 = Philosopher("Philosopher #1", chopstick_a, chopstick_b)
philosopher_2 = Philosopher("Philosopher #2", chopstick_b, chopstick_a)
philosopher_1.start()
philosopher_2.start()
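
    # One standard fix (a sketch, not part of this deadlock demo): have every thread
    # acquire the two chopsticks in the same global order, which removes the circular
    # wait condition:
    #
    #     first, second = sorted((self.left_chopstick, self.right_chopstick), key=id)
    #     with first:
    #         with second:
    #             ...  # eat a dumpling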
|
python
|
#---------------------------------------------------------------------------
# Copyright 2013 The Open Source Electronic Health Record Agent
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#---------------------------------------------------------------------------
import sys
import os
import subprocess
import argparse
import re
# add the current directory to sys.path
SCRIPTS_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(SCRIPTS_DIR)
from string import Template
from LoggerManager import getTempLogFile, logger, initConsoleLogging
from PatchInfoParser import PatchInfo, installNameToDirName
from GitUtils import addChangeSet, commitChange, getGitRepoRevisionHash
"""
constants
"""
DEFAULT_OUTPUT_LOG_FILE_NAME = "MCompReposCommitter.log"
PATCH_SRC_WEB_LINK = "http://code.osehra.org/VistA.git/${type}/${hb}/${patch_dir}"
"""
class to commit all the changes under the Packages directory
in VistA-FOIA repository after patch(s) are applied and extracted.
"""
class MCompReposCommitter(object):
def __init__(self, vistAMRepo):
assert os.path.exists(vistAMRepo)
self._vistAMRepoDir = os.path.abspath(vistAMRepo)
self._packagesDir = os.path.join(self._vistAMRepoDir, 'Packages')
def commit(self, commitMsgFile):
self.__addChangeSet__()
self.__commit__(commitMsgFile)
def __addChangeSet__(self):
logger.info("Add change set")
#validChangeFileList = ["\*.zwr", "\*.m"]
addChangeSet(self._packagesDir)
def __commit__(self, commitMsgFile):
logger.info("Commit the change")
commitChange(commitMsgFile, self._packagesDir)
def generateCommitMsgFileByPatchInfo(patchInfo, commitMsgFile,
branch="HEAD", reposDir=None):
reposHash = getGitRepoRevisionHash(branch, reposDir)[:8]
with open(commitMsgFile, 'w') as output:
topicLine = "Install: %s" % patchInfo.installName
if patchInfo.multiBuildsList:
topicLine = "Install: %s" % (", ".join(patchInfo.multiBuildsList))
output.write("%s\n" % topicLine)
output.write("\nPatch Subject: %s" % patchInfo.subject)
output.write('\n')
output.write("Description:\n\n" + '\n'.join([str(x) for x in patchInfo.description]))
output.write('\n')
output.write('\n')
output.write('Use default answers for KIDS load/install questions.\n')
output.write('\n')
if patchInfo.isMultiBuilds: # special logic for multibuilds
buildLink, otherLinks = getWebLinkForPatchSourceMultiBuilds(patchInfo,
reposHash)
output.write('Multi-Build: %s\n' % buildLink)
for link in otherLinks:
if link:
output.write('Patch-Files: %s\n' % link)
else:
packageLink = getWebLinkForPatchSourceByFile(patchInfo.kidsFilePath,
reposHash)
output.write('Patch-Files: %s\n' % packageLink)
def getWebLinkForPatchSourceMultiBuilds(patchInfo, reposHash):
# find the package path from the patchInfo
buildLink = getWebLinkForPatchSourceByFile(patchInfo.kidsFilePath,
reposHash, fileType=True)
otherLink = []
for item in patchInfo.otherKidsInfoList:
if item[0]:
otherLink.append(getWebLinkForPatchSourceByFile(item[0], reposHash))
else:
otherLink.append(None)
return buildLink, otherLink
def getWebLinkForPatchSourceByFile(filePath, reposHash, fileType=False):
packageDir = os.path.dirname(filePath)
typeName = "tree"
if fileType:
typeName = "blob"
packageDir = filePath
packageDir = packageDir[packageDir.find('Packages'):]
packageDir = packageDir.replace('\\','/').replace(' ','+')
webLink = Template(PATCH_SRC_WEB_LINK)
packageLink = webLink.substitute(type=typeName,
patch_dir=packageDir,
hb="master")
return packageLink
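
# For example (illustrative only), a KIDS file located under
# "Packages/Lab Service/Patches/LR_5.2_334/" yields a tree link such as:
#   http://code.osehra.org/VistA.git/tree/master/Packages/Lab+Service/Patches/LR_5.2_334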
def testSinglePatchCommitMsg():
patchInfo = PatchInfo()
patchInfo.installName = "LR*5.2*334"
patchInfo.kidsFilePath = "C:/users/jason.li/git/VistA/Packages/"\
"Lab Service/Patches/LR_5.2_334/LR_52_334.KIDs.json"
commitMsgFile = getDefaultCommitMsgFileByPatchInfo(patchInfo)
print commitMsgFile
generateCommitMsgFileByPatchInfo(patchInfo, commitMsgFile,
"origin/master", SCRIPTS_DIR)
def testMultiBuildPatchCommitMsg():
patchInfo = PatchInfo()
patchInfo.installName = "HDI*1.0*7"
patchInfo.kidsFilePath = "C:/users/jason.li/git/VistA/Packages/"\
"MultiBuilds/LAB_LEDI_IV.KIDs.json"
patchInfo.kidsInfoPath = \
"C:/users/jason.li/git/VistA/Packages/Health Data and Informatics/"\
"Patches/HDI_1.0_7/HDI-1_SEQ-8_PAT-7.TXT"
patchInfo.kidsInfoSha1 = None
patchInfo.isMultiBuilds = True
patchInfo.multiBuildsList = ["HDI*1.0*7", "LR*5.2*350", "LA*5.2*74"]
patchInfo.otherKidsInfoList = [
["C:/users/jason.li/git/VistA/Packages/Lab Service/"\
"Patches/LR_5.2_350/LR-5P2_SEQ-332_PAT-350.TXT" , None],
["C:/users/jason.li/git/VistA/Packages/Automated Lab Instruments/"\
"Patches/LA_5.2_74/LA-5P2_SEQ-57_PAT-74.TXT", None],
]
commitMsgFile = getDefaultCommitMsgFileByPatchInfo(patchInfo)
generateCommitMsgFileByPatchInfo(patchInfo, commitMsgFile,
"origin/master", SCRIPTS_DIR)
def getDefaultCommitMsgFileByPatchInfo(patchInfo, dir=None):
outputFile = installNameToDirName(patchInfo.installName) + ".msg"
if dir is None:
return getTempLogFile(outputFile)
else:
return os.path.join(dir, outputFile)
def testMain():
testSinglePatchCommitMsg()
testMultiBuildPatchCommitMsg()
def main():
pass
if __name__ == '__main__':
main()
|
python
|
import tkinter
from time import strftime
top = tkinter.Tk()
top.title('Clock')
top.resizable(0, 0)
def time():
string = strftime('%H:%M:%S %p')
clockTime.config(text=string)
clockTime.after(1000, time)
clockTime = tkinter.Label(top, font=(
'courier new', 40,), background='black', foreground='white')
clockTime.pack(anchor='center')
time()
top.mainloop()
|
python
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os, common
import subprocess
from proton import *
from common import Skipped
class SslTest(common.Test):
def __init__(self, *args):
common.Test.__init__(self, *args)
def setup(self):
try:
self.server_domain = SSLDomain(SSLDomain.MODE_SERVER)
self.client_domain = SSLDomain(SSLDomain.MODE_CLIENT)
except SSLUnavailable, e:
raise Skipped(e)
def teardown(self):
self.server_domain = None
self.client_domain = None
class SslTestConnection(object):
""" Represents a single SSL connection.
"""
def __init__(self, domain=None, session_details=None):
try:
self.ssl = None
self.domain = domain
self.transport = Transport()
self.connection = Connection()
self.transport.bind(self.connection)
if domain:
self.ssl = SSL( self.transport, self.domain, session_details )
except SSLUnavailable, e:
raise Skipped(e)
def _pump(self, ssl_client, ssl_server, buffer_size=1024):
""" Allow two SslTestConnections to transfer data until done.
"""
out_client_leftover_by_server = ""
out_server_leftover_by_client = ""
i = 0
while True:
out_client = out_client_leftover_by_server + (ssl_client.transport.output(buffer_size) or "")
out_server = out_server_leftover_by_client + (ssl_server.transport.output(buffer_size) or "")
if out_client:
number_server_consumed = ssl_server.transport.input(out_client)
if number_server_consumed is None:
# special None return value means input is closed so discard the leftovers
out_client_leftover_by_server = ""
else:
out_client_leftover_by_server = out_client[number_server_consumed:]
if out_server:
number_client_consumed = ssl_client.transport.input(out_server)
if number_client_consumed is None:
# special None return value means input is closed so discard the leftovers
out_server_leftover_by_client = ""
else:
out_server_leftover_by_client = out_server[number_client_consumed:]
if not out_client and not out_server: break
i = i + 1
def _testpath(self, file):
""" Set the full path to the certificate,keyfile, etc. for the test.
"""
return os.path.join(os.path.dirname(__file__),
"ssl_db/%s" % file)
def _do_handshake(self, client, server):
""" Attempt to connect client to server. Will throw a TransportException if the SSL
handshake fails.
"""
client.connection.open()
server.connection.open()
self._pump(client, server)
assert client.ssl.protocol_name() is not None
client.connection.close()
server.connection.close()
self._pump(client, server)
def test_defaults(self):
""" By default, both the server and the client support anonymous
ciphers - they should connect without need for a certificate.
"""
server = SslTest.SslTestConnection( self.server_domain )
client = SslTest.SslTestConnection( self.client_domain )
# check that no SSL connection exists
assert not server.ssl.cipher_name()
assert not client.ssl.protocol_name()
#client.transport.trace(Transport.TRACE_DRV)
#server.transport.trace(Transport.TRACE_DRV)
client.connection.open()
server.connection.open()
self._pump( client, server )
# now SSL should be active
assert server.ssl.cipher_name() is not None
assert client.ssl.protocol_name() is not None
client.connection.close()
server.connection.close()
self._pump( client, server )
def test_ssl_with_small_buffer(self):
self.server_domain.set_credentials(self._testpath("server-certificate.pem"),
self._testpath("server-private-key.pem"),
"server-password")
self.client_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.client_domain.set_peer_authentication( SSLDomain.VERIFY_PEER )
server = SslTest.SslTestConnection( self.server_domain )
client = SslTest.SslTestConnection( self.client_domain )
client.connection.open()
server.connection.open()
small_buffer_size = 1
self._pump( client, server, small_buffer_size )
assert client.ssl.protocol_name() is not None
client.connection.close()
server.connection.close()
self._pump( client, server )
def test_server_certificate(self):
""" Test that anonymous clients can still connect to a server that has
a certificate configured.
"""
self.server_domain.set_credentials(self._testpath("server-certificate.pem"),
self._testpath("server-private-key.pem"),
"server-password")
server = SslTest.SslTestConnection( self.server_domain )
client = SslTest.SslTestConnection( self.client_domain )
client.connection.open()
server.connection.open()
self._pump( client, server )
assert client.ssl.protocol_name() is not None
client.connection.close()
server.connection.close()
self._pump( client, server )
def test_server_authentication(self):
""" Simple SSL connection with authentication of the server
"""
self.server_domain.set_credentials(self._testpath("server-certificate.pem"),
self._testpath("server-private-key.pem"),
"server-password")
self.client_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.client_domain.set_peer_authentication( SSLDomain.VERIFY_PEER )
server = SslTest.SslTestConnection( self.server_domain )
client = SslTest.SslTestConnection( self.client_domain )
client.connection.open()
server.connection.open()
self._pump( client, server )
assert client.ssl.protocol_name() is not None
client.connection.close()
server.connection.close()
self._pump( client, server )
def test_client_authentication(self):
""" Force the client to authenticate.
"""
# note: when requesting client auth, the server _must_ send its
# certificate, so make sure we configure one!
self.server_domain.set_credentials(self._testpath("server-certificate.pem"),
self._testpath("server-private-key.pem"),
"server-password")
self.server_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.server_domain.set_peer_authentication( SSLDomain.VERIFY_PEER,
self._testpath("ca-certificate.pem") )
server = SslTest.SslTestConnection( self.server_domain )
# give the client a certificate, but let's not require server authentication
self.client_domain.set_credentials(self._testpath("client-certificate.pem"),
self._testpath("client-private-key.pem"),
"client-password")
self.client_domain.set_peer_authentication( SSLDomain.ANONYMOUS_PEER )
client = SslTest.SslTestConnection( self.client_domain )
client.connection.open()
server.connection.open()
self._pump( client, server )
assert client.ssl.protocol_name() is not None
client.connection.close()
server.connection.close()
self._pump( client, server )
def test_client_authentication_fail_bad_cert(self):
""" Ensure that the server can detect a bad client certificate.
"""
# note: when requesting client auth, the server _must_ send its
# certificate, so make sure we configure one!
self.server_domain.set_credentials(self._testpath("server-certificate.pem"),
self._testpath("server-private-key.pem"),
"server-password")
self.server_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.server_domain.set_peer_authentication( SSLDomain.VERIFY_PEER,
self._testpath("ca-certificate.pem") )
server = SslTest.SslTestConnection( self.server_domain )
self.client_domain.set_credentials(self._testpath("bad-server-certificate.pem"),
self._testpath("bad-server-private-key.pem"),
"server-password")
self.client_domain.set_peer_authentication( SSLDomain.ANONYMOUS_PEER )
client = SslTest.SslTestConnection( self.client_domain )
client.connection.open()
server.connection.open()
try:
self._pump( client, server )
assert False, "Server failed to reject bad certificate."
except TransportException, e:
pass
def test_client_authentication_fail_no_cert(self):
""" Ensure that the server will fail a client that does not provide a
certificate.
"""
# note: when requesting client auth, the server _must_ send its
# certificate, so make sure we configure one!
self.server_domain.set_credentials(self._testpath("server-certificate.pem"),
self._testpath("server-private-key.pem"),
"server-password")
self.server_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.server_domain.set_peer_authentication( SSLDomain.VERIFY_PEER,
self._testpath("ca-certificate.pem") )
server = SslTest.SslTestConnection( self.server_domain )
self.client_domain.set_peer_authentication( SSLDomain.ANONYMOUS_PEER )
client = SslTest.SslTestConnection( self.client_domain )
client.connection.open()
server.connection.open()
try:
self._pump( client, server )
assert False, "Server failed to reject bad certificate."
except TransportException, e:
pass
def test_client_server_authentication(self):
""" Require both client and server to mutually identify themselves.
"""
self.server_domain.set_credentials(self._testpath("server-certificate.pem"),
self._testpath("server-private-key.pem"),
"server-password")
self.server_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.server_domain.set_peer_authentication( SSLDomain.VERIFY_PEER,
self._testpath("ca-certificate.pem") )
self.client_domain.set_credentials(self._testpath("client-certificate.pem"),
self._testpath("client-private-key.pem"),
"client-password")
self.client_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.client_domain.set_peer_authentication( SSLDomain.VERIFY_PEER )
server = SslTest.SslTestConnection( self.server_domain )
client = SslTest.SslTestConnection( self.client_domain )
client.connection.open()
server.connection.open()
self._pump( client, server )
assert client.ssl.protocol_name() is not None
client.connection.close()
server.connection.close()
self._pump( client, server )
def test_server_only_authentication(self):
""" Client verifies server, but server does not verify client.
"""
self.server_domain.set_credentials(self._testpath("server-certificate.pem"),
self._testpath("server-private-key.pem"),
"server-password")
self.server_domain.set_peer_authentication( SSLDomain.ANONYMOUS_PEER )
self.client_domain.set_credentials(self._testpath("client-certificate.pem"),
self._testpath("client-private-key.pem"),
"client-password")
self.client_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.client_domain.set_peer_authentication( SSLDomain.VERIFY_PEER )
server = SslTest.SslTestConnection( self.server_domain )
client = SslTest.SslTestConnection( self.client_domain )
client.connection.open()
server.connection.open()
self._pump( client, server )
assert client.ssl.protocol_name() is not None
client.connection.close()
server.connection.close()
self._pump( client, server )
def test_bad_server_certificate(self):
""" A server with a self-signed certificate that is not trusted by the
client. The client should reject the server.
"""
self.server_domain.set_credentials(self._testpath("bad-server-certificate.pem"),
self._testpath("bad-server-private-key.pem"),
"server-password")
self.server_domain.set_peer_authentication( SSLDomain.ANONYMOUS_PEER )
self.client_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.client_domain.set_peer_authentication( SSLDomain.VERIFY_PEER )
server = SslTest.SslTestConnection( self.server_domain )
client = SslTest.SslTestConnection( self.client_domain )
client.connection.open()
server.connection.open()
try:
self._pump( client, server )
assert False, "Client failed to reject bad certificate."
except TransportException, e:
pass
del server
del client
# now re-try with a client that does not require peer verification
self.client_domain.set_peer_authentication( SSLDomain.ANONYMOUS_PEER )
client = SslTest.SslTestConnection( self.client_domain )
server = SslTest.SslTestConnection( self.server_domain )
client.connection.open()
server.connection.open()
self._pump( client, server )
assert client.ssl.protocol_name() is not None
client.connection.close()
server.connection.close()
self._pump( client, server )
def test_allow_unsecured_client(self):
""" Server allows an unsecured client to connect if configured.
"""
self.server_domain.set_credentials(self._testpath("server-certificate.pem"),
self._testpath("server-private-key.pem"),
"server-password")
self.server_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.server_domain.set_peer_authentication( SSLDomain.VERIFY_PEER,
self._testpath("ca-certificate.pem") )
# allow unsecured clients on this connection
self.server_domain.allow_unsecured_client()
server = SslTest.SslTestConnection( self.server_domain )
# non-ssl connection
client = SslTest.SslTestConnection()
client.connection.open()
server.connection.open()
self._pump( client, server )
assert server.ssl.protocol_name() is None
client.connection.close()
server.connection.close()
self._pump( client, server )
def test_disallow_unsecured_client(self):
""" Non-SSL Client is disallowed from connecting to server.
"""
self.server_domain.set_credentials(self._testpath("server-certificate.pem"),
self._testpath("server-private-key.pem"),
"server-password")
self.server_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.server_domain.set_peer_authentication( SSLDomain.ANONYMOUS_PEER )
server = SslTest.SslTestConnection( self.server_domain )
# non-ssl connection
client = SslTest.SslTestConnection()
client.connection.open()
server.connection.open()
try:
self._pump( client, server )
assert False, "Server did not reject client as expected."
except TransportException:
pass
def test_session_resume(self):
""" Test resume of client session.
"""
self.server_domain.set_credentials(self._testpath("server-certificate.pem"),
self._testpath("server-private-key.pem"),
"server-password")
self.server_domain.set_peer_authentication( SSLDomain.ANONYMOUS_PEER )
self.client_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.client_domain.set_peer_authentication( SSLDomain.VERIFY_PEER )
# details will be used in initial and subsequent connections to allow session to be resumed
initial_session_details = SSLSessionDetails("my-session-id")
server = SslTest.SslTestConnection( self.server_domain )
client = SslTest.SslTestConnection( self.client_domain, initial_session_details )
# bring up the connection and store its state
client.connection.open()
server.connection.open()
self._pump( client, server )
assert client.ssl.protocol_name() is not None
# cleanly shutdown the connection
client.connection.close()
server.connection.close()
self._pump( client, server )
# destroy the existing clients
del client
del server
# now create a new set of connections, use last session id
server = SslTest.SslTestConnection( self.server_domain )
# provide the details of the last session, allowing it to be resumed
client = SslTest.SslTestConnection( self.client_domain, initial_session_details )
#client.transport.trace(Transport.TRACE_DRV)
#server.transport.trace(Transport.TRACE_DRV)
client.connection.open()
server.connection.open()
self._pump( client, server )
assert server.ssl.protocol_name() is not None
if(LANGUAGE=="C"):
assert client.ssl.resume_status() == SSL.RESUME_REUSED
else:
# Java gives no way to check whether a previous session has been resumed
pass
client.connection.close()
server.connection.close()
self._pump( client, server )
# now try to resume using an unknown session-id, expect resume to fail
# and a new session is negotiated
del client
del server
server = SslTest.SslTestConnection( self.server_domain )
client = SslTest.SslTestConnection( self.client_domain, SSLSessionDetails("some-other-session-id") )
client.connection.open()
server.connection.open()
self._pump( client, server )
assert server.ssl.protocol_name() is not None
if(LANGUAGE=="C"):
assert client.ssl.resume_status() == SSL.RESUME_NEW
client.connection.close()
server.connection.close()
self._pump( client, server )
def test_multiple_sessions(self):
""" Test multiple simultaineous active SSL sessions with bi-directional
certificate verification, shared across two domains.
"""
self.server_domain.set_credentials(self._testpath("server-certificate.pem"),
self._testpath("server-private-key.pem"),
"server-password")
self.server_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.server_domain.set_peer_authentication( SSLDomain.VERIFY_PEER,
self._testpath("ca-certificate.pem") )
self.client_domain.set_credentials(self._testpath("client-certificate.pem"),
self._testpath("client-private-key.pem"),
"client-password")
self.client_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.client_domain.set_peer_authentication( SSLDomain.VERIFY_PEER )
max_count = 100
sessions = [(SslTest.SslTestConnection( self.server_domain ),
SslTest.SslTestConnection( self.client_domain )) for x in
range(max_count)]
for s in sessions:
s[0].connection.open()
self._pump( s[0], s[1] )
for s in sessions:
s[1].connection.open()
self._pump( s[1], s[0] )
assert s[0].ssl.cipher_name() is not None
assert s[1].ssl.cipher_name() == s[0].ssl.cipher_name()
for s in sessions:
s[1].connection.close()
self._pump( s[0], s[1] )
for s in sessions:
s[0].connection.close()
self._pump( s[1], s[0] )
def test_server_hostname_authentication(self):
""" Test authentication of the names held in the server's certificate
against various configured hostnames.
"""
# Check the CommonName matches (case insensitive).
# Assumes certificate contains "CN=A1.Good.Server.domain.com"
self.server_domain.set_credentials(self._testpath("server-certificate.pem"),
self._testpath("server-private-key.pem"),
"server-password")
self.client_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.client_domain.set_peer_authentication( SSLDomain.VERIFY_PEER_NAME )
server = SslTest.SslTestConnection( self.server_domain )
client = SslTest.SslTestConnection( self.client_domain )
client.ssl.peer_hostname = "a1.good.server.domain.com"
assert client.ssl.peer_hostname == "a1.good.server.domain.com"
self._do_handshake( client, server )
del server
del client
self.teardown()
# Should fail on CN name mismatch:
self.setup()
self.server_domain.set_credentials(self._testpath("server-certificate.pem"),
self._testpath("server-private-key.pem"),
"server-password")
self.client_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.client_domain.set_peer_authentication( SSLDomain.VERIFY_PEER_NAME )
server = SslTest.SslTestConnection( self.server_domain )
client = SslTest.SslTestConnection( self.client_domain )
client.ssl.peer_hostname = "A1.Good.Server.domain.comX"
try:
self._do_handshake( client, server )
assert False, "Expected connection to fail due to hostname mismatch"
except TransportException:
pass
del server
del client
self.teardown()
# Wildcarded Certificate
# Assumes:
# 1) certificate contains Server Alternate Names:
# "alternate.name.one.com" and "another.name.com"
# 2) certificate has wildcarded CommonName "*.prefix*.domain.com"
#
# Pass: match an alternate
self.setup()
self.server_domain.set_credentials(self._testpath("server-wc-certificate.pem"),
self._testpath("server-wc-private-key.pem"),
"server-password")
self.client_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.client_domain.set_peer_authentication( SSLDomain.VERIFY_PEER_NAME )
server = SslTest.SslTestConnection( self.server_domain )
client = SslTest.SslTestConnection( self.client_domain )
client.ssl.peer_hostname = "alternate.Name.one.com"
self._do_handshake( client, server )
del client
del server
self.teardown()
# Pass: match an alternate
self.setup()
self.server_domain.set_credentials(self._testpath("server-wc-certificate.pem"),
self._testpath("server-wc-private-key.pem"),
"server-password")
self.client_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.client_domain.set_peer_authentication( SSLDomain.VERIFY_PEER_NAME )
server = SslTest.SslTestConnection( self.server_domain )
client = SslTest.SslTestConnection( self.client_domain )
client.ssl.peer_hostname = "ANOTHER.NAME.COM"
self._do_handshake(client, server)
del client
del server
self.teardown()
# Pass: match the pattern
self.setup()
self.server_domain.set_credentials(self._testpath("server-wc-certificate.pem"),
self._testpath("server-wc-private-key.pem"),
"server-password")
self.client_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.client_domain.set_peer_authentication( SSLDomain.VERIFY_PEER_NAME )
server = SslTest.SslTestConnection( self.server_domain )
client = SslTest.SslTestConnection( self.client_domain )
client.ssl.peer_hostname = "SOME.PREfix.domain.COM"
self._do_handshake( client, server )
del client
del server
self.teardown()
# Pass: match the pattern
self.setup()
self.server_domain.set_credentials(self._testpath("server-wc-certificate.pem"),
self._testpath("server-wc-private-key.pem"),
"server-password")
self.client_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.client_domain.set_peer_authentication( SSLDomain.VERIFY_PEER_NAME )
server = SslTest.SslTestConnection( self.server_domain )
client = SslTest.SslTestConnection( self.client_domain )
client.ssl.peer_hostname = "FOO.PREfixZZZ.domain.com"
self._do_handshake( client, server )
del client
del server
self.teardown()
# Fail: must match prefix on wildcard
self.setup()
self.server_domain.set_credentials(self._testpath("server-wc-certificate.pem"),
self._testpath("server-wc-private-key.pem"),
"server-password")
self.client_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.client_domain.set_peer_authentication( SSLDomain.VERIFY_PEER_NAME )
server = SslTest.SslTestConnection( self.server_domain )
client = SslTest.SslTestConnection( self.client_domain )
client.ssl.peer_hostname = "FOO.PREfi.domain.com"
try:
self._do_handshake( client, server )
assert False, "Expected connection to fail due to hostname mismatch"
except TransportException:
pass
del server
del client
self.teardown()
# Fail: leading wildcards are not optional
self.setup()
self.server_domain.set_credentials(self._testpath("server-wc-certificate.pem"),
self._testpath("server-wc-private-key.pem"),
"server-password")
self.client_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.client_domain.set_peer_authentication( SSLDomain.VERIFY_PEER_NAME )
server = SslTest.SslTestConnection( self.server_domain )
client = SslTest.SslTestConnection( self.client_domain )
client.ssl.peer_hostname = "PREfix.domain.COM"
try:
self._do_handshake( client, server )
assert False, "Expected connection to fail due to hostname mismatch"
except TransportException:
pass
self.teardown()
|
python
|
# SPDX-FileCopyrightText: 2021 Gabriel Lisaca <[email protected]>
#
# SPDX-License-Identifier: Apache-2.0
import logging
import pytest
@pytest.fixture
def placeholder_elvis_name():
return "placeholder"
@pytest.fixture
def placeholder_domain():
return "example.com"
@pytest.fixture
def placeholder_url(placeholder_domain):
return f"https://{placeholder_domain}"
@pytest.fixture
def caplog_cli_error(caplog):
caplog.set_level(logging.CRITICAL)
return caplog
|
python
|
from exopy.tasks.api import (InstrumentTask)
from atom.api import Float, Unicode, Str, set_default
from qm.qua import *
class ResumeProgramTask(InstrumentTask):
""" Resumes a paused program.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
def perform(self):
self.driver.resume()
|
python
|
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the utopianTree function below.
def utopianTree(n):
value = 1
for i in range(n+1):
if i%2 == 0 and i > 0:
value += 1
if i%2 != 0 and i > 0:
value *= 2
return value
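
# Equivalent closed form (a hypothetical alternative, not required by the exercise):
# after n cycles the height is 2**(n//2 + 1) - 1 for even n and twice that for odd n.
def utopian_tree_closed_form(n):
    """Closed-form variant of utopianTree (illustrative only)."""
    even_height = 2 ** (n // 2 + 1) - 1
    return even_height if n % 2 == 0 else 2 * even_height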
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
t = int(input())
for t_itr in range(t):
n = int(input())
result = utopianTree(n)
fptr.write(str(result) + '\n')
fptr.close()
|
python
|
import os, sys, imaplib, rfc822, re, StringIO
import RPi.GPIO as GPIO
import time
server ='mail.xxx.us'
username='[email protected]'
password='xxx'
GPIO.setmode(GPIO.BOARD)
GREEN_LED = 22
RED_LED = 7
GPIO.setup(GREEN_LED, GPIO.OUT)
GPIO.setup(RED_LED, GPIO.OUT)
M = imaplib.IMAP4_SSL(server)
M.login(username, password)
M.select()
try:
while 1:
print "checking email"
typ, data = M.search(None, '(UNSEEN SUBJECT "PIFI MESSAGE")')
for num in data[0].split():
typ, data = M.fetch(num, '(RFC822)')
#print 'Message %s\n%s\n' % (num, data[0][1])
redon = re.search( "RED ON",
data[0][1],
re.MULTILINE|re.DOTALL )
greenon = re.search( "GREEN ON",
data[0][1],
re.MULTILINE|re.DOTALL )
redoff = re.search( "RED OFF",
data[0][1],
re.MULTILINE|re.DOTALL )
greenoff = re.search( "GREEN OFF",
data[0][1],
re.MULTILINE|re.DOTALL )
if redon:
GPIO.output(RED_LED, True)
print "red on"
if greenon:
GPIO.output(GREEN_LED, True)
print "green on"
if redoff:
GPIO.output(RED_LED, False)
print "red off"
if greenoff:
GPIO.output(GREEN_LED, False)
print "green off"
time.sleep(120)
except KeyboardInterrupt:
GPIO.cleanup()
pass
M.close()
M.logout()
|
python
|
from engineauth import models
from engineauth.middleware import AuthMiddleware
import test_base
import webapp2
from webob import Request
__author__ = '[email protected] (Kyle Finley)'
app = AuthMiddleware(webapp2.WSGIApplication())
class TestAppEngineOpenIDStrategy(test_base.BaseTestCase):
def setUp(self):
super(TestAppEngineOpenIDStrategy, self).setUp()
def test_handle_request(self):
# No User or Profile
p_count0 = models.UserProfile.query().count()
u_count0 = models.User.query().count()
self.assertEqual(p_count0, 0)
self.assertEqual(u_count0, 0)
# Create New User
provider = 'gmail.com'
req = Request.blank('/auth/appengine_openid?provider=' + provider)
resp = req.get_response(app)
self.assertEqual(resp.location, 'https://www.google.com/accounts/'
'Login?continue=http%3A//localhost/'
'auth/appengine_openid/callback')
# # Retrieve user from datastore
# user = models.User.get_by_auth_id(auth_id)
# self.assertIn(auth_id, user.auth_ids)
# self.assertTrue(user._has_email(email))
# # Retrieve profile from datastore
# profile = models.UserProfile.get_by_id(auth_id)
# self.assertTrue(profile is not None)
# p_count1 = models.UserProfile.query().count()
# u_count1 = models.User.query().count()
# self.assertEqual(p_count1, 1)
# self.assertEqual(u_count1, 1)
# # Login User
# req = Request.blank('/auth/appengine_openid?provider=' + provider)
# resp = req.get_response(app)
# # Make sure a new User is not created.
# p_count2 = models.UserProfile.query().count()
# u_count2 = models.User.query().count()
# self.assertEqual(p_count2, 1)
# self.assertEqual(u_count2, 1)
|
python
|
# -*- coding: utf-8 -*-
import os
import datetime
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.multiprocessing as mp
from parameters import get_args
import pcode.create_dataset as create_dataset
import pcode.create_optimizer as create_optimizer
import pcode.create_metrics as create_metrics
import pcode.create_model as create_model
import pcode.create_scheduler as create_scheduler
import pcode.utils.topology as topology
import pcode.utils.checkpoint as checkpoint
import pcode.utils.op_paths as op_paths
import pcode.utils.stat_tracker as stat_tracker
import pcode.utils.logging as logging
from pcode.utils.timer import Timer
def init_distributed_world(conf, backend):
if backend == "mpi":
dist.init_process_group("mpi")
elif backend == "nccl" or backend == "gloo":
# init the process group.
_tmp_path = os.path.join(conf.checkpoint, "tmp", conf.timestamp)
op_paths.build_dirs(_tmp_path)
dist_init_file = os.path.join(_tmp_path, "dist_init")
torch.distributed.init_process_group(
backend=backend,
init_method="file://" + os.path.abspath(dist_init_file),
timeout=datetime.timedelta(seconds=120),
world_size=conf.n_mpi_process,
rank=conf.local_rank,
)
else:
raise NotImplementedError
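# For reference, a minimal standalone sketch of the same file-based rendezvous
# (illustrative only, not part of this script; assumes two processes launched by
# hand with RANK=0 and RANK=1 exported in the environment):
#
#   import os
#   import torch.distributed as dist
#   dist.init_process_group("gloo", init_method="file:///tmp/dist_init",
#                           world_size=2, rank=int(os.environ["RANK"]))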
def main(conf):
try:
init_distributed_world(conf, backend=conf.backend)
conf.distributed = True and conf.n_mpi_process > 1
except AttributeError as e:
print(f"failed to init the distributed world: {e}.")
conf.distributed = False
# init the config.
init_config(conf)
# define the timer for different operations.
# if we choose the `train_fast` mode, then we will not track the time.
conf.timer = Timer(
verbosity_level=1 if conf.track_time and not conf.train_fast else 0,
log_fn=conf.logger.log_metric,
on_cuda=conf.on_cuda,
)
# create dataset.
data_loader = create_dataset.define_dataset(conf, force_shuffle=True)
# create model
model = create_model.define_model(conf, data_loader=data_loader)
# define the optimizer.
optimizer = create_optimizer.define_optimizer(conf, model)
# define the lr scheduler.
scheduler = create_scheduler.Scheduler(conf)
# add model with data-parallel wrapper.
if conf.graph.on_cuda:
if conf.n_sub_process > 1:
model = torch.nn.DataParallel(model, device_ids=conf.graph.device)
# (optional) reload checkpoint
try:
checkpoint.maybe_resume_from_checkpoint(conf, model, optimizer, scheduler)
except RuntimeError as e:
conf.logger.log(f"Resume Error: {e}")
conf.resumed = False
    # train and evaluate model.
if "rnn_lm" in conf.arch:
from pcode.distributed_running_nlp import train_and_validate
# safety check.
assert (
conf.n_sub_process == 1
), "our current data-parallel wrapper does not support RNN."
# define the criterion and metrics.
criterion = nn.CrossEntropyLoss(reduction="mean")
criterion = criterion.cuda() if conf.graph.on_cuda else criterion
metrics = create_metrics.Metrics(
model.module if "DataParallel" == model.__class__.__name__ else model,
task="language_modeling",
)
# define the best_perf tracker, either empty or from the checkpoint.
best_tracker = stat_tracker.BestPerf(
best_perf=None if "best_perf" not in conf else conf.best_perf,
larger_is_better=False,
)
scheduler.set_best_tracker(best_tracker)
# get train_and_validate_func
train_and_validate_fn = train_and_validate
else:
from pcode.distributed_running_cv import train_and_validate
# define the criterion and metrics.
criterion = nn.CrossEntropyLoss(reduction="mean")
criterion = criterion.cuda() if conf.graph.on_cuda else criterion
metrics = create_metrics.Metrics(
model.module if "DataParallel" == model.__class__.__name__ else model,
task="classification",
)
# define the best_perf tracker, either empty or from the checkpoint.
best_tracker = stat_tracker.BestPerf(
best_perf=None if "best_perf" not in conf else conf.best_perf,
larger_is_better=True,
)
scheduler.set_best_tracker(best_tracker)
# get train_and_validate_func
train_and_validate_fn = train_and_validate
# save arguments to disk.
checkpoint.save_arguments(conf)
# start training.
train_and_validate_fn(
conf,
model=model,
criterion=criterion,
scheduler=scheduler,
optimizer=optimizer,
metrics=metrics,
data_loader=data_loader,
)
def init_config(conf):
# define the graph for the computation.
cur_rank = dist.get_rank() if conf.distributed else 0
conf.graph = topology.define_graph_topology(
graph_topology=conf.graph_topology,
world=conf.world,
n_mpi_process=conf.n_mpi_process, # the # of total main processes.
# the # of subprocess for each main process.
n_sub_process=conf.n_sub_process,
comm_device=conf.comm_device,
on_cuda=conf.on_cuda,
rank=cur_rank,
)
conf.is_centralized = conf.graph_topology == "complete"
# re-configure batch_size if sub_process > 1.
if conf.n_sub_process > 1:
conf.batch_size = conf.batch_size * conf.n_sub_process
# configure cuda related.
if conf.graph.on_cuda:
assert torch.cuda.is_available()
torch.manual_seed(conf.manual_seed)
torch.cuda.manual_seed(conf.manual_seed)
torch.cuda.set_device(conf.graph.device[0])
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = True if conf.train_fast else False
# define checkpoint for logging.
checkpoint.init_checkpoint(conf)
# configure logger.
conf.logger = logging.Logger(conf.checkpoint_dir)
# display the arguments' info.
logging.display_args(conf)
if __name__ == "__main__":
conf = get_args()
if conf.optimizer == "parallel_choco":
mp.set_start_method("forkserver", force=True)
# mp.set_start_method("spawn", force=True)
mp.set_sharing_strategy("file_system")
main(conf)
|
python
|
import vigra
from init_exp import meta
from volumina_viewer import volumina_n_layer
def view_train():
ds = meta.get_dataset('snemi3d_train')
pmap = vigra.readHDF5('/home/constantin/Downloads/traininf-cst-inv.h5', 'data')
volumina_n_layer([ds.inp(0), ds.inp(1), pmap, ds.seg(0),ds.gt()])
def view_test(res1, res2):
ds = meta.get_dataset('snemi3d_test')
#volumina_n_layer([ds.inp(0), ds.inp(1), pm_new, pm_new1], ['raw','pm_old', 'pm_new1', 'pm_new2'])
#else:
volumina_n_layer([ds.inp(0), ds.inp(1), ds.seg(0), res1, res2], ['raw','pmap','ws','curr_res','best_res'])
def view_test_pmaps(new_pmaps):
ds = meta.get_dataset('snemi3d_test')
raw = ds.inp(0)
pm_old = ds.inp(1)
pm_2d = vigra.readHDF5('/home/constantin/Work/neurodata_hdd/snemi3d_data/probabilities/pmaps_icv2_test.h5', 'data')
data = [raw, pm_old, pm_2d]
data.extend(new_pmaps)
labels = ['raw', '3d_v2', '2d', '3d_v3_i1', '3d_v3_i2', '3d_v3_i3', 'ensemble']
volumina_n_layer(data, labels)
if __name__ == '__main__':
meta.load()
res1 = vigra.readHDF5('/home/constantin/Work/multicut_pipeline/software/multicut_exp/rebuttal/snemi/snemi_ultimate_seglmc_myel_myelmerged.h5', 'data')
#res2 = vigra.readHDF5('/home/constantin/Work/multicut_pipeline/software/multicut_exp/rebuttal/snemi/snemi_final_segmc_myel.h5', 'data')
res3 = vigra.readHDF5('/home/constantin/Work/multicut_pipeline/software/multicut_exp/rebuttal/snemi/round3/snemi_final_seglmc_myel_myelmerged.h5', 'data')
view_test(res1, res3)
|
python
|
#Test the frame by frame image output for image classification using a previous classifier
from azure.cognitiveservices.vision.customvision.training import CustomVisionTrainingClient
from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient
from azure.cognitiveservices.vision.customvision.training.models import ImageFileCreateBatch, ImageFileCreateEntry, Region
from msrest.authentication import ApiKeyCredentials
import os, time, uuid
import pandas as pd
import glob
# Replace with valid values
ENDPOINT = " " #Use the Cognitive Services endpoint
training_key = " " #Take from the second resource which is used at the project inception stage
prediction_key = " " #Take this from the resource created for the Prediction Cog Service
prediction_resource_id = " "
# use the entire string generated by the Prediction Endpoint
credentials = ApiKeyCredentials(in_headers={"Training-key": training_key})
trainer = CustomVisionTrainingClient(ENDPOINT, credentials)
prediction_credentials = ApiKeyCredentials(in_headers={"Prediction-key": prediction_key})
predictor = CustomVisionPredictionClient(ENDPOINT, prediction_credentials)
os.chdir(' ') #Put the local folder where the code module resides
#Additional settings
publish_iteration_name = " " #Use the name of the model, not the iteration name
project_name = " " #Use the project name
projectid = "" #Use the project id. Run the code in lines 37 through 40 to get the project id
base_image_location = os.path.join(os.path.dirname(__file__), "Images")
#You can use any sub folder in the main folder but change the name of the folder where the images reside that need image classification
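# Hedged helper (not in the original script): if the project id is unknown, it can
# be looked up by listing the projects on the training resource, e.g.:
#   for p in trainer.get_projects():
#       print(p.name, p.id)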
#Get the project credentials
project = trainer.get_project(projectid)
#Getting the tag
tags = trainer.get_tags(projectid)
#for i in tags:
# print(i)
#print(project.id)
#print(tags)
#Running the classification
#Testing the prediction end point
# Now there is a trained endpoint that can be used to make a prediction
prediction_credentials = ApiKeyCredentials(in_headers={"Prediction-key": prediction_key})
predictor = CustomVisionPredictionClient(ENDPOINT, prediction_credentials)
output_folder = "" #Put the folder where the csv files will be placed.
directory = ' ' #Use the folder where the images are
for filename in os.listdir(directory):
if filename.endswith("jpg"):
with open(os.path.join(directory, filename), "rb") as image_contents:
results = predictor.classify_image(
project.id, publish_iteration_name, image_contents.read())
# Display the results.
with open('%s.csv' %filename, 'wt') as csv:
os.chdir('') #Use the folder where the csv files need to be written
csv.write("ImageName,TagName,Probability\n")
for prediction in results.predictions:
#for tag
csv.write((filename + "," + prediction.tag_name +
", {0:.2f}%".format(prediction.probability * 100)+"\n")
)
# Once the individual files are generated, using glob to combine them into one corpus
extension = 'csv'
all_filenames = [i for i in glob.glob('*.{}'.format(extension))]
combined_csv = pd.concat([pd.read_csv(f) for f in all_filenames ])
os.chdir(' ') #Use the folder where the final combined file needs to reside.
combined_csv.to_csv('ImageClassificationFinal.csv', index=False) #Saving our combined csv data as a new file!
#Remove all csv files created individually
directory = " " #Folder where the csv files are there
files_in_directory = os.listdir(directory)
filtered_files = [file for file in files_in_directory if file.endswith(".csv")]
for file in filtered_files:
path_to_file = os.path.join(directory, file)
os.remove(path_to_file)
|
python
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.loss import _WeightedLoss, _Loss
def one_hot(class_labels, num_classes=None):
    if num_classes is None:
return torch.zeros(len(class_labels), class_labels.max()+1).scatter_(1, class_labels.unsqueeze(1), 1.)
else:
return torch.zeros(len(class_labels), num_classes).scatter_(1, class_labels.unsqueeze(1), 1.)
class CrossEntropyLoss(nn.CrossEntropyLoss):
pass
class MSELoss(nn.MSELoss):
pass
class KLDivLoss(_Loss):
def __init__(self):
super(KLDivLoss, self).__init__()
def forward(self,pert,dp):
return F.kl_div(pert.softmax(dim=-1).log(), dp.softmax(dim=-1).repeat(len(pert),1), reduction='batchmean')
class CoSLoss(_WeightedLoss):
def __init__(self):
super(CoSLoss, self).__init__()
self.name='CoS'
def forward(self, logit_i_p, logit_p, target=None):
if target is not None: # label_dependent (deprecated)
target_logits = (target * logit_i_p).sum(1)
loss = - 0.05*target_logits - torch.cosine_similarity(logit_p,logit_i_p)
else: # label_free
loss = 1-torch.cosine_similarity(logit_p, logit_i_p)
return torch.mean(loss)
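# A minimal, hedged usage sketch of the losses above (tensor shapes are made up):
if __name__ == "__main__":
    labels = torch.tensor([0, 2, 1])
    print(one_hot(labels, num_classes=4))      # (3, 4) one-hot matrix
    pert = torch.randn(4, 5)                   # perturbed logits
    dp = torch.randn(5)                        # reference logits
    print(KLDivLoss()(pert, dp))               # scalar KL divergence
    logit_i_p = torch.randn(4, 5)
    logit_p = torch.randn(4, 5)
    print(CoSLoss()(logit_i_p, logit_p))       # mean of (1 - cosine similarity)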
|
python
|
from wordfilter import censored_words
from lxml import etree
import datetime
import javmovie
BASEURL="https://www.javlibrary.com/en/vl_searchbyid.php?keyword="
DIRECTURL="https://www.javlibrary.com/en/?v="
xpath_title = "/html/body/div[3]/div[2]/div[1]/h3/a"
xpath_javcode = "/html/body/div[3]/div[2]/table/tr/td[2]/div/div[1]/table/tr/td[2]"
xpath_tags = "/html/body/div[3]/div[2]/table/tr/td[2]/div/div[8]/table/tr/td[2]"
xpath_tags_no_rating = "/html/body/div[3]/div[2]/table/tr/td[2]/div/div[7]/table/tr/td[2]"
xpath_actress = "/html/body/div[3]/div[2]/table/tr/td[2]/div/div[9]/table/tr/td[2]"
xpath_studiolabel = "/html/body/div[3]/div[2]/table/tr/td[2]/div/div[6]/table/tr/td[2]/span/a"
xpath_releasedate = "/html/body/div[3]/div[2]/table/tr/td[2]/div/div[2]/table/tr/td[2]"
xpath_image = "/html/body/div[3]/div[2]/table/tr/td[1]/div/img"
xpath_notfound = "/html/body/div[3]/div[2]/p/em"
xpath_multiple_found = "/html/body/div[3]/div[2]/div[1]"
xpath_multiple_list = "/html/body/div[3]/div[2]/div[2]/div"
releasedate_format = "%Y-%m-%d"
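# NOTE (assumption): HTTP is not imported in this module; it appears to rely on the
# global HTTP object that the Plex plugin framework injects at runtime.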
def get_by_jav_id(jav_id, BASEURL=BASEURL):
try:
html = HTTP.Request(BASEURL + jav_id).content
except Exception as e:
return None
tree = etree.HTML(html)
args = {}
if len(tree.xpath(xpath_notfound)) > 0 and "Search returned no result." in tree.xpath(xpath_notfound)[0].text:
return None
if BASEURL != DIRECTURL and len(tree.xpath(xpath_multiple_found)) > 0 and tree.xpath(xpath_multiple_found)[0].text is not None:
if "ID Search Result" in tree.xpath(xpath_multiple_found)[0].text:
if len(tree.xpath(xpath_multiple_list)[0]) > 0:
results = []
for videolink in tree.xpath(xpath_multiple_list)[0]:
vid = get_by_jav_id(videolink[0].attrib["href"].replace("./?v=", ""), DIRECTURL)
results.append(vid)
return results
args["jav_code"] = tree.xpath(xpath_javcode)[0].text
title = str(tree.xpath(xpath_title)[0].text).replace("[" + args["jav_code"] + "]", "").replace(args["jav_code"], "").lower()
for word, replacement in censored_words.items():
title = title.replace(word.lower(), replacement)
args["title"] = title.title().strip()
tags = []
try:
for a in tree.xpath(xpath_tags)[0]:
tags.append(a[0].text.title())
except AttributeError:
for a in tree.xpath(xpath_tags_no_rating)[0]:
tags.append(a[0].text.title())
args["tags"] = tags
if len(tree.xpath(xpath_studiolabel)) > 0:
args["studio_label"] = tree.xpath(xpath_studiolabel)[0].text
date = tree.xpath(xpath_releasedate)[0].text
if date is None:
args["release_date"] = None
else:
args["release_date"] = datetime.datetime.strptime(date, releasedate_format)
args["image_url"] = ("https:" + tree.xpath(xpath_image)[0].attrib["src"]) if tree.xpath(xpath_image)[0].attrib["src"].startswith("//") else tree.xpath(xpath_image)[0].attrib["src"]
return javmovie.JAVMovie(args)
|
python
|
#!/usr/bin/env python
# Copyright (c) 2016 Hewlett Packard Enterprise Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import unittest
from monasca_analytics.sink import iptables_sqlite as ipt_snk
class TestIptablesSQLiteSink(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
self._valid_config = {"module": "IptablesSQLiteSink"}
self.snk = ipt_snk.IptablesSQLiteSink("fake_id", self._valid_config)
def test_rdds_table_create_query(self):
query = self.snk._rdds_table_create_query()
self.assertEqual("""CREATE TABLE IF NOT EXISTS rdds
(msg TEXT, anomalous TEXT, msg_id TEXT, ctime TEXT)""", query)
def test_rdd_insert_query_valid_rdd(self):
rdd_entry = {
"msg": "test message",
"id": 1,
"anomalous": True,
"ctime": "t1"
}
query = self.snk._rdd_insert_query(rdd_entry)
self.assertEqual(
'INSERT INTO rdds VALUES("test message", "True", "1", "t1")',
query)
def test_rdd_insert_query_invalid_rdd(self):
rdd_entry = {
"msg": "test message",
"anomalous": True,
"ctime": "t1"
}
self.assertRaises(KeyError, self.snk._rdd_insert_query, rdd_entry)
def tearDown(self):
unittest.TestCase.tearDown(self)
os.remove("sqlite_sink.db")
if __name__ == "__main__":
unittest.main()
|
python
|
import telebot
import time
import threading
# Global variables
enviados = 0
recibidos = 0
# Decorators
def controlador_mensajes(cant_enviar):
"""
controlador_mensajes:
Cuenta cuantos mensajes recibe y envia, si recibe o envia mas de 20 entonces duerme por un segundo
sacado de la documentacion de telegram:
My bot is hitting limits, how do I avoid this?
When sending messages inside a particular chat, avoid sending more than one message per second.
We may allow short bursts that go over this limit, but eventually you'll begin receiving 429 errors.
If you're sending bulk notifications to multiple users, the API will not allow more than 30 messages
per second or so. Consider spreading out notifications over large intervals of 8—12 hours for best results.
Also note that your bot will not be able to send more than 20 messages per minute to the same group.
"""
def Decorador(funcion):
def wrapper(*args, **kwargs):
global recibidos,enviados
recibidos +=1
enviados += cant_enviar
if enviados >= 20 or recibidos >= 20:
time.sleep(1)
enviados = 0
recibidos = 0
funcion(*args,**kwargs)
return wrapper
return Decorador
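# Illustrative usage sketch (hypothetical handler, not part of the original bot):
# the decorator counts one outgoing message per incoming update and sleeps once
# either counter reaches 20, matching the limits quoted in the docstring above.
#
#   @controlador_mensajes(cant_enviar=1)
#   def echo(message):
#       bot.reply_to(message, message.text)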
class Bot(telebot.TeleBot):
    def __init__(self, token, threaded=True, skip_pending=False, num_threads=2):
        # Pass the constructor arguments through instead of hard-coding them.
        super().__init__(token, threaded=threaded, skip_pending=skip_pending, num_threads=num_threads)
#messages_handler
"""
diccionario de todos los metodos de mensajes que reciba el bot
"""
messages_handler={
'start':dict(
function=lambda msg, obj= self: obj.start(msg),
filters = dict(
commands=["start"]
)
),
}
#callback_query_answers
"""
diccionario de todos los metodso de callback query answers que reciba el bot
"""
callback_query_handler={
'start':dict(
function=lambda msg, obj= self: obj.start(msg),
filters = dict(
commands=["start"]
)
),
}
"""
para agregar cada comando se debe usar estos metodos
"""
for comando in messages_handler.values():
self.add_message_handler(comando)
        for comando in callback_query_handler.values():
            self.add_callback_query_handler(comando)
def bot_polling(token):
while True:
bot = None
try:
bot = Bot(token,threaded=False)
bot.polling(none_stop=True,interval=0,timeout=0)
        except Exception:  # error in polling; guard against a failed construction
            if bot is not None:
                bot.stop_polling()
else: #Clean exit
bot.stop_polling()
break #End loop
TOKEN = ""  # placeholder: the bot token must be provided here
polling_thread = threading.Thread(target=bot_polling, args=(TOKEN,))
polling_thread.daemon = True
polling_thread.start()
if __name__ == "__main__":
while True:
try:
time.sleep(120)
except KeyboardInterrupt:
break
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import pytest
import numpy as np
from qtpy import QtWidgets, QtCore
from planetaryimage import PDS3Image
from ginga.qtw.ImageViewCanvasQt import ImageViewCanvas
from pdsview import pdsview
from pdsview.channels_dialog import ChannelsDialog
from pdsview.histogram import HistogramWidget, HistogramModel
FILE_1 = os.path.join(
'tests', 'mission_data', '2m132591087cfd1800p2977m2f1.img')
FILE_2 = os.path.join(
'tests', 'mission_data', '2p129641989eth0361p2600r8m1.img')
FILE_3 = os.path.join(
'tests', 'mission_data', '1p190678905erp64kcp2600l8c1.img')
FILE_4 = os.path.join(
'tests', 'mission_data', 'h58n3118.img')
FILE_5 = os.path.join(
'tests', 'mission_data', '1p134482118erp0902p2600r8m1.img')
FILE_6 = os.path.join(
'tests', 'mission_data', '0047MH0000110010100214C00_DRCL.IMG')
FILE_1_NAME = '2m132591087cfd1800p2977m2f1.img'
FILE_2_NAME = '2p129641989eth0361p2600r8m1.img'
FILE_3_NAME = '1p190678905erp64kcp2600l8c1.img'
FILE_4_NAME = 'h58n3118.img'
FILE_5_NAME = '1p134482118erp0902p2600r8m1.img'
FILE_6_NAME = '0047MH0000110010100214C00_DRCL.IMG'
def test_image_stamp():
"""Test that ImageStamp sets correct attributes to pds compatible image"""
pds_image = PDS3Image.open(FILE_1)
test_image = pdsview.ImageStamp(FILE_1, FILE_1, pds_image, pds_image.data)
assert test_image.file_name == FILE_1_NAME
assert test_image.image_name == FILE_1
assert 'PDS' in test_image.label[0]
assert isinstance(test_image.label, list)
assert not test_image.cuts
assert not test_image.sarr
assert not test_image.zoom
assert not test_image.rotation
assert not test_image.transforms
assert test_image.not_been_displayed
class TestImageSet(object):
filepaths = [FILE_1, FILE_2, FILE_3, FILE_4, FILE_5]
test_set = pdsview.ImageSet(filepaths)
def test_init(self):
assert self.test_set._views == set()
assert len(self.test_set.images) == len(self.filepaths)
filepaths = sorted(self.filepaths)
for image, filepath in zip(self.test_set.images, filepaths):
assert image[0].file_name == os.path.basename(filepath)
assert self.test_set._current_image_index == 0
assert self.test_set._channel == 0
# assert self.test_set._last_channel is None
assert self.test_set._x_value == 0
assert self.test_set._y_value == 0
assert self.test_set._pixel_value == (0, )
assert self.test_set.use_default_text
assert self.test_set.rgb == []
assert self.test_set.current_image is not None
def test_next_prev_enabled(self):
assert self.test_set.next_prev_enabled
test_set2 = pdsview.ImageSet([])
assert not test_set2.next_prev_enabled
@pytest.mark.parametrize(
"index, expected, channel",
[
(1, 1, 1),
(5, 0, 4),
(11, 1, -1),
(-1, 4, 7),
(-13, 2, 42),
(0, 0, 0)
])
def test_current_image_index(self, index, expected, channel):
self.test_set.channel = channel
self.test_set.current_image_index = index
assert self.test_set.current_image_index == expected
assert self.test_set.current_image == self.test_set.images[expected]
assert self.test_set.channel == 0
def test_channel(self):
assert self.test_set._channel == self.test_set.channel
assert len(self.test_set.current_image) == 1
self.test_set.channel = 42
# When the current image only has one band, don't change the channel
assert self.test_set.channel == 0
assert self.test_set._channel == self.test_set.channel
# TODO: When an rgb image is in the default test_mission_data, test
        # actually changing the channel
def test_x_value(self):
assert self.test_set.x_value == self.test_set._x_value
self.test_set.x_value = 42.123456789
assert isinstance(self.test_set.x_value, int)
assert self.test_set.x_value == 42
assert self.test_set.x_value == self.test_set._x_value
self.test_set.x_value = 0
assert self.test_set.x_value == 0
assert self.test_set.x_value == self.test_set._x_value
def test_y_value(self):
assert self.test_set.y_value == self.test_set._y_value
self.test_set.y_value = 42.123456789
assert isinstance(self.test_set.y_value, int)
assert self.test_set.y_value == 42
assert self.test_set.y_value == self.test_set._y_value
self.test_set.y_value = 0
assert self.test_set.y_value == 0
assert self.test_set.y_value == self.test_set._y_value
def test_pixel_value(self):
def check_pixel_value(new_pixel, expected):
self.test_set.pixel_value = new_pixel
assert self.test_set.pixel_value == expected
assert isinstance(self.test_set.pixel_value, tuple)
for val in self.test_set.pixel_value:
assert isinstance(val, float)
assert self.test_set.pixel_value == (0.0,)
check_pixel_value(
(2.3456, 3.4567, 4.5678), (2.346, 3.457, 4.568))
check_pixel_value([2.3456, 3.4567, 4.5678], (2.346, 3.457, 4.568))
check_pixel_value(
np.array([2.3456, 3.4567, 4.5678]), (2.346, 3.457, 4.568))
check_pixel_value(
42.1234, (42.123,))
check_pixel_value(
int(42), (42.0,))
check_pixel_value(
0, (0,))
def test_pixel_value_text(self):
assert self.test_set.pixel_value_text == 'Value: 0.000'
# TODO: TEST WITH RGB IMAGE
def test_image_set_append_method(self):
"""Test append method with multiple images"""
filepaths = [FILE_1]
new_files = [FILE_2, FILE_3]
test_set = pdsview.ImageSet(filepaths)
assert test_set.current_image_index == 0
assert test_set.current_image[0].file_name == FILE_1_NAME
assert len(test_set.images) == 1
assert not(test_set.next_prev_enabled)
# Mimic how append method is used in pdsview
first_new_image = len(test_set.images)
test_set.append(new_files, first_new_image)
assert test_set.current_image_index == 1
assert test_set.current_image[0].file_name == FILE_2_NAME
assert FILE_3_NAME in str(test_set.images)
assert test_set.next_prev_enabled
def test_bands_are_composite(self):
self.test_set.rgb = [image[0] for image in self.test_set.images[:3]]
assert not self.test_set.bands_are_composite
# TODO: TEST WITH RGB IMAGE
# TODO: TEST create_rgb_image WHEN RGB IMAGE IN TEST DATA
def test_ROI_data(self):
"""Test the ROI_data to cut out the correct region of data"""
test_set = pdsview.ImageSet([FILE_3])
width = test_set.current_image[0].width
height = test_set.current_image[0].height
test_data_1 = test_set.ROI_data(0, 0, width, height)
assert test_data_1[0][0] == 23
assert test_data_1[512][16] == 25
assert test_data_1[1023][31] == 115
test_data_2 = test_set.ROI_data(9.5, 18.5, 11.5, 20.5)
assert test_data_2[0][0] == 22
assert test_data_2[0][1] == 23
assert test_data_2[1][0] == 24
assert test_data_2[1][1] == 24
def test_ROI_pixels(self):
"""Test ROI_pixels to return the correct number of pixels for a ROI"""
test_set = pdsview.ImageSet([FILE_3])
test_pixels = test_set.ROI_pixels(9.5, 18.5, 11.5, 20.5)
assert test_pixels == 4
def test_ROI_std_dev(self):
"""Test ROI_std_dev to return the correct standard deviation for ROI"""
test_set = pdsview.ImageSet([FILE_3])
test_std_dev = test_set.ROI_std_dev(9.5, 18.5, 11.5, 20.5)
assert test_std_dev == 0.829156
def test_ROI_mean(self):
"""Test ROI_mean to return the correct mean value of pixels for ROI"""
test_set = pdsview.ImageSet([FILE_3])
test_mean = test_set.ROI_mean(9.5, 18.5, 11.5, 20.5)
assert test_mean == 23.25
def test_ROI_median(self):
"""Test ROI_median to return the correct median value for a ROI"""
test_set = pdsview.ImageSet([FILE_3])
test_median = test_set.ROI_median(9.5, 18.5, 11.5, 20.5)
assert test_median == 23.5
def test_ROI_min(self):
"""Test ROI_min to return the correct minimum pixel value for a ROI"""
test_set = pdsview.ImageSet([FILE_3])
test_min = test_set.ROI_min(9.5, 18.5, 11.5, 20.5)
assert test_min == 22
def test_ROI_max(self):
"""Test ROI_mx to return the correct maximum pixel value for a ROI"""
test_set = pdsview.ImageSet([FILE_3])
test_max = test_set.ROI_max(9.5, 18.5, 11.5, 20.5)
assert test_max == 24
# TODO test channels when there is a 3 band test image
class TestPDSController(object):
filepaths = [FILE_1, FILE_2, FILE_3, FILE_4, FILE_5]
test_set = pdsview.ImageSet(filepaths)
controller = pdsview.PDSController(test_set, None)
def test_init(self):
assert self.controller.model == self.test_set
assert self.controller.view is None
def test_next_image(self):
assert self.test_set.current_image_index == 0
self.controller.next_image()
assert self.test_set.current_image_index == 1
self.test_set.current_image_index = len(self.test_set.images) - 1
self.controller.next_image()
assert self.test_set.current_image_index == 0
def test_previous_image(self):
assert self.test_set.current_image_index == 0
self.controller.previous_image()
last = len(self.test_set.images) - 1
assert self.test_set.current_image_index == last
self.test_set.current_image_index = 1
self.controller.previous_image()
assert self.test_set.current_image_index == 0
def test_next_channel(self):
assert self.test_set.channel == 0
self.controller.next_channel()
assert self.test_set.channel == 0
# TODO: TEST MORE WHEN THERE IS AN RGB IMAGE
def test_previous_channel(self):
assert self.test_set.channel == 0
self.controller.previous_channel()
assert self.test_set.channel == 0
# TODO: TEST MORE WHEN THERE IS AN RGB IMAGE
def test_new_x_value(self):
self.controller.new_x_value(42.123456789)
assert isinstance(self.test_set.x_value, int)
assert self.test_set.x_value == 42
assert self.test_set.x_value == self.test_set._x_value
self.controller.new_x_value(0)
assert self.test_set.x_value == 0
assert self.test_set.x_value == self.test_set._x_value
def test_new_y_value(self):
assert self.test_set.y_value == self.test_set._y_value
self.controller.new_y_value(42.123456789)
assert isinstance(self.test_set.y_value, int)
assert self.test_set.y_value == 42
assert self.test_set.y_value == self.test_set._y_value
self.controller.new_y_value(0)
assert self.test_set.y_value == 0
assert self.test_set.y_value == self.test_set._y_value
def test_new_pixel_value(self):
def check_pixel_value(new_pixel, expected):
self.controller.new_pixel_value(new_pixel)
assert self.test_set.pixel_value == expected
assert isinstance(self.test_set.pixel_value, tuple)
for val in self.test_set.pixel_value:
assert isinstance(val, float)
assert self.test_set.pixel_value == (0.0,)
check_pixel_value(
(2.3456, 3.4567, 4.5678), (2.346, 3.457, 4.568))
check_pixel_value([2.3456, 3.4567, 4.5678], (2.346, 3.457, 4.568))
check_pixel_value(
np.array([2.3456, 3.4567, 4.5678]), (2.346, 3.457, 4.568))
check_pixel_value(
42.1234, (42.123,))
check_pixel_value(
int(42), (42.0,))
check_pixel_value(
0, (0,))
images = test_set.images
@pytest.mark.parametrize(
'image_index, expected',
[
(0, [images[0][0], images[1][0], images[2][0]]),
(1, [images[1][0], images[2][0], images[3][0]]),
(len(images) - 1, [images[-1][0], images[0][0], images[1][0]])
])
def test_populate_rgb(self, image_index, expected):
test_rgb = self.controller._populate_rgb(image_index)
assert test_rgb == expected
def test_update_rgb(self):
expected = [self.images[0][0], self.images[1][0], self.images[2][0]]
self.test_set.rgb = [1, 2, 3]
self.controller.update_rgb()
assert self.test_set.rgb != [1, 2, 3]
assert self.test_set.rgb == expected
class TestPDSViewer(object):
filepaths = [FILE_1, FILE_2, FILE_3, FILE_4, FILE_5]
test_set = pdsview.ImageSet(filepaths)
viewer = pdsview.PDSViewer(test_set)
viewer.show()
def test_init(self):
assert self.viewer.image_set == self.test_set
assert self.viewer in self.test_set._views
assert self.viewer._label_window is None
assert self.viewer._label_window_pos is None
assert self.viewer.channels_window is None
assert not self.viewer.channels_window_is_open
assert self.viewer.channels_window_pos is None
assert isinstance(
self.viewer.view_canvas, ImageViewCanvas)
assert isinstance(
self.viewer.next_image_btn, QtWidgets.QPushButton)
assert isinstance(
self.viewer.previous_image_btn, QtWidgets.QPushButton)
assert isinstance(
self.viewer.open_label, QtWidgets.QPushButton)
assert isinstance(
self.viewer.next_channel_btn, QtWidgets.QPushButton)
assert isinstance(
self.viewer.previous_channel_btn, QtWidgets.QPushButton)
assert isinstance(
self.viewer.restore_defaults, QtWidgets.QPushButton)
assert isinstance(
self.viewer.channels_button, QtWidgets.QPushButton)
assert isinstance(
self.viewer.x_value_lbl, QtWidgets.QLabel)
assert isinstance(
self.viewer.y_value_lbl, QtWidgets.QLabel)
assert isinstance(
self.viewer.pixel_value_lbl, QtWidgets.QLabel)
assert isinstance(
self.viewer.pixels, QtWidgets.QLabel)
assert isinstance(
self.viewer.std_dev, QtWidgets.QLabel)
assert isinstance(
self.viewer.mean, QtWidgets.QLabel)
assert isinstance(
self.viewer.median, QtWidgets.QLabel)
assert isinstance(
self.viewer.min, QtWidgets.QLabel)
assert isinstance(
self.viewer.max, QtWidgets.QLabel)
assert isinstance(
self.viewer.histogram, HistogramModel)
assert isinstance(
self.viewer.histogram_widget, HistogramWidget)
assert isinstance(
self.viewer.rgb_check_box, QtWidgets.QCheckBox)
assert self.viewer.windowTitle() == FILE_5_NAME
assert self.viewer.pixels.text() == '#Pixels: 32768'
assert self.viewer.std_dev.text() == 'Std Dev: 16.100793'
assert self.viewer.mean.text() == 'Mean: 24.6321'
assert self.viewer.median.text() == 'Median: 22.0'
assert self.viewer.min.text() == 'Min: 17'
assert self.viewer.max.text() == 'Max: 114'
assert self.viewer.x_value_lbl.text() == 'X: ????'
assert self.viewer.y_value_lbl.text() == 'Y: ????'
assert self.viewer.pixel_value_lbl.text() == 'Value: ????'
assert not self.viewer.rgb_check_box.isChecked()
def test_current_image(self):
expected = self.test_set.current_image[self.test_set.channel]
assert self.viewer.current_image == expected
def test_refresh_ROI_text(self):
self.viewer.min.setText("Min: 0")
self.viewer.max.setText("Max: 100")
self.viewer._refresh_ROI_text()
assert self.viewer.min.text() == 'Min: 17'
assert self.viewer.max.text() == 'Max: 114'
def test_reset_ROI(self):
self.viewer.min.setText("Min: 0")
self.viewer.max.setText("Max: 100")
self.viewer._reset_ROI()
assert self.viewer.min.text() == 'Min: 17'
assert self.viewer.max.text() == 'Max: 114'
# TODO: When have RGB Image Test _disable_next_previous
def test_reset_display_values(self):
self.viewer.x_value_lbl.setText("X: 42")
self.viewer.y_value_lbl.setText("Y: 42")
self.viewer.pixel_value_lbl.setText("Value: 42")
self.viewer._reset_display_values()
assert self.viewer.x_value_lbl.text() == 'X: ????'
assert self.viewer.y_value_lbl.text() == 'Y: ????'
assert self.viewer.pixel_value_lbl.text() == 'Value: ????'
def test_window_cascade(self, qtbot):
"""Tests the window cascade."""
# Initial checks
assert self.viewer._label_window is None
assert self.viewer.open_label.isEnabled()
# Open the label window and run appropriate checks
qtbot.mouseClick(self.viewer.open_label, QtCore.Qt.LeftButton)
qtbot.add_widget(self.viewer._label_window)
assert self.viewer._label_window is not None
assert self.viewer._label_window._finder_window is None
assert self.viewer._label_window.is_open
# Open the finder window and run appropriate checks
qtbot.mouseClick(
self.viewer._label_window.find_button, QtCore.Qt.LeftButton)
assert self.viewer._label_window._finder_window is not None
qtbot.add_widget(self.viewer._label_window._finder_window)
assert not(self.viewer._label_window._finder_window.query_edit)
# Hide windows and check to make sure they are hidden
qtbot.mouseClick(
self.viewer._label_window._finder_window.ok_button,
QtCore.Qt.LeftButton)
assert self.viewer._label_window._finder_window.isHidden()
qtbot.mouseClick(
self.viewer._label_window.cancel_button, QtCore.Qt.LeftButton)
assert self.viewer._label_window.isHidden()
# Test the ability for the parent (label) to hide the child (finder)
qtbot.mouseClick(
self.viewer.open_label, QtCore.Qt.LeftButton)
qtbot.mouseClick(
self.viewer._label_window.find_button, QtCore.Qt.LeftButton)
assert not(self.viewer._label_window.isHidden())
assert not(self.viewer._label_window._finder_window.isHidden())
qtbot.mouseClick(
self.viewer._label_window.cancel_button, QtCore.Qt.LeftButton)
assert self.viewer._label_window.isHidden()
assert self.viewer._label_window._finder_window.isHidden()
def test_label_refresh(self, qtbot):
"""Tests the label display and refresh features."""
qtbot.mouseClick(self.viewer.open_label, QtCore.Qt.LeftButton)
qtbot.add_widget(self.viewer._label_window)
label_contents = self.viewer._label_window.label_contents
assert label_contents.toPlainText()[233:236] == "341"
qtbot.mouseClick(self.viewer.next_image_btn, QtCore.Qt.LeftButton)
label_contents = self.viewer._label_window.label_contents
assert label_contents.toPlainText()[228:231] == "338"
qtbot.mouseClick(self.viewer.previous_image_btn, QtCore.Qt.LeftButton)
label_contents = self.viewer._label_window.label_contents
assert label_contents.toPlainText()[233:236] == "341"
def test_channels_dialog(self, qtbot):
assert self.viewer.channels_window is None
assert not self.viewer.channels_window_is_open
assert self.viewer.channels_window_pos is None
qtbot.add_widget(self.viewer)
qtbot.mouseClick(self.viewer.channels_button, QtCore.Qt.LeftButton)
assert self.viewer.channels_window is not None
assert self.viewer.channels_window_is_open
assert isinstance(self.viewer.channels_window, ChannelsDialog)
assert self.viewer.channels_window_pos is None
qtbot.add_widget(self.viewer.channels_window)
new_pos = QtCore.QPoint(42, 24)
self.viewer.channels_window.move(new_pos)
qtbot.mouseClick(
self.viewer.channels_window.close_button, QtCore.Qt.LeftButton)
assert self.viewer.channels_window_pos is not None
assert self.viewer.channels_window_pos == new_pos
qtbot.mouseClick(self.viewer.channels_button, QtCore.Qt.LeftButton)
        assert self.viewer.channels_window.pos() == new_pos
def test_apply_parameters(self, qtbot):
"""Test that images maintain their parameters"""
self.viewer.save_parameters()
image1 = self.viewer.current_image
assert image1.sarr[0] == 0
assert image1.sarr[255] == 255
# assert image1.zoom == 1.0
assert image1.rotation == 0.0
assert image1.transforms == (False, False, False)
assert image1.cuts == (17, 25)
# Change parameters
image1.sarr[0] = 42
image1.sarr[255] = 13
self.viewer.view_canvas.get_rgbmap().set_sarr(image1.sarr)
# self.viewer.view_canvas.zoom_to(3)
self.viewer.view_canvas.rotate(45)
self.viewer.view_canvas.transform(False, True, False)
self.viewer.view_canvas.cut_levels(24, 95)
qtbot.mouseClick(self.viewer.next_image_btn, QtCore.Qt.LeftButton)
        # Test that the second image's parameters are None by default
image2 = self.viewer.current_image
        # Test that the view was reset to the default parameters for the image
assert self.viewer.view_canvas.get_rgbmap().get_sarr()[0] == 0
assert self.viewer.view_canvas.get_rgbmap().get_sarr()[255] == 255
# assert self.viewer.view_canvas.get_zoom() == 1.0
assert self.viewer.view_canvas.get_rotation() == 0.0
assert self.viewer.view_canvas.get_transforms() == (
False, False, False
)
assert self.viewer.view_canvas.get_cut_levels() == (22, 26)
# Test changing back to the first image maintains image1's parameters
qtbot.mouseClick(self.viewer.previous_image_btn, QtCore.Qt.LeftButton)
image1 = self.viewer.image_set.current_image[0]
assert image1.sarr[0] == 42
assert image1.sarr[255] == 13
# assert image1.zoom == 3.0
assert image1.rotation == 45.0
assert image1.transforms == (False, True, False)
assert image1.cuts == (24, 95)
# Test that image2 stored its parameters
image2 = self.viewer.image_set.images[1][0]
assert image2.sarr[0] == 0
assert image2.sarr[255] == 255
# assert image2.zoom == 4.746031746031746
assert image2.rotation == 0.0
assert image2.transforms == (False, False, False)
assert image2.cuts == (22, 26)
def test_restore(self, qtbot):
image1 = self.viewer.image_set.current_image[0]
image1.sarr[0] = 42
image1.sarr[255] = 13
self.viewer.view_canvas.get_rgbmap().set_sarr(image1.sarr)
# self.viewer.view_canvas.zoom_to(3)
self.viewer.view_canvas.rotate(45)
self.viewer.view_canvas.transform(False, True, False)
self.viewer.view_canvas.cut_levels(24, 95)
assert image1.sarr[0] == 42
assert image1.sarr[255] == 13
# assert image1.zoom == 3.0
assert image1.rotation == 45.0
assert image1.transforms == (False, True, False)
assert image1.cuts == (24, 95)
qtbot.mouseClick(self.viewer.restore_defaults, QtCore.Qt.LeftButton)
self.viewer.save_parameters()
assert image1.sarr[0] == 0
assert image1.sarr[255] == 255
# assert image1.zoom == 1.0
assert image1.rotation == 0.0
assert image1.transforms == (False, False, False)
assert image1.cuts == (17, 25)
def test_set_ROI_text(self, qtbot):
"""Test the ROI text to contain the correct values"""
# Test Whole image ROI
assert self.viewer.pixels.text() == '#Pixels: 32768'
assert self.viewer.std_dev.text() == 'Std Dev: 16.100793'
assert self.viewer.mean.text() == 'Mean: 24.6321'
assert self.viewer.median.text() == 'Median: 22.0'
assert self.viewer.min.text() == 'Min: 17'
assert self.viewer.max.text() == 'Max: 114'
# Test 2x2 random ROI
# .5 values because these are the edge of the ROI pixels
self.viewer.set_ROI_text(14.5, 512.5, 16.5, 514.5)
assert self.viewer.pixels.text() == '#Pixels: 4'
assert self.viewer.std_dev.text() == 'Std Dev: 1.000000'
assert self.viewer.mean.text() == 'Mean: 23.0000'
assert self.viewer.median.text() == 'Median: 23.0'
assert self.viewer.min.text() == 'Min: 22'
assert self.viewer.max.text() == 'Max: 24'
def test_top_right_pixel_snap(self):
test_snap_1 = self.viewer.top_right_pixel_snap(10, 5)
assert test_snap_1[0] == 5.5
assert test_snap_1[1]
test_snap_2 = self.viewer.top_right_pixel_snap(-5, 5)
assert not test_snap_2[1]
test_snap_3 = self.viewer.top_right_pixel_snap(5.4, 10)
assert test_snap_3[0] == 5.5
assert test_snap_3[1]
test_snap_4 = self.viewer.top_right_pixel_snap(5.5, 10)
assert test_snap_4[0] == 5.5
assert test_snap_4[1]
test_snap_5 = self.viewer.top_right_pixel_snap(5.6, 10)
assert test_snap_5[0] == 6.5
assert test_snap_5[1]
def test_bottom_left_pixel_snap(self):
test_snap_1 = self.viewer.bottom_left_pixel_snap(-5, 5)
assert test_snap_1[0] == -0.5
assert test_snap_1[1]
test_snap_2 = self.viewer.bottom_left_pixel_snap(10, 5)
assert not test_snap_2[1]
test_snap_3 = self.viewer.bottom_left_pixel_snap(5.4, 10)
assert test_snap_3[0] == 4.5
assert test_snap_3[1]
test_snap_4 = self.viewer.bottom_left_pixel_snap(5.5, 10)
assert test_snap_4[0] == 5.5
assert test_snap_4[1]
def test_left_right_bottom_top(self):
test_coords_1 = self.viewer.left_right_bottom_top(1, 2, 1, 2)
assert test_coords_1[0:4] == (1, 2, 1, 2)
assert test_coords_1[4]
assert test_coords_1[5]
test_coords_2 = self.viewer.left_right_bottom_top(2, 1, 1, 2)
assert test_coords_2[0:4] == (1, 2, 1, 2)
assert not test_coords_2[4]
assert test_coords_2[5]
test_coords_3 = self.viewer.left_right_bottom_top(1, 2, 2, 1)
assert test_coords_3[0:4] == (1, 2, 1, 2)
assert test_coords_3[4]
assert not test_coords_3[5]
test_coords_4 = self.viewer.left_right_bottom_top(2, 1, 2, 1)
assert test_coords_4[0:4] == (1, 2, 1, 2)
assert not test_coords_4[4]
assert not test_coords_4[5]
|
python
|
import copy
import datetime
import functools
import inspect
import os
import textwrap
import traceback
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Mapping, Optional, Union
import click
import tqdm
from experitur.core.context import get_current_context
from experitur.core.parameters import (
Multi,
ParameterGenerator,
check_parameter_generators,
)
from experitur.core.trial import Trial
from experitur.errors import ExperiturError
from experitur.helpers import tqdm_redirect
from experitur.helpers.merge_dicts import merge_dicts
from experitur.util import callable_to_name, ensure_list
if TYPE_CHECKING: # pragma: no cover
from experitur.core.context import Context
def try_str(obj):
try:
return str(obj)
except: # pylint: disable=bare-except # noqa: E722
return "<error>"
class ExperimentError(ExperiturError):
pass
class StopExecution(ExperimentError):
pass
class CommandNotFoundError(ExperimentError):
pass
class TrialNotFoundError(ExperimentError):
pass
def format_trial_parameters(func=None, parameters=None, experiment=None):
if func is not None:
try:
func = func.__name__
except AttributeError:
func = str(func)
else:
func = "_"
if parameters is not None:
parameters = (
"("
+ (", ".join("{}={}".format(k, repr(v)) for k, v in parameters.items()))
+ ")"
)
else:
parameters = "()"
if experiment is not None:
func = "{}:{}".format(str(experiment), func)
return func + parameters
class Experiment:
"""
Define an experiment.
Args:
name (:py:class:`str`, optional): Name of the experiment (Default: None).
parameter_grid (:py:class:`dict`, optional): Parameter grid (Default: None).
parent (:py:class:`~experitur.Experiment`, optional): Parent experiment (Default: None).
meta (:py:class:`dict`, optional): Dict with experiment metadata that should be recorded.
active (:py:class:`bool`, optional): Is the experiment active? (Default: True).
When False, the experiment will not be executed.
volatile (:py:class:`bool`, optional): If True, the results of a successful run will not be saved (Default: False).
minimize (:py:class:`str` or list of str, optional): Metric or list of metrics to minimize.
maximize (:py:class:`str` or list of str, optional): Metric or list of metrics to maximize.
This can be used as a constructor or a decorator:
.. code-block:: python
# When using as a decorator, the name of the experiment is automatically inferred.
@Experiment(...)
def exp1(trial):
...
# Here, the name must be supplied.
exp2 = Experiment("exp2", parent=exp1)
When the experiment is run, `trial` will be a :py:class:`~experitur.Trial` instance.
As such, it has the following characteristics:
- :obj:`dict`-like interface (`trial[<name>]`): Get the value of the parameter named `name`.
- Attribute interface (`trial.<attr>`): Get meta-data for this trial.
- :py:meth:`~experitur.Trial.call`: Run a function and automatically assign parameters.
See :py:class:`~experitur.Trial` for more details.
"""
def __init__(
self,
name: Optional[str] = None,
parameters=None,
parent: "Experiment" = None,
meta: Optional[Mapping] = None,
active: bool = True,
volatile: bool = False,
minimize: Union[str, List[str], None] = None,
maximize: Union[str, List[str], None] = None,
):
if not (isinstance(name, str) or name is None):
raise ValueError(f"'name' has to be a string or None, got {name!r}")
self.ctx = get_current_context()
self.name = name
self.parent = parent
self.meta = meta
self.active = active
self.volatile = volatile
self.minimize, self.maximize = self._validate_minimize_maximize(
minimize, maximize
)
self._own_parameter_generators: List[ParameterGenerator]
self._own_parameter_generators = check_parameter_generators(parameters)
self._pre_trial = None
self._commands: Dict[str, Any] = {}
self.func = None
# Merge parameters from all ancestors
parent = self.parent
while parent is not None:
self._merge(parent)
parent = parent.parent
self._base_parameter_generators: List[ParameterGenerator]
self._base_parameter_generators = (
[] if self.parent is None else self.parent._parameter_generators
)
self.ctx._register_experiment(self)
@staticmethod
def _validate_minimize_maximize(minimize, maximize):
minimize, maximize = ensure_list(minimize), ensure_list(maximize)
common = set(minimize) & set(maximize)
if common:
common = ", ".join(sorted(common))
raise ValueError(f"minimize and maximize share common metrics: {common}")
return minimize, maximize
def __call__(self, func: Callable) -> "Experiment":
"""
Register an entry-point.
Allows an Experiment object to be used as a decorator::
@Experiment()
def entry_point(trial):
...
"""
if not self.name:
self.name = func.__name__
self.func = func
return self
@property
def _parameter_generators(self) -> List[ParameterGenerator]:
return self._base_parameter_generators + self._own_parameter_generators
def add_parameter_generator(
self, parameter_generator: ParameterGenerator, prepend=False
):
if prepend:
self._own_parameter_generators.insert(0, parameter_generator)
else:
self._own_parameter_generators.append(parameter_generator)
@property
def parameter_generator(self) -> ParameterGenerator:
return Multi(self._parameter_generators)
@property
def independent_parameters(self) -> List[str]:
"""Independent parameters. (Parameters that were actually configured.)"""
return sorted(self.varying_parameters + self.invariant_parameters)
@property
def varying_parameters(self) -> List[str]:
"""Varying parameters of this experiment."""
return sorted(self.parameter_generator.varying_parameters.keys())
@property
def invariant_parameters(self) -> List[str]:
"""Varying parameters of this experiment."""
return sorted(self.parameter_generator.invariant_parameters.keys())
def __str__(self):
if self.name is not None:
return self.name
return repr(self)
def __repr__(self): # pragma: no cover
return "Experiment(name={})".format(self.name)
def run(self):
"""
Run this experiment.
Create trials for every combination in the parameter grid and run them.
"""
if not self.active:
print("Skip inactive experiment {}.".format(self.name))
return
if self.func is None:
raise ValueError("No function was registered for {}.".format(self))
if self.name is None:
raise ValueError("Experiment has no name {}.".format(self))
print("Experiment", self)
parameter_generator = self.parameter_generator
print("Independent parameters:")
for k, v in parameter_generator.varying_parameters.items():
print("{}: {}".format(k, v))
# Generate trial configurations
trial_configurations = parameter_generator.generate(self)
pbar = tqdm.tqdm(trial_configurations, unit="")
for trial_configuration in pbar:
# Inject experiment data into trial_configuration
trial_configuration = self._setup_trial_configuration(trial_configuration)
# Run the pre-trial hook to allow the user to interact
# with the parameters before the trial is created and run.
if self._pre_trial is not None:
self._pre_trial(self.ctx, trial_configuration)
if self.ctx.config["skip_existing"]:
# Check, if a trial with this parameter set already exists
existing = self.ctx.store.match(
func=self.func,
parameters=trial_configuration.get("parameters", {}),
)
if len(existing):
pbar.write(
"Skip existing configuration: {}".format(
format_trial_parameters(
func=self.func, parameters=trial_configuration
)
)
)
pbar.set_description("[Skipped]")
continue
trial_configuration = self.ctx.store.create(trial_configuration)
wdir = self.ctx.get_trial_wdir(trial_configuration["id"])
os.makedirs(wdir, exist_ok=True)
trial = Trial(merge_dicts(trial_configuration, wdir=wdir), self.ctx.store)
pbar.write("Trial {}".format(trial.id))
pbar.set_description("Running trial {}...".format(trial.id))
# Run the trial
try:
with tqdm_redirect.redirect_stdout():
result = self.run_trial(trial)
result = self._validate_trial_result(result)
except Exception: # pylint: disable=broad-except
msg = textwrap.indent(traceback.format_exc(-1), " ")
pbar.write("{} failed!".format(trial.id))
pbar.write(msg)
if not self.ctx.config["catch_exceptions"]:
raise
else:
if self.volatile:
trial.remove()
pbar.set_description("Running trial {}... Done.".format(trial.id))
def run_trial(self, trial: Trial):
"""Run the current trial and save the results."""
        # Record initial state
trial.success = False
trial.time_start = datetime.datetime.now()
trial.result = None
trial.error = None
trial.save()
try:
result = self.func(trial)
except (Exception, KeyboardInterrupt) as exc:
# TODO: Store.log_error()
# Log complete exc to file
error_fn = os.path.join(trial.wdir, "error.txt")
with open(error_fn, "w") as f:
f.write(str(exc))
f.write(traceback.format_exc())
f.write("\n")
for k, v in inspect.trace()[-1][0].f_locals.items():
f.write(f"{k}: {try_str(v)}\n")
trial.error = ": ".join(filter(None, (exc.__class__.__name__, str(exc))))
print("\n", flush=True)
print(
f"Error running {trial.id}.\n"
f"See {error_fn} for the complete traceback.",
flush=True,
)
raise exc
else:
trial.result = result
trial.success = True
finally:
trial.time_end = datetime.datetime.now()
trial.save()
return trial.result
def _setup_trial_configuration(self, trial_configuration):
trial_configuration.setdefault("parameters", {})
return merge_dicts(
trial_configuration,
experiment={
"name": self.name,
"parent": self.parent.name if self.parent is not None else None,
"func": callable_to_name(self.func),
"meta": self.meta,
# Parameters that where actually configured.
"independent_parameters": self.independent_parameters,
"varying_parameters": self.varying_parameters,
"minimize": self.minimize,
"maximize": self.maximize,
},
)
def _validate_trial_result(self, trial_result: Optional[dict]):
if trial_result is None:
trial_result = {}
if not isinstance(trial_result, dict):
raise ExperimentError(
f"Experiments are expected to return a dict, got {trial_result!r}"
)
missing_metrics = (
            set(self.minimize) | set(self.maximize)
) - trial_result.keys()
if missing_metrics:
missing_metrics = ", ".join(sorted(missing_metrics))
raise ExperimentError(f"Missing metrics in result: {missing_metrics}")
return trial_result
def _merge(self, other):
"""
Merge configuration of other into self.
This does not include parameter generators!
`other` is usually the parent experiment.
"""
# Copy attributes: func, meta, ...
for name in ("func", "meta"):
ours = getattr(self, name)
theirs = getattr(other, name)
if ours is None and theirs is not None:
# Shallow-copy regular attributes
setattr(self, name, copy.copy(theirs))
elif isinstance(ours, dict) and isinstance(theirs, dict):
# Merge dict attributes
setattr(self, name, {**theirs, **ours})
def pre_trial(self, func):
"""Update the pre-trial hook.
The pre-trial hook is called after the parameters for a trial are
calculated and before its ID is calculated and it is run.
This hook can be used to alter the parameters.
Use :code:`pre_trial(None)` to reset the hook.
This can be used as a decorator::
@experiment()
def exp(trial):
...
@exp.pre_trial
def pre_trial_handler(ctx, trial_parameters):
...
Args:
func: A function with the signature (ctx, trial_parameters).
"""
self._pre_trial = func
def command(self, name=None, *, target="trial"):
"""Attach a command to an experiment.
.. code-block:: python
@experiment()
def experiment1(trial):
...
@experiment1.command()
def frobnicate(trial):
...
"""
if target not in ("trial", "experiment"):
msg = "target has to be one of 'trial', 'experiment', not {}.".format(
target
)
raise ValueError(msg)
def _decorator(f):
_name = name or f.__name__
self._commands[_name] = (f, target)
return f
return _decorator
def do(self, cmd_name, target_name, cmd_args):
try:
cmd, target = self._commands[cmd_name]
except KeyError:
raise CommandNotFoundError(cmd_name)
if target == "trial":
try:
trial = self.ctx.store[target_name]
except KeyError as exc:
raise TrialNotFoundError(target_name) from exc
# Inject the Trial
cmd_wrapped = functools.partial(cmd, Trial(trial, self.ctx.store))
# Copy over __click_params__ if they exist
try:
cmd_wrapped.__click_params__ = cmd.__click_params__
except AttributeError:
pass
cmd = click.command(name=cmd_name)(cmd_wrapped)
cmd.main(args=cmd_args, standalone_mode=False)
elif target == "experiment":
# Inject self
cmd_wrapped = functools.partial(cmd, self)
# Copy over __click_params__ if they exist
try:
cmd_wrapped.__click_params__ = cmd.__click_params__
except AttributeError:
pass
cmd = click.command(name=cmd_name)(cmd_wrapped)
cmd.main(args=cmd_args, standalone_mode=False)
else:
msg = "target={} is not implemented.".format(target)
raise NotImplementedError(msg)
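# Minimal usage sketch based on the docstring above (the metric name "accuracy"
# and the parameter values are illustrative only):
#
#   @Experiment(parameters={"lr": [0.1, 0.01]}, maximize="accuracy")
#   def train(trial):
#       return {"accuracy": 1.0 - trial["lr"]}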
|
python
|
from importlib.util import find_spec
from os.path import isfile, join
import xdg.BaseDirectory
from json_database import JsonStorage
from xdg import BaseDirectory as XDG
from ovos_utils.json_helper import load_commented_json, merge_dict
from ovos_utils.log import LOG
from ovos_utils.system import search_mycroft_core_location
def get_ovos_config():
config = {"xdg": True,
"base_folder": "mycroft",
"config_filename": "mycroft.conf",
"default_config_path": find_default_config()}
try:
if isfile("/etc/OpenVoiceOS/ovos.conf"):
config = merge_dict(config,
load_commented_json(
"/etc/OpenVoiceOS/ovos.conf"))
elif isfile("/etc/mycroft/ovos.conf"):
config = merge_dict(config,
load_commented_json("/etc/mycroft/ovos.conf"))
except:
# tolerate bad json TODO proper exception (?)
pass
# This includes both the user config and
# /etc/xdg/OpenVoiceOS/ovos.conf
for p in xdg.BaseDirectory.load_config_paths("OpenVoiceOS"):
if isfile(join(p, "ovos.conf")):
try:
xdg_cfg = load_commented_json(join(p, "ovos.conf"))
config = merge_dict(config, xdg_cfg)
except:
# tolerate bad json TODO proper exception (?)
pass
# let's check for derivatives specific configs
# the assumption is that these cores are exclusive to each other,
# this will never find more than one override
# TODO this works if using dedicated .venvs what about system installs?
cores = config.get("module_overrides") or {}
for k in cores:
if find_spec(k):
config = merge_dict(config, cores[k])
break
else:
subcores = config.get("submodule_mappings") or {}
for k in subcores:
if find_spec(k):
config = merge_dict(config, cores[subcores[k]])
break
return config
def is_using_xdg():
return get_ovos_config().get("xdg", True)
def get_xdg_base():
return get_ovos_config().get("base_folder") or "mycroft"
def save_ovos_core_config(new_config):
OVOS_CONFIG = join(xdg.BaseDirectory.save_config_path("OpenVoiceOS"),
"ovos.conf")
cfg = JsonStorage(OVOS_CONFIG)
cfg.update(new_config)
cfg.store()
return cfg
def set_xdg_base(folder_name):
LOG.info(f"XDG base folder set to: '{folder_name}'")
save_ovos_core_config({"base_folder": folder_name})
def set_config_filename(file_name, core_folder=None):
if core_folder:
set_xdg_base(core_folder)
LOG.info(f"config filename set to: '{file_name}'")
save_ovos_core_config({"config_filename": file_name})
def set_default_config(file_path=None):
file_path = file_path or find_default_config()
LOG.info(f"default config file changed to: {file_path}")
save_ovos_core_config({"default_config_path": file_path})
def find_default_config():
mycroft_root = search_mycroft_core_location()
if not mycroft_root:
raise FileNotFoundError("Couldn't find mycroft core root folder.")
return join(mycroft_root, "mycroft", "configuration", "mycroft.conf")
def find_user_config():
if is_using_xdg():
path = join(XDG.xdg_config_home, get_xdg_base(), get_config_filename())
if isfile(path):
return path
old, path = get_config_locations(default=False, web_cache=False,
system=False, old_user=True,
user=True)
if isfile(path):
return path
if isfile(old):
return old
# mark1 runs as a different user
sysconfig = MycroftSystemConfig()
platform_str = sysconfig.get("enclosure", {}).get("platform", "")
if platform_str == "mycroft_mark_1":
path = "/home/mycroft/.mycroft/mycroft.conf"
return path
def get_config_locations(default=True, web_cache=True, system=True,
old_user=True, user=True):
locs = []
ovos_cfg = get_ovos_config()
if default:
locs.append(ovos_cfg["default_config_path"])
if system:
locs.append(f"/etc/{ovos_cfg['base_folder']}/{ovos_cfg['config_filename']}")
if web_cache:
locs.append(f"{XDG.xdg_config_home}/{ovos_cfg['base_folder']}/web_cache.json")
if old_user:
locs.append(f"~/.{ovos_cfg['base_folder']}/{ovos_cfg['config_filename']}")
if user:
if is_using_xdg():
locs.append(f"{XDG.xdg_config_home}/{ovos_cfg['base_folder']}/{ovos_cfg['config_filename']}")
else:
locs.append(f"~/.{ovos_cfg['base_folder']}/{ovos_cfg['config_filename']}")
return locs
def get_webcache_location():
return join(XDG.xdg_config_home, get_xdg_base(), 'web_cache.json')
def get_xdg_config_locations():
# This includes both the user config and
# /etc/xdg/mycroft/mycroft.conf
xdg_paths = list(reversed(
[join(p, get_config_filename())
for p in XDG.load_config_paths(get_xdg_base())]
))
return xdg_paths
def get_config_filename():
return get_ovos_config().get("config_filename") or "mycroft.conf"
def set_config_name(name, core_folder=None):
# TODO deprecate, was only out in a couple versions
# renamed to match HolmesV
set_config_filename(name, core_folder)
def read_mycroft_config():
conf = LocalConf("tmp/dummy.conf")
conf.merge(MycroftDefaultConfig())
conf.merge(MycroftSystemConfig())
conf.merge(MycroftUserConfig())
return conf
def update_mycroft_config(config, path=None):
if path is None:
conf = MycroftUserConfig()
else:
conf = LocalConf(path)
conf.merge(config)
conf.store()
return conf
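# A minimal usage sketch (an assumption, not part of the library docs): read
# the merged config, then persist a change to the user config returned by
# MycroftUserConfig(); the "lang" key is illustrative.
#
#   conf = read_mycroft_config()
#   current_lang = conf.get("lang")
#   update_mycroft_config({"lang": "en-us"})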
class LocalConf(JsonStorage):
"""
Config dict from file.
"""
allow_overwrite = True
def __init__(self, path=None):
super(LocalConf, self).__init__(path)
class ReadOnlyConfig(LocalConf):
""" read only """
def __init__(self, path, allow_overwrite=False):
super().__init__(path)
self.allow_overwrite = allow_overwrite
def reload(self):
old = self.allow_overwrite
self.allow_overwrite = True
super().reload()
self.allow_overwrite = old
def __setitem__(self, key, value):
if not self.allow_overwrite:
raise PermissionError
super().__setitem__(key, value)
def __setattr__(self, key, value):
if not self.allow_overwrite:
raise PermissionError
super().__setattr__(key, value)
def merge(self, *args, **kwargs):
if not self.allow_overwrite:
raise PermissionError
super().merge(*args, **kwargs)
def store(self, path=None):
if not self.allow_overwrite:
raise PermissionError
super().store(path)
class MycroftUserConfig(LocalConf):
def __init__(self):
path = find_user_config()
super().__init__(path)
class MycroftDefaultConfig(ReadOnlyConfig):
def __init__(self):
path = get_ovos_config()["default_config_path"]
super().__init__(path)
if not self.path or not isfile(self.path):
LOG.debug(f"mycroft root path not found, could not load default .conf: {self.path}")
def set_root_config_path(self, root_config):
# in case we got it wrong / non standard
self.path = root_config
self.reload()
class MycroftSystemConfig(ReadOnlyConfig):
def __init__(self, allow_overwrite=False):
path = get_config_locations(default=False, web_cache=False,
system=True, old_user=False,
user=False)[0]
super().__init__(path, allow_overwrite)
class MycroftXDGConfig(LocalConf):
def __init__(self):
path = get_config_locations(default=False, web_cache=False,
system=False, old_user=False,
user=True)[0]
super().__init__(path)
|
python
|
'''
TOOL SHARE
steven small
stvnsmll
Full Project Structure:
~/toolshare
|-- application.py # main script (this file)
|__ /views # contains all blueprints for app.routes
|-- __init__.py # empty
|-- neighborhoods.py
|-- tools_and_actions.py
|-- users.py
|__ /sub_modules # contains all helper and supporting functions
|-- __init__.py # imports all from each sub module
|-- helpers.py
|-- config.py
|-- emails.py
|-- image_mgmt.py
|-- SQL.py
|__ /templates # contains all of the html jinja layout templates and files
|-- layout.html
|__ /accountmgmt
|__ /emailtemplates
|__ /FAQs # sub-folder with its own layout template and files for FAQs
|-- FAQ_home.html
|-- FAQ_layout.html
|__ /pages
|__ /general
|__ /neighborhood
|__ /tools
|__ /static
|__ /LandingMedia
|__ /manifest
|__ /toolimages
|-- FOO.js
|-- BAR.css
|-- other_images.png
...
|-- requirements.txt
|-- toolshare.db
|-- README.md
|-- LICENSE
|-- Procfile
application.py (main) Structure:
1- Library imports
2- Flask application setup
A- Initialize the Flask app
B- Configure the database
C- Setup AWS S3 for image storage
D- Configure email functionality
E- Webapp installation requirements
    3- Register Blueprints (app routes)
A- Main features: tools & actions
B- Neighborhood management
C- User management
4- Misc other helper functions
'''
################################################################
# ~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~-- #
# | [1] IMPORTS | #
# ~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~-- #
################################################################
import os
#for baggage
import io
import base64
import sub_modules
import datetime
import requests
#for baggage
import boto3, botocore
import qrcode
from flask import (Flask, send_from_directory, make_response,
                   render_template, redirect, request, url_for)
from flask_session import Session
from tempfile import mkdtemp
from werkzeug.exceptions import default_exceptions, HTTPException, InternalServerError
#for sending emails
from flask_mail import Mail
#import all of the helper functions from sub_modules (helpers.py, emails.py, image_mgmt.py, SQL.py)
from sub_modules import *
from sub_modules import config
#import blueprints for all of the app.routes
from views.neighborhoods import neighborhoods_bp
from views.tools_and_actions import tools_and_actions_bp
from views.users import users_bp
################################################################
# ~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~-- #
# | [2] FLASK APPLICATION SETUP | #
# ~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~-- #
################################################################
#----------------------------------------------------
# A- INITIALIZE FLASK APP
# Configure application
app = Flask(__name__)
# Ensure templates are auto-reloaded
app.config["TEMPLATES_AUTO_RELOAD"] = True
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
# Ensure responses aren't cached
@app.after_request
def after_request(response):
response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
response.headers["Expires"] = 0
response.headers["Pragma"] = "no-cache"
return response
#----------------------------------------------------
# B- CONFIGURE DATABASE
# sqlite = 1 (development)
# postgreSQL = 2 (production on Heroku)
DATABASE__TYPE = 2
try:
db = SQL.SQL_db(os.getenv("DATABASE_URL"))
print("postgreSQL database: production mode")
except:
print("UNABLE TO CONNECT TO postgreSQL DATABASE")
db = SQL.SQL_db("sqlite:///toolshare.db")
app.config["SESSION_FILE_DIR"] = mkdtemp()# <-- not used for Heroku
print("sqlite3 database: development mode")
DATABASE__TYPE = 1
# assign the database object to a config variable to be accessed by other modules
app.config['database_object'] = db
Session(app)
#----------------------------------------------------
# C- SETUP STORAGE ON S3 FOR IMAGES
# setup s3 file storage
app.config['S3_BUCKET'] = config.S3_BUCKET
app.config['S3_REGION'] = config.S3_REGION
app.config['S3_KEY'] = os.environ.get('AWS_ACCESS_KEY_ID')
app.config['S3_SECRET'] = os.environ.get('AWS_SECRET_ACCESS_KEY')
app.config['S3_LOCATION'] = 'http://{}.s3.amazonaws.com/'.format(config.S3_BUCKET)
s3 = boto3.client(
"s3",
aws_access_key_id=app.config['S3_KEY'],
aws_secret_access_key=app.config['S3_SECRET'],
region_name=app.config['S3_REGION'],
config=botocore.client.Config(signature_version='s3v4')
)
# assign the s3 object to a config variable to be accessed by other modules
app.config["s3_object"] = s3
# Used for *local* image upload
# code credit: https://roytuts.com/upload-and-display-image-using-python-flask/
UPLOAD_FOLDER = 'static/toolimages/'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024
#----------------------------------------------------
# D- CONFIGURE EMAIL FUNCTIONALITY
app.config['MAIL_SERVER'] = config.MAIL_SERVER
app.config['MAIL_PORT'] = config.MAIL_PORT
app.config['MAIL_USERNAME'] = config.MAIL_USERNAME
app.config['MAIL_PASSWORD'] = os.environ.get('MAIL_PASSWORD')
app.config['MAIL_USE_TLS'] = False
app.config['MAIL_USE_SSL'] = True
mail = Mail(app)
# set to 1 to send emails when every action happens (approve or reject)
# set to 0 to only send the required account management emails
SEND_EMAIL_ACTIONS = 0
app.config["SEND_EMAIL_ACTIONS"] = SEND_EMAIL_ACTIONS
app.config["mail_object"] = mail
#----------------------------------------------------
# E- WEB APP INSTALLATION REQUIREMENTS
@app.route('/manifest.json')
def manifest():
return send_from_directory('static/manifest', 'manifest.json')
@app.route('/sw.js')
def service_worker():
response = make_response(send_from_directory('static', 'sw.js'))
response.headers['Cache-Control'] = 'no-cache'
return response
################################################################
# ~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~-- #
# | [3] REGISTER BLUEPRINTS (routes) | #
# ~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~-- #
################################################################
#----------------------------------------------------
# A- MAIN FEATURES: TOOLS & ACTIONS
app.register_blueprint(tools_and_actions_bp)
#----------------------------------------------------
# B- NEIGHBORHOOD MANAGEMENT
app.register_blueprint(neighborhoods_bp)
#----------------------------------------------------
# C- USER MANAGEMENT
app.register_blueprint(users_bp)
#tmp. for the lugger tracker
@app.route("/found_luggage", methods=["GET", "POST"])
def found_luggage():
    '''Landing page for a scanned luggage QR code (shows owner contact info)'''
if request.method == "POST":
#confirm reCaptcha
if DATABASE__TYPE == 1:#no captcha needed
recaptcha_passed = True
else:
print("POST from the production bag website")
parameters = request.form
print(parameters)
recaptcha_passed = False
print("testing recaptcha")
recaptcha_response = parameters.get('g-recaptcha-response')
try:
recaptcha_secret = os.environ.get('RECAPTCHA_SECRET')
response = requests.post(f'https://www.google.com/recaptcha/api/siteverify?secret={recaptcha_secret}&response={recaptcha_response}').json()
recaptcha_passed = response.get('success')
except Exception as e:
print(f"failed to get reCaptcha: {e}")
return apology("reCaptcha fail...")
print(f"reCaptcha Status: {recaptcha_passed}")
if recaptcha_passed:
returnAction = request.form.get("returnAction")
longitude = request.form.get("longit")
latitude = request.form.get("latit")
bag_name = request.form.get("bag_name")
bagID = request.form.get("bagID")
ipaddress = request.form.get("ipaddress")
usermessage = request.form.get("usermessage")
print(f"The included message was: {usermessage}")
print(f"IP Address: {ipaddress}")
email_address = os.environ.get('BAG_EMAIL')
print(returnAction)
noEmail = request.form.get("noEmail")
location_shared = 0
if returnAction == "location":
maplink = "https://www.latlong.net/c/?lat=" + latitude + "&long=" + longitude
print(f"Bag location = Lo:{longitude}, La:{latitude}")
print(maplink)
location_shared = 1
#send email
print("send the location email!")
now = datetime.datetime.now()
message = f"Bag: {bag_name} scanned at {now}\n\nIP Addr: {ipaddress}\n\nLatLong={latitude}:{longitude}\n{maplink}\n\nMessage:\n{usermessage}"
if noEmail != "1":#don't send if in development mode...
sub_modules.emails.send_mail([email_address],"bag log - LOCATION!",message)
#pass
print("location mail sent")
if returnAction == "sendMessage":
print("send the message email!")
now = datetime.datetime.now()
message = f"Bag: {bag_name} scanned at {now}\n\nIP Addr: {ipaddress}\n\nMessage:\n{usermessage}"
sub_modules.emails.send_mail([email_address],"bag log - message!",message)
print(".mail sent.")
extra_url = ""
extra_url2 = ""
print(noEmail)
if noEmail == "1":
extra_url = "&noEmail=1"
if location_shared == 1:
extra_url2 = "&locshared=1"
print(extra_url2)
#if DATABASE__TYPE == 1:
return redirect(url_for('found_luggage') + f'?bagID={bagID}' + extra_url + extra_url2)
#else:
# fullURL = f"https://sharetools.tk/found_luggage?bagID={bagID}{extra_url}{extra_url2}"
# print(fullURL)
# return redirect(fullURL)
else:#reCaptcha failed...
return apology("reCaptcha fail...")
else:#GET
list_of_actual_bags = {
"10d8520f7f2246c4b246437d6e5985e7": "green_carryon",
"6851b0e7efd640b3853ea2eda21c9863": "sjs_black_checkunder",
"093bd25584754feab29938fcbd85193e": "hcs_grey_checkunder",
"0198f1b8385a4c61b116b80cb7f3eca1": "big_carryon_backpack",
"6ce2b15894c4414f88627f9cf673d273": "small_roller_carryon_black",
"8e7d5a80643843d6bc84c8eb73678d1c": "green_duffel_bag",
"25a98613f623400aa14336a47a5bae20": "sjs_volleyball_6_bag",
"80aaa20845dc403cbe17704e8c1e5776": "purple_big_checkunder"
}
bagID = request.args.get("bagID")
if bagID in list_of_actual_bags:
print("valid bag")
else:
return render_template("foundluggage.html")
bag_name = list_of_actual_bags[bagID]
s3 = app.config["s3_object"]
image_uuid_with_ext = bagID + ".jpeg"
expire_in=3600
imageURL = ""
#get the bag image
# just send the full asw filepath for now
#return "{}{}".format(app.config["S3_LOCATION"], image_uuid_with_ext) <--- delete this...
# returns the presigned url for the full-sized image
try:
imageURL = s3.generate_presigned_url('get_object',
Params={'Bucket': app.config["S3_BUCKET"],
'Key': image_uuid_with_ext},
ExpiresIn=expire_in)#seconds
except:# ClientError as e:
#logging.error(e)
e = "get_image_s3, misc error"
print("Something Happened - ImageFetchFail: ", e)
#personal details stored in environment variables
luggage_owner = os.environ.get('BAG_OWNER')
luggage_firstname = luggage_owner.split(" ")[0]
email_address = os.environ.get('BAG_EMAIL')
phone_number = os.environ.get('BAG_PHONE')
address = os.environ.get('BAG_ADDRESS')
if request.headers.getlist("X-Forwarded-For"):
print(request.headers.getlist("X-Forwarded-For"))
visiting_IP = request.headers.getlist("X-Forwarded-For")[0]
else:
visiting_IP = request.remote_addr
#send the email!
noEmail = request.args.get("noEmail")
if noEmail == "1":
print("Don't send the email")
else:
noEmail = "0"
print("send the email!")
now = datetime.datetime.now()
message = f"Bag: {bag_name} scanned at {now}\n\nIP Addr: {visiting_IP}"
sub_modules.emails.send_mail([email_address],"bag log - scan",message)
print(".mail sent.")
#user selected to share their location
locshared = request.args.get("locshared")
if locshared == "1":
#thank the user
locshared = True
pass
else:
locshared = False
return render_template("foundluggage.html", owner=luggage_owner,
firstname=luggage_firstname,
email=email_address,
phone=phone_number,
address=address,
bagID=bagID,
bag_name=bag_name,
ipaddress = visiting_IP,
imageURL = imageURL,
noEmail = noEmail,
locshared = locshared)
#tmp. for the lugger tracker
@app.route("/make_QR", methods=["GET", "POST"])
def make_QR_Code():
if request.method == "POST":
return apology("No POST allowed", 403)
else:#GET
bagID = request.args.get("bagID")
noEmail = request.args.get("noEmail")
extra_url = ""
if noEmail == "1":
extra_url = "&noEmail=1"
img = qrcode.make(f"https://www.sharetools.tk/found_luggage?bagID={bagID}{extra_url}")
data = io.BytesIO()
img.save(data, "PNG")
encoded_qr_image = base64.b64encode(data.getvalue())
#pass to template:
qrcode_data=encoded_qr_image.decode('utf-8')
return render_template("simpleqrcode_page.html", qrcode_data = qrcode_data)
################################################################
# ~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~-- #
# | [4] misc other helper functions... | #
# ~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~-- #
################################################################
def errorhandler(e):
"""Handle error"""
if not isinstance(e, HTTPException):
e = InternalServerError()
return apology(e.name, e.code)
# Listen for errors
for code in default_exceptions:
app.errorhandler(code)(errorhandler)
|
python
|
from collections import OrderedDict
from .attributes import read_attribute_dict
from .core import read_word, read_line
# non-word characters that we allow in tag names, ids and classes
DOM_OBJECT_EXTRA_CHARS = ('-',)
def read_tag(stream):
"""
Reads an element tag, e.g. span, ng-repeat, cs:dropdown
"""
part1 = read_word(stream, DOM_OBJECT_EXTRA_CHARS)
if stream.ptr < stream.length and stream.text[stream.ptr] == ':':
stream.ptr += 1
part2 = read_word(stream, DOM_OBJECT_EXTRA_CHARS)
else:
part2 = None
return (part1 + ':' + part2) if part2 else part1
def read_element(stream, compiler):
"""
Reads an element, e.g. %span, #banner{style:"width: 100px"}, .ng-hide(foo=1)
"""
assert stream.text[stream.ptr] in ('%', '.', '#')
tag = None
empty_class = False
if stream.text[stream.ptr] == '%':
stream.ptr += 1
tag = read_tag(stream)
elif stream.text[stream.ptr] == '.':
# Element may start with a period representing an unidentified div rather than a CSS class. In this case it
# can't have other classes or ids, e.g. .{foo:"bar"}
next_ch = stream.text[stream.ptr + 1] if stream.ptr < stream.length - 1 else None
if not (next_ch.isalnum() or next_ch == '_' or next_ch in DOM_OBJECT_EXTRA_CHARS):
stream.ptr += 1
empty_class = True
_id = None
classes = []
if not empty_class:
while stream.ptr < stream.length and stream.text[stream.ptr] in ('#', '.'):
is_id = stream.text[stream.ptr] == '#'
stream.ptr += 1
id_or_class = read_word(stream, DOM_OBJECT_EXTRA_CHARS)
if is_id:
_id = id_or_class
else:
classes.append(id_or_class)
attributes = OrderedDict()
while stream.ptr < stream.length and stream.text[stream.ptr] in ('{', '('):
attributes.update(read_attribute_dict(stream, compiler))
if stream.ptr < stream.length and stream.text[stream.ptr] == '>':
stream.ptr += 1
nuke_outer_ws = True
else:
nuke_outer_ws = False
if stream.ptr < stream.length and stream.text[stream.ptr] == '<':
stream.ptr += 1
nuke_inner_ws = True
else:
nuke_inner_ws = False
if stream.ptr < stream.length and stream.text[stream.ptr] == '/':
stream.ptr += 1
self_close = True
else:
self_close = tag in Element.SELF_CLOSING
if stream.ptr < stream.length and stream.text[stream.ptr] == '=':
stream.ptr += 1
django_variable = True
else:
django_variable = False
if stream.ptr < stream.length:
inline = read_line(stream)
if inline is not None:
inline = inline.strip()
else:
inline = None
return Element(tag, _id, classes, attributes, nuke_outer_ws, nuke_inner_ws, self_close, django_variable, inline)
class Element(object):
"""
An HTML element with an id, classes, attributes etc
"""
SELF_CLOSING = (
'meta', 'img', 'link', 'br', 'hr', 'input', 'source', 'track', 'area', 'base', 'col', 'command', 'embed',
'keygen', 'param', 'wbr'
)
DEFAULT_TAG = 'div'
def __init__(self, tag, _id, classes, attributes, nuke_outer_whitespace, nuke_inner_whitespace, self_close,
django_variable, inline_content):
self.tag = tag or self.DEFAULT_TAG
self.attributes = attributes
self.nuke_inner_whitespace = nuke_inner_whitespace
self.nuke_outer_whitespace = nuke_outer_whitespace
self.self_close = self_close
self.django_variable = django_variable
self.inline_content = inline_content
# merge ids from the attribute dictionary
ids = [_id] if _id else []
id_from_attrs = attributes.get('id')
if isinstance(id_from_attrs, (tuple, list)):
ids += id_from_attrs
elif isinstance(id_from_attrs, str):
ids += [id_from_attrs]
# merge ids to a single value with _ separators
self.id = '_'.join(ids) if ids else None
# merge classes from the attribute dictionary
class_from_attrs = attributes.get('class', [])
if not isinstance(class_from_attrs, (tuple, list)):
class_from_attrs = [class_from_attrs]
self.classes = class_from_attrs + classes
def render_attributes(self, options):
def attr_wrap(val):
return '%s%s%s' % (options.attr_wrapper, val, options.attr_wrapper)
rendered = []
for name, value in self.attributes.items():
if name in ('id', 'class') or value in (None, False):
# this line isn't recorded in coverage because it gets optimized away (http://bugs.python.org/issue2506)
continue # pragma: no cover
if value is True: # boolean attribute
if options.xhtml:
rendered.append("%s=%s" % (name, attr_wrap(name)))
else:
rendered.append(name)
else:
value = self._escape_attribute_quotes(value, options.attr_wrapper)
rendered.append("%s=%s" % (name, attr_wrap(value)))
if len(self.classes) > 0:
rendered.append("class=%s" % attr_wrap(" ".join(self.classes)))
if self.id:
rendered.append("id=%s" % attr_wrap(self.id))
return ' '.join(rendered)
@staticmethod
def _escape_attribute_quotes(v, attr_wrapper):
"""
Escapes quotes with a backslash, except those inside a Django tag
"""
escaped = []
inside_tag = False
for i, _ in enumerate(v):
if v[i:i + 2] == '{%':
inside_tag = True
elif v[i:i + 2] == '%}':
inside_tag = False
if v[i] == attr_wrapper and not inside_tag:
escaped.append('\\')
escaped.append(v[i])
return ''.join(escaped)
|
python
|
import scipy.misc
import scipy.io
import tensorflow as tf
from ops import *
from setting import *
def img_net(inputs, bit, numclass):
data = scipy.io.loadmat(MODEL_DIR)
layers = (
'conv1', 'relu1', 'norm1', 'pool1', 'conv2', 'relu2', 'norm2', 'pool2', 'conv3', 'relu3', 'conv4', 'relu4',
'conv5', 'relu5', 'pool5', 'fc6', 'relu6', 'fc7', 'relu7')
weights = data['layers'][0]
labnet = {}
current = tf.convert_to_tensor(inputs, dtype='float32')
for i, name in enumerate(layers):
if name.startswith('conv'):
kernels, bias = weights[i][0][0][0][0]
bias = bias.reshape(-1)
pad = weights[i][0][0][1]
stride = weights[i][0][0][4]
current = conv_layer(current, kernels, bias, pad, stride, i, labnet)
elif name.startswith('relu'):
current = tf.nn.relu(current)
elif name.startswith('pool'):
stride = weights[i][0][0][1]
pad = weights[i][0][0][2]
area = weights[i][0][0][5]
current = pool_layer(current, stride, pad, area)
elif name.startswith('fc'):
kernels, bias = weights[i][0][0][0][0]
bias = bias.reshape(-1)
current = full_conv(current, kernels, bias, i, labnet)
elif name.startswith('norm'):
current = tf.nn.local_response_normalization(current, depth_radius=2, bias=2.000, alpha=0.0001, beta=0.75)
labnet[name] = current
W_fc8 = tf.random_normal([1, 1, 4096, SEMANTIC_EMBED], stddev=1.0) * 0.01
b_fc8 = tf.random_normal([SEMANTIC_EMBED], stddev=1.0) * 0.01
w_fc8 = tf.Variable(W_fc8, name='w' + str(20))
b_fc8 = tf.Variable(b_fc8, name='bias' + str(20))
fc8 = tf.nn.conv2d(current, w_fc8, strides=[1, 1, 1, 1], padding='VALID')
fc8 = tf.nn.bias_add(fc8, b_fc8)
relu8 = tf.nn.relu(fc8)
labnet['feature'] = relu8
W_fc9 = tf.random_normal([1, 1, SEMANTIC_EMBED, bit], stddev=1.0) * 0.01
b_fc9 = tf.random_normal([bit], stddev=1.0) * 0.01
w_fc9 = tf.Variable(W_fc9, name='w' + str(21))
b_fc9 = tf.Variable(b_fc9, name='bias' + str(21))
# fc9 = tf.nn.conv2d(fc8, w_fc9, strides=[1, 1, 1, 1], padding='VALID')
fc9 = tf.nn.conv2d(relu8, w_fc9, strides=[1, 1, 1, 1], padding='VALID')
fc9 = tf.nn.bias_add(fc9, b_fc9)
labnet['hash'] = tf.nn.tanh(fc9)
W_fc10 = tf.random_normal([1, 1, SEMANTIC_EMBED, numclass], stddev=1.0) * 0.01
b_fc10 = tf.random_normal([numclass], stddev=1.0) * 0.01
w_fc10 = tf.Variable(W_fc10, name='w' + str(22))
b_fc10 = tf.Variable(b_fc10, name='bias' + str(22))
# fc10 = tf.nn.conv2d(fc8, w_fc10, strides=[1, 1, 1, 1], padding='VALID')
fc10 = tf.nn.conv2d(relu8, w_fc10, strides=[1, 1, 1, 1], padding='VALID')
fc10 = tf.nn.bias_add(fc10, b_fc10)
labnet['label'] = tf.nn.sigmoid(fc10)
return tf.squeeze(labnet['hash']), tf.squeeze(labnet['feature']), tf.squeeze(labnet['label'])
def lab_net(imput_label, bit, numClass):
LAYER1_NODE = 4096
labnet = {}
W_fc1 = tf.random_normal([1, numClass, 1, LAYER1_NODE], stddev=1.0) * 0.01
b_fc1 = tf.random_normal([1, LAYER1_NODE], stddev=1.0) * 0.01
labnet['fc1W'] = tf.Variable(W_fc1)
labnet['fc1b'] = tf.Variable(b_fc1)
labnet['conv1'] = tf.nn.conv2d(imput_label, labnet['fc1W'], strides=[1, 1, 1, 1], padding='VALID')
W1_plus_b1 = tf.nn.bias_add(labnet['conv1'], tf.squeeze(labnet['fc1b']))
relu1 = tf.nn.relu(W1_plus_b1)
norm1 = tf.nn.local_response_normalization(relu1, depth_radius=2, bias=2.000, alpha=0.0001, beta=0.75)
W_fc2 = tf.random_normal([1, 1, LAYER1_NODE, SEMANTIC_EMBED], stddev=1.0) * 0.01
b_fc2 = tf.random_normal([1, SEMANTIC_EMBED], stddev=1.0) * 0.01
labnet['fc2W'] = tf.Variable(W_fc2)
labnet['fc2b'] = tf.Variable(b_fc2)
labnet['conv2'] = tf.nn.conv2d(norm1, labnet['fc2W'], strides=[1, 1, 1, 1], padding='VALID')
fc2 = tf.nn.bias_add(labnet['conv2'], tf.squeeze(labnet['fc2b']))
relu2 = tf.nn.relu(fc2)
labnet['feature'] = relu2
#norm2 = tf.nn.local_response_normalization(relu2, depth_radius=2, bias=2.000, alpha=0.0001, beta=0.75)
W_fc3 = tf.random_normal([1, 1, SEMANTIC_EMBED, bit], stddev=1.0) * 0.01
b_fc3 = tf.random_normal([1, bit], stddev=1.0) * 0.01
labnet['fc3W'] = tf.Variable(W_fc3)
labnet['fc3b'] = tf.Variable(b_fc3)
labnet['conv3'] = tf.nn.conv2d(relu2, labnet['fc3W'], strides=[1, 1, 1, 1], padding='VALID')
output_h = tf.nn.bias_add(labnet['conv3'], tf.squeeze(labnet['fc3b']))
labnet['hash'] = tf.nn.tanh(output_h)
W_fc4 = tf.random_normal([1, 1, SEMANTIC_EMBED, numClass], stddev=1.0) * 0.01
b_fc4 = tf.random_normal([1, numClass], stddev=1.0) * 0.01
labnet['fc4W'] = tf.Variable(W_fc4)
labnet['fc4b'] = tf.Variable(b_fc4)
labnet['conv4'] = tf.nn.conv2d(relu2, labnet['fc4W'], strides=[1, 1, 1, 1], padding='VALID')
label_ = tf.nn.bias_add(labnet['conv4'], tf.squeeze(labnet['fc4b']))
labnet['label'] = tf.nn.sigmoid(label_)
return tf.squeeze(labnet['hash']), tf.squeeze(labnet['feature']), tf.squeeze(labnet['label'])
def dis_net_IL(feature, keep_prob, reuse=False, name="disnet_IL"):
with tf.variable_scope(name):
if reuse:
tf.get_variable_scope().reuse_variables()
else:
assert tf.get_variable_scope().reuse is False
disnet = {}
relu1 = relu(conv2d(feature, [1,SEMANTIC_EMBED,1,512], [1,1,1,1], 'VALID', 1.0, "disnet_IL_fc1"))
dropout1 = tf.nn.dropout(relu1, keep_prob)
relu2 = relu(conv2d(dropout1, [1,1,512,256], [1,1,1,1], 'VALID', 1.0, "disnet_IL_fc2"))
dropout2 = tf.nn.dropout(relu2, keep_prob)
disnet['output'] = conv2d(dropout2, [1, 1, 256, 1], [1, 1, 1, 1], 'VALID', 1.0, "disnet_IL_out")
# relu1 = relu(batch_norm(conv2d(feature, [1, 1, SEMANTIC_EMBED, 512], [1, 1, 1, 1], 'VALID', 1.0, "disnet_IL_fc1")))
# dropout1 = tf.nn.dropout(relu1, keep_prob)
# relu2 = relu(batch_norm(conv2d(dropout1, [1, 1, 512, 256], [1, 1, 1, 1], 'VALID', 1.0, "disnet_IL_fc2")))
# dropout2 = tf.nn.dropout(relu2, keep_prob)
# disnet['output'] = conv2d(dropout2, [1, 1, 256, 1], [1, 1, 1, 1], 'VALID', 1.0, "disnet_IL_out")
return tf.squeeze(disnet['output'])
def dis_net_TL(feature, keep_prob, reuse=False, name="disnet_TL"):
with tf.variable_scope(name):
if reuse:
tf.get_variable_scope().reuse_variables()
else:
assert tf.get_variable_scope().reuse is False
disnet = {}
relu1 = relu(conv2d(feature, [1, SEMANTIC_EMBED, 1, 512], [1, 1, 1, 1], 'VALID', 1.0, "disnet_TL_fc1"))
dropout1 = tf.nn.dropout(relu1, keep_prob)
relu2 = relu(conv2d(dropout1, [1, 1, 512, 256], [1, 1, 1, 1], 'VALID', 1.0, "disnet_TL_fc2"))
dropout2 = tf.nn.dropout(relu2, keep_prob)
disnet['output'] = conv2d(dropout2, [1, 1, 256, 1], [1, 1, 1, 1], 'VALID', 1.0, "disnet_TL_out")
# relu1 = relu(batch_norm(conv2d(feature, [1, 1, SEMANTIC_EMBED, 512], [1, 1, 1, 1], 'VALID', 1.0, "disnet_TL_fc1")))
# dropout1 = tf.nn.dropout(relu1, keep_prob)
# relu2 = relu(batch_norm(conv2d(dropout1, [1, 1, 512, 256], [1, 1, 1, 1], 'VALID', 1.0, "disnet_TL_fc2")))
# dropout2 = tf.nn.dropout(relu2, keep_prob)
# disnet['output'] = conv2d(dropout2, [1, 1, 256, 1], [1, 1, 1, 1], 'VALID', 1.0, "disnet_TL_out")
return tf.squeeze(disnet['output'])
def txt_net(text_input, dimy, bit, numclass):
txtnet={}
MultiScal = MultiScaleTxt(text_input)
W_fc1 = tf.random_normal([1, dimy, 6, 4096], stddev=1.0) * 0.01
b_fc1 = tf.random_normal([1, 4096], stddev=1.0) * 0.01
fc1W = tf.Variable(W_fc1)
fc1b = tf.Variable(b_fc1)
txtnet['conv1'] = tf.nn.conv2d(MultiScal, fc1W, strides=[1, 1, 1, 1], padding='VALID')
W1_plus_b1 = tf.nn.bias_add(txtnet['conv1'], tf.squeeze(fc1b))
txtnet['fc1'] = tf.nn.relu(W1_plus_b1)
txtnet['norm1'] = tf.nn.local_response_normalization(txtnet['fc1'], depth_radius=2, bias=2.000, alpha=0.0001, beta=0.75)
W_fc2 = tf.random_normal([1, 1, 4096, SEMANTIC_EMBED], stddev=1.0) * 0.01
b_fc2 = tf.random_normal([1, SEMANTIC_EMBED], stddev=1.0) * 0.01
fc2W = tf.Variable(W_fc2)
fc2b = tf.Variable(b_fc2)
txtnet['conv2'] = tf.nn.conv2d(txtnet['norm1'], fc2W, strides=[1, 1, 1, 1], padding='VALID')
W2_plus_b2 = tf.nn.bias_add(txtnet['conv2'], tf.squeeze(fc2b))
relu2 = tf.nn.relu(W2_plus_b2)
txtnet['feature'] = relu2
txtnet['norm2'] = tf.nn.local_response_normalization(relu2, depth_radius=2, bias=2.000, alpha=0.0001, beta=0.75)
W_fc3 = tf.random_normal([1, 1, SEMANTIC_EMBED, bit], stddev=1.0) * 0.01
b_fc3 = tf.random_normal([bit], stddev=1.0) * 0.01
fc3W = tf.Variable(W_fc3)
fc3b = tf.Variable(b_fc3)
txtnet['conv3'] = tf.nn.conv2d(txtnet['norm2'], fc3W, strides=[1, 1, 1, 1], padding='VALID')
W3_plus_b3 = tf.nn.bias_add(txtnet['conv3'], tf.squeeze(fc3b))
txtnet['hash'] = tf.nn.tanh(W3_plus_b3)
W_fc4 = tf.random_normal([1, 1, SEMANTIC_EMBED, numclass], stddev=1.0) * 0.01
b_fc4 = tf.random_normal([numclass], stddev=1.0) * 0.01
fc4W = tf.Variable(W_fc4)
fc4b = tf.Variable(b_fc4)
txtnet['conv4'] = tf.nn.conv2d(txtnet['norm2'], fc4W, strides=[1, 1, 1, 1], padding='VALID')
W4_plus_b4 = tf.nn.bias_add(txtnet['conv4'], tf.squeeze(fc4b))
txtnet['label'] = tf.nn.sigmoid(W4_plus_b4)
return tf.squeeze(txtnet['hash']), tf.squeeze(txtnet['feature']), tf.squeeze(txtnet['label'])
def interp_block(text_input, level):
shape = [1, 1, 5 * level, 1]
stride = [1, 1, 5 * level, 1]
prev_layer = tf.nn.avg_pool(text_input, ksize=shape, strides=stride, padding='VALID')
W_fc1 = tf.random_normal([1, 1, 1, 1], stddev=1.0) * 0.01
fc1W = tf.Variable(W_fc1)
prev_layer = tf.nn.conv2d(prev_layer, fc1W, strides=[1, 1, 1, 1], padding='VALID')
prev_layer = tf.nn.relu(prev_layer)
prev_layer = tf.image.resize_images(prev_layer, [1, dimTxt])
return prev_layer
def MultiScaleTxt(input):
interp_block1 = interp_block(input, 10)
interp_block2 = interp_block(input, 6)
interp_block3 = interp_block(input, 3)
interp_block6 = interp_block(input, 2)
interp_block10 = interp_block(input, 1)
output = tf.concat([input,
interp_block10,
interp_block6,
interp_block3,
interp_block2,
interp_block1], axis = -1)
return output
|
python
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2018 CERN.
#
# invenio-app-ils is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Search utilities."""
from invenio_search.api import RecordsSearch
class DocumentSearch(RecordsSearch):
"""RecordsSearch for documents."""
class Meta:
"""Search only on documents index."""
index = "documents"
doc_types = None
class ItemSearch(RecordsSearch):
"""RecordsSearch for items."""
class Meta:
"""Search only on items index."""
index = "items"
doc_types = None
def search_by_document_pid(self, document_pid=None):
"""Retrieve items based on the given document pid."""
search = self
if document_pid:
search = search.filter("term", document_pid=document_pid)
else:
raise ValueError("Must specify document_pid param")
return search
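# A minimal usage sketch (assumes a running Elasticsearch instance with a
# populated "items" index; the pid value and field access are hypothetical):
#
#   results = ItemSearch().search_by_document_pid("docpid-1").execute()
#   for hit in results:
#       print(hit.document_pid)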
class LocationSearch(RecordsSearch):
"""RecordsSearch for locations."""
class Meta:
"""Search only on locations index."""
index = "locations"
doc_types = None
class InternalLocationSearch(RecordsSearch):
"""RecordsSearch for internal locations."""
class Meta:
"""Search only on internal locations index."""
index = "internal_locations"
doc_types = None
|
python
|
#############################################################################
#
# VFRAME
# MIT License
# Copyright (c) 2020 Adam Harvey and VFRAME
# https://vframe.io
#
#############################################################################
import click
@click.command('')
@click.option('-i', '--input', 'opt_inputs', required=True,
multiple=True,
help="Input files to merge")
@click.option('-o', '--output', 'opt_output', required=True,
help='Output file')
@click.option('--minify', 'opt_minify', is_flag=True,
default=False,
help='Minify JSON')
@click.option('--replace-path', 'opt_replace_path',
help="Replace file parent path")
@click.pass_context
def cli(ctx, opt_inputs, opt_output, opt_replace_path, opt_minify):
"""Merge JSON detections"""
# ------------------------------------------------
# imports
from os.path import join
from pathlib import Path
from tqdm import tqdm
from vframe.utils import file_utils
from vframe.settings import app_cfg
# ------------------------------------------------
# start
log = app_cfg.LOG
  # cumulative merged detections, keyed by filepath
merge_results = {}
# merge
for fp_in in tqdm(opt_inputs, desc='Files'):
# load json
log.debug(f'load: {fp_in}')
detection_results = file_utils.load_json(fp_in)
# add all the current detections to cumulative detections
for detection_result in detection_results:
      # replace the parent path of this item's filepath (if requested)
if opt_replace_path is not None:
detection_result['filepath'] = join(opt_replace_path, Path(detection_result['filepath']).name)
filepath = detection_result['filepath']
if not filepath in merge_results.keys():
merge_results[filepath] = {'filepath': filepath}
for frame_idx, frame_data in detection_result['frames_data'].items():
if not 'frames_data' in merge_results[filepath].keys():
merge_results[filepath]['frames_data'] = {}
if not frame_idx in merge_results[filepath]['frames_data'].keys():
merge_results[filepath]['frames_data'][frame_idx] = {}
for model_name, model_results in frame_data.items():
merge_results[filepath]['frames_data'][frame_idx][model_name] = model_results
# write
results_out = list(merge_results.values())
file_utils.write_json(results_out, opt_output, minify=opt_minify)
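# Shape of the merged output written above (inferred from the keys used in
# this file; the actual field contents depend on the upstream detection JSON):
#
#   [
#     {
#       "filepath": "/replaced/path/video.mp4",        # hypothetical path
#       "frames_data": {
#         "0": {"modelA": {...}, "modelB": {...}},      # per-frame, per-model
#         "1": {"modelA": {...}}
#       }
#     },
#     ...
#   ]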
|
python
|
# This model is adapted from keras/examples/lstm_text_generation.py and differs from the one described in Andrew Ng's course.
# Andrew Ng's model trains an RNN that takes one word at a time as input and predicts the most likely next word as output.
# This model instead builds a supervised learning problem from the corpus: the current letter is taken as x and the next letter as y.
# Each name is currently padded to equal length with '\n', which may slightly affect the model's quality.
from os import path
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.layers import Embedding
from keras.layers import LSTM
from keras.preprocessing.text import one_hot
import numpy as np
# from keras.preprocessing.sequence import pad_sequences
# The built-in helper cannot be used here, because its split argument does not accept ''
# one = one_hot("t e x t", 27, lower=True, split=' ')
# print(one)
# Read the training set
def get_train_data():
dataset_path = path.join(path.dirname(__file__), "dataset.csv")
with open(dataset_path, "r") as f:
dataset = f.read()
chars = list(set(dataset.lower()))
dataset = dataset.split('\n')
    # Find the longest name
    maxlen = len(max(dataset, key=len))
    # Pad names shorter than maxlen with '\n'
dataset = [item.ljust(maxlen, '\n') for item in dataset]
return dataset, chars, maxlen
dataset, chars, maxlen = get_train_data()
vocab_size = len(chars)
print(f'There are {len(dataset)} total names and {len(chars)} unique characters in your data.')
# embedding (character <-> index mappings)
char_to_ix = {ch: i for i, ch in enumerate(sorted(chars))}
ix_to_char = {i: ch for i, ch in enumerate(sorted(chars))}
# print(char_to_ix)
# print(ix_to_char)
# def word_to_one_hot(word):
# # Convert a word to a list of one-hot vectors
# one_hot = []
# for w in word:
# zeros = np.zeros((vocab_size, 1))
# zeros[char_to_ix[w]] = 1
# one_hot.append(zeros)
# return one_hot
# def one_hot_to_word(one_hot):
# # Convert one-hot vectors back to a word
# word = ''
# for one in one_hot:
# # find the word index
# index = 0
# for i in one:
# if i[0] == 1:
# word += ix_to_char[index]
# index += 1
# return word
# print(word_to_one_hot("text"))
# print(one_hot_to_word(word_to_one_hot("text")))
# build model
def build():
model = Sequential()
# model.add(Embedding(len(chars) + 1, 64, input_length=10))
    # The model would take as input an integer matrix of size (batch, input_length).
    # The largest integer (i.e. word index) in the input should be no larger than 999 (the vocabulary size).
    # Then model.output_shape == (None, 10, 64), where None is the batch dimension.
model.add(LSTM(128))
# model.add(Dropout(0.5))
# model.add(Dense(1, activation='sigmoid'))
# model.compile(loss='binary_crossentropy',
# optimizer='rmsprop',
# metrics=['accuracy'])
# model.fit(x_train, y_train, batch_size=16, epochs=10)
# score = model.evaluate(x_test, y_test, batch_size=16)
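# A minimal sketch (an assumption, not the author's final model) of how the
# supervised pairs described at the top of this file could be built and fed to
# an LSTM: x is the current letter and y is the next letter, both one-hot
# encoded with the char_to_ix mapping defined above.
def build_training_pairs(names):
    xs, ys = [], []
    for name in names:
        name = name.lower()  # chars/char_to_ix were built from lowered text
        for cur_ch, next_ch in zip(name[:-1], name[1:]):
            x = np.zeros((1, vocab_size))
            x[0, char_to_ix[cur_ch]] = 1
            y = np.zeros(vocab_size)
            y[char_to_ix[next_ch]] = 1
            xs.append(x)
            ys.append(y)
    return np.array(xs), np.array(ys)
def build_sketch():
    model = Sequential()
    # one timestep per sample: the current letter as a one-hot vector
    model.add(LSTM(128, input_shape=(1, vocab_size)))
    model.add(Dense(vocab_size, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    return model
# usage sketch:
#   x_train, y_train = build_training_pairs(dataset)
#   model = build_sketch()
#   model.fit(x_train, y_train, batch_size=64, epochs=10)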
|
python
|
import tensorflow as tf
initializer = tf.keras.initializers.HeNormal()
regularizer = tf.keras.regularizers.L1(l1=.001)
inputs = tf.keras.Input(shape=(8,8,19))
filters = 32
x = tf.keras.layers.Conv2D(filters,(3,3),padding='same',kernel_regularizer=regularizer, bias_regularizer=regularizer, kernel_initializer=initializer)(inputs)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.ReLU()(x)
x = tf.keras.layers.Conv2D(filters,(3,3),padding='same',kernel_regularizer=regularizer, bias_regularizer=regularizer, kernel_initializer=initializer)(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.ReLU()(x)
x = tf.keras.layers.Conv2D(filters,(3,3),padding='same',kernel_regularizer=regularizer, bias_regularizer=regularizer, kernel_initializer=initializer)(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.ReLU()(x)
x = tf.keras.layers.Conv2D(1,(1,1),padding='same',kernel_regularizer=regularizer, bias_regularizer=regularizer, kernel_initializer=initializer)(x)
x = tf.keras.layers.Flatten()(x)
outputs = tf.keras.layers.Dense(3,kernel_regularizer=regularizer, bias_regularizer=regularizer, kernel_initializer=initializer)(x)
model = tf.keras.Model(inputs=inputs,outputs=outputs)
model.compile(optimizer=tf.keras.optimizers.Adadelta(learning_rate=.1),loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True), metrics=[tf.keras.metrics.CategoricalAccuracy()])
model.summary()
model.save('current_model.h5')
|
python
|
from reportlab.lib.testutils import setOutDir,makeSuiteForClasses, outputfile, printLocation
setOutDir(__name__)
import os,unittest
from reportlab.platypus import Spacer, SimpleDocTemplate, Table, TableStyle, LongTable
from reportlab.platypus.doctemplate import PageAccumulator
from reportlab.platypus.paragraph import Paragraph
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.units import inch, cm
from reportlab.lib.utils import simpleSplit
from reportlab.lib import colors
styleSheet = getSampleStyleSheet()
class MyPageAccumulator(PageAccumulator):
def pageEndAction(self,canv,doc):
L42 = [x[0] for x in self.data if not x[0]%42]
L13 = [x[0] for x in self.data if not x[0]%13]
if L42 and L13:
s = 'Saw multiples of 13 and 42'
elif L13:
s = 'Saw multiples of 13'
elif L42:
s = 'Saw multiples of 42'
else:
return
canv.saveState()
canv.setFillColor(colors.purple)
canv.setFont("Helvetica",6)
canv.drawString(1*inch,1*inch,s)
canv.restoreState()
PA = MyPageAccumulator('_42_divides')
class MyDocTemplate(SimpleDocTemplate):
def beforeDocument(self):
for pt in self.pageTemplates:
PA.attachToPageTemplate(pt)
def textAccum2():
doc = MyDocTemplate(outputfile('test_platypus_accum2.pdf'),
pagesize=(8.5*inch, 11*inch), showBoundary=1)
story=[]
story.append(Paragraph("A table with 500 rows", styleSheet['BodyText']))
sty = [ ('GRID',(0,0),(-1,-1),1,colors.green),
('BOX',(0,0),(-1,-1),2,colors.red),
('FONTNAME',(0,0),(-1,-1),'Helvetica'),
('FONTSIZE',(0,0),(-1,-1),10),
]
def myCV(s,fontName='Helvetica',fontSize=10,maxWidth=72):
return '\n'.join(simpleSplit(s,fontName,fontSize,maxWidth))
data = [[PA.onDrawStr(str(i+1),i+1),
myCV("xx "* (i%10),maxWidth=100-12),
myCV("blah "*(i%40),maxWidth=200-12)]
for i in range(500)]
t=LongTable(data, style=sty, colWidths = [50,100,200])
story.append(t)
doc.build(story)
def textAccum1():
doc = MyDocTemplate(outputfile('test_platypus_accum1.pdf'),
pagesize=(8.5*inch, 11*inch), showBoundary=1)
story=[]
story.append(Paragraph("A table with 500 rows", styleSheet['BodyText']))
sty = [ ('GRID',(0,0),(-1,-1),1,colors.green),
('BOX',(0,0),(-1,-1),2,colors.red),
]
data = [[str(i+1), Paragraph("xx "* (i%10),
styleSheet["BodyText"]),
Paragraph(("blah "*(i%40))+PA.onDrawText(i+1), styleSheet["BodyText"])]
for i in range(500)]
t=LongTable(data, style=sty, colWidths = [50,100,200])
story.append(t)
doc.build(story)
class TablesTestCase(unittest.TestCase):
"Make documents with tables"
def test1(self):
textAccum1()
def test2(self):
textAccum2()
def makeSuite():
return makeSuiteForClasses(TablesTestCase)
#noruntests
if __name__ == "__main__":
unittest.TextTestRunner().run(makeSuite())
printLocation()
|
python
|
from qgis.PyQt.QtCore import Qt, QTimer
from qgis.PyQt.QtWidgets import QWidget, QVBoxLayout, QHBoxLayout, QLabel, QPushButton
from qgis.core import QgsProject, QgsRectangle, QgsWkbTypes, QgsPointXY
from qgis.gui import QgsMapToolEmitPoint, QgsRubberBand, QgsMapTool
# note: this script is meant to run in the QGIS Python console, where `iface` is predefined
POLLING_RATE_MS = 250
class WindowShow(QWidget):
def __init__(self, mode='single_picture'):
super().__init__()
self.initUI()
self._button_counter = 0
def initUI(self):
vbox = QVBoxLayout()
hbox_text = QHBoxLayout()
self.text_lbl = QLabel()
self.text_lbl.setAlignment(Qt.AlignTop)
hbox_text.addWidget(self.text_lbl)
hbox_button = QHBoxLayout()
button = QPushButton('press me')
button.clicked.connect(self.add_counter_button_pressed)
hbox_button.addWidget(button)
vbox.addLayout(hbox_text)
vbox.addLayout(hbox_button)
self.setLayout(vbox)
self.move(400, 300)
self.setWindowTitle('Picture ... ')
self.show()
@property
def button_counter(self):
return self._button_counter
def show_text(self):
self.text_lbl.setText('Something more interesting ...')
def add_counter_button_pressed(self):
self._button_counter += 1
class SelectRectangleMapTool(QgsMapToolEmitPoint):
def __init__(self, canvas):
self.canvas = canvas
QgsMapToolEmitPoint.__init__(self, self.canvas)
self.rubberBand = QgsRubberBand(self.canvas, True)
self.rubberBand.setColor(Qt.blue)
self.rubberBand.setFillColor(Qt.transparent)
self.rubberBand.setWidth(2)
self.timer_poll_id = QTimer()
self.timer_poll_id.timeout.connect(self.call_button_counter)
self.reset()
def reset(self):
        self.start_point = self.end_point = None
self.isEmittingPoint = False
self.rubberBand.reset(True)
self.timer_poll_id.stop()
self.window_show = None
self.counter = 0
def canvasPressEvent(self, e):
self.reset()
self.start_point = self.toMapCoordinates(e.pos())
self.end_point = self.start_point
self.isEmittingPoint = True
def canvasReleaseEvent(self, e):
self.isEmittingPoint = False
self.show_rect(self.start_point, self.end_point)
self.window_show = WindowShow()
self.window_show.show_text()
self.counter = 0
self.timer_poll_id.start(POLLING_RATE_MS)
def canvasMoveEvent(self, e):
if not self.isEmittingPoint:
return
self.end_point = self.toMapCoordinates(e.pos())
self.show_rect(self.start_point, self.end_point)
def show_rect(self, start_point, end_point):
self.rubberBand.reset(QgsWkbTypes.PolygonGeometry)
if start_point.x() == end_point.x() or start_point.y() == end_point.y():
return
self.rubberBand.addPoint(QgsPointXY(start_point.x(), start_point.y()), False)
self.rubberBand.addPoint(QgsPointXY(start_point.x(), end_point.y()), False)
self.rubberBand.addPoint(QgsPointXY(end_point.x(), end_point.y()), False)
self.rubberBand.addPoint(QgsPointXY(end_point.x(), start_point.y()), True)
self.rubberBand.show()
def call_button_counter(self):
if not self.window_show:
return
new_counter = self.window_show.button_counter
if new_counter != self.counter:
self.counter = new_counter
print(f'Button pressed in WindowShow: {self.counter}')
else:
return
def deactivate(self):
self.reset()
QgsMapTool.deactivate(self)
self.deactivated.emit()
canvas = iface.mapCanvas()
select_pic = SelectRectangleMapTool(canvas)
canvas.setMapTool(select_pic)
|
python
|
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
from modelscript.metamodels.classes.associations import opposite
from modelscript.metamodels.objects import PackagableElement, Entity
from modelscript.base.exceptions import (
UnexpectedCase,
MethodToBeDefined)
class Link(PackagableElement, Entity, metaclass=ABCMeta):
def __init__(self,
model, association,
sourceObject, targetObject,
name=None,
package=None,
step=None,
astNode=None, lineNo=None,
description=None):
#type: (ObjectModel, Union[Association, Placeholder], Object, Object, Optional[Text], Optional[Package], Optional['Step'],Optional['ASTNode'], Optional[int], Optional[TextBlock]) -> None
PackagableElement.__init__(
self,
model=model,
name=name,
package=package,
step=step,
astNode=astNode,
lineNo=lineNo,
description=description
)
Entity.__init__(self)
self.association=association
#type: association
self.sourceObject = sourceObject
# type: Object
self.targetObject = targetObject
# type: Object
# Singleton-like link roles to allow direct comparison
# of link role instances. (see linkRole method)
self._linkRole=OrderedDict()
self._linkRole['source']=LinkRole(self, 'source')
self._linkRole['target']=LinkRole(self, 'target')
@abstractmethod
def isPlainLink(self):
# just used to prevent creating object of this class
# (ABCMeta is not enough)
raise MethodToBeDefined( #raise:OK
'method isPlainLink() is not defined.'
)
def object(self, position):
        #type: (RolePosition) -> Object
if position=='source':
return self.sourceObject
elif position=='target':
return self.targetObject
else:
raise UnexpectedCase( #raise:OK
'role position "%s" is not implemented' % position)
def linkRole(self, position):
return self._linkRole[position]
def __str__(self):
return '(%s,%s,%s)' % (
self.sourceObject.name,
self.association.name,
self.targetObject.name
)
class LinkRole(object):
def __init__(self, link, position):
self.link=link
self.position=position
@property
def object(self):
return self.link.object(self.position)
@property
def association(self):
return self.link.association
@property
def role(self):
return self.link.association.role(self.position)
@property
def roleType(self):
return self.role.type
@property
def objectType(self):
return self.object.class_
@property
def opposite(self):
return self.link.linkRole(opposite(self.position))
def __str__(self):
if self.position=='source':
return '([[%s]],%s,%s)' % (
self.link.sourceObject.name,
self.association.name,
self.link.targetObject.name
)
elif self.position=='target':
return '(%s,%s,[[%s]])' % (
self.link.sourceObject.name,
self.association.name,
self.link.targetObject.name
)
else:
raise UnexpectedCase( #raise:OK
'Unexpected position: %s' % self.position)
class PlainLink(Link):
def __init__(self,
model, association,
sourceObject, targetObject,
name=None,
package=None,
step=None,
astNode=None, lineNo=None,
description=None):
#type: (ObjectModel, Union[Association, Placeholder], Object, Object, Optional[Text], Optional[Package], Optional['Step'], Optional['ASTNode'], Optional[int], Optional[TextBlock]) -> None
super(PlainLink, self).__init__(
model=model,
association=association,
sourceObject=sourceObject,
targetObject=targetObject,
name=name,
package=package,
step=step,
astNode=astNode,
lineNo=lineNo,
description=description
)
model._plainLinks.append(self)
def isPlainLink(self):
return True
# def delete(self):
# self.state.links=[l for l in self.state.links if l != self]
|
python
|
# coding: utf-8
from atomate.vasp.config import ADD_WF_METADATA
from atomate.vasp.powerups import (
add_wf_metadata,
add_common_powerups,
)
from atomate.vasp.workflows.base.core import get_wf
__author__ = "Ryan Kingsbury, Shyam Dwaraknath, Anubhav Jain"
__email__ = "[email protected], [email protected], [email protected]"
def wf_scan_opt(structure, c=None):
"""
Structure optimization using the SCAN metaGGA functional.
    This workflow performs a 2-step optimization. The first step
is a GGA structure optimization using the PBESol functional that serves to
precondition the geometry and charge density. The second step is a
SCAN structure optimization.
The first optimization is force converged with EDIFFG = -0.05,
and the second optimization is force converged with EDIFFG=-0.02.
The bandgap from the first step is used to update the KSPACING parameter,
which sets the appropriate number of k-points for the subsequent SCAN
calculation.
"""
c = c or {}
vasp_input_set_params = {}
if c.get("USER_INCAR_SETTINGS"):
vasp_input_set_params["user_incar_settings"] = c.get("USER_INCAR_SETTINGS")
if c.get("vdw"):
vasp_input_set_params["vdw"] = c.get("vdw")
if c.get("bandgap"):
vasp_input_set_params["bandgap"] = c.get("bandgap")
wf = get_wf(
structure,
"SCAN_optimization.yaml",
common_params={"vasp_input_set_params": vasp_input_set_params}
)
wf = add_common_powerups(wf, c)
if c.get("ADD_WF_METADATA", ADD_WF_METADATA):
wf = add_wf_metadata(wf, structure)
return wf
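# A minimal usage sketch (assumes pymatgen is installed and a POSCAR file is
# available at the hypothetical path below; the INCAR override is illustrative):
#
#   from pymatgen.core import Structure
#   structure = Structure.from_file("POSCAR")
#   wf = wf_scan_opt(structure, c={"USER_INCAR_SETTINGS": {"NCORE": 4}})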
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import requests
ID = 'id'
NAME = 'nombre'
PROV = 'provincia'
PROV_ID = 'provincia_id'
PROV_NAM = 'provincia_nombre'
DEPT = 'departamento'
DEPT_ID = 'departamento_id'
DEPT_NAM = 'departamento_nombre'
MUN = 'municipio'
MUN_ID = 'municipio_id'
MUN_NAM = 'municipio_nombre'
LOC = 'localidad'
LAT = 'centroide_lat'
LON = 'centroide_lon'
class GeorefWrapper:
"""Interfaz para la API REST de Georef."""
def __init__(self):
self.url = "http://apis.datos.gob.ar/georef/api/"
self.max_bulk_len = 5000
def search_province(self, data):
entity = 'provincias'
return self._get_response(entity, data)
def search_departament(self, data):
entity = 'departamentos'
return self._get_response(entity, data)
def search_municipality(self, data):
entity = 'municipios'
return self._get_response(entity, data)
def search_locality(self, data):
entity = 'localidades'
return self._get_response(entity, data)
def _get_response(self, entity, data):
result = []
result_partial = []
data_len = len([i for i in data[entity] if i])
resource = self.url + entity
        # Check whether the payload needs to be split into chunks before sending
if data_len > self.max_bulk_len:
data = self._getrows_byslice(
entity, data[entity], self.max_bulk_len)
else:
data = [data]
for row in data:
r = requests.post(resource, json=row)
if 'resultados' in r.content.decode('utf8'):
result_partial.append(json.loads(r.content)['resultados'])
else:
error = self._get_first_error(json.loads(r.content)['errores'])
return {'error': error}
for row in result_partial:
for v in row:
if v[entity]:
result.append({entity: [v[entity][0]]})
else:
result.append({entity: []})
return result
@staticmethod
def _getrows_byslice(entity, seq, rowlen):
data_slice = []
for start in range(0, len(seq), rowlen):
data_slice.append({entity: seq[start:start + rowlen]})
return data_slice
@staticmethod
def _get_first_error(result):
idx = next(i for i, j in enumerate(result) if j)
return result[idx]
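# A minimal usage sketch (queries the public Georef API; the bulk payload
# format and the province name are illustrative assumptions, with "nombre"
# taken from the NAME constant defined above):
#
#   wrapper = GeorefWrapper()
#   result = wrapper.search_province({"provincias": [{"nombre": "Cordoba"}]})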
|
python
|
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import luigi
from servicecatalog_puppet.workflow.tag_policies import tag_policies_base_task
from servicecatalog_puppet.workflow.tag_policies import execute_tag_policies_task
from servicecatalog_puppet.workflow.manifest import manifest_mixin
class TagPoliciesForTask(
tag_policies_base_task.TagPoliciesBaseTask, manifest_mixin.ManifestMixen,
):
tag_policies_name = luigi.Parameter()
puppet_account_id = luigi.Parameter()
def params_for_results_display(self):
return {
"puppet_account_id": self.puppet_account_id,
"tag_policies_name": self.tag_policies_name,
"cache_invalidator": self.cache_invalidator,
}
def get_klass_for_provisioning(self):
return execute_tag_policies_task.ExecuteTagPoliciesTask
def run(self):
self.write_output(self.params_for_results_display())
|
python
|
import os
from os import path
from imageio import imread
from konlpy.tag import Hannanum
from wordcloud import WordCloud, ImageColorGenerator
"""This code is to generate and to plot a wordcloud in Korean version.
Of course it is possible to generate a simple wordcloud with the original codes. However
due to the major difference with English and complexity, the result from the original codes will not
be as perfect as we expected.
The major difference between English and Korean(Hangul) is that English words can be devided by space(' ')
while Korean words cannot be divided by space. To make a Korean sentence, every single noun has to be combined with
articles without space(ex. I am --> 나는, 나:I 는:am).
For this reason, even though the text want to say 'I' in every appearance as '나는','나를', '나에게',
the original codes will separate these words as a different meaning and a different word.
'"""
"""To implement the codes, you must install konlpy package which is a module for natural language processing for Korean.
It provides a function with separating the main words and articles, and only extract the main words."""
"""So don't forget to install konlpy package!"""
# get data directory (using getcwd() is needed to support running example in generated IPython notebook)
d = path.dirname(__file__) if "__file__" in locals() else os.getcwd()
#read the color image used as the wordcloud mask
back_coloring = imread(path.join(d, d + '/word_cloud/kor_text/image/나뭇잎.jpg'))
#get the path of Korean_fonts otf file
font_path = d + '/word_cloud/examples/fonts/NotoSansKR/NotoSansKR-Black.otf'
def listToString(list1):
    separator = " "  # join the nouns with a space so WordCloud can split them
    return separator.join(list1)
def get_string(path):
f = open(path, "r", encoding="utf-8")
sample = f.read()
f.close()
h = Hannanum()
list_nouns = h.nouns(sample) #get list of nouns from sample
return listToString(list_nouns) #get string of list_nouns
path = d + '/word_cloud/kor_text/황순원_소나기.txt' #path of korean text
tags = get_string(path) # tags : string of list_nouns
wc = WordCloud(font_path=font_path, background_color="white", mask=back_coloring,
max_font_size=100, random_state=42, width=1000, height=860, margin=2) #collocations=false
#display the generated image
wordcloud = wc.generate(tags)
import matplotlib.pyplot as plt
plt.imshow(wordcloud, interpolation ='bilinear')
#image_colors_byImg = ImageColorGenerator(back_coloring)
#plt.imshow(wordcloud.recolor(color_func=image_colors_byImg), interpolation='bilinear')
plt.axis("off")
plt.show()
|
python
|
# -*- coding: utf-8 -*-
"""Applying a style via a CSS class.
A class is added through the `add_class()` method and
the CSS file is loaded from code.
"""
import gi
gi.require_version(namespace='Gtk', version='3.0')
from gi.repository import Gtk, Gdk
class MainWindow(Gtk.ApplicationWindow):
def __init__(self):
super().__init__()
self._set_custom_css(file='./css/custom.css')
self.set_title(title='Aplicando estilo via classe')
self.set_default_size(width=1366 / 2, height=768 / 2)
self.set_position(position=Gtk.WindowPosition.CENTER)
self.set_default_icon_from_file(filename='../../assets/icons/icon.png')
hbbox = Gtk.ButtonBox.new(orientation=Gtk.Orientation.HORIZONTAL)
hbbox.set_halign(align=Gtk.Align.CENTER)
hbbox.set_valign(align=Gtk.Align.CENTER)
hbbox.set_spacing(spacing=12)
self.add(widget=hbbox)
button_ok = Gtk.Button.new_with_label(label='OK')
        # Adding the native `suggested-action` class to the widget.
button_ok.get_style_context().add_class('suggested-action')
hbbox.add(widget=button_ok)
button_cancel = Gtk.Button.new_with_label(label='Cancelar')
        # Adding the native `destructive-action` class to the widget.
button_cancel.get_style_context().add_class('destructive-action')
hbbox.add(widget=button_cancel)
button_warning = Gtk.Button.new_with_label(label='Cancelar')
        # Adding the CUSTOM `warning-action` class to the widget.
button_warning.get_style_context().add_class('warning-action')
hbbox.add(widget=button_warning)
@staticmethod
def _set_custom_css(file):
css_provider = Gtk.CssProvider.new()
css_provider.load_from_path(path=file)
screen = Gdk.Screen()
style_context = Gtk.StyleContext.new()
style_context.add_provider_for_screen(
screen=screen.get_default(),
provider=css_provider,
priority=Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION,
)
if __name__ == '__main__':
win = MainWindow()
win.connect('destroy', Gtk.main_quit)
win.show_all()
Gtk.main()
|
python
|