# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
COCO evaluator that works in distributed mode.
Mostly copy-paste from https://github.com/pytorch/vision/blob/edfd5a7/references/detection/coco_eval.py
The difference is that there is less copy-pasting from pycocotools
in the end of the file, as python3 can suppress prints with contextlib
"""
import os
import contextlib
import copy
import numpy as np
import torch
from matplotlib import pyplot as plt
import cv2 as cv
from pycocotools.cocoeval import COCOeval
from pycocotools.coco import COCO
import pycocotools.mask as mask_util
from util.misc import all_gather
class CocoEvaluator(object):
def __init__(self, coco_gt, iou_types, score_threshold=0.5):
assert isinstance(iou_types, (list, tuple))
coco_gt = copy.deepcopy(coco_gt)
self.coco_gt = coco_gt
self.iou_types = iou_types
self.coco_eval = {}
        self.coco_eval_state = {}  # stores the state-evaluation COCOeval objects
for iou_type in iou_types:
self.coco_eval[iou_type] = COCOeval(coco_gt, iouType=iou_type)
self.coco_eval_state[iou_type] = COCOeval(coco_gt, iouType=iou_type)
self.img_ids = []
self.eval_imgs = {k: [] for k in iou_types}
        self.eval_imgs_state = {h: [] for h in iou_types}  # stores per-image state-evaluation results
        self.score_threshold = score_threshold  # confidence threshold for visualizing predictions
self.save_file = True
    # update() loads coco_dt into coco_eval; calling coco_eval.accumulate() and
    # coco_eval.summarize() afterwards yields the evaluation results
def update(self, predictions):
img_ids = list(np.unique(list(predictions.keys())))
self.img_ids.extend(img_ids)
for iou_type in self.iou_types:
            results = self.prepare(predictions, iou_type)  # get results in COCO format (list of dicts)
# self.show_result(results, simple_type=True) # show the image results of class prediction and state prediction
# suppress pycocotools prints
with open(os.devnull, 'w') as devnull:
with contextlib.redirect_stdout(devnull):
coco_dt = COCO.loadRes(self.coco_gt, results) if results else COCO()
coco_eval = self.coco_eval[iou_type]
coco_eval_state = self.coco_eval_state[iou_type]
coco_eval.cocoDt = coco_dt
coco_eval_state.cocoDt = coco_dt
coco_eval.params.imgIds = list(img_ids)
coco_eval_state.params.imgIds = list(img_ids)
img_ids, eval_imgs = evaluate(coco_eval)
            img_ids_state, eval_imgs_state = evaluatestate(coco_eval_state)  # evaluate the intrusion-state labels
self.eval_imgs[iou_type].append(eval_imgs)
self.eval_imgs_state[iou_type].append(eval_imgs_state)
pass
    # Display prediction results
def show_result(self, results, simple_type=False):
"""
可视化预测结果,包括class prediction和state prediction
:params results: 预测结果
"""
res_list = list()
for res in results:
if res['score'] > self.score_threshold:
res_list.append(res)
results = res_list
# load prediction annotations
coco = self.coco_gt
cocoRes = coco.loadRes(results)
catIds = cocoRes.getCatIds(catNms='pedestrian')
imgIds = cocoRes.getImgIds(catIds=catIds)
img_info = cocoRes.loadImgs(imgIds[np.random.randint(0, len(imgIds))])
annIds = cocoRes.getAnnIds(imgIds=img_info[0]['id'])
anns = cocoRes.loadAnns(annIds)
# load ground truth annotation
annIds_gt = coco.getAnnIds(imgIds=img_info[0]['id'])
anns_gt = coco.loadAnns(annIds_gt)
# load image
root_path = '/data/szy4017/data/intruscapes/images/val'
file_name = img_info[0]['file_name']
imgPath = os.path.join(root_path, file_name)
print(imgPath)
img = cv.imread(imgPath)
cv.imwrite('img.png', img)
img_rgb = bgr2rgb(img)
# plt
plt.title('Class Prediction')
plt.imshow(img_rgb)
cocoRes.showBBox(anns)
plt.savefig('./misc/cls_pred_{}.png'.format(img_info[0]['id']))
plt.show()
plt.title('State Prediction')
plt.imshow(img_rgb)
cocoRes.showIntrusion(anns, simple_type=simple_type)
plt.savefig('./misc/sta_pred_{}.png'.format(img_info[0]['id']))
plt.show()
plt.title('Class Ground Truth')
plt.imshow(img_rgb)
coco.showBBox(anns_gt)
plt.savefig('./misc/cls_gt_{}.png'.format(img_info[0]['id']))
plt.show()
plt.title('State Ground Truth')
plt.imshow(img_rgb)
coco.showIntrusion(anns_gt, simple_type=simple_type)
plt.savefig('./misc/sta_gt_{}.png'.format(img_info[0]['id']))
plt.show()
pass
    # Synchronize all self.eval_imgs data across processes
def synchronize_between_processes(self):
for iou_type in self.iou_types:
self.eval_imgs[iou_type] = np.concatenate(self.eval_imgs[iou_type], 2)
create_common_coco_eval(self.coco_eval[iou_type], self.img_ids, self.eval_imgs[iou_type])
    # Synchronize all self.eval_imgs_state data across processes
    # Adapted from synchronize_between_processes
def synchronize_between_processes_state(self):
for iou_type in self.iou_types:
self.eval_imgs_state[iou_type] = np.concatenate(self.eval_imgs_state[iou_type], 2)
create_common_coco_eval(self.coco_eval_state[iou_type], self.img_ids, self.eval_imgs_state[iou_type])
    # Accumulate the coco_eval results
def accumulate(self):
for coco_eval in self.coco_eval.values():
coco_eval.accumulate()
    # Accumulate the coco_eval_state results
    # Adapted from accumulate
def accumulate_state(self):
for coco_eval in self.coco_eval_state.values():
coco_eval.accumulate_state()
    # Summarize the evaluation and print the final results
def summarize(self):
for iou_type, coco_eval in self.coco_eval.items():
print("IoU metric: {}".format(iou_type))
coco_eval.summarize()
    # Summarize the state-evaluation and print the final results
    # Adapted from summarize
def summarize_state(self):
for iou_type, coco_eval in self.coco_eval_state.items():
print("State IoU metric: {}".format(iou_type))
coco_eval.summarize()
def prepare(self, predictions, iou_type):
if iou_type == "bbox":
return self.prepare_for_coco_detection(predictions)
elif iou_type == "segm":
return self.prepare_for_coco_segmentation(predictions)
elif iou_type == "keypoints":
return self.prepare_for_coco_keypoint(predictions)
else:
raise ValueError("Unknown iou type {}".format(iou_type))
    # Convert the model's predictions into COCO format
def prepare_for_coco_detection(self, predictions):
coco_results = []
for original_id, prediction in predictions.items():
if len(prediction) == 0:
continue
boxes = prediction["boxes"]
boxes = convert_to_xywh(boxes).tolist()
scores = prediction["scores"].tolist()
labels = prediction["labels"].tolist()
            # Add s_scores and s_labels: the predicted intrusion-state labels and their confidences
s_scores = prediction["s_scores"].tolist()
s_labels = prediction["s_labels"].tolist()
            # Build COCO-style result dicts with additional state and state_score fields
coco_results.extend(
[
{
"image_id": original_id,
"category_id": labels[k],
"bbox": box,
"score": scores[k],
"state": s_labels[k],
"state_score": s_scores[k]
}
for k, box in enumerate(boxes)
]
)
return coco_results
def prepare_for_coco_segmentation(self, predictions):
coco_results = []
for original_id, prediction in predictions.items():
if len(prediction) == 0:
continue
scores = prediction["scores"]
labels = prediction["labels"]
masks = prediction["masks"]
masks = masks > 0.5
scores = prediction["scores"].tolist()
labels = prediction["labels"].tolist()
rles = [
mask_util.encode(np.array(mask[0, :, :, np.newaxis], dtype=np.uint8, order="F"))[0]
for mask in masks
]
for rle in rles:
rle["counts"] = rle["counts"].decode("utf-8")
coco_results.extend(
[
{
"image_id": original_id,
"category_id": labels[k],
"segmentation": rle,
"score": scores[k],
}
for k, rle in enumerate(rles)
]
)
return coco_results
def prepare_for_coco_keypoint(self, predictions):
coco_results = []
for original_id, prediction in predictions.items():
if len(prediction) == 0:
continue
boxes = prediction["boxes"]
boxes = convert_to_xywh(boxes).tolist()
scores = prediction["scores"].tolist()
labels = prediction["labels"].tolist()
keypoints = prediction["keypoints"]
keypoints = keypoints.flatten(start_dim=1).tolist()
coco_results.extend(
[
{
"image_id": original_id,
"category_id": labels[k],
'keypoints': keypoint,
"score": scores[k],
}
for k, keypoint in enumerate(keypoints)
]
)
return coco_results
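# Hedged usage sketch (added for illustration, not part of the original evaluator):
# outlines the intended evaluation flow. `annotation_file` and the prediction
# tensors are placeholders supplied by the dataset and the detection model.
def _example_evaluator_flow(annotation_file, predictions_per_image):
    # predictions_per_image: {image_id: {"boxes": Tensor[N, 4] in xyxy format,
    #                                    "scores": Tensor[N], "labels": Tensor[N],
    #                                    "s_scores": Tensor[N], "s_labels": Tensor[N]}}
    coco_gt = COCO(annotation_file)
    evaluator = CocoEvaluator(coco_gt, iou_types=["bbox"])
    evaluator.update(predictions_per_image)     # convert predictions and run per-image evaluation
    evaluator.synchronize_between_processes()   # gather results across distributed workers
    evaluator.accumulate()                      # accumulate per-image results
    evaluator.summarize()                       # print the COCO metrics
    return evaluator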
# Convert image color channels (BGR -> RGB)
def bgr2rgb(img):
    # Use OpenCV's built-in split and merge functions
B, G, R = cv.split(img)
return cv.merge([R, G, B])
def convert_to_xywh(boxes):
xmin, ymin, xmax, ymax = boxes.unbind(1)
return torch.stack((xmin, ymin, xmax - xmin, ymax - ymin), dim=1)
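# Small hedged example (added): convert_to_xywh turns [xmin, ymin, xmax, ymax]
# boxes into COCO's [x, y, width, height] layout.
def _example_convert_to_xywh():
    boxes_xyxy = torch.tensor([[10.0, 20.0, 50.0, 80.0]])
    # -> tensor([[10., 20., 40., 60.]]): width = 50 - 10, height = 80 - 20
    return convert_to_xywh(boxes_xyxy)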
def merge(img_ids, eval_imgs):
all_img_ids = all_gather(img_ids)
all_eval_imgs = all_gather(eval_imgs)
merged_img_ids = []
for p in all_img_ids:
merged_img_ids.extend(p)
merged_eval_imgs = []
for p in all_eval_imgs:
merged_eval_imgs.append(p)
    merged_img_ids = np.array(merged_img_ids)
# Version 3.1; <NAME>; Polar Geospatial Center, University of Minnesota; 2019
from __future__ import division
import copy
import math
import operator
import os
import sys
import traceback
from collections import deque
from itertools import product
from PIL import Image
from warnings import warn
import cv2
import numpy as np
import osgeo
from osgeo import gdal_array, gdalconst
from osgeo import gdal, ogr, osr
import scipy
import shapely.geometry
import shapely.ops
from scipy import ndimage as sp_ndimage
from skimage.draw import polygon_perimeter
from skimage import morphology as sk_morphology
from skimage.filters.rank import entropy
from skimage.util import unique_rows
gdal.UseExceptions()
if sys.version_info[0] < 3:
from DecimatePoly import DecimatePoly
else:
from lib.DecimatePoly import DecimatePoly
_script_dir = os.path.dirname(os.path.realpath(__file__))
if sys.version_info[0] < 3:
_ext_fid = open(os.path.join(_script_dir, 'outline.c'), 'r')
_outline = _ext_fid.read()
_ext_fid.close()
_ext_fid = open(os.path.join(_script_dir, 'outline_every1.c'), 'r')
_outline_every1 = _ext_fid.read()
_ext_fid.close()
else:
_ext_fid = open(os.path.join(_script_dir, 'outline.c'), 'r', encoding='utf-8')
_outline = _ext_fid.read()
_ext_fid.close()
_ext_fid = open(os.path.join(_script_dir, 'outline_every1.c'), 'r', encoding='utf-8')
_outline_every1 = _ext_fid.read()
_ext_fid.close()
class RasterIOError(Exception):
def __init__(self, msg=""):
super(Exception, self).__init__(msg)
class UnsupportedDataTypeError(Exception):
def __init__(self, msg=""):
super(Exception, self).__init__(msg)
class InvalidArgumentError(Exception):
def __init__(self, msg=""):
super(Exception, self).__init__(msg)
class UnsupportedMethodError(Exception):
def __init__(self, msg=""):
super(Exception, self).__init__(msg)
#############
# Raster IO #
#############
# Legacy; Retained for quick instruction of useful GDAL raster information extraction methods.
def oneBandImageToArrayZXY_projRef(rasterFile):
"""
Opens a single-band raster image as a NumPy 2D array [Z] and returns it along
with [X, Y] coordinate ranges of pixels in the raster grid as NumPy 1D arrays
and the projection definition string for the raster dataset in OpenGIS WKT format.
"""
if not os.path.isfile(rasterFile):
raise RasterIOError("No such rasterFile: '{}'".format(rasterFile))
ds = gdal.Open(rasterFile, gdal.GA_ReadOnly)
proj_ref = ds.GetProjectionRef()
gt = ds.GetGeoTransform()
xmin, ymax = gt[0], gt[3]
dx, dy = gt[1], gt[5]
X = xmin + np.arange(ds.RasterXSize) * dx
Y = ymax + np.arange(ds.RasterYSize) * dy
Z = ds.GetRasterBand(1).ReadAsArray()
return Z, X, Y, proj_ref
def openRaster(file_or_ds, target_srs=None, reproject_resample_method='nearest'):
"""
Open a raster image as a GDAL dataset object.
Parameters
----------
file_or_ds : str (file path) or osgeo.gdal.Dataset
File path of the raster image to open as a GDAL dataset object,
or the GDAL dataset itself.
Returns
-------
ds : osgeo.gdal.Dataset
The raster image as a GDAL dataset.
Notes
-----
If `rasterFile_or_ds` is a GDAL dataset,
it is returned without modification.
"""
ds = None
if type(file_or_ds) == gdal.Dataset:
ds = file_or_ds
elif isinstance(file_or_ds, str):
if not os.path.isfile(file_or_ds):
raise RasterIOError("No such rasterFile: '{}'".format(file_or_ds))
try:
ds = gdal.Open(file_or_ds, gdal.GA_ReadOnly)
except RuntimeError:
print("RuntimeError when opening file/dataset: {}".format(file_or_ds))
raise
else:
raise InvalidArgumentError("Invalid input type for `file_or_ds`: {}".format(
type(file_or_ds)))
if target_srs is not None:
ds = reprojectGDALDataset(ds, target_srs, reproject_resample_method)
return ds
def reprojectGDALDataset(ds_in, target_srs, interp_str):
input_srs_type = type(target_srs)
if input_srs_type is osgeo.osr.SpatialReference:
if not target_srs.IsProjected():
raise RasterIOError("`target_srs` is a osgeo.osr.SpatialReference object but is not projected")
elif input_srs_type is osgeo.gdal.Dataset or input_srs_type is str and os.path.isfile(target_srs):
target_srs = extractRasterData(target_srs, 'spat_ref')
else:
target_srs_in = target_srs
target_srs_out = osr.SpatialReference()
if input_srs_type is int:
target_srs_out.ImportFromEPSG(target_srs_in)
elif input_srs_type is str:
            if target_srs_in.upper().startswith('EPSG:'):
target_srs_out.ImportFromEPSG(int(target_srs_in.upper().lstrip('EPSG:')))
elif '+proj' in target_srs_in:
target_srs_out.ImportFromProj4(target_srs_in)
else:
target_srs_out.ImportFromWkt(target_srs_in)
else:
raise RasterIOError("`target_srs` type is unsupported: {}".format(input_srs_type))
target_srs = target_srs_out
interp_gdal = interp_str2gdal(interp_str)
source_srs, dx, dy = extractRasterData(ds_in, 'spat_ref', 'dx', 'dy')
if source_srs.IsSame(target_srs) == 1:
return ds_in
temp_inmemory_file_path = '/vsimem/reproj.tif'
gdal.Warp(
temp_inmemory_file_path, ds_in,
dstSRS=target_srs, resampleAlg=interp_gdal,
xRes=dx, yRes=dy, targetAlignedPixels=True,
format="GTiff"
)
ds_out = gdal.Open(temp_inmemory_file_path, gdal.GA_ReadOnly)
gdal.Unlink(temp_inmemory_file_path)
return ds_out
def gdalReadAsArraySetsmSceneBand(raster_band, make_nodata_nan=False):
scale = raster_band.GetScale()
offset = raster_band.GetOffset()
if scale is None:
scale = 1.0
if offset is None:
offset = 0.0
if scale == 1.0 and offset == 0.0:
array_data = raster_band.ReadAsArray()
if make_nodata_nan:
nodata_val = raster_band.GetNoDataValue()
if nodata_val is not None:
array_data[array_data == nodata_val] = np.nan
else:
if raster_band.DataType != gdalconst.GDT_Int32:
raise RasterIOError(
"Expected GDAL raster band with scale!=1.0 or offset!=0.0 to be of Int32 data type"
" (scaled int LERC_ZSTD-compressed 50cm DEM), but data type is {}".format(
gdal.GetDataTypeName(raster_band.DataType)
)
)
if scale == 0.0:
raise RasterIOError(
"GDAL raster band has invalid parameters: scale={}, offset={}".format(scale, offset)
)
nodata_val = raster_band.GetNoDataValue()
array_data = raster_band.ReadAsArray(buf_type=gdalconst.GDT_Float32)
adjust_where = (array_data != nodata_val) if nodata_val is not None else True
if scale != 1.0:
np.multiply(array_data, scale, out=array_data, where=adjust_where)
if offset != 0.0:
np.add(array_data, offset, out=array_data, where=adjust_where)
if make_nodata_nan:
array_nodata = np.logical_not(adjust_where, out=adjust_where)
array_data[array_nodata] = np.nan
del adjust_where
if array_data is None:
raise RasterIOError("`raster_band.ReadAsArray()` returned None")
return array_data
def getCornerCoords(gt, shape):
"""
Retrieve the georeferenced corner coordinates of a raster image.
The corner coordinates of the raster are calculated from
the rasters's geometric transformation specifications and
the dimensions of the raster.
Parameters
----------
gt : numeric tuple `(top_left_x, dx_x, dx_y, top_left_y, dy_x, dy_y)`
The affine geometric transformation ("geotransform" or "geo_trans")
describing the relationship between pixel coordinates and
georeferenced coordinates.
Pixel coordinates start at `(0, 0)` [row, col] for the top left pixel
in the raster image, increasing down rows and right across columns.
Georeferenced coordinates `(x_geo, y_geo)` are calculated for pixels
in the image by the pixel coordinates `(pix_row, pix_col)` as follows:
        `x_geo = top_left_x + pix_col*dx_x + pix_row*dx_y`
        `y_geo = top_left_y + pix_col*dy_x + pix_row*dy_y`
shape : tuple of positive int, 2 elements
Dimensions of the raster image in (num_rows, num_cols) format.
Returns
-------
corner_coords : ndarray (5, 2)
Georeferenced corner coordinates of the raster image,
in (x, y) coordinate pairs, starting and ending at the
top left corner, clockwise.
"""
top_left_x = np.full((5, 1), gt[0])
top_left_y = np.full((5, 1), gt[3])
top_left_mat = np.concatenate((top_left_x, top_left_y), axis=1)
ysize, xsize = shape
raster_XY_size_mat = np.array([
[0, 0],
[xsize, 0],
[xsize, ysize],
[0, ysize],
[0, 0]
])
gt_mat = np.array([
[gt[1], gt[4]],
[gt[2], gt[5]]
])
return top_left_mat + np.dot(raster_XY_size_mat, gt_mat)
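# Hedged worked example (added): corner coordinates for a north-up raster with
# 2 m pixels, 100 rows x 200 columns, and top-left corner at (1000, 500).
def _example_corner_coords():
    gt = (1000.0, 2.0, 0.0, 500.0, 0.0, -2.0)
    corners = getCornerCoords(gt, (100, 200))
    # -> [[1000, 500], [1400, 500], [1400, 300], [1000, 300], [1000, 500]]
    return corners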
def coordsToWkt(point_coords):
"""
Retrieve a WKT polygon representation of an ordered list of
point coordinates.
Parameters
----------
point_coords : 2D sequence of floats/ints like ndarray
of shape (npoints, ndim)
Ordered list of points, each represented by a list of
coordinates that define its position in space.
Returns
-------
wkt : str
WKT polygon representation of `point_coords`.
"""
return 'POLYGON (({}))'.format(
','.join([" ".join([str(c) for c in xy]) for xy in point_coords])
)
def wktToCoords(wkt):
"""
Create an array of point coordinates from a WKT polygon string.
Parameters
----------
wkt : str
WKT polygon representation of points with coordinate data
to be extracted.
Returns
-------
point_coords : ndarray of shape (npoints, ndim)
Ordered list of point coordinates extracted from `wkt`.
"""
coords_list = eval(
wkt.replace('POLYGON ','').replace('(','[').replace(')',']').replace(',','],[').replace(' ',',')
)
return np.array(coords_list)
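# Hedged round-trip example (added): coordsToWkt and wktToCoords are inverses
# for a simple closed polygon ring.
def _example_wkt_roundtrip():
    ring = [[0, 0], [10, 0], [10, 10], [0, 10], [0, 0]]
    wkt = coordsToWkt(ring)    # 'POLYGON ((0 0,10 0,10 10,0 10,0 0))'
    return wktToCoords(wkt)    # ndarray of shape (5, 2)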
def extractRasterData(rasterFile_or_ds, *params):
"""
Extract information from a single-band raster image file.
Parameters
----------
rasterFile_or_ds : str (file path) or osgeo.gdal.Dataset
File path of the raster image to open as a GDAL dataset object,
or the GDAL dataset itself.
params : str
Names of parameters to be extracted from the raster dataset.
'array'/'z' ------ matrix of image pixel values as ndarray (2D)
'shape'----------- pixel shape of image as tuple (nrows, ncols)
'x' -------------- georeferenced grid coordinates corresponding to
each column of pixels in image as ndarray (1D)
'y' -------------- georeferenced grid coordinates corresponding to
each row of pixels in image as ndarray (1D)
'dx' ------------- x length of each pixel in georeferenced pixel-grid coordinates,
corresponding to x[1] - x[0] from 'x' param (dx may be negative)
'dy' ------------- y length of each pixel in georeferenced pixel-grid coordinates,
corresponding to y[1] - y[0] from 'y' param (dy may be negative)
'res' ------------ (absolute) resolution of square pixels in image
(NaN if pixels are not square)
'geo_trans' ------ affine geometric transformation
(see documentation for `getCornerCoords`)
'corner_coords' -- georeferenced corner coordinates of image extent
(see documentation for `getCornerCoords`)
'proj_ref' ------- projection definition string in OpenGIS WKT format
(None if projection definition is not available)
'spat_ref' ------- spatial reference as osgeo.osr.SpatialReference object
(None if spatial reference is not available)
'geom' ----------- polygon geometry of image extent as osgeo.ogr.Geometry object
'geom_sr' -------- polygon geometry of image extent as osgeo.ogr.Geometry object
with spatial reference assigned (if available)
'nodata_val' ----- pixel value that should be interpreted as "No Data"
'dtype_val' ------ GDAL type code for numeric data type of pixel values (integer)
'dtype_str' ------ GDAL type name for numeric data type of pixel values (string)
Returns
-------
value_list : list
List of parameter data with length equal to the number
of parameter name arguments given in the function call.
The order of returned parameter data corresponds directly to
the order of the parameter name arguments.
If only one parameter name argument is provided, the single
datum is returned itself, not in a list.
Examples
--------
>>> f = 'my_raster.tif'
>>> image_data, resolution = extractRasterData(f, 'array', 'res')
>>> resolution
2
>>> extractRasterData(f, 'dy')
-2
"""
ds = openRaster(rasterFile_or_ds)
pset = set(params)
invalid_pnames = pset.difference({'ds', 'shape', 'z', 'array', 'x', 'y',
'dx', 'dy', 'res', 'geo_trans', 'corner_coords',
'proj_ref', 'spat_ref', 'geom', 'geom_sr',
'nodata_val', 'dtype_val', 'dtype_str'})
if invalid_pnames:
raise InvalidArgumentError("Invalid parameter(s) for extraction: {}".format(invalid_pnames))
if pset.intersection({'z', 'array', 'nodata_val', 'dtype_val', 'dtype_str'}):
band = ds.GetRasterBand(1)
if pset.intersection({'z', 'array'}):
try:
array_data = gdalReadAsArraySetsmSceneBand(band)
except RasterIOError as e:
traceback.print_exc()
print("Error reading raster: {}".format(rasterFile_or_ds))
raise
if pset.intersection({'shape', 'x', 'y', 'corner_coords', 'geom', 'geom_sr'}):
shape = (ds.RasterYSize, ds.RasterXSize) if 'array_data' not in vars() else array_data.shape
if pset.intersection({'x', 'y', 'dx', 'dy', 'res', 'geo_trans', 'corner_coords', 'geom', 'geom_sr'}):
geo_trans = ds.GetGeoTransform()
if pset.intersection({'proj_ref', 'spat_ref', 'geom_sr'}):
proj_ref = ds.GetProjectionRef()
if pset.intersection({'corner_coords', 'geom', 'geom_sr'}):
corner_coords = getCornerCoords(geo_trans, shape)
if pset.intersection({'spat_ref', 'geom_sr'}):
spat_ref = osr.SpatialReference(proj_ref) if proj_ref is not None else None
if pset.intersection({'geom', 'geom_sr'}):
geom = ogr.Geometry(wkt=coordsToWkt(corner_coords))
if pset.intersection({'nodata_val'}):
nodata_val = band.GetNoDataValue()
if pset.intersection({'dtype_val', 'dtype_str'}):
dtype_val = band.DataType
if pset.intersection({'dtype_str'}):
dtype_str = gdal.GetDataTypeName(dtype_val)
value_list = []
for pname in params:
pname = pname.lower()
value = None
if pname == 'ds':
value = ds
elif pname == 'shape':
value = shape
elif pname in ('z', 'array'):
value = array_data
elif pname == 'x':
value = geo_trans[0] + np.arange(shape[1]) * geo_trans[1]
elif pname == 'y':
value = geo_trans[3] + np.arange(shape[0]) * geo_trans[5]
elif pname == 'dx':
value = abs(geo_trans[1])
elif pname == 'dy':
value = abs(geo_trans[5])
elif pname == 'res':
value = abs(geo_trans[1]) if abs(geo_trans[1]) == abs(geo_trans[5]) else np.nan
elif pname == 'geo_trans':
value = geo_trans
elif pname == 'corner_coords':
value = corner_coords
elif pname == 'proj_ref':
value = proj_ref
elif pname == 'spat_ref':
value = spat_ref
elif pname == 'geom':
value = geom
elif pname == 'geom_sr':
value = geom.Clone() if 'geom' in params else geom
if spat_ref is not None:
value.AssignSpatialReference(spat_ref)
else:
warn("Spatial reference could not be extracted from raster dataset, "
"so extracted geometry has not been assigned a spatial reference.")
elif pname == 'nodata_val':
value = nodata_val
elif pname == 'dtype_val':
value = dtype_val
elif pname == 'dtype_str':
value = dtype_str
value_list.append(value)
if len(value_list) == 1:
value_list = value_list[0]
return value_list
# Legacy; Retained for a visual aid of equivalences between NumPy and GDAL data types.
# Use gdal_array.NumericTypeCodeToGDALTypeCode to convert from NumPy to GDAL data type.
def dtype_np2gdal_old(dtype_in, form_out='gdal', force_conversion=False):
"""
Converts between input NumPy data type (dtype_in may be either
NumPy 'dtype' object or already a string) and output GDAL data type.
If form_out='numpy', the corresponding NumPy 'dtype' object will be
returned instead, allowing for quick lookup by string name.
If the third element of a dtype_dict conversion tuple is zero,
that conversion of NumPy to GDAL data type is not recommended. However,
the conversion may be forced with the argument force_conversion=True.
"""
dtype_dict = { # ---GDAL LIMITATIONS---
'bool' : (np.bool, gdal.GDT_Byte, 0), # GDAL no bool/logical/1-bit
'int8' : (np.int8, gdal.GDT_Byte, 1), # GDAL byte is unsigned
'int16' : (np.int16, gdal.GDT_Int16, 1),
'int32' : (np.int32, gdal.GDT_Int32, 1),
'intc' : (np.intc, gdal.GDT_Int32, 1), # np.intc ~= np.int32
'int64' : (np.int64, gdal.GDT_Int32, 0), # GDAL no int64
'intp' : (np.intp, gdal.GDT_Int32, 0), # intp ~= np.int64
'uint8' : (np.uint8, gdal.GDT_Byte, 1),
'uint16' : (np.uint16, gdal.GDT_UInt16, 1),
'uint32' : (np.uint32, gdal.GDT_UInt32, 1),
'uint64' : (np.uint64, gdal.GDT_UInt32, 0), # GDAL no uint64
'float16' : (np.float16, gdal.GDT_Float32, 1), # GDAL no float16
'float32' : (np.float32, gdal.GDT_Float32, 1),
'float64' : (np.float64, gdal.GDT_Float64, 1),
'complex64' : (np.complex64, gdal.GDT_CFloat32, 1),
'complex128': (np.complex128, gdal.GDT_CFloat64, 1),
}
errmsg_unsupported_dtype = "Conversion of NumPy data type '{}' to GDAL is not supported".format(dtype_in)
try:
dtype_tup = dtype_dict[str(dtype_in).lower()]
except KeyError:
raise UnsupportedDataTypeError("No such NumPy data type in lookup table: '{}'".format(dtype_in))
if form_out.lower() == 'gdal':
if dtype_tup[2] == 0:
if force_conversion:
print(errmsg_unsupported_dtype)
else:
raise UnsupportedDataTypeError(errmsg_unsupported_dtype)
dtype_out = dtype_tup[1]
elif form_out.lower() == 'numpy':
dtype_out = dtype_tup[0]
else:
raise UnsupportedDataTypeError("The following output data type format is not supported: '{}'".format(form_out))
return dtype_out
def dtype_np2gdal(dtype_np):
# TODO: Write docstring.
if dtype_np == np.bool:
promote_dtype = np.uint8
elif dtype_np == np.int8:
promote_dtype = np.int16
elif dtype_np == np.float16:
promote_dtype = np.float32
else:
promote_dtype = None
if promote_dtype is not None:
warn("NumPy array data type ({}) does not have equivalent GDAL data type and is not "
"supported, but can be safely promoted to {}".format(dtype_np, promote_dtype(1).dtype))
dtype_np = promote_dtype
dtype_gdal = gdal_array.NumericTypeCodeToGDALTypeCode(dtype_np)
if dtype_gdal is None:
raise InvalidArgumentError("NumPy array data type ({}) does not have equivalent "
"GDAL data type and is not supported".format(dtype_np))
return dtype_gdal, promote_dtype
def interp_str2gdal(interp_str):
# TODO: Write docstring.
interp_choices = ('nearest', 'linear', 'cubic', 'spline', 'lanczos', 'average', 'mode')
interp_dict = {
'nearest' : gdal.GRA_NearestNeighbour,
'linear' : gdal.GRA_Bilinear,
'bilinear' : gdal.GRA_Bilinear,
'cubic' : gdal.GRA_Cubic,
'bicubic' : gdal.GRA_Cubic,
'spline' : gdal.GRA_CubicSpline,
'lanczos' : gdal.GRA_Lanczos,
'average' : gdal.GRA_Average,
'mode' : gdal.GRA_Mode,
}
if interp_str not in interp_dict:
raise UnsupportedMethodError("`interp` must be one of {}, but was '{}'".format(interp_choices, interp_str))
return interp_dict[interp_str]
def saveArrayAsTiff(array, dest,
X=None, Y=None, proj_ref=None, geotrans_rot_tup=(0, 0),
nodata_val='like_raster', dtype_out=None, nbits=None, co_args='compress',
like_raster=None):
"""
Save a NumPy 2D array as a single-band raster image in GeoTiff format.
Parameters
----------
array : ndarray, 2D
Array containing the values of pixels to be saved in the image,
one value per pixel.
dest : str (file path)
File path where the raster image will be saved.
If a file already exists at this path, it will be overwritten.
X : None or (ndarray, 1D)
Grid coordinates corresponding to all columns in the raster image,
from left to right, such that `X[j]` specifies the x-coordinate for
all pixels in `array[:, j]`.
If None, `like_raster` must be provided.
Y : None or (ndarray, 1D)
Grid coordinates corresponding to all rows in the raster image,
from top to bottom, such that `Y[i]` specifies the y-coordinate for
all pixels in `array[i, :]`
If None, `like_raster` must be provided.
proj_ref : None, str (WKT or Proj4), or osr.SpatialReference
Projection reference of the raster image to be saved, specified as
either a WKT/Proj4 string or an osr.SpatialReference object.
If None, `like_raster` must be provided.
geotrans_rot_tup : None or tuple (2 floats)
The third and fifth elements of the geometric transformation tuple
that specify rotation from north-up of the raster image to be saved.
If a north-up output is desired, let both elements be zero.
See documentation for `getCornerCoords` for more information on the
geometric transformation tuple.
If None, `like_raster` must be provided.
nodata_val : 'like_raster', None, or int/float
Non-NaN value in `array` that will be classified as "no data" in the
output raster image.
If 'like_raster', allow this value to be set equal to the nodata value
of `like_raster`.
dtype_out : data type as str (e.g. 'uint16'), NumPy data type
(e.g. np.uint16), or numpy.dtype object (e.g. from arr.dtype)
Numeric type of values in the output raster image.
        If 'nbits', write output raster image in an unsigned integer GDAL
data type with ['NBITS=n'] option in driver, where n is set to `nbits`
if `nbits` is not None. If `nbits` is None, n is calculated to be only
as large as necessary to capture the maximum value of `array`, and the
output array data type is unsigned integer of minimal bitdepth.
nbits : None or 1 <= int <= 32
Only applies when `dtype_out='nbits'`.
co_args : None, 'compress', or list of '[ARG_NAME]=[ARG_VALUE]' strings
Creation Option arguments to pass to the `Create` method of the GDAL
Geotiff driver that instantiates the output raster dataset.
If 'compress', the following default arguments are used:
'BIGTIFF=IF_SAFER'
'COMPRESS=LZW'
'TILED=YES'
The 'NBITS=X' argument may not be used -- that is set by the `nbits`
argument for this function.
A list of Creation Option arguments may be found here: [1].
like_raster : None, str (file path), or osgeo.gdal.Dataset
File path or GDAL dataset for a raster image of identical dimensions,
geographic location/extent, spatial reference, and nodata value as
the raster image that will be saved.
If provided, `X`, `Y`, `proj_ref`, and `geotrans_rot_tup` should not
be provided, as these metrics will be taken from the like raster.
Returns
-------
None
Notes
-----
The OSGeo `gdal_translate` program [1] must be callable by name
from the current working directory at the time this function is called.
References
----------
.. [1] https://www.gdal.org/frmt_gtiff.html
"""
spat_ref = None
projstr_wkt = None
projstr_proj4 = None
if proj_ref is None:
pass
elif type(proj_ref) == osr.SpatialReference:
spat_ref = proj_ref
elif isinstance(proj_ref, str):
spat_ref = osr.SpatialReference()
if proj_ref.lstrip().startswith('PROJCS'):
projstr_wkt = proj_ref
spat_ref.ImportFromWkt(projstr_wkt)
elif proj_ref.lstrip().startswith('+proj='):
projstr_proj4 = proj_ref
spat_ref.ImportFromProj4(projstr_proj4)
else:
raise InvalidArgumentError("`proj_ref` of string type has unknown format: '{}'".format(proj_ref))
else:
raise InvalidArgumentError("`proj_ref` must be a string or osr.SpatialReference object, "
"but was of type {}".format(type(proj_ref)))
dtype_is_nbits = (dtype_out is not None and type(dtype_out) is str and dtype_out == 'nbits')
if co_args is not None and co_args != 'compress':
if type(co_args) != list:
raise InvalidArgumentError("`co_args` must be a list of strings, but was {}".format(co_args))
if dtype_is_nbits:
for arg in co_args:
if arg.startswith('NBITS='):
raise InvalidArgumentError("`co_args` cannot include 'NBITS=X' argument. "
"Please use this function's `nbits` argument.")
shape = array.shape
dtype_gdal = None
if like_raster is not None:
ds_like = openRaster(like_raster)
if shape[0] != ds_like.RasterYSize or shape[1] != ds_like.RasterXSize:
raise InvalidArgumentError("Shape of `like_rasterFile` '{}' ({}, {}) does not match "
"the shape of `array` {}".format(
like_raster, ds_like.RasterYSize, ds_like.RasterXSize, shape)
)
geo_trans = extractRasterData(ds_like, 'geo_trans')
if proj_ref is None:
spat_ref = extractRasterData(ds_like, 'spat_ref')
if nodata_val == 'like_raster':
nodata_val = extractRasterData(ds_like, 'nodata_val')
if dtype_out is None:
dtype_gdal = extractRasterData(ds_like, 'dtype_val')
else:
if shape[0] != Y.size or shape[1] != X.size:
raise InvalidArgumentError("Lengths of [`Y`, `X`] grid coordinates ({}, {}) do not match "
"the shape of `array` ({})".format(Y.size, X.size, shape))
geo_trans = (X[0], X[1]-X[0], geotrans_rot_tup[0],
Y[0], geotrans_rot_tup[1], Y[1]-Y[0])
if nodata_val == 'like_raster':
nodata_val = None
if dtype_out is not None:
if dtype_is_nbits:
if nbits is None:
nbits = int(math.floor(math.log(float(max(1, np.max(array))), 2)) + 1)
elif type(nbits) != int or nbits < 1:
raise InvalidArgumentError("`nbits` must be an integer in the range [1,32]")
if nbits <= 8:
dtype_gdal = gdal.GDT_Byte
elif nbits <= 16:
dtype_gdal = gdal.GDT_UInt16
elif nbits <= 32:
dtype_gdal = gdal.GDT_UInt32
else:
raise InvalidArgumentError("Output array requires {} bits of precision, "
"but GDAL supports a maximum of 32 bits")
else:
if type(dtype_out) is str:
dtype_out = eval('np.{}'.format(dtype_out.lower()))
dtype_gdal = gdal_array.NumericTypeCodeToGDALTypeCode(dtype_out)
if dtype_gdal is None:
raise InvalidArgumentError("Output array data type ({}) does not have equivalent "
"GDAL data type and is not supported".format(dtype_out))
dtype_in = array.dtype
dtype_in_gdal, promote_dtype = dtype_np2gdal(dtype_in)
if promote_dtype is not None:
array = array.astype(promote_dtype)
dtype_in = promote_dtype(1).dtype
if dtype_out is not None:
if dtype_is_nbits:
if not np.issubdtype(dtype_in, np.unsignedinteger):
warn("Input array data type ({}) is not unsigned and may be incorrectly saved "
"with n-bit precision".format(dtype_in))
elif dtype_in != dtype_out:
warn("Input array NumPy data type ({}) differs from output "
"NumPy data type ({})".format(dtype_in, dtype_out(1).dtype))
elif dtype_gdal is not None and dtype_gdal != dtype_in_gdal:
warn("Input array GDAL data type ({}) differs from output "
"GDAL data type ({})".format(gdal.GetDataTypeName(dtype_in_gdal),
gdal.GetDataTypeName(dtype_gdal)))
if dtype_gdal is None:
dtype_gdal = dtype_in_gdal
sys.stdout.write("Saving Geotiff {} ...".format(dest))
sys.stdout.flush()
# Create the output raster dataset in memory.
if co_args is None:
co_args = []
if co_args == 'compress':
co_args = []
co_args.extend(['BIGTIFF=IF_SAFER']) # Will create BigTIFF
# if the resulting file *might* exceed 4GB.
co_args.extend(['COMPRESS=LZW']) # Do LZW compression on output image.
co_args.extend(['TILED=YES']) # Force creation of tiled TIFF files.
if dtype_is_nbits:
co_args.extend(['NBITS={}'.format(nbits)])
if spat_ref is not None:
if projstr_wkt is None:
projstr_wkt = spat_ref.ExportToWkt()
if projstr_proj4 is None:
projstr_proj4 = spat_ref.ExportToProj4()
sys.stdout.write(" GDAL data type: {}, NoData value: {}, Creation Options: {}, Projection (Proj4): {} ...".format(
gdal.GetDataTypeName(dtype_gdal), nodata_val, ' '.join(co_args) if co_args else None, projstr_proj4.strip())
)
sys.stdout.flush()
sys.stdout.write(" creating file ...")
sys.stdout.flush()
driver = gdal.GetDriverByName('GTiff')
ds_out = driver.Create(dest, shape[1], shape[0], 1, dtype_gdal, co_args)
ds_out.SetGeoTransform(geo_trans)
if projstr_wkt is not None:
ds_out.SetProjection(projstr_wkt)
band = ds_out.GetRasterBand(1)
if nodata_val is not None:
band.SetNoDataValue(nodata_val)
sys.stdout.write(" writing array values ...")
sys.stdout.flush()
band.WriteArray(array)
# Write the output raster dataset to disk.
sys.stdout.write(" finishing file ...")
sys.stdout.flush()
ds_out = None # Dereference dataset to initiate write to disk of intermediate image.
sys.stdout.write(" done!\n")
sys.stdout.flush()
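# Hedged usage sketch (added): write a small north-up float32 array as a
# compressed GeoTIFF. The output path and Proj4 string are placeholders.
def _example_save_array_as_tiff():
    Z = np.zeros((100, 200), dtype=np.float32)
    X = 1000.0 + 2.0 * np.arange(Z.shape[1])   # 2 m pixels, left to right
    Y = 500.0 - 2.0 * np.arange(Z.shape[0])    # 2 m pixels, top to bottom
    proj4 = '+proj=stere +lat_0=-90 +lat_ts=-71 +datum=WGS84 +units=m +no_defs'
    saveArrayAsTiff(Z, 'example_dem.tif', X=X, Y=Y, proj_ref=proj4,
                    nodata_val=-9999, co_args='compress')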
#######################
# Array Manipulations #
#######################
def getWindow(array, i, j, window_shape=(3, 3), output='array', bounds_check=True):
# TODO: Write docstring.
output_choices = ('array', 'indices')
if output not in output_choices:
raise InvalidArgumentError("`output` must be one of {}, "
"but was {}".format(output_choices, output))
win_nrows, win_ncols = window_shape
if bounds_check:
if win_nrows < 1 or win_ncols < 1:
raise InvalidArgumentError("`window_shape` must be a tuple of two positive ints")
arr_nrows, arr_ncols = array.shape
i_backup = i
j_backup = j
if i < 0:
i = arr_nrows + i
if j < 0:
j = arr_ncols + j
if i >= arr_nrows:
raise InvalidArgumentError("Index `i`={} is outside `array` bounds".format(i_backup))
if j >= arr_ncols:
raise InvalidArgumentError("Index `j`={} is outside `array` bounds".format(j_backup))
win_halfrowsz = (win_nrows-1) / 2
win_halfcolsz = (win_ncols-1) / 2
win_r0 = int(i - np.ceil(win_halfrowsz))
win_r1 = int(i + np.floor(win_halfrowsz) + 1)
win_c0 = int(j - np.ceil(win_halfcolsz))
win_c1 = int(j + np.floor(win_halfcolsz) + 1)
if not bounds_check:
if win_r1 == 0:
win_r1 = None
if win_c1 == 0:
win_c1 = None
return ( array[win_r0:win_r1, win_c0:win_c1] if output == 'array'
else (win_r0, win_r1, win_c0, win_c1))
if win_r0 < 0 or win_r1 > arr_nrows or win_c0 < 0 or win_c1 > arr_ncols:
raise InvalidArgumentError("Window falls outside `array` bounds")
return ( array[win_r0:win_r1, win_c0:win_c1] if output == 'array'
else (win_r0, win_r1, win_c0, win_c1))
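# Hedged example (added): pull the 3x3 window centered on element (2, 2).
def _example_get_window():
    a = np.arange(25).reshape(5, 5)
    win = getWindow(a, 2, 2, window_shape=(3, 3))
    # -> rows 1:4, cols 1:4 of `a`
    return win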
def rotate_arrays_if_kernel_has_even_sidelength(array, kernel):
"""
Return 180-degree rotated views into the provided arrays
if `kernel` has an even side length.
Parameters
----------
array : ndarray, 2D
Primary array associated with `kernel`.
kernel : ndarray, 2D
Kernel array.
Returns
-------
array_out, kernel_out, rotation_flag : tuple
Tuple containing views into `array` and `kernel`,
and a flag that is True if the views of these two
arrays have been rotated by 180 degrees.
See Also
--------
fix_array_if_rotation_was_applied
Notes
-----
The sole purpose of this function is to assist other
functions in this array utility suite in their attempts
to mimic the behavior of corresponding MATLAB functions
at the pixel level when dealing with a kernel/structure
that has an even side length.
"""
for s in kernel.shape:
if s % 2 == 0:
return np.rot90(array, 2), np.rot90(kernel, 2), True
return array, kernel, False
def fix_array_if_rotation_was_applied(array, rotation_flag):
"""
Return 180-degree rotated view into the provided array
if `rotation_flag` is True.
Parameters
----------
array : ndarray, 2D
Array that may or may not need undoing of rotation.
rotation_flag : bool
True if `array` rotation should be undone.
False if `array` does not need undoing of rotation.
Returns
-------
array_out : ndarray, 2D
View into `array` that may or may not have had
rotation undone.
See Also
--------
rotate_arrays_if_kernel_has_even_sidelength
Notes
-----
The sole purpose of this function is to assist other
functions in this array utility suite in their attempts
to mimic the behavior of corresponding MATLAB functions
at the pixel level when dealing with a kernel/structure
that has an even side length.
"""
return np.rot90(array, 2) if rotation_flag else array
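# Hedged example (added): the rotate/fix pair used around an even-sided kernel,
# with a generic scipy convolution standing in for a MATLAB-mimicking filter.
def _example_even_kernel_rotation():
    a = np.arange(16, dtype=float).reshape(4, 4)
    k = np.ones((2, 2))                               # even side length triggers rotation
    a_rot, k_rot, rotated = rotate_arrays_if_kernel_has_even_sidelength(a, k)
    filtered = sp_ndimage.convolve(a_rot, k_rot, mode='constant', cval=0)
    return fix_array_if_rotation_was_applied(filtered, rotated)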
def rot90_pixcoords(coords, shape_in, k=1):
"""
Rotate 2D (row, col) pixel coordinates taken from an
array of a defined nrows x ncols shape by 90 degrees.
Rotation direction is counterclockwise.
Parameters
----------
coords : 2D ndarray or list/tuple of two 1D ndarrays
2D (row, col) pixel coordinates.
        May be in the format of the output of np.argwhere
        (2D ndarray, shape like (npoints, 2)) [2] or
        np.where (tuple of two 1D ndarrays, each of
        size npoints) [1].
shape_in : tuple of positive int
Shape of array that pixel coordinates came from
before the desired rotation has been applied,
like (nrows, ncols) output of `array.shape`.
k : int
Number of times the coordinates are rotated by
90 degrees.
Returns
-------
coords_out : same format, type, shape as `coords`
2D (row, col) pixel coordinates rotated from
the corresponding coordinates in `coords`.
See Also
--------
numpy.rot90 [3]
flip_pixcoords
Notes
-----
Say `coords` index into array 'a' to return values
of a set of pixels 'a_vals' as follows:
`a_vals = a[coords]`
Rotate both `a` and `coords` 90 degrees the same
number of times `k` to get array 'b' and pixel
coords 'coords_b' that index into 'b' to return
'b_vals'.
`b = numpy.rot90(a, k)`
`coords_b = rot90_pixcoords(coords, a.shape, k)`
`b_vals = b[coords_b]`
The values in 'a_vals' and 'b_vals' are identical.
References
----------
.. [1] https://docs.scipy.org/doc/numpy/reference/generated/numpy.where.html
.. [2] https://docs.scipy.org/doc/numpy/reference/generated/numpy.argwhere.html
.. [3] https://docs.scipy.org/doc/numpy/reference/generated/numpy.rot90.html
"""
if type(coords) == np.ndarray:
row_in, col_in = coords.T
else:
row_in, col_in = coords
k = k % 4
if k == 0:
row_out = row_in
col_out = col_in
elif k == 1:
row_out = (shape_in[1]-1) - col_in
col_out = row_in
elif k == 2:
row_out = (shape_in[0]-1) - row_in
col_out = (shape_in[1]-1) - col_in
elif k == 3:
row_out = col_in
col_out = (shape_in[0]-1) - row_in
if type(coords) == np.ndarray:
result = np.array([row_out, col_out]).T
else:
result = (row_out, col_out)
return result
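# Hedged verification example (added): mirrors the relationship described in
# the Notes section above.
def _example_rot90_pixcoords():
    a = np.arange(12).reshape(3, 4)
    coords = np.where(a % 3 == 0)                   # tuple of (rows, cols)
    b = np.rot90(a, 1)
    coords_b = rot90_pixcoords(coords, a.shape, 1)
    assert np.array_equal(a[coords], b[coords_b])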
def flip_pixcoords(coords, shape_in, axis=0):
"""
Flip 2D (row, col) pixel coordinates taken from an
array of a defined nrows x ncols shape across an axis.
Parameters
----------
coords : 2D ndarray or list/tuple of two 1D ndarrays
2D (row, col) pixel coordinates.
        May be in the format of the output of np.argwhere
        (2D ndarray, shape like (npoints, 2)) [2] or
        np.where (tuple of two 1D ndarrays, each of
        size npoints) [1].
shape_in : tuple of positive int
Shape of array that pixel coordinates came from,
like (nrows, ncols) output of `array.shape`.
axis : 0 or 1
If 0, flip coordinates vertically.
If 1, flip coordinates horizontally.
See Also
--------
numpy.rot90 [3]
rot90_pixcoords
Returns
-------
coords_out : same format, type, shape as `coords`
2D (row, col) pixel coordinates flipped from
the corresponding coordinates in `coords`.
Notes
-----
Say `coords` index into array 'a' to return values
of a set of pixels 'a_vals' as follows:
`a_vals = a[coords]`
Flip both `a` and `coords` over the same axis with
number `axis` to get array 'b' and pixel coords
'coords_b' that index into 'b' to return 'b_vals'.
`b = numpy.flip(a, axis)`
`coords_b = flip_pixcoords(coords, a.shape, axis)`
`b_vals = b[coords_b]`
The values in 'a_vals' and 'b_vals' are identical.
References
----------
.. [1] https://docs.scipy.org/doc/numpy/reference/generated/numpy.where.html
.. [2] https://docs.scipy.org/doc/numpy/reference/generated/numpy.argwhere.html
.. [3] https://docs.scipy.org/doc/numpy/reference/generated/numpy.flip.html
"""
if type(coords) == np.ndarray:
row_in, col_in = coords.T
else:
row_in, col_in = coords
if axis == 0:
row_out = (shape_in[0]-1) - row_in
col_out = col_in
elif axis == 1:
row_out = row_in
col_out = (shape_in[1]-1) - col_in
else:
raise InvalidArgumentError("`axis` must be 0 or 1")
if type(coords) == np.ndarray:
result = np.array([row_out, col_out]).T
else:
result = (row_out, col_out)
return result
def array_round_proper(array, in_place=False):
"""
Round data in a floating point array to the nearest integer,
rounding up for positive X.5 and down for negative X.5.
Parameters
----------
array : ndarray of floating dtype
Floating point array to round.
in_place : bool
If True, round array in place.
If False, copy array before rounding.
Returns
-------
array_round : ndarray of floating dtype
The rounded array.
"""
if not in_place:
array = np.copy(array)
array_gt_zero = array > 0
np.add(array, 0.5, out=array, where=array_gt_zero)
np.floor(array, out=array, where=array_gt_zero)
del array_gt_zero
array_lt_zero = array < 0
np.subtract(array, 0.5, out=array, where=array_lt_zero)
np.ceil(array, out=array, where=array_lt_zero)
del array_lt_zero
return array
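# Hedged example (added): X.5 rounds away from zero, unlike numpy's default
# round-half-to-even behavior.
def _example_round_proper():
    vals = np.array([1.5, 2.5, -1.5, -2.5, 0.4])
    return array_round_proper(vals)   # -> [ 2.,  3., -2., -3.,  0.]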
def astype_round_and_crop(array, dtype_out, allow_modify_array=False):
"""
Cast a floating point array to an integer data type,
first rounding data values and cropping all values to
the minimum and maximum representable values for the
output data type.
Parameters
----------
array : ndarray
Array containing data to be cast.
dtype_out : numpy data type (e.g. numpy.int32) or numpy.dtype
The data type `array` is to be cast to.
allow_modify_array : bool
If True, values in input `array` may be modified.
Returns
-------
array_out : ndarray of type `dtype_out`
The new array that has been cast from `array`.
Notes
-----
This function is meant to replicate MATLAB array type casting.
"""
# The trivial case
if dtype_out == np.bool:
return array.astype(dtype_out)
array_dtype_np = array.dtype.type
dtype_out_np = dtype_out if type(dtype_out) != np.dtype else dtype_out.type
if np.issubdtype(array_dtype_np, np.floating) and np.issubdtype(dtype_out_np, np.integer):
# TODO: Consider replacing the following potentially costly call with
        #       np.around(array) if round-half-to-nearest-whole-even is acceptable.
array = array_round_proper(array, allow_modify_array)
return astype_cropped(array, dtype_out_np, allow_modify_array)
def astype_cropped(array, dtype_out, allow_modify_array=False):
"""
Cast an array to a new data type, first cropping all values
to the minimum and maximum representable values for the
output data type.
Parameters
----------
array : ndarray
Array containing data to be cast.
dtype_out : numpy data type (e.g. numpy.int32) or numpy.dtype
The data type `array` is to be cast to.
allow_modify_array : bool
If True, values in input `array` may be modified.
Returns
-------
array_cropped : ndarray of type `dtype_out`
The new array that has been cast from `array`.
Notes
-----
    The purpose of this function is to prevent overflow and
underflow during casting, something numpy.ndarray.astype
does not do. [1]
References
----------
.. [1] https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.astype.html
"""
# The trivial case
if dtype_out == np.bool:
return array.astype(dtype_out)
dtype_out_np = dtype_out if type(dtype_out) != np.dtype else dtype_out.type
dtype_info_fn = np.finfo if np.issubdtype(dtype_out_np, np.floating) else np.iinfo
dtype_out_min = dtype_info_fn(dtype_out_np).min
dtype_out_max = dtype_info_fn(dtype_out_np).max
array_cropped = array if allow_modify_array else None
try:
array_cropped = np.clip(array, dtype_out_min, dtype_out_max, out=array_cropped)
except OverflowError:
dtype_out_min_float = float(dtype_out_min)
dtype_out_max_float = float(dtype_out_max)
warn("Integers for {} clip range [{}, {}] are too large for underlying C code of numpy.clip(). "
"Casting clip range to float: [{}, {}]".format(dtype_out_np(1).dtype,
dtype_out_min, dtype_out_max,
dtype_out_min_float, dtype_out_max_float))
array_cropped = np.clip(array, dtype_out_min_float, dtype_out_max_float, out=array_cropped)
return array_cropped.astype(dtype_out)
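# Hedged example (added): values outside the uint8 range are clipped before
# the cast instead of wrapping around.
def _example_astype_cropped():
    vals = np.array([-500.0, 100.0, 500.0])
    return astype_cropped(vals, np.uint8)   # -> [  0, 100, 255] as uint8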
def getDataArray(array, label=0, label_type='nodata'):
"""
Classify values in an array as "data" or non-"data".
Parameters
----------
array : ndarray
Array to be classified.
label : bool/int/float
Value of nodes in `array` that are classified as
"data" (if label_type='data')
or non-"data" (if label_type='nodata').
label_type : str; 'data' or 'nodata'
Whether `label` is a classification for "data"
or non-"data" nodes.
Returns
-------
data_array : ndarray of bool, same shape as `array`
Binary mask of `array` where "data" nodes are one
and non-"data" nodes are zero.
"""
label_type_choices = ('data', 'nodata')
if label_type not in label_type_choices:
raise InvalidArgumentError("`label_type` must be one of {}, "
"but was {}".format(label_type_choices, label_type))
if (array.dtype == np.bool
and ((label_type == 'nodata' and label == 0)
or (label_type == 'data' and label == 1))):
data_array = array
elif np.isnan(label):
data_array = np.isnan(array) if label_type == 'data' else ~np.isnan(array)
else:
data_array = (array == label) if label_type == 'data' else (array != label)
return data_array
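# Hedged example (added): build a boolean "data" mask for an array whose
# nodata value is NaN.
def _example_get_data_array():
    a = np.array([[1.0, np.nan], [3.0, 0.0]])
    # -> [[ True, False], [ True,  True]]
    return getDataArray(a, label=np.nan, label_type='nodata')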
######################
# Array Calculations #
######################
def interp2_fill_oob(X, Y, Zi, Xi, Yi, fillval=np.nan, coord_grace=True):
# Rows and columns of Zi outside the domain of Z are made NaN.
# Assume X and Y coordinates are monotonically increasing/decreasing
# so hopefully we only need to work a short way inwards from the edges.
Xi_size = Xi.size
Yi_size = Yi.size
Xmin = min(X[0], X[-1])
Ymax = max(Y[0], Y[-1])
x_lfttest_val = X[0]
x_rgttest_val = X[-1]
y_toptest_val = Y[0]
y_bottest_val = Y[-1]
if x_lfttest_val == Xmin:
# X-coords increase from left to right.
x_lfttest_op = operator.lt
x_rgttest_op = operator.gt
else:
# X-coords decrease from left to right.
x_lfttest_op = operator.gt
x_rgttest_op = operator.lt
if y_toptest_val == Ymax:
# Y-coords decrease from top to bottom.
y_toptest_op = operator.gt
y_bottest_op = operator.lt
else:
# Y-coords increase from top to bottom.
y_toptest_op = operator.lt
y_bottest_op = operator.gt
if coord_grace:
x_grace = (X[1] - X[0]) / 64
y_grace = (Y[1] - Y[0]) / 16
x_lfttest_val -= x_grace
x_rgttest_val += x_grace
y_toptest_val -= y_grace
y_bottest_val += y_grace
x_lfttest_op = operator.le if x_lfttest_op(0, 1) else operator.ge
x_rgttest_op = operator.le if x_rgttest_op(0, 1) else operator.ge
y_toptest_op = operator.le if y_toptest_op(0, 1) else operator.ge
y_bottest_op = operator.le if y_bottest_op(0, 1) else operator.ge
i = 0
while x_lfttest_op(Xi[i], x_lfttest_val) and i < Xi_size:
Zi[:, i] = fillval
i += 1
i = -1
while x_rgttest_op(Xi[i], x_rgttest_val) and i >= -Xi_size:
Zi[:, i] = fillval
i -= 1
j = 0
while y_toptest_op(Yi[j], y_toptest_val) and j < Yi_size:
Zi[j, :] = fillval
j += 1
j = -1
while y_bottest_op(Yi[j], y_bottest_val) and j >= -Yi_size:
Zi[j, :] = fillval
j -= 1
return Zi
# def interp2_cv2(X, Y, Z, Xi, Yi, interp_str, extrapolate=False, oob_val=np.nan):
# xx = np.repeat(np.reshape((Xi-X[0]/2, (1, X.size)), Y.size, axis=0)
# yy = np.repeat(np.reshape((Yi-Y[0])/-2, (Y.size, 1)), X.size, axis=1)
# cv2.remap(Z, xx.astype(np.float32), yy.astype(np.float32), cv2.INTER_LINEAR)
# pass
def interp2_gdal(X, Y, Z, Xi, Yi, interp_str, extrapolate=False, oob_val=np.nan):
"""
Resample array data from one set of x-y grid coordinates to another.
Parameters
----------
X : ndarray, 1D
Grid coordinates corresponding to all columns in the raster image,
from left to right, such that `X[j]` specifies the x-coordinate for
all pixels in `Z[:, j]`.
Y : ndarray, 1D
Grid coordinates corresponding to all rows in the raster image,
from top to bottom, such that `Y[i]` specifies the y-coordinate for
all pixels in `Z[i, :]`.
Z : ndarray, 2D
Array containing values to be resampled.
Xi : ndarray, 1D
New grid x-coordinates, like `X` array.
Yi : ndarray, 1D
New grid y-coordinates, like `Y` array.
interp_str : str
Interpolation/resampling method, must be one of the following:
'nearest', 'linear', 'cubic', 'spline', 'lanczos', 'average', 'mode'
extrapolate : bool
Whether or not to interpolate values for pixels with new grid coords
`Xi` and `Yi` that fall outside the range of old grid coords `X` and `Y`.
If True, allow the interpolation method to set the values of these pixels.
If False, set the values of these pixels to `oob_val`.
oob_val : int/float
(Option only applies when `extrapolate=True`.)
Value to fill any regions of the output array where new grid coords
`Xi` and `Yi` fall outside the range of old grid coords `X` and `Y`.
Returns
-------
Zi : ndarray, 2D, same shape and type as `Z`
The resampled array.
"""
dtype_gdal, promote_dtype = dtype_np2gdal(Z.dtype)
if promote_dtype is not None:
Z = Z.astype(promote_dtype)
interp_gdal = interp_str2gdal(interp_str)
mem_drv = gdal.GetDriverByName('MEM')
ds_in = mem_drv.Create('', X.size, Y.size, 1, dtype_gdal)
ds_in.SetGeoTransform((X[0], X[1]-X[0], 0,
Y[0], 0, Y[1]-Y[0]))
ds_in.GetRasterBand(1).WriteArray(Z)
ds_out = mem_drv.Create('', Xi.size, Yi.size, 1, dtype_gdal)
ds_out.SetGeoTransform((Xi[0], Xi[1]-Xi[0], 0,
Yi[0], 0, Yi[1]-Yi[0]))
gdal.ReprojectImage(ds_in, ds_out, '', '', interp_gdal)
Zi = ds_out.GetRasterBand(1).ReadAsArray()
if not extrapolate:
interp2_fill_oob(X, Y, Zi, Xi, Yi, oob_val)
return Zi
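# Hedged usage sketch (added): resample a 2 m grid onto a 1 m grid with
# bilinear interpolation; cells outside the source grid are filled with NaN.
def _example_interp2_gdal():
    X = np.arange(0.0, 20.0, 2.0)
    Y = np.arange(20.0, 0.0, -2.0)
    Z = np.random.rand(Y.size, X.size).astype(np.float32)
    Xi = np.arange(0.0, 20.0, 1.0)
    Yi = np.arange(20.0, 0.0, -1.0)
    return interp2_gdal(X, Y, Z, Xi, Yi, 'linear', extrapolate=False, oob_val=np.nan)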
def interp2_scipy(X, Y, Z, Xi, Yi, interp, extrapolate=False, oob_val=np.nan,
griddata=False,
SBS=False,
RGI=False, RGI_extrap=True, RGI_fillVal=None,
CLT=False, CLT_fillVal=np.nan,
RBS=False):
# TODO: Rewrite docstring in new standard.
"""
Aims to provide similar functionality to interp2_gdal using SciPy's
interpolation library. However, initial tests show that interp2_gdal
both runs more quickly and produces output more similar to MATLAB's
interp2 function for every method required by Ian's mosaicking script.
griddata, SBS, and CLT interpolation methods are not meant to be used
for the resampling of a large grid as is done here.
"""
order_dict = {
'nearest' : 0,
'linear' : 1,
'bilinear' : 1,
'quadratic': 2,
'cubic' : 3,
'bicubic' : 3,
'quartic' : 4,
'quintic' : 5,
}
order = order_dict[interp]
method_set = True in (griddata, SBS, RGI, CLT, RBS)
if griddata:
# Supports nearest, linear, and cubic interpolation methods.
# Has errored out with "QH7074 qhull warning: more than 16777215 ridges.
# ID field overflows and two ridges may have the same identifier."
# when used on large arrays. Fails to draw a convex hull of input points.
# Needs more testing, but seems to handle NaN input. Output for linear and
# cubic methods shows NaN borders when interpolating out of input domain.
xx, yy = np.meshgrid(X, Y)
xxi, yyi = np.meshgrid(Xi, Yi)
Zi = scipy.interpolate.griddata((xx.flatten(), yy.flatten()), Z.flatten(),
(xxi.flatten(), yyi.flatten()), interp)
Zi.resize((Yi.size, Xi.size))
elif SBS:
# Supports all 5 orders of spline interpolation.
# Can't handle NaN input; results in all NaN output.
xx, yy = np.meshgrid(X, Y)
xxi, yyi = np.meshgrid(Xi, Yi)
fn = scipy.interpolate.SmoothBivariateSpline(xx.flatten(), yy.flatten(), Z.flatten(),
kx=order, ky=order)
Zi = fn.ev(xxi, yyi)
Zi.resize((Yi.size, Xi.size))
elif RGI or (not method_set and (order == 0 or (order == 1 and np.any(np.isnan(Z))))):
# Supports nearest and linear interpolation methods.
xxi, yyi = np.meshgrid(Xi, Yi[::-1])
pi = np.column_stack((yyi.flatten(), xxi.flatten()))
fn = scipy.interpolate.RegularGridInterpolator((Y[::-1], X), Z, method=interp,
bounds_error=(not RGI_extrap), fill_value=RGI_fillVal)
Zi = fn(pi, method=interp)
Zi.resize((Yi.size, Xi.size))
elif CLT or (not method_set and (order == 3 and np.any(np.isnan(Z)))):
# Performs cubic interpolation of data,
# but includes logic to first perform a nearest resampling of input NaNs.
# Produces the same error as scipy.interpolate.griddata when used on large arrays.
if np.any(np.isnan(Z)):
Zi = interp2_scipy(X, Y, Z, Xi, Yi, 'nearest')
Zi_data = np.where(~np.isnan(Zi))
Z_data = np.where(~np.isnan(Z))
p = np.column_stack((Z_data[0], Z_data[1]))
pi = np.column_stack((Zi_data[0], Zi_data[1]))
fn = scipy.interpolate.CloughTocher2DInterpolator(p, Z[Z_data], fill_value=CLT_fillVal)
Zi[Zi_data] = fn(pi)
else:
xx, yy = np.meshgrid(X, Y)
xxi, yyi = np.meshgrid(Xi, Yi)
p = np.column_stack((xx.flatten(), yy.flatten()))
pi = np.column_stack((xxi.flatten(), yyi.flatten()))
fn = scipy.interpolate.CloughTocher2DInterpolator(p, Z.flatten(), fill_value=CLT_fillVal)
Zi = fn(pi)
Zi.resize((Yi.size, Xi.size))
elif RBS or (not method_set and (order in (2, 4))):
# Supports all 5 orders of spline interpolation.
# Can't handle NaN input; results in all NaN output.
fn = scipy.interpolate.RectBivariateSpline(Y[::-1], X, Z,
kx=order, ky=order)
Zi = fn(Yi[::-1], Xi, grid=True)
else:
# Supports linear, cubic, and quintic interpolation methods.
# Can't handle NaN input; results in all NaN output.
# Default interpolator for its presumed efficiency.
fn = scipy.interpolate.interp2d(X, Y[::-1], Z, kind=interp)
Zi = fn(Xi, Yi)
if not extrapolate:
interp2_fill_oob(X, Y, Zi, Xi, Yi, oob_val)
return Zi
def imresize(array, size, interp='bicubic', dtype_out='input',
method='cv2', float_resize=True, round_proper=True,
one_dim_axis=1):
"""
Resize an array.
Parameters
----------
array : ndarray, 2D
The array to resize.
size : shape tuple (2D) or scalar value
If shape tuple, returns an array of this size.
If scalar value, returns an array of shape
that is `size` times the shape of `array`.
interp : str; 'nearest', 'area', 'bilinear', 'bicubic', or 'lanczos'
Interpolation method to use during resizing.
dtype_out : str; 'input' or 'float'
If 'input', data type of the returned array is
the same as `array`.
If 'float' and `array` data type is of floating type,
data type of the returned array is the same.
If 'float' and `array` data type is of integer type,
data type of the returned array is float32.
method : str; 'cv2', 'pil', 'gdal', or 'scipy'
Specifies which method used to perform resizing.
'cv2' ------ cv2.resize [1]
'pil' ------ PIL.Image.resize [2]
'scipy' ---- scipy.misc.imresize (WILL BE RETIRED SOON) [3]
'gdal' ----- interp2_gdal (local, utilizes gdal.ReprojectImage [4])
float_resize : bool
If True, convert integer arrays to float32 before resizing.
round_proper : bool
If the resized array is converted from floating
to an integer data type (such as when `float_resize=True`
and `dtype_out='input'`)...
- If True, round X.5 values up to (X + 1).
        - If False, round X.5 values to the nearest even integer.
one_dim_axis : int, 0 or 1
Which directional layout to give to a one-dimensional
`array` before resizing.
If 0, array runs vertically downwards across rows.
If 1, array runs horizontally rightwards across columns.
Returns
-------
array_r : ndarray, 2D, same type as `array`
The resized array.
See Also
--------
imresize_pil
imresize_old
Notes
-----
This function is meant to replicate MATLAB's `imresize` function [5].
References
----------
.. [1] https://docs.opencv.org/2.4/modules/imgproc/doc/geometric_transformations.html#void resize(InputArray src, OutputArray dst, Size dsize, double fx, double fy, int interpolation)
.. [2] http://pillow.readthedocs.io/en/3.1.x/reference/Image.html
.. [3] https://docs.scipy.org/doc/scipy/reference/generated/scipy.misc.imresize.html
.. [4] http://gdal.org/java/org/gdal/gdal/gdal.html#ReprojectImage-org.gdal.gdal.Dataset-org.gdal.gdal.Dataset-java.lang.String-java.lang.String-int-double-double-org.gdal.gdal.ProgressCallback-java.util.Vector-
https://svn.osgeo.org/gdal/trunk/autotest/alg/reproject.py
.. [5] https://www.mathworks.com/help/images/ref/imresize.html
"""
array_backup = array
dtype_in = array.dtype
method_choices = ('cv2', 'pil', 'scipy', 'gdal')
if method not in method_choices:
raise InvalidArgumentError("`method` must be one of {}, but was '{}'".format(method_choices, method))
dtype_out_choices = ('input', 'float')
if dtype_out not in dtype_out_choices:
raise InvalidArgumentError("`dtype_out` must be one of {}, but was '{}'".format(dtype_out_choices, dtype_out))
# Handle interpolation method lookups.
interp_dict = None
if method == 'cv2':
interp_dict = {
'nearest' : cv2.INTER_NEAREST,
'area' : cv2.INTER_AREA,
'bilinear' : cv2.INTER_LINEAR,
'bicubic' : cv2.INTER_CUBIC,
'lanczos' : cv2.INTER_LANCZOS4,
}
elif method == 'pil':
interp_dict = {
'nearest' : Image.NEAREST,
'box' : Image.BOX,
'linear' : Image.BILINEAR,
'bilinear' : Image.BILINEAR,
'hamming' : Image.HAMMING,
'cubic' : Image.BICUBIC,
'bicubic' : Image.BICUBIC,
'lanczos' : Image.LANCZOS,
}
if interp_dict is not None:
if interp not in interp_dict.keys():
raise UnsupportedMethodError("`interp` must be one of {}, but was '{}'".format(interp_dict.keys(), interp))
interp_code = interp_dict[interp]
# Handle 1D array input.
one_dim_flag = False
if array.ndim == 1:
one_dim_flag = True
if one_dim_axis == 0:
array_shape_1d = (array.size, 1)
elif one_dim_axis == 1:
array_shape_1d = (1, array.size)
else:
raise InvalidArgumentError("`one_dim_axis` must be either 0 or 1")
array = np.reshape(array, array_shape_1d)
# If a resize factor is provided for size, round up the x, y pixel
# sizes for the output array to match MATLAB's imresize function.
new_shape = size if type(size) == tuple else tuple(np.ceil(np.dot(size, array.shape)).astype(int))
if one_dim_flag and type(size) != tuple:
new_shape = (new_shape[0], 1) if one_dim_axis == 0 else (1, new_shape[1])
# The trivial case
if new_shape == array.shape:
return array_backup.copy()
# Handle input data type and conversions.
promote_dtype = None
promote_is_demote = False
if float_resize:
if np.issubdtype(dtype_in, np.floating):
pass
else:
array = array.astype(np.float32)
elif method == 'cv2':
if dtype_in == np.bool:
promote_dtype = np.uint8
elif dtype_in == np.int8:
promote_dtype = np.int16
elif dtype_in == np.float16:
promote_dtype = np.float32
elif dtype_in in (np.int32, np.uint32, np.int64, np.uint64):
raise InvalidArgumentError("`array` data type cannot be of 32/64-bit int/uint "
"when method='{}', but was {}; consider setting "
"`float_resize=True`".format(method, dtype_in))
elif method == 'pil':
if dtype_in == np.uint16:
promote_dtype = np.int32
elif dtype_in in (np.uint32, np.int64, np.uint64):
if np.any(array > np.iinfo(np.int32).max) or np.any(array < np.iinfo(np.int32).min):
raise InvalidArgumentError("`array` data type ({}) is not supported by method='{}', "
"but values cannot fit in int32; consider setting "
"`float_resize=True`")
promote_dtype = np.int32
promote_is_demote = True
elif dtype_in == np.float16:
promote_dtype = np.float32
if promote_dtype is not None:
warn("`array` data type ({}) is not supported by '{}' resizing method, "
"but can safely be {}{} to {} for processing".format(dtype_in, method,
'promoted'*(not promote_is_demote), 'demoted'*promote_is_demote, promote_dtype(1).dtype))
array = array.astype(promote_dtype)
# Resize array.
if method == 'cv2':
array_r = cv2.resize(array, tuple(list(new_shape)[::-1]), interpolation=interp_code)
elif method == 'pil':
image = (Image.frombytes(mode='1', size=array.shape[::-1], data=np.packbits(array, axis=1))
if array.dtype == np.bool else Image.fromarray(array))
image = image.resize(tuple(list(new_shape)[::-1]), interp_code)
# Set "default" data type for reading data into NumPy array.
if image.mode == '1':
dtype_out_pil = np.bool
image = image.convert('L')
elif image.mode == 'L':
dtype_out_pil = np.uint8
elif image.mode == 'I':
dtype_out_pil = np.int32
elif image.mode == 'F':
dtype_out_pil = np.float32
# Convert Pillow Image to NumPy array.
array_r = np.fromstring(image.tobytes(), dtype=dtype_out_pil)
array_r = array_r.reshape((image.size[1], image.size[0]))
elif method == 'gdal':
# Set up grid coordinate arrays, then run interp2_gdal.
X = np.arange(array.shape[1]) + 1
Y = np.arange(array.shape[0]) + 1
Xi = np.linspace(X[0], X[-1] + (X[1]-X[0]), num=(new_shape[1] + 1))[0:-1]
Yi = np.linspace(Y[0], Y[-1] + (Y[1]-Y[0]), num=(new_shape[0] + 1))[0:-1]
array_r = interp2_gdal(X, Y, array, Xi, Yi, interp, extrapolate=False)
elif method == 'scipy':
PILmode = 'L' if array.dtype in (np.bool, np.uint8) else 'F'
if PILmode == 'L' and array.dtype != np.uint8:
array = array.astype(np.uint8)
array_r = scipy.misc.imresize(array, new_shape, interp, PILmode)
# Handle output data type and conversions.
if dtype_out == 'input' and array_r.dtype != dtype_in:
if round_proper:
array_r = astype_round_and_crop(array_r, dtype_in, allow_modify_array=True)
else:
array_r = astype_cropped(array_r, dtype_in, allow_modify_array=True)
elif dtype_out == 'float' and not np.issubdtype(array_r.dtype, np.floating):
array_r = array_r.astype(np.float32)
if one_dim_flag:
result_size_1d = new_shape[0] if one_dim_axis == 0 else new_shape[1]
array_r = np.reshape(array_r, result_size_1d)
return array_r
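# Illustrative sketch (editor's addition, not part of the original module): minimal
# usage of `imresize` above, assuming NumPy is imported as `np` and OpenCV backs the
# default 'cv2' method. A scale factor of 0.5 on a (4, 6) array yields
# ceil(0.5 * shape) == (2, 3), mirroring MATLAB's rounding; a shape tuple is used as-is.
def _demo_imresize():
    arr = np.arange(24, dtype=np.float32).reshape(4, 6)
    half = imresize(arr, 0.5, interp='bilinear')      # -> shape (2, 3)
    fixed = imresize(arr, (8, 12), interp='nearest')  # -> shape (8, 12)
    return half.shape, fixed.shape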
def imresize_pil(array, size, interp='bicubic', dtype_out='input',
float_resize=True, round_proper=True,
one_dim_axis=1):
"""
Resize an array.
Parameters
----------
array : ndarray, 2D
The array to resize.
size : shape tuple (2D) or scalar value
If shape tuple, returns an array of this size.
If scalar value, returns an array of shape
that is `size` times the shape of `array`.
interp : str; 'nearest', 'box', 'bilinear', 'hamming',
'bicubic', or 'lanczos'
Interpolation method to use during resizing.
dtype_out : str; 'default' or 'input'
If 'default' and `float_resize=True`, the returned
array data type will be float32.
If 'default' and `float_resize=False`, the returned
array data type will be...
- bool if `array` is bool
- uint8 if `array` is uint8
- int32 if `array` is integer other than uint8
- float32 if `array` is floating
If 'input', the returned array data type will be
the same as `array` data type.
float_resize : bool
If True, convert the Pillow image of `array`
to PIL mode 'F' before resizing.
If False, allow the Pillow image to stay in its
default PIL mode for resizing.
The rounding scheme of resized integer images with
integer PIL modes (e.g. 'L' or 'I') is unclear when
compared with the same integer images in the 'F' PIL mode.
This option has no effect when `array` dtype is floating.
round_proper : bool
If the resized array is converted from floating
to an integer data type (such as when `float_resize=True`
and `dtype_out='input'`)...
- If True, round X.5 values up to (X + 1).
        - If False, round X.5 values to the nearest even integer.
one_dim_axis : int, 0 or 1
Which directional layout to give to a one-dimensional
`array` before resizing.
If 0, array runs vertically downwards across rows.
If 1, array runs horizontally rightwards across columns.
Returns
-------
array_r : ndarray, 2D, same type as `array`
The resized array.
See Also
--------
imresize
imresize_old
Notes
-----
This function is a wrapper for Pillow's `PIL.Image.resize` function [1]
meant to replicate MATLAB's `imresize` function [2].
References
----------
.. [1] http://pillow.readthedocs.io/en/3.1.x/reference/Image.html
.. [2] https://www.mathworks.com/help/images/ref/imresize.html
"""
array_backup = array
array_dtype_in = array.dtype
interp_choices = ('nearest', 'box', 'bilinear', 'hamming', 'bicubic', 'lanczos')
interp_dict = {
'nearest' : Image.NEAREST,
'box' : Image.BOX,
'linear' : Image.BILINEAR,
'bilinear' : Image.BILINEAR,
'hamming' : Image.HAMMING,
'cubic' : Image.BICUBIC,
'bicubic' : Image.BICUBIC,
'lanczos' : Image.LANCZOS,
}
try:
interp_pil = interp_dict[interp]
except KeyError:
raise UnsupportedMethodError("`interp` must be one of {}, but was '{}'".format(interp_choices, interp))
dtype_out_choices = ('default', 'input')
if dtype_out not in dtype_out_choices:
raise InvalidArgumentError("`dtype_out` must be one of {}, but was '{}'".format(dtype_out_choices, dtype_out))
# Handle 1D array input.
one_dim_flag = False
if array.ndim == 1:
one_dim_flag = True
if one_dim_axis == 0:
array_shape_1d = (array.size, 1)
elif one_dim_axis == 1:
array_shape_1d = (1, array.size)
else:
raise InvalidArgumentError("`one_dim_axis` must be either 0 or 1")
array = np.reshape(array, array_shape_1d)
# If a resize factor is provided for size, round up the x, y pixel
# sizes for the output array to match MATLAB's imresize function.
new_shape = size if type(size) == tuple else tuple(np.ceil(np.dot(size, array.shape)).astype(int))
if one_dim_flag and type(size) != tuple:
new_shape = (new_shape[0], 1) if one_dim_axis == 0 else (1, new_shape[1])
# The trivial case
if new_shape == array.shape:
return array_backup
# Convert NumPy array to Pillow Image.
image = None
if array_dtype_in == np.bool:
if float_resize:
image = Image.fromarray(array, 'L')
else:
image = Image.frombytes(mode='1', size=array.shape[::-1], data=np.packbits(array, axis=1))
else:
if array_dtype_in == np.float16:
array = array.astype(np.float32)
if not float_resize:
if array_dtype_in == np.uint16:
array = array.astype(np.int32)
elif array_dtype_in == np.uint32:
if np.any(array > np.iinfo(np.int32).max):
raise InvalidArgumentError("`array` of uint32 cannot be converted to int32")
array = array.astype(np.int32)
image = Image.fromarray(array)
if float_resize and image.mode != 'F':
image = image.convert('F')
# Resize array.
image = image.resize(tuple(list(new_shape)[::-1]), interp_pil)
# Set "default" data type for reading data into NumPy array.
if image.mode == '1':
dtype_out_np = np.bool
image = image.convert("L")
elif image.mode == 'L':
dtype_out_np = np.uint8
elif image.mode == 'I':
dtype_out_np = np.int32
elif image.mode == 'F':
dtype_out_np = np.float32
# Convert Pillow Image to NumPy array.
array_r = np.fromstring(image.tobytes(), dtype=dtype_out_np)
array_r = array_r.reshape((image.size[1], image.size[0]))
# Clean up resized array.
if dtype_out == 'input' and array_r.dtype != array_dtype_in:
if round_proper:
array_r = astype_round_and_crop(array_r, array_dtype_in, allow_modify_array=True)
else:
array_r = astype_cropped(array_r, array_dtype_in, allow_modify_array=True)
if one_dim_flag:
result_size_1d = new_shape[0] if one_dim_axis == 0 else new_shape[1]
array_r = np.reshape(array_r, result_size_1d)
return array_r
def imresize_old(array, size, interp='bicubic', dtype_out='input',
method='pil',
one_dim_axis=1):
"""
Resize an array.
Parameters
----------
array : ndarray, 2D
The array to resize.
size : shape tuple (2D) or scalar value
If shape tuple, returns an array of this size.
If scalar value, returns an array of shape
that is `size` times the shape of `array`.
interp : str
Interpolation method to use during resizing.
See documentation for a particular `method`.
dtype_out : str; 'input' or 'float'
If 'input', data type of the returned array is
the same as `array`.
If 'float' and `array` data type is of floating type,
data type of the returned array is the same.
If 'float' and `array` data type is of integer type,
data type of the returned array is float32.
method : str; 'cv2', 'pil', 'gdal', or 'scipy'
Specifies which method used to perform resizing.
'cv2' ------ cv2.resize [1]
'pil' ------ PIL.Image.resize [2]
'scipy' ---- scipy.misc.imresize (WILL BE RETIRED SOON) [3]
'gdal' ----- interp2_gdal (local, utilizes gdal.ReprojectImage [4])
one_dim_axis : int, 0 or 1
Which directional layout to give to a one-dimensional
`array` before resizing.
If 0, array runs vertically downwards across rows.
If 1, array runs horizontally rightwards across columns.
Returns
-------
array_r : ndarray, 2D, same type as `array`
The resized array.
See Also
--------
imresize
imresize_pil
Notes
-----
This function is meant to replicate MATLAB's `imresize` function [5].
References
----------
.. [1] https://docs.opencv.org/2.4/modules/imgproc/doc/geometric_transformations.html#void resize(InputArray src, OutputArray dst, Size dsize, double fx, double fy, int interpolation)
.. [2] http://pillow.readthedocs.io/en/3.1.x/reference/Image.html
.. [3] https://docs.scipy.org/doc/scipy/reference/generated/scipy.misc.imresize.html
.. [4] http://gdal.org/java/org/gdal/gdal/gdal.html#ReprojectImage-org.gdal.gdal.Dataset-org.gdal.gdal.Dataset-java.lang.String-java.lang.String-int-double-double-org.gdal.gdal.ProgressCallback-java.util.Vector-
https://svn.osgeo.org/gdal/trunk/autotest/alg/reproject.py
.. [5] https://www.mathworks.com/help/images/ref/imresize.html
"""
array_backup = array
method_choices = ('cv2', 'gdal', 'pil', 'scipy')
dtype_out_choices = ('float', 'input')
if method not in method_choices:
raise UnsupportedMethodError("`method` must be one of {}, "
"but was '{}'".format(method_choices, method))
if dtype_out not in dtype_out_choices:
raise InvalidArgumentError("`dtype_out` must be one of {}, "
"but was '{}'".format(dtype_out_choices, dtype_out))
# Handle 1D array input.
one_dim_flag = False
if array.ndim == 1:
one_dim_flag = True
if one_dim_axis == 0:
array_shape_1d = (array.size, 1)
elif one_dim_axis == 1:
array_shape_1d = (1, array.size)
else:
raise InvalidArgumentError("`one_dim_axis` must be either 0 or 1")
array = np.reshape(array, array_shape_1d)
# If a resize factor is provided for size, round up the x, y pixel
# sizes for the output array to match MATLAB's imresize function.
new_shape = size if type(size) == tuple else tuple(np.ceil(np.dot(size, array.shape)).astype(int))
if one_dim_flag and type(size) != tuple:
new_shape = (new_shape[0], 1) if one_dim_axis == 0 else (1, new_shape[1])
# The trivial case
if new_shape == array.shape:
return array_backup
array_dtype_in = array.dtype
dtype_out_np = None
if dtype_out == 'float':
dtype_out_np = array_dtype_in if np.issubdtype(array_dtype_in, np.floating) else np.float32
elif dtype_out == 'input':
dtype_out_np = array_dtype_in
if method == 'cv2':
interp_dict = {
'nearest' : cv2.INTER_NEAREST,
'area' : cv2.INTER_AREA,
'bilinear' : cv2.INTER_LINEAR,
'bicubic' : cv2.INTER_CUBIC,
'lanczos' : cv2.INTER_LANCZOS4,
}
try:
interp_cv2 = interp_dict[interp]
except KeyError:
raise InvalidArgumentError("For `method=cv2`, `interp` must be one of {}, "
"but was '{}'".format(interp_dict.keys(), interp))
if array_dtype_in == np.bool:
array = array.astype(np.uint8)
array_r = cv2.resize(array, tuple(list(new_shape)[::-1]), interpolation=interp_cv2)
elif method == 'gdal':
# Set up grid coordinate arrays, then run interp2_gdal.
X = np.arange(array.shape[1]) + 1
Y = np.arange(array.shape[0]) + 1
Xi = np.linspace(X[0], X[-1] + (X[1]-X[0]), num=(new_shape[1] + 1))[0:-1]
Yi = np.linspace(Y[0], Y[-1] + (Y[1]-Y[0]), num=(new_shape[0] + 1))[0:-1]
array_r = interp2_gdal(X, Y, array, Xi, Yi, interp, extrapolate=False)
elif method == 'pil':
return imresize_pil(array, new_shape, interp)
elif method == 'scipy':
PILmode = 'L' if array.dtype in (np.bool, np.uint8) else 'F'
if PILmode == 'L' and array.dtype != np.uint8:
array = array.astype(np.uint8)
array_r = scipy.misc.imresize(array, new_shape, interp, PILmode)
# Clean up resized array.
if array_r.dtype != dtype_out_np:
array_r = astype_round_and_crop(array_r, dtype_out_np, allow_modify_array=True)
if one_dim_flag:
result_size_1d = new_shape[0] if one_dim_axis == 0 else new_shape[1]
array_r = np.reshape(array_r, result_size_1d)
return array_r
def conv2_slow(array, kernel, shape='full', default_double_out=True, zero_border=True,
fix_float_zeros=True, nan_over_zero=True, allow_flipped_processing=True):
"""
Convolve two 2D arrays.
Parameters
----------
array : ndarray, 2D
Primary array to convolve.
kernel : ndarray, 2D, smaller shape than `array`
Secondary, smaller array to convolve with `array`.
shape : str; 'full', 'same', or 'valid'
See documentation for `scipy.signal.convolve` [1].
default_double_out : bool
If True and `array` is not of floating data type,
casts the result to float64 before returning.
The sole purpose of this option is to allow this function
to most closely replicate the corresponding MATLAB array method [2].
zero_border : bool
When `kernel` hangs off the edges of `array`
during convolution calculations...
If True, pixels beyond the edges of `array`
are extrapolated as zeros.
If False, pixels beyond the edges of `array`
are extrapolated as the value of the closest edge pixel.
This option only applies when `shape='same'`,
since a zero border is required when `shape='full'`
and does not make sense when `shape='valid'`.
fix_float_zeros : bool
To correct for FLOP error in convolution where the result
should be zero but isn't, immediately following convolution
map array values between -1.0e-12 and +1.0e-11 to zero.
nan_over_zero : bool
If True, let NaN x 0 = NaN in convolution computation.
If False, let NaN x 0 = 0 in convolution computation.
allow_flipped_processing : bool
If True and at least one of `kernel`'s side lengths is even,
        rotate both `array` and `kernel` 180 degrees before performing convolution,
then rotate the result array 180 degrees before returning.
The sole purpose of this option is to allow this function
to most closely replicate the corresponding MATLAB array method [2].
Returns
-------
array_c : ndarray, 2D
A 2D array containing the convolution of the input array and kernel.
See Also
--------
conv2
Notes
-----
This function is meant to replicate MATLAB's conv2 function [2].
Scipy's convolution function cannot handle NaN input as it results in all NaN output.
In comparison, MATLAB's conv2 function takes a sensible approach by letting NaN win out
in all calculations involving pixels with NaN values in the input array.
To replicate this, we set all NaN values to zero before performing convolution,
then mask our result array with NaNs according to a binary dilation of ALL NaN locations
in the input array, dilating using a structure of ones with same shape as the provided kernel.
For large arrays, this function will use an FFT method for convolution that results in
FLOP errors on the order of 10^-12. For this reason, a floating result array will have
all resulting pixel values between -1.0e-12 and 10.0e-12 set to zero.
References
----------
.. [1] https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.convolve.html
.. [2] https://www.mathworks.com/help/matlab/ref/conv2.html
"""
shape_choices = ('full', 'same', 'valid')
if shape not in shape_choices:
raise InvalidArgumentError("`shape` must be one of {}, but was '{}'".format(shape_choices, shape))
if default_double_out:
dtype_out = None
if np.issubdtype(array.dtype, np.floating):
dtype_out = array.dtype
if (np.issubdtype(kernel.dtype, np.floating)
and int(str(kernel.dtype).replace('float', '')) > int(str(dtype_out).replace('float', ''))):
warn("Since default_double_out=True, kernel with floating dtype ({}) at greater precision than "
"array floating dtype ({}) is cast to array dtype".format(kernel.dtype, dtype_out))
kernel = kernel.astype(dtype_out)
else:
dtype_out = np.float64
if kernel.dtype == np.bool:
warn("Boolean data type for kernel is not supported, casting to float32")
kernel = kernel.astype(np.float32)
rotation_flag = False
if allow_flipped_processing:
array, kernel, rotation_flag = rotate_arrays_if_kernel_has_even_sidelength(array, kernel)
# Take a record of where all NaN values are located
# before setting the values of those pixels to zero.
fixnans_flag = False
if np.issubdtype(array.dtype, np.floating):
array_nans = np.isnan(array)
if np.any(array_nans):
fixnans_flag = True
array[array_nans] = 0
else:
del array_nans
# Edge settings
array_backup = array
if (fixnans_flag and shape != 'same') or (shape == 'same' and not zero_border):
if shape in ('full', 'same'):
pady_top, padx_lft = (np.array(kernel.shape) - 1) / 2
pady_bot, padx_rht = np.array(kernel.shape) / 2
elif shape == 'valid':
pady_top, padx_lft = np.array(kernel.shape) / 2
pady_bot, padx_rht = (np.array(kernel.shape) - 1) / 2
pady_top, padx_lft = int(pady_top), int(padx_lft)
pady_bot, padx_rht = int(pady_bot), int(padx_rht)
if shape == 'same': # and not zero_border
array = np.pad(array, ((pady_top, pady_bot), (padx_lft, padx_rht)), 'edge')
# Perform convolution.
method = scipy.signal.choose_conv_method(array, kernel, shape)
array_c = scipy.signal.convolve(array, kernel, shape, method)
if method != 'direct' and fix_float_zeros and np.issubdtype(array_c.dtype, np.floating):
# Fix FLOP error from FFT method where we assume zero was the desired result.
array_c[(-1.0e-12 < array_c) & (array_c < 10.0e-12)] = 0
# Apply dilation of original NaN pixels to result.
if fixnans_flag:
array_nans_backup = array_nans
if shape != 'same' or not zero_border:
if shape == 'full':
array_nans = np.pad(array_nans, ((pady_top, pady_bot), (padx_lft, padx_rht)), 'constant', constant_values=0)
elif shape == 'same': # and not zero_border
array_nans = np.pad(array_nans, ((pady_top, pady_bot), (padx_lft, padx_rht)), 'edge')
dilate_structure = np.ones(kernel.shape, dtype=np.uint8)
if not nan_over_zero:
dilate_structure[kernel == 0] = 0
array_nans_dilate = imdilate(array_nans, dilate_structure)
if shape == 'valid':
pady_bot = -pady_bot if pady_bot > 0 else None
padx_rht = -padx_rht if padx_rht > 0 else None
array_nans_dilate = array_nans_dilate[pady_top:pady_bot, padx_lft:padx_rht]
array_c[array_nans_dilate] = np.nan
        # Restore the input array to its original state.
array_backup[array_nans_backup] = np.nan
# Clean up result array.
if shape == 'same' and not zero_border:
pady_bot = -pady_bot if pady_bot > 0 else None
padx_rht = -padx_rht if padx_rht > 0 else None
array_c = array_c[pady_top:pady_bot, padx_lft:padx_rht]
# FIXME: Make returned data type function like conv2.
if default_double_out and array_c.dtype != dtype_out:
array_c = array_c.astype(dtype_out)
return fix_array_if_rotation_was_applied(array_c, rotation_flag)
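# Illustrative sketch (editor's addition, not part of the original module): how
# `conv2_slow` propagates NaNs the way MATLAB's conv2 does. A single NaN in the
# input spreads to every output pixel whose 3x3 kernel footprint touches it, while
# the rest of the result is an ordinary convolution. Assumes NumPy/SciPy and the
# helpers above (e.g. `imdilate`) are available in this module.
def _demo_conv2_slow_nan():
    arr = np.ones((5, 5), dtype=np.float64)
    arr[2, 2] = np.nan
    kernel = np.ones((3, 3), dtype=np.float64)
    out = conv2_slow(arr, kernel, shape='same')
    return int(np.isnan(out).sum())  # 9: the 3x3 neighborhood around the input NaN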
def conv2(array, kernel, shape='full', conv_depth='default',
zero_border=True, fix_float_zeros=True,
nan_same=False, nan_over_zero=True,
allow_flipped_processing=True):
"""
Convolve two 2D arrays.
Parameters
----------
array : ndarray, 2D
Primary array to convolve.
kernel : ndarray, 2D, smaller shape than `array`
Secondary, smaller array to convolve with `array`.
shape : str; 'full', 'same', or 'valid'
See documentation for MATLAB's `conv2` function [2].
conv_depth : str; 'default', 'input', 'int16', 'single'/'float32', or 'double'/'float64'
Sets the data type depth of the convolution function filter2D,
and correspondingly sets the data type of the returned array.
'default': If `array` is of floating data type,
returns an array of that data type, otherwise returns
an array of float64.
'input': Returns an array of the same data type as `array`.
'int16': Returns an array of int16.
'single'/'float32': Returns an array of float32.
'double'/'float64': Returns an array of float64.
BEWARE: 'float32' option results in
zero_border : bool
When `kernel` hangs off the edges of `array`
during convolution calculations...
If True, pixels beyond the edges of `array`
are extrapolated as zeros.
If False, pixels beyond the edges of `array`
are extrapolated as the value of the closest edge pixel.
This option only applies when `shape='same'`,
since a zero border is required when `shape='full'`
and does not make sense when `shape='valid'`.
fix_float_zeros : bool
To correct for FLOP error in convolution where the result
should be zero but isn't, immediately following convolution
map array values between...
- float32 (single):
-1.0e-6 and +1.0e-6 to zero.
        - float64 (double):
-1.0e-15 and +1.0e-15 to zero.
nan_same : bool
NaN values are treated as 0 in convolution computation,
but NaN pixels are retained from input to output.
nan_over_zero : bool
If True, let NaN x 0 = NaN in convolution computation.
If False, let NaN x 0 = 0 in convolution computation.
allow_flipped_processing : bool
If True and at least one of `kernel`'s side lengths is even,
        rotate both `array` and `kernel` 180 degrees before performing convolution,
then rotate the result array 180 degrees before returning.
The sole purpose of this option is to allow this function
to most closely replicate the corresponding MATLAB array method [2].
Returns
-------
array_c : ndarray, 2D
Array containing the convolution of input array and kernel.
See Also
--------
conv2_slow
Notes
-----
This function utilizes a fast OpenCV function `filter2D` [1]
as a means to replicate MATLAB's `conv2` function [2].
References
----------
.. [1] https://docs.opencv.org/2.4/modules/imgproc/doc/filtering.html#filter2d
.. [2] https://www.mathworks.com/help/matlab/ref/conv2.html
"""
shape_choices = ('full', 'same', 'valid')
if shape not in shape_choices:
raise InvalidArgumentError("`shape` must be one of {}, but was '{}'".format(shape_choices, shape))
conv_depth_choices = ('default', 'input', 'int16', 'single', 'float32', 'double', 'float64')
if conv_depth not in conv_depth_choices:
raise InvalidArgumentError("`conv_depth` must be one of {}, but was '{}'".format(conv_depth_choices, conv_depth))
cv2_array_dtypes = [np.uint8, np.int16, np.uint16, np.float32, np.float64]
cv2_kernel_dtypes = [np.int8, np.uint8, np.int16, np.uint16, np.int32, np.int64, np.uint64, np.float32, np.float64]
# Check array data type.
    array_error = False
    array_dtype_errmsg = ''
array_dtype_in = array.dtype
if array_dtype_in not in cv2_array_dtypes:
array_dtype_errmsg = ("Fast convolution method only allows array dtypes {}, "
"but was {}".format([str(d(1).dtype) for d in cv2_array_dtypes], array.dtype))
# Only cast to a higher data type for safety.
array_dtype_cast = None
if array_dtype_in == np.bool:
array_dtype_cast = np.uint8
elif array_dtype_in == np.int8:
array_dtype_cast = np.int16
elif array_dtype_in == np.float16:
array_dtype_cast = np.float32
if array_dtype_cast is None:
array_error = True
# Check kernel data type.
    kernel_error = False
    kernel_dtype_errmsg = ''
kernel_dtype_in = kernel.dtype
if kernel_dtype_in not in cv2_kernel_dtypes:
kernel_dtype_errmsg = ("Fast convolution method only allows kernel dtypes {} "
"but was {}".format([str(d(1).dtype) for d in cv2_kernel_dtypes], kernel.dtype))
# Only cast to a higher data type for safety.
kernel_dtype_cast = None
if kernel_dtype_in == np.bool:
kernel_dtype_cast = np.uint8
elif kernel_dtype_in == np.uint32:
kernel_dtype_cast = np.uint64
elif kernel_dtype_in == np.float16:
kernel_dtype_cast = np.float32
if kernel_dtype_cast is None:
kernel_error = True
# Fall back to old (slower) conv2 function
# if array or kernel data type is unsupported.
if array_error or kernel_error:
dtype_errmsg = "{}{}{}".format(array_dtype_errmsg * array_error,
"\n" * (array_error * kernel_error),
kernel_dtype_errmsg * kernel_error)
if conv_depth != 'default':
raise UnsupportedDataTypeError(dtype_errmsg + "\nSince conv_depth ('{}') != 'default', "
"cannot fall back to other method".format(conv_depth))
warn(dtype_errmsg + "\n-> Falling back to slower, less exact method")
        return conv2_slow(array, kernel, shape, True,
                          zero_border, fix_float_zeros,
                          nan_over_zero, allow_flipped_processing)
# Promote array or kernel to higher data type if necessary
# to continue with faster and more reliable convolution method.
array_casted = False
if 'array_dtype_cast' in vars():
if array_dtype_in != np.bool:
warn(array_dtype_errmsg + "\n-> Casting array from {} to {} for processing".format(
array_dtype_in, array_dtype_cast(1).dtype))
array = array.astype(array_dtype_cast)
array_casted = True
if 'kernel_dtype_cast' in vars():
        if kernel_dtype_in != np.bool:
warn(kernel_dtype_errmsg + "\n-> Casting kernel from {} to {} for processing".format(
kernel_dtype_in, kernel_dtype_cast(1).dtype))
kernel = kernel.astype(kernel_dtype_cast)
# Set convolution depth and output data type.
ddepth = None
dtype_out = None
conv_dtype_error = False
if conv_depth == 'default':
if np.issubdtype(array_dtype_in, np.floating):
ddepth = -1
dtype_out = array_dtype_in
else:
ddepth = cv2.CV_64F
dtype_out = np.float64
elif conv_depth == 'input':
ddepth = -1
dtype_out = array_dtype_in
elif conv_depth == 'int16':
ddepth = cv2.CV_16S
dtype_out = np.int16
if array.dtype != np.uint8:
conv_dtype_error = True
conv_dtype_errmsg = "conv_depth can only be 'int16' if array dtype is uint8"
elif conv_depth in ('single', 'float32'):
ddepth = cv2.CV_32F
dtype_out = np.float32
if array.dtype == np.float64:
conv_dtype_error = True
conv_dtype_errmsg = "conv_depth can only be 'single'/'float32' if array dtype is not float64"
elif conv_depth in ('double', 'float64'):
ddepth = cv2.CV_64F
dtype_out = np.float64
if array.dtype == np.float32:
conv_dtype_errmsg = "conv_depth can only be 'double'/'float64' if array dtype is not float32"
warn(conv_dtype_errmsg + "\n-> Casting array from float32 to float64 for processing")
array = array.astype(np.float64)
array_casted = True
if conv_dtype_error:
raise UnsupportedDataTypeError(conv_dtype_errmsg)
rotation_flag = False
if allow_flipped_processing:
array, kernel, rotation_flag = rotate_arrays_if_kernel_has_even_sidelength(array, kernel)
# Take a record of where all NaN values are located
# before setting the values of those pixels to zero.
fixnans_flag = False
if np.issubdtype(array.dtype, np.floating):
array_nans = np.isnan(array)
if np.any(array_nans):
fixnans_flag = True
if not array_casted:
array_backup = array
array[array_nans] = 0
else:
del array_nans
# Edge settings
if shape != 'same' or not zero_border:
# The following differences in where to split
# an even side length for a kernel is purely
# to mimic MATLAB's conv2 function.
if shape == 'full' or (shape == 'same' and not zero_border):
pady_top, padx_lft = (np.array(kernel.shape) - 1) / 2
pady_bot, padx_rht = np.array(kernel.shape) / 2
elif shape == 'valid':
pady_top, padx_lft = np.array(kernel.shape) / 2
pady_bot, padx_rht = (np.array(kernel.shape) - 1) / 2
pady_top, padx_lft = int(pady_top), int(padx_lft)
pady_bot, padx_rht = int(pady_bot), int(padx_rht)
if shape == 'full':
array = np.pad(array, ((pady_top, pady_bot), (padx_lft, padx_rht)), 'constant', constant_values=0)
# Perform convolution.
array_c = cv2.filter2D(array, ddepth, np.rot90(kernel, 2),
borderType=(cv2.BORDER_CONSTANT if zero_border else cv2.BORDER_REPLICATE))
if fix_float_zeros and np.issubdtype(array_c.dtype, np.floating):
# Fix FLOP error where we assume zero was the desired result.
if array_c.dtype == np.float32:
array_c[(-1.0e-6 < array_c) & (array_c < 1.0e-6)] = 0
elif array_c.dtype == np.float64:
array_c[(-1.0e-15 < array_c) & (array_c < 1.0e-15)] = 0
if array_c.dtype != dtype_out:
array_c = astype_round_and_crop(array_c, dtype_out, allow_modify_array=True)
# Crop result if necessary.
if shape == 'valid':
if pady_bot >= 0:
pady_bot = -pady_bot if pady_bot > 0 else None
if padx_rht >= 0:
padx_rht = -padx_rht if padx_rht > 0 else None
array_c = array_c[pady_top:pady_bot, padx_lft:padx_rht]
# Apply dilation of original NaN pixels to result.
if fixnans_flag:
array_nans_backup = array_nans
if shape == 'full':
array_nans = np.pad(array_nans, ((pady_top, pady_bot), (padx_lft, padx_rht)), 'constant', constant_values=0)
elif shape == 'same' and not zero_border:
array_nans = np.pad(array_nans, ((pady_top, pady_bot), (padx_lft, padx_rht)), 'edge')
if nan_same:
array_nans_dilate = array_nans
else:
dilate_structure = np.ones(kernel.shape, dtype=np.uint8)
if not nan_over_zero:
dilate_structure[kernel == 0] = 0
array_nans_dilate = imdilate(array_nans, dilate_structure)
if shape == 'valid' or (shape == 'same' and not zero_border):
if pady_bot >= 0:
pady_bot = -pady_bot if pady_bot > 0 else None
if padx_rht >= 0:
padx_rht = -padx_rht if padx_rht > 0 else None
array_nans_dilate = array_nans_dilate[pady_top:pady_bot, padx_lft:padx_rht]
array_c[array_nans_dilate] = np.nan
# Restore the input array to its original state.
if not array_casted:
array_backup[array_nans_backup] = np.nan
return fix_array_if_rotation_was_applied(array_c, rotation_flag)
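# Illustrative sketch (editor's addition, not part of the original module): exercising
# `conv2` with the three `shape` options on a small integer array. With
# conv_depth='default' and an integer input, the result comes back as float64,
# mirroring MATLAB's conv2. Assumes NumPy and OpenCV are available as imported above.
def _demo_conv2_shapes():
    arr = np.arange(25, dtype=np.uint8).reshape(5, 5)
    kernel = np.ones((3, 3), dtype=np.float32)
    full = conv2(arr, kernel, shape='full')    # -> (7, 7), float64
    same = conv2(arr, kernel, shape='same')    # -> (5, 5), float64
    valid = conv2(arr, kernel, shape='valid')  # -> (3, 3), float64
    return full.shape, same.shape, valid.shape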
def filt2(array, kernel, shape='same', conv_depth='default',
zero_border=False, fix_float_zeros=True,
nan_same=False, nan_over_zero=True,
allow_flipped_processing=True):
"""
Apply the (convolution) filter kernel to an array in 2D.
See documentation for `conv2`, but replace the word "convolve" with "filter".
Notes
-----
The mathematical convolution function (as implemented in conv2)
rotates the kernel 180 degrees before sliding it over the array
and performing the multiplications/additions.
"""
return conv2(array, np.rot90(kernel, 2), shape, conv_depth,
zero_border, fix_float_zeros,
nan_same, nan_over_zero,
allow_flipped_processing)
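# Illustrative sketch (editor's addition, not part of the original module): the
# relationship stated in the note above, i.e. filtering with a kernel equals
# convolving with that kernel rotated 180 degrees. `zero_border=True` is passed to
# filt2 so both calls use the same border handling. Assumes NumPy is `np`.
def _demo_filt2_vs_conv2():
    rng = np.random.RandomState(0)
    arr = rng.rand(6, 6)
    asym = np.array([[0., 1.], [2., 3.]])
    a = filt2(arr, asym, zero_border=True)           # filter (correlate) with `asym`
    b = conv2(arr, np.rot90(asym, 2), shape='same')  # convolve with rot180(`asym`)
    return np.allclose(a, b)  # True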
def moving_average(array, nhood, shape='same', conv_depth='default',
zero_border=True, fix_float_zeros=True,
nan_same=False, nan_over_zero=True,
allow_flipped_processing=True):
"""
Calculate the moving average over an array.
Parameters
----------
array : ndarray, 2D
Array for which to calculate the moving average.
nhood : positive int, tuple like `array.shape`, or (ndarray, 2D)
If an integer / tuple, specifies the side length / shape
        of the structure (of ones) to be used as the moving window.
If ndarray, must be a binary array with True/1-valued elements
specifying the structure for moving window.
    shape : str; 'full', 'same', or 'valid'
See documentation for `conv2`.
conv_depth : str; 'default', 'single', or 'double'
Specifies the floating data type of the convolution kernel.
See documentation for `conv2`.
zero_border : bool
See documentation for `conv2`.
fix_float_zeros : bool
See documentation for `conv2`.
nan_same : bool
See documentation for `conv2`.
nan_over_zero : bool
See documentation for `conv2`.
allow_flipped_processing : bool
See documentation for `conv2` function.
See Also
--------
conv2
conv2_slow
Returns
-------
moving_average : ndarray, 2D
Array containing the moving average of the input array.
"""
conv_dtype_choices = ('default', 'single', 'double')
structure = None
if type(nhood) in (int, tuple):
size = nhood
elif type(nhood) == np.ndarray:
structure = nhood
else:
raise InvalidArgumentError("`nhood` type may only be int, tuple, or ndarray, "
"but was {} (nhood={})".format(type(nhood), nhood))
if conv_depth not in conv_dtype_choices:
raise UnsupportedDataTypeError("float_dtype must be one of {}, "
"but was {}".format(conv_dtype_choices, conv_depth))
if conv_depth == 'default':
float_dtype = np.float32 if array.dtype == np.float32 else np.float64
else:
float_dtype = np.float32 if conv_depth == 'single' else np.float64
if structure is not None:
if not np.any(structure):
# The trivial case,
# must be handled to prevent divide by zero error.
return np.zeros_like(array, float_dtype)
if np.any(~np.logical_or(structure == 0, structure == 1)):
raise InvalidArgumentError("`structure` may only contain zeros and ones")
else:
if type(size) == int:
structure = np.ones((size, size), dtype=float_dtype)
elif type(size) == tuple:
structure = np.ones(size, dtype=float_dtype)
conv_kernel = np.rot90(np.divide(structure, np.sum(structure), dtype=float_dtype), 2)
return conv2(array, conv_kernel, shape, conv_depth,
zero_border, fix_float_zeros,
nan_same, nan_over_zero,
allow_flipped_processing)
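# Illustrative sketch (editor's addition, not part of the original module): a 3x3
# moving average over a constant array returns (approximately) the same constant in
# the interior; with the default zero_border=True, edge pixels are pulled toward
# zero because out-of-bounds cells count as zeros. Assumes NumPy is `np`.
def _demo_moving_average():
    arr = np.full((5, 5), 9.0)
    avg = moving_average(arr, 3)
    return avg[2, 2], avg[0, 0]  # ~9.0 in the interior, ~4.0 at a corner (4 of 9 cells in bounds)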
def conv_binary_prevent_overflow(array, structure):
"""
Make sure structure array has great enough positive bitdepth
to be convolved with binary primary array.
Parameters
----------
array : ndarray of bool or int, 2D
Primary integer array to convolve.
Must be a binary array of only zero/False and one/True.
structure : ndarray of bool or int, 2D
Secondary, smaller integer array to convolve with `array`.
Must be a binary array of only zero/False and one/True.
Returns
-------
structure : ndarray, possible uint cast of `structure`
        Either the same `structure` array or a cast of `structure`
to a uint data type with more positive bitdepth than the
input array.
"""
# Get upper bound on minimum positive bitdepth for convolution.
conv_bitdepth_pos = math.log(np.prod(structure.shape)+1, 2)
dtype_bitdepths_pos = (1, 7, 8, 15, 16, 31, 32, 63, 64)
for b in dtype_bitdepths_pos:
if conv_bitdepth_pos <= b:
conv_bitdepth_pos = b
break
# Parse input array and structure data type for bitdepth.
input_bitdepth_pos = 0
for arr in (array, structure):
arr_dtype = arr.dtype
if arr.dtype == np.bool:
arr_posbits = 1
elif np.issubdtype(arr_dtype, np.int):
arr_posbits = int(str(arr.dtype).replace('int', '')) - 1
elif np.issubdtype(arr_dtype, np.uint):
arr_posbits = int(str(arr.dtype).replace('uint', ''))
elif np.issubdtype(arr_dtype, np.floating):
arr_posbits = np.inf
else:
arr_posbits = 0
input_bitdepth_pos = max(input_bitdepth_pos, arr_posbits)
if input_bitdepth_pos == 0:
# Handle unknown data type by casting structure to
# maximum possible bitdepth.
structure = structure.astype(np.uint64)
else:
# If maximum positive bitdepth from inputs is too low,
        # cast structure to minimum positive bitdepth for convolution.
if input_bitdepth_pos < conv_bitdepth_pos:
if (conv_bitdepth_pos % 2) != 0:
conv_bitdepth_pos += 1
structure = structure.astype(eval('np.uint{}'.format(conv_bitdepth_pos)))
return structure
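# Illustrative sketch (editor's addition, not part of the original module): convolving
# a binary array with a 17x17 binary structure can produce counts up to 17*17 = 289,
# which exceeds the positive bit depth of the boolean inputs, so the helper above
# promotes the structure to a wider unsigned type (uint16 here).
def _demo_conv_binary_prevent_overflow():
    array = np.ones((100, 100), dtype=bool)
    structure = np.ones((17, 17), dtype=bool)
    structure = conv_binary_prevent_overflow(array, structure)
    return structure.dtype  # dtype('uint16')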
def imerode_slow(array, nhood, iterations=1, mode='auto',
cast_structure_for_speed=True, allow_flipped_processing=True):
"""
Erode an array with the provided binary structure.
Parameters
----------
array : ndarray, 2D
Array to erode.
nhood : positive int, tuple like `array.shape`, or (ndarray, 2D)
If an integer / tuple, specifies the side length / shape
        of the structure (of ones) to be used as the structure for erosion.
If ndarray, must be a binary array with True/1-valued elements
specifying the structure for erosion.
iterations : positive int
Number of times to perform the erosion.
mode : str; 'auto', 'conv', 'skimage', 'scipy', or 'scipy_grey'
Specifies which method will be used to perform erosion.
'auto' -------- use the fastest of ('conv', 'scipy') given array, structure sizes
'conv' -------- `conv2`
'skimage' ----- `skimage.morphology.binary_erosion` [1]
'scipy' ------- `scipy.ndimage.binary_erosion` [2]
'scipy_grey' -- `scipy.ndimage.grey_erosion` [3]
cast_structure_for_speed : bool
If True and `structure` is not float32 data type, cast it to float32.
This produces the fastest results overall for all methods,
and for 'conv' method this prevents a potential fallback call to
`conv2_slow` if input structure has an unsupported data type for
fast OpenCV method used in `conv2`.
allow_flipped_processing : bool
If True and at least one of `structure`'s side lengths is even,
        rotate both `array` and `structure` 180 degrees before performing erosion,
then rotate the result array 180 degrees before returning.
The sole purpose of this option is to allow this function
to most closely replicate the corresponding MATLAB array method [4].
Returns
-------
array_e : ndarray, same shape and type as `array`
Array containing the erosion of the input array by the structure.
See Also
--------
imdilate_slow
Notes
-----
This function is meant to replicate MATLAB's `imerode` function [4].
Strictly binary erosion will be performed if and only if `array.dtype` is `np.bool`,
otherwise greyscale erosion will be performed. However, greyscale erosion on a
binary array containing only values X and Y produces the same result as if the
values [min(X, Y), max(X, Y)] were mapped to [0, 1] and cast to a boolean array,
    passed into this function, and the values in the result array then mapped back to
    their original values (for floating `array`, note `-inf < 0 < inf < NaN`).
All modes will handle greyscale erosion when `array` is not boolean.
For `array` of feasibly large sizes containing more than two values,
'scipy_grey' is the fastest method for performing greyscale erosion,
but since the method may interpolate on the boundaries between regions
of differing values (which the MATLAB function does not do), it is not
an acceptable default method and is not considered when `mode=='auto'`.
In preliminary testing, all three methods 'conv', 'scipy', and 'skimage'
are able to reproduce the results of the MATLAB function for both binary
and greyscale erosion (with the exception of some edge pixels when
`structure` with a False/zero center element is used in grey erosion,
which produces nonsensical values where proper erosion cannot be detected
by these three methods as well as MATLAB's function -- only the 'scipy_grey'
method handles this case properly).
References
----------
.. [1] http://scikit-image.org/docs/dev/api/skimage.morphology.html#skimage.morphology.binary_erosion
.. [2] https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.morphology.binary_erosion.html
.. [3] https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.morphology.grey_erosion.html
.. [4] https://www.mathworks.com/help/images/ref/imerode.html
"""
mode_choices = ('auto', 'conv', 'skimage', 'scipy', 'scipy_grey')
structure = None
if type(nhood) == int:
structure = np.ones((nhood, nhood), dtype=np.float32)
elif type(nhood) == tuple:
structure = np.ones(nhood, dtype=np.float32)
elif type(nhood) == np.ndarray:
structure = nhood
if structure.dtype != np.bool and np.any(~np.logical_or(structure == 0, structure == 1)):
raise InvalidArgumentError("`nhood` structure contains values other than 0 and 1")
if cast_structure_for_speed and structure.dtype != np.float32:
structure = structure.astype(np.float32)
else:
raise InvalidArgumentError("`nhood` type may only be int, tuple, or ndarray, "
"but was {} (nhood={})".format(type(nhood), nhood))
if mode not in mode_choices:
raise UnsupportedMethodError("'mode' must be one of {}, but was '{}'".format(mode_choices, mode))
if mode == 'auto':
# FIXME: Get new time coefficients for faster conv2 function now being used.
# Make an estimate of the runtime for 'conv' and 'scipy' methods,
# then choose the faster method.
array_elements = np.prod(array.shape)
struc_elements = np.prod(structure.shape)
time_conv = 1.25e-07 * array_elements - 7.61e-02
time_scipy = ( (1.56e-10 * array_elements - 2.66e-04) * struc_elements
+ (1.34e-08 * array_elements - 1.42e-02) )
mode = 'conv' if time_conv < time_scipy else 'scipy'
if mode == 'conv':
if ( not np.issubdtype(array.dtype, np.floating)
and not np.issubdtype(structure.dtype, np.floating) ):
# Make sure one of the input integer arrays has great enough
# positive bitdepth to prevent overflow during convolution.
if array.dtype != np.bool and np.any(~np.logical_or(array == 0, array == 1)):
structure = structure.astype(np.uint64)
else:
structure = conv_binary_prevent_overflow(array, structure)
structure = np.rot90(structure, 2)
rotation_flag = False
if allow_flipped_processing:
array, structure, rotation_flag = rotate_arrays_if_kernel_has_even_sidelength(array, structure)
if mode == 'skimage':
pady, padx = np.array(structure.shape) / 2
pady, padx = int(pady), int(padx)
if array.dtype == np.bool:
padval = 1
else:
padval = np.inf if np.issubdtype(array.dtype, np.floating) else
|
np.iinfo(array.dtype)
|
numpy.iinfo
|
'''
Created on 11.07.2014
@author: mkamp
'''
import gc
import numpy as np
from concurrent.futures import ThreadPoolExecutor, as_completed
RADON_CALC_METHOD = 1  # 1 or 2
# Method 1: append the row (1, 0, ..., 0) to S and set b = (0, ..., 0, 1) in order to force a non-trivial solution.
# Method 2: use A = S[:-1] and b = S[-1], i.e., force the last column of S to have weight one and the other
#           columns to add up to that last one, canceling each other out to zero.
EPS = 0.000001
MAX_REL_EPS = 0.0001
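# Illustrative sketch (editor's addition, not part of the original script): method 1's
# augmented system on a toy point set. For the four corners of the unit square,
# stacking S^T, a row of ones (coefficients sum to zero, matching its zero entry in b)
# and the row e1 = (1, 0, ..., 0) with b = (0, ..., 0, 1) pins alpha[0] = 1 and rules
# out the trivial all-zero solution; the signs of alpha give the Radon partition.
def _demo_radon_system():
    S = np.array([[0., 0.], [1., 0.], [0., 1.], [1., 1.]])
    A = np.vstack((np.transpose(S), np.ones(S.shape[0])))
    z = np.zeros(S.shape[0])
    z[0] = 1.0
    A = np.vstack((A, z))
    b = np.zeros(S.shape[0])
    b[-1] = 1.0
    alpha = np.linalg.lstsq(A, b, rcond=None)[0]
    return alpha  # approximately [1, -1, -1, 1]: partition {p0, p3} vs. {p1, p2}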
def getRadonPoint(S):
alpha = []
if RADON_CALC_METHOD == 1:
A = np.vstack((np.transpose(S),np.ones(S.shape[0])))
z = np.zeros(S.shape[0])
z[0] = 1.0
A = np.vstack((A,z))
b = np.zeros(S.shape[0])
b[-1] = 1.0
alpha = np.linalg.lstsq(A, b)[0]
else:
print( S)
A = S[:-1]
print( A)
A = np.vstack((np.transpose(A),np.ones(A.shape[0])))
print( A)
b = np.hstack((S[-1], np.ones(1)))
print( b)
alpha = np.linalg.solve(A, b)
alpha_plus = np.zeros(len(alpha))
alpha_minus = np.zeros(len(alpha))
for i in range(len(alpha)):
if alpha[i] > 0:
alpha_plus[i] = alpha[i]
if alpha[i] < 0:
alpha_minus[i] = alpha[i]
sumAlpha_plus = 1.*np.sum(alpha_plus)
sumAlpha_minus = -1.*np.sum(alpha_minus)
if not floatApproxEqual(sumAlpha_plus, sumAlpha_minus):
print( "Error: sum(a+) != sum(a-): " + str(sumAlpha_plus) + " != " + str(sumAlpha_minus) + " for |S| = " + str(S.shape) + " and R = " + str(getRadonNumber(S)))
alpha /= sumAlpha_plus
r = np.zeros(S.shape[1])
r_minus = np.zeros(S.shape[1])
for i in range(len(alpha)):
if alpha[i] > 0:
r += alpha[i] * S[i]
if alpha[i] < 0:
r_minus += alpha[i] * S[i]
    rtest_plus = r * 1./np.linalg.norm(r) # normalized
rtest_minus = r_minus * 1./
|
np.linalg.norm(r_minus)
|
numpy.linalg.norm
|
# coding: utf-8
import os
import itertools
from atomate.vasp.fireworks.core import StaticFW
from fireworks import Workflow, Firework
from atomate.vasp.powerups import (
add_additional_fields_to_taskdocs,
add_wf_metadata,
add_common_powerups,
)
from atomate.vasp.workflows.base.core import get_wf
from atomate.vasp.firetasks.run_calc import RunVaspCustodian
from atomate.common.firetasks.glue_tasks import PassCalcLocs
from atomate.vasp.firetasks.glue_tasks import CopyVaspOutputs
from atomate.vasp.firetasks.parse_outputs import (
VaspToDb, HubbardHundLinRespToDb
)
from atomate.vasp.firetasks.write_inputs import WriteVaspFromIOSet
from atomate.utils.utils import get_logger
logger = get_logger(__name__)
from atomate.vasp.config import VASP_CMD, DB_FILE, ADD_WF_METADATA
from uuid import uuid4
from pymatgen.io.vasp.sets import MPRelaxSet, MPStaticSet
from pymatgen.io.vasp.inputs import Poscar, Incar
from pymatgen.core import Lattice, Structure, Element
import numpy as np
__author__ = "<NAME>, <NAME>"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
__date__ = "February 2020"
__hubbard_hund_linresp_wf_version__ = 0.0
module_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)))
def get_wf_hubbard_hund_linresp(
structure,
user_incar_settings=None,
relax_nonmagnetic=True,
spin_polarized=True,
applied_potential_range=(-0.2, 0.2),
num_evals=9,
site_indices_perturb=None, species_perturb=None,
find_nearest_sites=True,
parallel_scheme=0,
ediff_tight=None,
c=None):
"""
Compute Hubbard U (and Hund J) on-site interaction values using GGA+U
    linear response method proposed by Cococcioni et al.
(DOI: 10.1103/PhysRevB.71.035105)
    and the spin-polarized response formalism developed by Linscott et al.
(DOI: 10.1103/PhysRevB.98.235157)
This workflow relies on the constrained on-site potential functional implemented in VASP,
with a helpful tutorial found here:
https://www.vasp.at/wiki/index.php/Calculate_U_for_LSDA%2BU
Args:
structure:
user_incar_settings: user INCAR settings
relax_nonmagnetic: Restart magnetic SCF runs from
non-magnetic calculation, using WAVECAR
spin_polarized: Perform spin-dependent perturbations
applied_potential_range: Bounds of applied potential
        num_evals: Number of perturbation evaluations
site_indices_perturb: (must specify if species_perturb=None)
List of site indices within
Structure indicating perturbation sites;
species_perturb: (must specify if site_indices_perturb=None)
List of names of species (string)
of sites to perturb; First site of that species
is selected in the structure
find_nearest_sites: If set to true and species_perturb != None,
the closest sites (by the Structure distance matrix) will be selected
in the response analysis to account for inter-site screening effects
parallel_scheme: 0 - (default) self-consistent (SCF)
runs use WAVECAR from non-self consistent (NSCF) run
at same applied potential; 1 - SCF runs use WAVECAR
from ground-state (V=0) run.
While reusing the WAVECAR from NSCF run in SCF run may be more
efficient (parallel_scheme: 0), the user may also choose to
remove the dependency between NSCF and SCF runs
(parallel_scheme: 1)
ediff_tight: Final energy convergence tolerance,
if restarting from a previous run
(if not specified, will default to pymatgen default EDIFF)
c: Workflow config dict, in the same format
as in presets/core.py and elsewhere in atomate
Returns: Workflow
"""
if not structure.is_ordered:
raise ValueError(
"Please obtain an ordered approximation of the input structure."
)
if not site_indices_perturb:
site_indices_perturb = []
if species_perturb:
if find_nearest_sites:
site_indices_perturb = find_closest_sites(structure, species_perturb)
else:
for specie_u in species_perturb:
found_specie = False
for s in range(len(structure)):
site = structure[s]
if (Element(str(site.specie)) == Element(specie_u)) \
and (s not in site_indices_perturb):
found_specie = True
break
if not found_specie:
raise ValueError(
"Could not find specie(s) in structure."
)
site_indices_perturb.append(s)
elif not site_indices_perturb:
logger.warning(
"Sites for computing U value are not specified. "
"Computing U for first site in structure. "
)
site_indices_perturb = list(tuple(site_indices_perturb))
num_perturb = len(site_indices_perturb)
sites_perturb = []
for site_index_perturb in site_indices_perturb:
site = structure[site_index_perturb]
sites_perturb.append(site)
structure.remove_sites(indices=site_indices_perturb)
for site in sites_perturb:
structure.insert(i=0, species=site.specie, coords=site.frac_coords,
properties=site.properties)
# using a uuid for book-keeping,
# in a similar way to other workflows
uuid = str(uuid4())
c_defaults = {"vasp_cmd": VASP_CMD, "db_file": DB_FILE}
if c:
c.update(c_defaults)
else:
c = c_defaults
# Calculate groundstate
# set user_incar_settings
if not user_incar_settings:
user_incar_settings = {}
# setup VASP input sets
uis_gs, uis_ldau, val_dict, vis_ldau = init_linresp_input_sets(
user_incar_settings, structure, num_perturb)
fws = []
index_fw_gs = [0]
ediff_default = vis_ldau.incar['EDIFF']
if not ediff_tight:
ediff_tight = 0.1 * ediff_default
append_linresp_ground_state_fws (
fws, structure, num_perturb, index_fw_gs,
uis_gs, relax_nonmagnetic,
ediff_default, ediff_tight)
# generate list of applied on-site potentials in linear response
applied_potential_value_list = []
for counter_perturb in range(num_perturb):
applied_potential_values = np.linspace(
applied_potential_range[0], applied_potential_range[1],
num_evals)
applied_potential_values =
|
np.around(applied_potential_values, decimals=9)
|
numpy.around
|
import tensorflow as tf
import numpy as np
from scipy import signal
from scipy.ndimage import gaussian_filter
from PIL import Image, ImageDraw
import random
import glob, os
import csv
from multiprocessing import Pool
import subprocess
import time
width = 512
height = 512
scale = 2
np.random.seed(os.getpid() + int(time.time()))
random.seed(os.getpid() + int(time.time()))
class BaseData:
def __init__(self):
self.load_idmap()
def load_idmap(self):
self.glyph_id = {}
self.glyphs = {}
self.glyph_type = {}
self.glyph_id[''] = 0
self.glyphs[0] = ''
with open(os.path.join('data','codepoints.csv'),'r') as f:
reader = csv.reader(f)
for row in reader:
codehex = row[1]
if len(codehex) > 7:
code = eval('"' + ''.join(['\\u' + codehex[i*4:i*4+4] for i in range(len(codehex) // 4)]) + '"')
else:
code = chr(int(codehex, 16))
i = int.from_bytes(code.encode('utf-32le'), 'little')
self.glyph_id[code] = i
self.glyphs[i] = code
with open(os.path.join('data','id_map.csv'),'r') as f:
reader = csv.reader(f)
for row in reader:
code = bytes.fromhex(row[2]).decode()
if code in self.glyph_id:
k = self.glyph_id[code]
else:
i = int.from_bytes(code.encode('utf-32le'), 'little')
self.glyph_id[code] = i
self.glyphs[i] = code
k = i
self.glyph_type[k] = int(row[3])
self.id_count = len(self.glyph_id)
def sub_load(args):
exe = os.path.join('data','load_font','load_font.exe')
if not os.path.exists(exe):
exe = os.path.join('data','load_font','load_font')
proc = subprocess.Popen([
exe,
args[0],
'128',
], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
ret = {}
for c in args[1]:
if len(c) == 1:
charbuf = c.encode("utf-32-le")
proc.stdin.write(charbuf[:4])
proc.stdin.flush()
result = proc.stdout.read(32)
code = result[:4]
rows = int.from_bytes(result[4:8], 'little')
width = int.from_bytes(result[8:12], 'little')
boundingWidth = int.from_bytes(result[12:16], 'little', signed=True)
boundingHeight = int.from_bytes(result[16:20], 'little', signed=True)
horiBearingX = int.from_bytes(result[20:24], 'little', signed=True)
horiBearingY = int.from_bytes(result[24:28], 'little', signed=True)
horiAdvance = int.from_bytes(result[28:32], 'little', signed=True)
if rows * width == 0:
continue
assert(charbuf == code)
boundingWidth = boundingWidth / 64
boundingHeight = boundingHeight / 64
horiBearingX = horiBearingX / 64
horiBearingY = horiBearingY / 64
horiAdvance = horiAdvance / 64
buffer = proc.stdout.read(rows*width)
img = np.frombuffer(buffer, dtype='ubyte').reshape(rows,width)
value = {
'horizontal': {
'rows': rows,
'width': width,
'boundingWidth': boundingWidth,
'boundingHeight': boundingHeight,
'horiBearingX': horiBearingX,
'horiBearingY': horiBearingY,
'horiAdvance': horiAdvance,
'image': img,
}
}
result = proc.stdout.read(28)
rows = int.from_bytes(result[:4], 'little')
width = int.from_bytes(result[4:8], 'little')
boundingWidth = int.from_bytes(result[8:12], 'little', signed=True)
boundingHeight = int.from_bytes(result[12:16], 'little', signed=True)
vertBearingX = int.from_bytes(result[16:20], 'little', signed=True)
vertBearingY = int.from_bytes(result[20:24], 'little', signed=True)
vertAdvance = int.from_bytes(result[24:28], 'little', signed=True)
boundingWidth = boundingWidth / 64
boundingHeight = boundingHeight / 64
vertBearingX = vertBearingX / 64
vertBearingY = vertBearingY / 64
vertAdvance = vertAdvance / 64
buffer = proc.stdout.read(rows*width)
img = np.frombuffer(buffer, dtype='ubyte').reshape(rows,width)
value['vertical'] = {
'rows': rows,
'width': width,
'boundingWidth': boundingWidth,
'boundingHeight': boundingHeight,
'vertBearingX': vertBearingX,
'vertBearingY': vertBearingY,
'vertAdvance': vertAdvance,
'image': img,
}
ret[(args[0],c)] = value
else:
pass
proc.stdin.close()
return ret
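# Illustrative sketch (editor's addition, not part of the original module): the
# 32-byte horizontal metrics record that sub_load() reads back from the load_font
# helper, decoded here from a hand-built buffer instead of the subprocess. The five
# metric fields are signed 26.6 fixed-point values, hence the division by 64 above.
# The field values below are arbitrary examples.
def _demo_parse_horizontal_record():
    import struct
    # code, rows, width (unsigned) + boundingWidth, boundingHeight,
    # horiBearingX, horiBearingY, horiAdvance (signed, 26.6 fixed point)
    record = struct.pack('<3I5i', 0x41, 16, 12, 12 * 64, 16 * 64, 1 * 64, 14 * 64, 13 * 64)
    code = record[:4]
    rows = int.from_bytes(record[4:8], 'little')
    width = int.from_bytes(record[8:12], 'little')
    horiAdvance = int.from_bytes(record[28:32], 'little', signed=True) / 64
    return code, rows, width, horiAdvance  # (b'A\x00\x00\x00', 16, 12, 13.0)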
def sub_load_image(path):
dirnames = glob.glob(os.path.join(path, '*'))
ret = {}
for d in dirnames:
c_code = os.path.basename(d)
char = str(bytes.fromhex(c_code), 'utf-8')
count = 0
for f in glob.glob(os.path.join(d, '*.png')):
rawim = np.asarray(Image.open(f).convert('L'))
ylines = np.any(rawim < 255, axis=1)
content = np.where(ylines)[0]
rows = content[-1] - content[0] + 1
horiBearingY = 128 - 16 - content[0]
vertBearingY = content[0] - 16
y = content[0]
xlines = np.any(rawim < 255, axis=0)
content = np.where(xlines)[0]
width = content[-1] - content[0] + 1
horiBearingX = content[0] - 16
vertBearingX = content[0] - 64
x = content[0]
if rows == 0 or width == 0:
continue
img = 255 - rawim[y:y+rows,x:x+width]
ret[('hand%06d'%count,char)] = {
'horizontal': {
'rows': rows,
'width': width,
'boundingWidth': width,
'boundingHeight': rows,
'horiBearingX': horiBearingX,
'horiBearingY': horiBearingY,
'horiAdvance': 96.0,
'image': img,
},
'vertical': {
'rows': rows,
'width': width,
'boundingWidth': width,
'boundingHeight': rows,
'vertBearingX': horiBearingX,
'vertBearingY': horiBearingY,
'vertAdvance': 96.0,
'image': img,
}
}
count += 1
vert_imgs = glob.glob(os.path.join(d, 'vert', '*.png'))
if 0 < len(vert_imgs) <= count:
for i in range(count):
f = vert_imgs[i % len(vert_imgs)]
rawim = np.asarray(Image.open(f).convert('L'))
ylines = np.any(rawim < 255, axis=1)
content = np.where(ylines)[0]
rows = content[-1] - content[0] + 1
horiBearingY = 128 - 16 - content[0]
vertBearingY = content[0] - 16
y = content[0]
xlines = np.any(rawim < 255, axis=0)
content = np.where(xlines)[0]
width = content[-1] - content[0] + 1
horiBearingX = content[0] - 16
vertBearingX = content[0] - 64
x = content[0]
if rows == 0 or width == 0:
continue
img = 255 - rawim[y:y+rows,x:x+width]
ret[('hand%06d'%i,char)]['vertical'] = {
'rows': rows,
'width': width,
'boundingWidth': width,
'boundingHeight': rows,
'vertBearingX': horiBearingX,
'vertBearingY': horiBearingY,
'vertAdvance': 96.0,
'image': img,
}
elif 0 < len(vert_imgs):
vcount = 0
for f in vert_imgs:
rawim = np.asarray(Image.open(f).convert('L'))
ylines = np.any(rawim < 255, axis=1)
content = np.where(ylines)[0]
rows = content[-1] - content[0] + 1
horiBearingY = 128 - 16 - content[0]
vertBearingY = content[0] - 16
y = content[0]
xlines = np.any(rawim < 255, axis=0)
content = np.where(xlines)[0]
width = content[-1] - content[0] + 1
horiBearingX = content[0] - 16
vertBearingX = content[0] - 64
x = content[0]
if rows == 0 or width == 0:
continue
img = 255 - rawim[y:y+rows,x:x+width]
ret[('hand%06d'%vcount,char)] = {
'horizontal': ret[('hand%06d'%(vcount % count),char)]['horizontal'],
'vertical': {
'rows': rows,
'width': width,
'boundingWidth': width,
'boundingHeight': rows,
'vertBearingX': vertBearingY,
'vertBearingY': vertBearingX,
'vertAdvance': 96.0,
'image': img,
}
}
vcount += 1
return ret
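# 2-D Gaussian kernel built as the outer product of two 1-D Gaussian windows;
# used to stamp peaks at glyph centres in the keymap label plane.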
def gaussian_kernel(kernlen=7, xstd=1., ystd=1.):
gkern1dx = signal.gaussian(kernlen, std=xstd).reshape(kernlen, 1)
gkern1dy = signal.gaussian(kernlen, std=ystd).reshape(kernlen, 1)
gkern2d = np.outer(gkern1dy, gkern1dx)
return gkern2d
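# Randomly degrade the rendered image: 25% chance of Gaussian blur, 25% chance
# of unsharp-mask style sharpening, otherwise return it unchanged.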
def apply_random_filter(images):
p = np.random.uniform(0., 1.)
if p < 0.25:
sigma = np.random.uniform(0., 1.75)
return gaussian_filter(images, sigma=sigma)
if p < 0.5:
sigma = np.random.uniform(0., 6.)
gauss = gaussian_filter(images, sigma=sigma)
gain = np.random.uniform(0., 5.)
return (1 + gain) * images - gain * gauss
return images
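# Font match helper: any key starting with 'hand' counts as the single
# handwriting "font"; real font names must match exactly.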
def is_Font_match(font, target):
if target.startswith('hand'):
return font.startswith('hand')
else:
return font == target
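# Glyph image dataset: caches rendered glyphs from handwriting, English fonts
# and Japanese fonts, then derives per-glyph-type sampling weights and key
# subsets used by the synthetic page generators below.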
class FontData(BaseData):
def __init__(self):
super().__init__()
self.img_cache = {}
print('loading handwrite image')
self.img_cache.update(sub_load_image(os.path.join('data','handwritten')))
print('loading enfont')
enfont_files = sorted(glob.glob(os.path.join('data','enfont','*.ttf')) + glob.glob(os.path.join('data','enfont','*.otf')))
en_glyphs = [self.glyphs[key] for key in self.glyphs.keys() if self.glyph_type.get(key,-1) in [0,1,2,6]]
items = [(f, en_glyphs) for f in enfont_files]
total = len(enfont_files)
with Pool() as pool:
progress = tf.keras.utils.Progbar(total, unit_name='item')
dicts = pool.imap_unordered(sub_load, items)
for dictitem in dicts:
self.img_cache.update(dictitem)
progress.add(1)
print('loading jpfont')
jpfont_files = sorted(glob.glob(os.path.join('data','jpfont','*.ttf')) + glob.glob(os.path.join('data','jpfont','*.otf')))
items = [(f, list(self.glyphs.values())) for f in jpfont_files]
total = len(jpfont_files)
with Pool() as pool:
progress = tf.keras.utils.Progbar(total, unit_name='item')
dicts = pool.imap_unordered(sub_load, items)
for dictitem in dicts:
self.img_cache.update(dictitem)
progress.add(1)
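        # Give characters that only exist in the image cache a fresh glyph-type id,
        # then count cached images per type (type_count) and distinct glyphs per
        # type (gtype_count).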
type_count_max = max([self.glyph_type[k] for k in self.glyph_type]) + 1
for key in self.img_cache:
i = self.glyph_id[key[1]]
if i not in self.glyph_type:
self.glyph_type[i] = type_count_max
type_count_max = max([self.glyph_type[k] for k in self.glyph_type]) + 1
gtype_count = [0 for _ in range(type_count_max)]
type_count = [0 for _ in range(type_count_max)]
for key in self.img_cache:
t = self.glyph_type[self.glyph_id[key[1]]]
type_count[t] += 1
for k in self.glyph_type:
gtype_count[self.glyph_type[k]] += 1
self.image_keys = list(self.img_cache.keys())
self.test_keys = self.get_test_keys()
self.train_keys = self.get_train_keys()
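        # Per-type sampling weights; the digit rows below label glyph-type indices
        # 0-11. Dividing by type_count turns them into per-image probabilities, so
        # each type contributes according to the map rather than its image count.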
# 0 1 2 3 4 5 6 7 8 9 10 11
self.prob_map = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.1, 1.0]
self.prob_map = [p/t for p,t in zip(self.prob_map, type_count)]
self.random_probs_train = [self.prob_map[self.glyph_type[self.glyph_id[key[1]]]] for key in self.train_keys]
self.random_probs_test = [self.prob_map[self.glyph_type[self.glyph_id[key[1]]]] for key in self.test_keys]
# 0 1 2 3 4 5 6 7 8 9 10 11
self.prob_map_kanji = [0, 0, 0, 0, 0, 1.0, 0, 0, 1.0, 1.0, 0.5, 0]
self.prob_map_kanji = [p/t for p,t in zip(self.prob_map_kanji, type_count)]
self.kanji_probs_train = [self.prob_map_kanji[self.glyph_type[self.glyph_id[key[1]]]] for key in self.train_keys]
self.kanji_probs_test = [self.prob_map_kanji[self.glyph_type[self.glyph_id[key[1]]]] for key in self.test_keys]
# 0 1 2 3 4 5 6 7 8 9 10 11
self.prob_map_num = [1.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
self.prob_map_num = [p/t for p,t in zip(self.prob_map_num, type_count)]
self.num_probs_train = [self.prob_map_num[self.glyph_type[self.glyph_id[key[1]]]] for key in self.train_keys]
self.num_probs_test = [self.prob_map_num[self.glyph_type[self.glyph_id[key[1]]]] for key in self.test_keys]
# 0 1 2 3 4 5 6 7 8 9 10 11
self.prob_map_alpha = [0, 1.0, 1.0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
self.prob_map_alpha = [p/t for p,t in zip(self.prob_map_alpha, type_count)]
self.alpha_probs_train = [self.prob_map_alpha[self.glyph_type[self.glyph_id[key[1]]]] for key in self.train_keys]
self.alpha_probs_test = [self.prob_map_alpha[self.glyph_type[self.glyph_id[key[1]]]] for key in self.test_keys]
# 0 1 2 3 4 5 6 7 8 9 10 11
self.prob_map_hira = [0, 0, 0, 1.0, 0, 0, 0, 0, 0, 0, 0, 0]
self.prob_map_hira = [p/t for p,t in zip(self.prob_map_hira, type_count)]
self.hira_probs_train = [self.prob_map_hira[self.glyph_type[self.glyph_id[key[1]]]] for key in self.train_keys]
self.hira_probs_test = [self.prob_map_hira[self.glyph_type[self.glyph_id[key[1]]]] for key in self.test_keys]
self.train_keys_num = [x for x in self.train_keys if self.glyph_type[self.glyph_id[x[1]]] == 0]
self.train_num_fonts = list(set([key[0] for key in self.train_keys_num]))
self.test_keys_num = [x for x in self.test_keys if self.glyph_type[self.glyph_id[x[1]]] == 0]
self.test_num_fonts = list(set([key[0] for key in self.test_keys_num]))
self.train_keys_capital = [x for x in self.train_keys if self.glyph_type[self.glyph_id[x[1]]] == 1]
self.train_capital_fonts = list(set([key[0] for key in self.train_keys_capital]))
self.test_keys_capital = [x for x in self.test_keys if self.glyph_type[self.glyph_id[x[1]]] == 1]
self.test_capital_fonts = list(set([key[0] for key in self.test_keys_capital]))
self.train_keys_small = [x for x in self.train_keys if self.glyph_type[self.glyph_id[x[1]]] == 2]
self.train_small_fonts = list(set([key[0] for key in self.train_keys_small]))
self.test_keys_small = [x for x in self.test_keys if self.glyph_type[self.glyph_id[x[1]]] == 2]
self.test_small_fonts = list(set([key[0] for key in self.test_keys_small]))
self.train_keys_alpha = [x for x in self.train_keys if self.glyph_type[self.glyph_id[x[1]]] in [0,1,2,6]]
self.train_alpha_fonts = list(set([key[0] for key in self.train_keys_alpha]))
self.test_keys_alpha = [x for x in self.test_keys if self.glyph_type[self.glyph_id[x[1]]] in [0,1,2,6]]
self.test_alpha_fonts = list(set([key[0] for key in self.test_keys_alpha]))
self.train_keys_jp = [x for x in self.train_keys if self.glyph_type[self.glyph_id[x[1]]] in [3,4,5,7,8,9]]
self.test_keys_jp = [x for x in self.test_keys if self.glyph_type[self.glyph_id[x[1]]] in [3,4,5,7,8,9]]
self.train_jp_fonts = list(set([key[0] for key in self.train_keys_jp]))
p_sum = sum([0 if '.' in f else 1 for f in self.train_jp_fonts])
self.train_jp_fonts_p = [1. if '.' in f else 1/p_sum for f in self.train_jp_fonts]
self.test_jp_fonts = list(set([key[0] for key in self.test_keys_jp]))
p_sum = sum([0 if '.' in f else 1 for f in self.test_jp_fonts])
self.test_jp_fonts_p = [1. if '.' in f else 1/p_sum for f in self.test_jp_fonts]
self.train_keys_hira = [x for x in self.train_keys if self.glyph_type[self.glyph_id[x[1]]] in [3,4]]
self.test_keys_hira = [x for x in self.test_keys if self.glyph_type[self.glyph_id[x[1]]] in [3,4]]
self.train_hira_fonts = list(set([key[0] for key in self.train_keys_hira]))
p_sum = sum([0 if '.' in f else 1 for f in self.train_hira_fonts])
self.train_hira_fonts_p = [1. if '.' in f else 1/p_sum for f in self.train_hira_fonts]
self.test_hira_fonts = list(set([key[0] for key in self.test_keys_hira]))
p_sum = sum([0 if '.' in f else 1 for f in self.test_hira_fonts])
self.test_hira_fonts_p = [1. if '.' in f else 1/p_sum for f in self.test_hira_fonts]
self.train_keys_jpnum = [x for x in self.train_keys if (self.glyph_type[self.glyph_id[x[1]]] in [0,3,4,5,7]) and (x[0] in self.train_jp_fonts)]
self.test_keys_jpnum = [x for x in self.test_keys if (self.glyph_type[self.glyph_id[x[1]]] in [0,3,4,5,7]) and (x[0] in self.test_jp_fonts)]
self.train_jpnum_fonts = list(set([key[0] for key in self.train_keys_jpnum]))
self.train_jpnum_fonts_p = [1. if '.' in f else 0. for f in self.train_jpnum_fonts]
self.test_jpnum_fonts = list(set([key[0] for key in self.test_keys_jpnum]))
self.test_jpnum_fonts_p = [1. if '.' in f else 0. for f in self.test_jpnum_fonts]
self.prob_map_clustering = [
gtype_count[0] / type_count[0],
gtype_count[1] / type_count[1],
gtype_count[2] / type_count[2],
gtype_count[3] / type_count[3],
gtype_count[4] / type_count[4],
gtype_count[5] / type_count[5],
gtype_count[6] / type_count[6],
0.,
0.,
0.,
0.,
0.
]
self.random_background = glob.glob(os.path.join('data','background','*'))
self.max_std = 8.0
self.min_ker = 4
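    # Fonts whose file name starts with 'Noto' form the test split; everything
    # else (including the handwriting entries) is used for training.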
def get_test_keys(self):
def fontname(fontpath):
return os.path.splitext(os.path.basename(fontpath))[0]
keys = self.image_keys
test_keys = [k for k in keys if fontname(k[0]).startswith('Noto')]
return test_keys
def get_train_keys(self):
def fontname(fontpath):
return os.path.splitext(os.path.basename(fontpath))[0]
keys = self.image_keys
train_keys = [k for k in keys if not fontname(k[0]).startswith('Noto')]
return train_keys
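    # Random background patch: flatten any alpha channel onto white, scale, crop
    # to (height, width), normalize to [-1, 1] and jitter with flips, brightness
    # and contrast.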
def load_background_images(self):
def remove_transparency(im, bg_colour=(255, 255, 255)):
# Only process if image has transparency (http://stackoverflow.com/a/1963146)
if im.mode in ('RGBA', 'LA') or (im.mode == 'P' and 'transparency' in im.info):
# Need to convert to RGBA if LA format due to a bug in PIL (http://stackoverflow.com/a/1963146)
alpha = im.convert('RGBA').getchannel('A')
# Create a new background image of our matt color.
# Must be RGBA because paste requires both images have the same format
# (http://stackoverflow.com/a/8720632 and http://stackoverflow.com/a/9459208)
bg = Image.new("RGBA", im.size, bg_colour + (255,))
bg.paste(im, mask=alpha)
return bg
else:
return im
im_file = random.choice(self.random_background)
im = Image.open(im_file)
im = remove_transparency(im).convert('RGB')
scale_min = max(width / im.width, height / im.height)
scale_max = max(scale_min + 0.5, 1.5)
s = np.random.uniform(scale_min, scale_max)
im = im.resize((int(im.width * s)+1, int(im.height * s)+1))
x1 = np.random.randint(0, im.width - width)
y1 = np.random.randint(0, im.height - height)
im_crop = im.crop((x1, y1, x1 + width, y1 + height))
img = np.asarray(im_crop).astype(np.float32)
img = img / 128. - 1.
if np.random.uniform() < 0.5:
img = img[::-1,:,:]
if np.random.uniform() < 0.5:
img = img[:,::-1,:]
brightness = np.random.uniform(-1.0, 1.0)
brightness = np.array([brightness,brightness,brightness])
img += brightness[None,None,:]
contrast = np.random.uniform(0.2, 1.8)
contrast = np.array([contrast,contrast,contrast])
img = img * contrast[None,None,:]
img = np.clip(img, -1.0, 1.0)
return img
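    # Vertical text lines that occasionally embed short horizontally-set numeral
    # runs (tate-chu-yoko style). Builds keymap / size / offset / line / separator
    # label planes and hands them to sub_constructimage.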
def tateyokotext_images(self, keys, fonts, font_p):
max_count = 256
angle_max = 15.0
min_pixel = 16
max_pixel = 100
text_size = random.randint(min_pixel, max_pixel)
images = np.zeros([height, width], dtype=np.float32)
keymap = np.zeros([height // scale, width // scale], dtype=np.float32)
xsizes = np.zeros([height // scale, width // scale], dtype=np.float32)
ysizes = np.zeros([height // scale, width // scale], dtype=np.float32)
offsetx = np.zeros([height // scale, width // scale], dtype=np.float32)
offsety = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = Image.fromarray(lines)
draw = ImageDraw.Draw(lines)
seps = np.zeros([height // scale, width // scale], dtype=np.float32)
ids = np.zeros([height // scale, width // scale], dtype=np.int32)
angle = angle_max * np.random.normal() / 180 * np.pi
angle = np.clip(angle, -np.pi, np.pi)
pad_x = np.random.normal() * width / 20
pad_y = np.random.normal() * height / 20
select_font = random.choices(fonts, k=1, weights=font_p)[0]
probs = [1. if is_Font_match(key[0], select_font) else 0. for key in keys]
selection = [key for key in random.choices(keys, k=max_count, weights=probs)]
probs2 = [1. if is_Font_match(key[0], select_font) and self.glyph_type[self.glyph_id[key[1]]] == 0 else 0. for key in keys]
selection2 = [key for key in random.choices(keys, k=max_count*2, weights=probs2)]
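        # selection2 supplies type-0 glyphs (the numeral set) for the horizontally
        # set runs embedded in the vertical line.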
base_line = width - text_size // 2
line_space = int(text_size * random.uniform(1.05, 2.0))
line_start = 0
line_end = 0
isnum = -1
i = 0
for key in selection:
if isnum < 0 or isnum > 1:
if np.random.uniform() < 0.1:
isnum = 0
else:
isnum = -1
if isnum < 0:
item = self.img_cache[key]['vertical']
if item['width'] * item['rows'] == 0:
continue
w = item['width'] / 128 * text_size
h = item['rows'] / 128 * text_size
vertBearingX = item['vertBearingX'] / 128 * text_size
vertBearingY = item['vertBearingY'] / 128 * text_size
vertAdvance = item['vertAdvance'] / 128 * text_size
horiBearingX = 0
else:
item = self.img_cache[key]['vertical']
if item['width'] * item['rows'] == 0:
continue
key = selection2[i]
item = self.img_cache[key]['horizontal']
if item['width'] * item['rows'] == 0:
continue
w = item['width'] / 128 * text_size
h = item['rows'] / 128 * text_size
horiBearingY = item['horiBearingY'] / 128 * text_size
horiBearingX = item['horiBearingX'] / 128 * text_size
vertBearingX = -text_size * 0.5
vertBearingY = 0
vertAdvance = text_size
if line_end + vertAdvance >= height:
draw.line(((base_line // scale, line_start // scale),
(base_line // scale, line_end // scale)), fill=255, width=3)
base_line -= line_space
if base_line - text_size / 2 < 0:
break
line_start = 0
line_end = 0
if isnum >= 0:
t = (line_end + vertBearingY + text_size * 0.75 - horiBearingY) / height
else:
t = (line_end + vertBearingY) / height
if isnum > 0:
l = (base_line + horiBearingX) / width
else:
l = (base_line + vertBearingX + horiBearingX) / width
w = w / width
h = h / height
cx = l + w / 2
cy = t + h / 2
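            # Stamp a Gaussian peak at the glyph centre in the keymap and fill the
            # glyph's ellipse in the size / offset / id label planes.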
kernel_size = max(self.min_ker, int(max(w, h) / (2 * scale) * width))
std_x = min(self.max_std, max(self.min_ker, w / (2 * scale) * width) / 3)
std_y = min(self.max_std, max(self.min_ker, h / (2 * scale) * height) / 3)
center_kernel = gaussian_kernel(kernlen=kernel_size*2+1, xstd=std_x, ystd=std_y)
center_xmin = int(cx / scale * width) - kernel_size
center_xmax = int(cx / scale * width) + kernel_size + 1
center_ymin = int(cy / scale * height) - kernel_size
center_ymax = int(cy / scale * height) + kernel_size + 1
padx1 = max(0, 0 - center_xmin)
padx2 = max(0, center_xmax - width // scale)
pady1 = max(0, 0 - center_ymin)
pady2 = max(0, center_ymax - height // scale)
center_xmin += padx1
center_xmax -= padx2
center_ymin += pady1
center_ymax -= pady2
ker = kernel_size * 2 + 1
if center_ymax - center_ymin > 1 and center_xmax - center_xmin > 1:
keymap[center_ymin:center_ymax, center_xmin:center_xmax] = np.maximum(keymap[center_ymin:center_ymax, center_xmin:center_xmax], center_kernel[pady1:ker-pady2,padx1:ker-padx2])
size_xmin = np.clip(int((cx - w/2) * width / scale), 0, width // scale)
size_xmax = np.clip(int((cx + w/2) * width / scale) + 1, 0, width // scale)
size_ymin = np.clip(int((cy - h/2) * height / scale), 0, height // scale)
size_ymax = np.clip(int((cy + h/2) * height / scale) + 1, 0, height // scale)
size_mapx, size_mapy = np.meshgrid(np.arange(size_xmin, size_xmax) - cx * width / scale, np.arange(size_ymin, size_ymax) - cy * height / scale)
size_map = size_mapx ** 2 / max(w/2 * width / scale, 1) ** 2 + size_mapy ** 2 / max(h/2 * height / scale, 1) ** 2 < 1
center_x = int(cx / scale * width)
center_y = int(cy / scale * height)
offset_x = (cx * width % scale) / width * np.cos(angle)
offset_y = (cy * height % scale) / height * np.sin(angle + np.pi / 2)
offset_x += pad_x % scale
offset_y += pad_y % scale
offset_x = offset_x / scale - (np.arange(size_xmin, size_xmax) - center_x) * np.cos(angle)
offset_y = offset_y / scale - (np.arange(size_ymin, size_ymax) - center_y) * np.sin(angle + np.pi / 2)
offset_x = offset_x[np.newaxis,...] - np.linspace(-(size_ymax-size_ymin) * np.sin(angle) / 2, (size_ymax-size_ymin) * np.sin(angle) / 2, size_ymax-size_ymin)[...,np.newaxis]
offset_y = offset_y[...,np.newaxis] - np.linspace(-(size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, (size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, size_xmax-size_xmin)[np.newaxis,...]
offsetx[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_x, offsetx[size_ymin:size_ymax, size_xmin:size_xmax])
offsety[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_y, offsety[size_ymin:size_ymax, size_xmin:size_xmax])
fixw = w * np.abs(np.cos(angle)) + h * np.abs(np.sin(angle))
fixh = h * np.abs(np.cos(angle)) + w * np.abs(np.sin(angle))
fixw = np.log10(fixw * 10)
fixh = np.log10(fixh * 10)
xsizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixw, xsizes[size_ymin:size_ymax, size_xmin:size_xmax])
ysizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixh, ysizes[size_ymin:size_ymax, size_xmin:size_xmax])
id_char = self.glyph_id[key[1]]
ids[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, id_char, ids[size_ymin:size_ymax, size_xmin:size_xmax])
w = max(int(item['width'] / 128 * text_size), 1)
h = max(int(item['rows'] / 128 * text_size), 1)
if isnum > 0:
l = int(np.clip(base_line + horiBearingX, 0, width - w))
else:
l = int(np.clip(base_line + vertBearingX + horiBearingX, 0, width - w))
if isnum >= 0:
t = int(np.clip(line_end + vertBearingY + text_size * 0.75 - horiBearingY, 0, height - h))
else:
t = int(np.clip(line_end + vertBearingY, 0, height - h))
im = np.asarray(Image.fromarray(item['image']).resize((w,h)))
images[t:t+h,l:l+w] = np.maximum(
images[t:t+h,l:l+w],
im)
if isnum != 0:
line_end += vertAdvance
if isnum >= 0:
isnum += 1
i += 1
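        # Rotate the image and every label plane by the sampled angle, stack the
        # label planes and pass everything to sub_constructimage.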
im = Image.fromarray(images).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x, pad_y))
lines = lines.rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
keymapim1 = Image.fromarray(keymap).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
keymapim2 = Image.fromarray(keymap).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
keymapim = np.maximum(keymapim1, keymapim2)
xsizeim = Image.fromarray(xsizes).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
ysizeim = Image.fromarray(ysizes).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
xoffsetim = Image.fromarray(offsetx).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
yoffsetim = Image.fromarray(offsety).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
sepim = Image.fromarray(seps).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
labels = np.stack([keymapim, xsizeim, ysizeim, xoffsetim, yoffsetim, np.asarray(lines) / 255., sepim], -1)
idsim = Image.fromarray(ids).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
images = np.asarray(im) / 255.
ids = np.asarray(idsim)
images = apply_random_filter(images)
return self.sub_constructimage(images, labels, ids, False)
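    # Horizontal text in one or two column blocks; a vertical separator band is
    # written into the seps plane at line_break when both blocks received text.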
def yoko_images(self, keys, fonts, font_p):
max_count = 256
angle_max = 15.0
min_pixel = 16
max_pixel = 100
text_size = random.randint(min_pixel, max_pixel)
line_space = int(text_size * random.uniform(1.05, 2.0))
block_count = 2
line_break = int(random.uniform(0.3,0.7) * width)
break_space = text_size * random.uniform(0.6, 1.5)
images = np.zeros([height, width], dtype=np.float32)
keymap = np.zeros([height // scale, width // scale], dtype=np.float32)
xsizes = np.zeros([height // scale, width // scale], dtype=np.float32)
ysizes = np.zeros([height // scale, width // scale], dtype=np.float32)
offsetx = np.zeros([height // scale, width // scale], dtype=np.float32)
offsety = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = Image.fromarray(lines)
draw = ImageDraw.Draw(lines)
seps = np.zeros([height // scale, width // scale], dtype=np.float32)
ids = np.zeros([height // scale, width // scale], dtype=np.int32)
angle = angle_max * np.random.normal() / 180 * np.pi
angle = np.clip(angle, -np.pi, np.pi)
pad_x = np.random.normal() * width / 20
pad_y = np.random.normal() * height / 20
select_font = random.choices(fonts, k=1, weights=font_p)[0]
probs = [1. if is_Font_match(key[0], select_font) else 0. for key in keys]
selection = [key for key in random.choices(keys, k=max_count, weights=probs)]
base_line = line_space
block_no = 0
line_start = int(max(0, 0 if block_count == 1 or block_no == 0 else line_break + break_space))
line_end = int(min(width, width if block_count == 1 or block_no == 1 else line_break - break_space))
temp_lineend = line_start
linebuf = []
text_count = [0, 0]
sep_end = 0
for key in selection:
item = self.img_cache[key]['horizontal']
if item['width'] * item['rows'] == 0:
continue
w = item['width'] / 128 * text_size
h = item['rows'] / 128 * text_size
horiBearingX = item['horiBearingX'] / 128 * text_size
horiBearingY = item['horiBearingY'] / 128 * text_size
horiAdvance = item['horiAdvance'] / 128 * text_size
if temp_lineend + horiAdvance < line_end:
linebuf.append((key, item))
temp_lineend += horiAdvance
else:
remain = line_end - temp_lineend
if block_no == 0:
line_start += remain
if len(linebuf) > 1:
draw.line(((line_start // scale, base_line // scale),
(line_end // scale, base_line // scale)), fill=255, width=3)
text_count[block_no] += len(linebuf)
for key, item in linebuf:
w = item['width'] / 128 * text_size
h = item['rows'] / 128 * text_size
horiBearingX = item['horiBearingX'] / 128 * text_size
horiBearingY = item['horiBearingY'] / 128 * text_size
horiAdvance = item['horiAdvance'] / 128 * text_size
l = (line_start + horiBearingX) / width
t = (base_line - horiBearingY) / height
w = w / width
h = h / height
cx = l + w / 2
cy = t + h / 2
kernel_size = max(self.min_ker, int(max(w, h) / (2 * scale) * width))
std_x = min(self.max_std, max(self.min_ker, w / (2 * scale) * width) / 3)
std_y = min(self.max_std, max(self.min_ker, h / (2 * scale) * height) / 3)
center_kernel = gaussian_kernel(kernlen=kernel_size*2+1, xstd=std_x, ystd=std_y)
center_xmin = int(cx / scale * width) - kernel_size
center_xmax = int(cx / scale * width) + kernel_size + 1
center_ymin = int(cy / scale * height) - kernel_size
center_ymax = int(cy / scale * height) + kernel_size + 1
padx1 = max(0, 0 - center_xmin)
padx2 = max(0, center_xmax - width // scale)
pady1 = max(0, 0 - center_ymin)
pady2 = max(0, center_ymax - height // scale)
center_xmin += padx1
center_xmax -= padx2
center_ymin += pady1
center_ymax -= pady2
ker = kernel_size * 2 + 1
if center_ymax - center_ymin > 1 and center_xmax - center_xmin > 1:
keymap[center_ymin:center_ymax, center_xmin:center_xmax] = np.maximum(keymap[center_ymin:center_ymax, center_xmin:center_xmax], center_kernel[pady1:ker-pady2,padx1:ker-padx2])
size_xmin = np.clip(int((cx - w/2) * width / scale), 0, width // scale)
size_xmax = np.clip(int((cx + w/2) * width / scale) + 1, 0, width // scale)
size_ymin = np.clip(int((cy - h/2) * height / scale), 0, height // scale)
size_ymax = np.clip(int((cy + h/2) * height / scale) + 1, 0, height // scale)
size_mapx, size_mapy = np.meshgrid(np.arange(size_xmin, size_xmax) - cx * width / scale, np.arange(size_ymin, size_ymax) - cy * height / scale)
size_map = size_mapx ** 2 / max(w/2 * width / scale, 1) ** 2 + size_mapy ** 2 / max(h/2 * height / scale, 1) ** 2 < 1
center_x = int(cx / scale * width)
center_y = int(cy / scale * height)
offset_x = (cx * width % scale) / width * np.cos(angle)
offset_y = (cy * height % scale) / height * np.sin(angle + np.pi / 2)
offset_x += pad_x % scale
offset_y += pad_y % scale
offset_x = offset_x / scale - (np.arange(size_xmin, size_xmax) - center_x) * np.cos(angle)
offset_y = offset_y / scale - (np.arange(size_ymin, size_ymax) - center_y) * np.sin(angle + np.pi / 2)
offset_x = offset_x[np.newaxis,...] - np.linspace(-(size_ymax-size_ymin) * np.sin(angle) / 2, (size_ymax-size_ymin) * np.sin(angle) / 2, size_ymax-size_ymin)[...,np.newaxis]
offset_y = offset_y[...,np.newaxis] - np.linspace(-(size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, (size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, size_xmax-size_xmin)[np.newaxis,...]
offsetx[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_x, offsetx[size_ymin:size_ymax, size_xmin:size_xmax])
offsety[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_y, offsety[size_ymin:size_ymax, size_xmin:size_xmax])
fixw = w * np.abs(np.cos(angle)) + h * np.abs(np.sin(angle))
fixh = h * np.abs(np.cos(angle)) + w * np.abs(np.sin(angle))
fixw = np.log10(fixw * 10)
fixh = np.log10(fixh * 10)
xsizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixw, xsizes[size_ymin:size_ymax, size_xmin:size_xmax])
ysizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixh, ysizes[size_ymin:size_ymax, size_xmin:size_xmax])
id_char = self.glyph_id[key[1]]
ids[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, id_char, ids[size_ymin:size_ymax, size_xmin:size_xmax])
w = max(int(item['width'] / 128 * text_size), 1)
h = max(int(item['rows'] / 128 * text_size), 1)
top = int(np.clip(base_line - horiBearingY, 0, height - h))
left = int(np.clip(line_start + horiBearingX, 0, width - w))
im = np.asarray(Image.fromarray(item['image']).resize((w,h)))
images[top:top+h,left:left+w] = np.maximum(
images[top:top+h,left:left+w],
im)
line_start += int(horiAdvance)
base_line += line_space
if base_line + text_size >= height:
if block_no == 0:
sep_end = base_line - line_space
base_line = line_space
block_no += 1
if block_no >= block_count:
break
line_start = int(max(0, 0 if block_count == 1 or block_no == 0 else line_break + break_space))
line_end = int(min(width, width if block_count == 1 or block_no == 1 else line_break - break_space))
temp_lineend = line_start
linebuf = []
if all(t > 1 for t in text_count):
l = max(1,line_break // scale)
t = line_space // 2 // scale
b = sep_end // scale
seps[t:b, l-1:l+2] = 1
im = Image.fromarray(images).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x, pad_y))
lines = lines.rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
keymapim1 = Image.fromarray(keymap).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
keymapim2 = Image.fromarray(keymap).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
keymapim = np.maximum(keymapim1, keymapim2)
xsizeim = Image.fromarray(xsizes).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
ysizeim = Image.fromarray(ysizes).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
xoffsetim = Image.fromarray(offsetx).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
yoffsetim = Image.fromarray(offsety).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
sepim = Image.fromarray(seps).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
labels = np.stack([keymapim, xsizeim, ysizeim, xoffsetim, yoffsetim, np.asarray(lines) / 255., sepim], -1)
idsim = Image.fromarray(ids).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
images = np.asarray(im) / 255.
ids = np.asarray(idsim)
images = apply_random_filter(images)
return self.sub_constructimage(images, labels, ids, False)
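    # Vertical text in one or two row blocks, columns advancing right to left; a
    # horizontal separator band is written into the seps plane at line_break when
    # both blocks received text.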
def tate_images(self, keys, fonts, font_p):
max_count = 256
angle_max = 15.0
min_pixel = 16
max_pixel = 100
text_size = random.randint(min_pixel, max_pixel)
line_space = int(text_size * random.uniform(1.05, 2.0))
block_count = 2
line_break = int(random.uniform(0.3,0.7) * height)
break_space = text_size * random.uniform(0.6, 1.0)
images = np.zeros([height, width], dtype=np.float32)
keymap = np.zeros([height // scale, width // scale], dtype=np.float32)
xsizes = np.zeros([height // scale, width // scale], dtype=np.float32)
ysizes = np.zeros([height // scale, width // scale], dtype=np.float32)
offsetx = np.zeros([height // scale, width // scale], dtype=np.float32)
offsety = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = Image.fromarray(lines)
draw = ImageDraw.Draw(lines)
seps = np.zeros([height // scale, width // scale], dtype=np.float32)
ids = np.zeros([height // scale, width // scale], dtype=np.int32)
angle = angle_max * np.random.normal() / 180 * np.pi
angle = np.clip(angle, -np.pi, np.pi)
pad_x = np.random.normal() * width / 20
pad_y = np.random.normal() * height / 20
select_font = random.choices(fonts, k=1, weights=font_p)[0]
probs = [1. if is_Font_match(key[0], select_font) else 0. for key in keys]
selection = [key for key in random.choices(keys, k=max_count, weights=probs)]
base_line = width - line_space + text_size // 2
block_no = 0
line_start = int(max(0, 0 if block_count == 1 or block_no == 0 else line_break + break_space))
line_end = int(min(height, height if block_count == 1 or block_no == 1 else line_break - break_space))
temp_lineend = line_start
linebuf = []
text_count = [0, 0]
sep_end = 0
for key in selection:
item = self.img_cache[key]['vertical']
if item['width'] * item['rows'] == 0:
continue
w = item['width'] / 128 * text_size
h = item['rows'] / 128 * text_size
vertBearingX = item['vertBearingX'] / 128 * text_size
vertBearingY = item['vertBearingY'] / 128 * text_size
vertAdvance = item['vertAdvance'] / 128 * text_size
if temp_lineend + vertAdvance < line_end:
linebuf.append((key,item))
temp_lineend += vertAdvance
else:
remain = line_end - temp_lineend
if block_no == 0:
line_start += remain
if len(linebuf) > 1:
draw.line(((base_line // scale, line_start // scale),
(base_line // scale, line_end // scale)), fill=255, width=3)
text_count[block_no] += len(linebuf)
for key, item in linebuf:
w = item['width'] / 128 * text_size
h = item['rows'] / 128 * text_size
vertBearingX = item['vertBearingX'] / 128 * text_size
vertBearingY = item['vertBearingY'] / 128 * text_size
vertAdvance = item['vertAdvance'] / 128 * text_size
l = (base_line + vertBearingX) / width
t = (line_start + vertBearingY) / height
w = w / width
h = h / height
cx = l + w / 2
cy = t + h / 2
kernel_size = max(self.min_ker, int(max(w, h) / (2 * scale) * width))
std_x = min(self.max_std, max(self.min_ker, w / (2 * scale) * width) / 3)
std_y = min(self.max_std, max(self.min_ker, h / (2 * scale) * height) / 3)
center_kernel = gaussian_kernel(kernlen=kernel_size*2+1, xstd=std_x, ystd=std_y)
center_xmin = int(cx / scale * width) - kernel_size
center_xmax = int(cx / scale * width) + kernel_size + 1
center_ymin = int(cy / scale * height) - kernel_size
center_ymax = int(cy / scale * height) + kernel_size + 1
padx1 = max(0, 0 - center_xmin)
padx2 = max(0, center_xmax - width // scale)
pady1 = max(0, 0 - center_ymin)
pady2 = max(0, center_ymax - height // scale)
center_xmin += padx1
center_xmax -= padx2
center_ymin += pady1
center_ymax -= pady2
ker = kernel_size * 2 + 1
if center_ymax - center_ymin > 1 and center_xmax - center_xmin > 1:
keymap[center_ymin:center_ymax, center_xmin:center_xmax] = np.maximum(keymap[center_ymin:center_ymax, center_xmin:center_xmax], center_kernel[pady1:ker-pady2,padx1:ker-padx2])
size_xmin = np.clip(int((cx - w/2) * width / scale), 0, width // scale)
size_xmax = np.clip(int((cx + w/2) * width / scale) + 1, 0, width // scale)
size_ymin = np.clip(int((cy - h/2) * height / scale), 0, height // scale)
size_ymax = np.clip(int((cy + h/2) * height / scale) + 1, 0, height // scale)
size_mapx, size_mapy = np.meshgrid(np.arange(size_xmin, size_xmax) - cx * width / scale, np.arange(size_ymin, size_ymax) - cy * height / scale)
size_map = size_mapx ** 2 / max(w/2 * width / scale, 1) ** 2 + size_mapy ** 2 / max(h/2 * height / scale, 1) ** 2 < 1
center_x = int(cx / scale * width)
center_y = int(cy / scale * height)
offset_x = (cx * width % scale) / width * np.cos(angle)
offset_y = (cy * height % scale) / height * np.sin(angle + np.pi / 2)
offset_x += pad_x % scale
offset_y += pad_y % scale
offset_x = offset_x / scale - (np.arange(size_xmin, size_xmax) - center_x) * np.cos(angle)
offset_y = offset_y / scale - (np.arange(size_ymin, size_ymax) - center_y) * np.sin(angle + np.pi / 2)
offset_x = offset_x[np.newaxis,...] - np.linspace(-(size_ymax-size_ymin) * np.sin(angle) / 2, (size_ymax-size_ymin) * np.sin(angle) / 2, size_ymax-size_ymin)[...,np.newaxis]
offset_y = offset_y[...,np.newaxis] - np.linspace(-(size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, (size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, size_xmax-size_xmin)[np.newaxis,...]
offsetx[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_x, offsetx[size_ymin:size_ymax, size_xmin:size_xmax])
offsety[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_y, offsety[size_ymin:size_ymax, size_xmin:size_xmax])
fixw = w * np.abs(np.cos(angle)) + h * np.abs(np.sin(angle))
fixh = h * np.abs(np.cos(angle)) + w * np.abs(np.sin(angle))
fixw = np.log10(fixw * 10)
fixh = np.log10(fixh * 10)
xsizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixw, xsizes[size_ymin:size_ymax, size_xmin:size_xmax])
ysizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixh, ysizes[size_ymin:size_ymax, size_xmin:size_xmax])
id_char = self.glyph_id[key[1]]
ids[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, id_char, ids[size_ymin:size_ymax, size_xmin:size_xmax])
w = max(int(item['width'] / 128 * text_size), 1)
h = max(int(item['rows'] / 128 * text_size), 1)
l = int(np.clip(base_line + vertBearingX, 0, width - w))
t = int(np.clip(line_start + vertBearingY, 0, height - h))
im = np.asarray(Image.fromarray(item['image']).resize((w,h)))
images[t:t+h,l:l+w] = np.maximum(
images[t:t+h,l:l+w],
im)
line_start += int(vertAdvance)
base_line -= line_space
if base_line - text_size / 2 < 0:
if block_no == 0:
sep_end = base_line + line_space
base_line = width - line_space + text_size // 2
block_no += 1
if block_no >= block_count:
break
line_start = int(max(0, 0 if block_count == 1 or block_no == 0 else line_break + break_space))
                    line_end = int(min(height, height if block_count == 1 or block_no == 1 else line_break - break_space))
temp_lineend = line_start
linebuf = []
if all(t > 1 for t in text_count):
l = max(1,line_break // scale)
right = (width - line_space + text_size // 2) // scale
left = sep_end // scale
seps[l-1:l+2, left:right] = 1
im = Image.fromarray(images).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x, pad_y))
lines = lines.rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
keymapim1 = Image.fromarray(keymap).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
keymapim2 = Image.fromarray(keymap).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
keymapim = np.maximum(keymapim1, keymapim2)
xsizeim = Image.fromarray(xsizes).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
ysizeim = Image.fromarray(ysizes).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
xoffsetim = Image.fromarray(offsetx).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
yoffsetim = Image.fromarray(offsety).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
sepim = Image.fromarray(seps).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
labels = np.stack([keymapim, xsizeim, ysizeim, xoffsetim, yoffsetim, np.asarray(lines) / 255., sepim], -1)
idsim = Image.fromarray(ids).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
images = np.asarray(im) / 255.
ids = np.asarray(idsim)
images = apply_random_filter(images)
return self.sub_constructimage(images, labels, ids, False)
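    # Vertical text with furigana: main glyphs at text_size2 (= 2 * text_size) and
    # furigana glyphs drawn from types 3-4 at text_size, set in a narrow column to
    # the right of each main column.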
def tatefurigana_images(self, keys, fonts, font_p):
max_count = 256
angle_max = 15.0
min_pixel = 12
max_pixel = 50
text_size = random.randint(min_pixel, max_pixel)
text_size2 = text_size * 2
line_space = int(text_size2 * random.uniform(1.45, 1.7))
block_count = 2
line_break = int(random.uniform(0.3,0.7) * height)
break_space = text_size2 * random.uniform(0.6, 1.0)
images = np.zeros([height, width], dtype=np.float32)
keymap = np.zeros([height // scale, width // scale], dtype=np.float32)
xsizes = np.zeros([height // scale, width // scale], dtype=np.float32)
ysizes = np.zeros([height // scale, width // scale], dtype=np.float32)
offsetx = np.zeros([height // scale, width // scale], dtype=np.float32)
offsety = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = Image.fromarray(lines)
draw = ImageDraw.Draw(lines)
seps = np.zeros([height // scale, width // scale], dtype=np.float32)
ids = np.zeros([height // scale, width // scale], dtype=np.int32)
angle = angle_max * np.random.normal() / 180 * np.pi
angle = np.clip(angle, -np.pi, np.pi)
pad_x = np.random.normal() * width / 20
pad_y = np.random.normal() * height / 20
select_font = random.choices(fonts, k=1, weights=font_p)[0]
probs = [1. if is_Font_match(key[0], select_font) else 0. for key in keys]
selection = [key for key in random.choices(keys, k=max_count, weights=probs)]
probs2 = [1. if is_Font_match(key[0], select_font) and self.glyph_type[self.glyph_id[key[1]]] in [3,4] else 0. for key in keys]
selection2 = iter([key for key in random.choices(keys, k=max_count*2, weights=probs2)])
base_line = width - line_space + text_size2 // 2
block_no = 0
line_start = int(max(0, 0 if block_count == 1 or block_no == 0 else line_break + break_space))
line_end = int(min(height, height if block_count == 1 or block_no == 1 else line_break - break_space))
temp_lineend = line_start
linebuf = []
text_count = [0, 0]
sep_end = 0
for key in selection:
item = self.img_cache[key]['vertical']
if item['width'] * item['rows'] == 0:
continue
w = item['width'] / 128 * text_size2
h = item['rows'] / 128 * text_size2
vertBearingX = item['vertBearingX'] / 128 * text_size2
vertBearingY = item['vertBearingY'] / 128 * text_size2
vertAdvance = item['vertAdvance'] / 128 * text_size2
if temp_lineend + vertAdvance < line_end:
linebuf.append((key,item))
temp_lineend += vertAdvance
else:
remain = line_end - temp_lineend
if block_no == 0:
line_start += remain
if len(linebuf) > 1:
draw.line(((base_line // scale, line_start // scale),
(base_line // scale, line_end // scale)), fill=255, width=3)
text_count[block_no] += len(linebuf)
for key, item in linebuf:
w = item['width'] / 128 * text_size2
h = item['rows'] / 128 * text_size2
vertBearingX = item['vertBearingX'] / 128 * text_size2
vertBearingY = item['vertBearingY'] / 128 * text_size2
vertAdvance = item['vertAdvance'] / 128 * text_size2
l = (base_line + vertBearingX) / width
t = (line_start + vertBearingY) / height
w = w / width
h = h / height
cx = l + w / 2
cy = t + h / 2
kernel_size = max(self.min_ker, int(max(w, h) / (2 * scale) * width))
std_x = min(self.max_std, max(self.min_ker, w / (2 * scale) * width) / 3)
std_y = min(self.max_std, max(self.min_ker, h / (2 * scale) * height) / 3)
center_kernel = gaussian_kernel(kernlen=kernel_size*2+1, xstd=std_x, ystd=std_y)
center_xmin = int(cx / scale * width) - kernel_size
center_xmax = int(cx / scale * width) + kernel_size + 1
center_ymin = int(cy / scale * height) - kernel_size
center_ymax = int(cy / scale * height) + kernel_size + 1
padx1 = max(0, 0 - center_xmin)
padx2 = max(0, center_xmax - width // scale)
pady1 = max(0, 0 - center_ymin)
pady2 = max(0, center_ymax - height // scale)
center_xmin += padx1
center_xmax -= padx2
center_ymin += pady1
center_ymax -= pady2
ker = kernel_size * 2 + 1
if center_ymax - center_ymin > 1 and center_xmax - center_xmin > 1:
keymap[center_ymin:center_ymax, center_xmin:center_xmax] = np.maximum(keymap[center_ymin:center_ymax, center_xmin:center_xmax], center_kernel[pady1:ker-pady2,padx1:ker-padx2])
size_xmin = np.clip(int((cx - w/2) * width / scale), 0, width // scale)
size_xmax = np.clip(int((cx + w/2) * width / scale) + 1, 0, width // scale)
size_ymin = np.clip(int((cy - h/2) * height / scale), 0, height // scale)
size_ymax = np.clip(int((cy + h/2) * height / scale) + 1, 0, height // scale)
size_mapx, size_mapy = np.meshgrid(np.arange(size_xmin, size_xmax) - cx * width / scale, np.arange(size_ymin, size_ymax) - cy * height / scale)
size_map = size_mapx ** 2 / max(w/2 * width / scale, 1) ** 2 + size_mapy ** 2 / max(h/2 * height / scale, 1) ** 2 < 1
center_x = int(cx / scale * width)
center_y = int(cy / scale * height)
offset_x = (cx * width % scale) / width * np.cos(angle)
offset_y = (cy * height % scale) / height * np.sin(angle + np.pi / 2)
offset_x += pad_x % scale
offset_y += pad_y % scale
offset_x = offset_x / scale - (np.arange(size_xmin, size_xmax) - center_x) * np.cos(angle)
offset_y = offset_y / scale - (np.arange(size_ymin, size_ymax) - center_y) * np.sin(angle + np.pi / 2)
offset_x = offset_x[np.newaxis,...] - np.linspace(-(size_ymax-size_ymin) * np.sin(angle) / 2, (size_ymax-size_ymin) * np.sin(angle) / 2, size_ymax-size_ymin)[...,np.newaxis]
offset_y = offset_y[...,np.newaxis] - np.linspace(-(size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, (size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, size_xmax-size_xmin)[np.newaxis,...]
offsetx[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_x, offsetx[size_ymin:size_ymax, size_xmin:size_xmax])
offsety[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_y, offsety[size_ymin:size_ymax, size_xmin:size_xmax])
fixw = w * np.abs(np.cos(angle)) + h * np.abs(np.sin(angle))
fixh = h * np.abs(np.cos(angle)) + w * np.abs(np.sin(angle))
fixw = np.log10(fixw * 10)
fixh = np.log10(fixh * 10)
xsizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixw, xsizes[size_ymin:size_ymax, size_xmin:size_xmax])
ysizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixh, ysizes[size_ymin:size_ymax, size_xmin:size_xmax])
id_char = self.glyph_id[key[1]]
ids[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, id_char, ids[size_ymin:size_ymax, size_xmin:size_xmax])
w = max(int(item['width'] / 128 * text_size2), 1)
h = max(int(item['rows'] / 128 * text_size2), 1)
l = int(np.clip(base_line + vertBearingX, 0, width - w))
t = int(np.clip(line_start + vertBearingY, 0, height - h))
im = np.asarray(Image.fromarray(item['image']).resize((w,h)))
images[t:t+h,l:l+w] = np.maximum(
images[t:t+h,l:l+w],
im)
line_start += int(vertAdvance)
                # Furigana (ruby) pass: add smaller kana alongside the line just rendered
base_line2 = base_line + text_size2 // 2 + text_size // 2
line_start2 = int(max(0, 0 if block_count == 1 or block_no == 0 else line_break + break_space))
if block_no == 0:
line_start2 += remain
line_end2 = line_start
line_start2p = line_start2
while line_start2 < line_end2:
key2 = next(selection2, None)
if key2 is None:
break
item = self.img_cache[key2]['vertical']
if item['width'] * item['rows'] == 0:
continue
w = item['width'] / 128 * text_size
h = item['rows'] / 128 * text_size
vertBearingX = item['vertBearingX'] / 128 * text_size
vertBearingY = item['vertBearingY'] / 128 * text_size
vertAdvance = item['vertAdvance'] / 128 * text_size
if np.random.uniform() < 0.2:
                        # Leave a gap here (no furigana for this span)
if line_start2 != line_start2p:
draw.line(((base_line2 // scale, line_start2p // scale),
(base_line2 // scale, line_start2 // scale)), fill=255, width=3)
line_start2 += int(vertAdvance)
line_start2p = line_start2
continue
                    # Place a furigana glyph here
l = (base_line2 + vertBearingX) / width
t = (line_start2 + vertBearingY) / height
w = w / width
h = h / height
cx = l + w / 2
cy = t + h / 2
kernel_size = max(self.min_ker, int(max(w, h) / (2 * scale) * width))
std_x = min(self.max_std, max(self.min_ker, w / (2 * scale) * width) / 3)
std_y = min(self.max_std, max(self.min_ker, h / (2 * scale) * height) / 3)
center_kernel = gaussian_kernel(kernlen=kernel_size*2+1, xstd=std_x, ystd=std_y)
center_xmin = int(cx / scale * width) - kernel_size
center_xmax = int(cx / scale * width) + kernel_size + 1
center_ymin = int(cy / scale * height) - kernel_size
center_ymax = int(cy / scale * height) + kernel_size + 1
padx1 = max(0, 0 - center_xmin)
padx2 = max(0, center_xmax - width // scale)
pady1 = max(0, 0 - center_ymin)
pady2 = max(0, center_ymax - height // scale)
center_xmin += padx1
center_xmax -= padx2
center_ymin += pady1
center_ymax -= pady2
ker = kernel_size * 2 + 1
if center_ymax - center_ymin > 1 and center_xmax - center_xmin > 1:
keymap[center_ymin:center_ymax, center_xmin:center_xmax] = np.maximum(keymap[center_ymin:center_ymax, center_xmin:center_xmax], center_kernel[pady1:ker-pady2,padx1:ker-padx2])
size_xmin = np.clip(int((cx - w/2) * width / scale), 0, width // scale)
size_xmax = np.clip(int((cx + w/2) * width / scale) + 1, 0, width // scale)
size_ymin = np.clip(int((cy - h/2) * height / scale), 0, height // scale)
size_ymax = np.clip(int((cy + h/2) * height / scale) + 1, 0, height // scale)
size_mapx, size_mapy = np.meshgrid(np.arange(size_xmin, size_xmax) - cx * width / scale, np.arange(size_ymin, size_ymax) - cy * height / scale)
size_map = size_mapx ** 2 / max(w/2 * width / scale, 1) ** 2 + size_mapy ** 2 / max(h/2 * height / scale, 1) ** 2 < 1
center_x = int(cx / scale * width)
center_y = int(cy / scale * height)
offset_x = (cx * width % scale) / width * np.cos(angle)
offset_y = (cy * height % scale) / height * np.sin(angle + np.pi / 2)
offset_x += pad_x % scale
offset_y += pad_y % scale
offset_x = offset_x / scale - (np.arange(size_xmin, size_xmax) - center_x) * np.cos(angle)
offset_y = offset_y / scale - (np.arange(size_ymin, size_ymax) - center_y) * np.sin(angle + np.pi / 2)
offset_x = offset_x[np.newaxis,...] - np.linspace(-(size_ymax-size_ymin) * np.sin(angle) / 2, (size_ymax-size_ymin) * np.sin(angle) / 2, size_ymax-size_ymin)[...,np.newaxis]
offset_y = offset_y[...,np.newaxis] - np.linspace(-(size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, (size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, size_xmax-size_xmin)[np.newaxis,...]
offsetx[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_x, offsetx[size_ymin:size_ymax, size_xmin:size_xmax])
offsety[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_y, offsety[size_ymin:size_ymax, size_xmin:size_xmax])
fixw = w * np.abs(np.cos(angle)) + h * np.abs(np.sin(angle))
fixh = h * np.abs(np.cos(angle)) + w * np.abs(np.sin(angle))
fixw = np.log10(fixw * 10)
fixh = np.log10(fixh * 10)
xsizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixw, xsizes[size_ymin:size_ymax, size_xmin:size_xmax])
ysizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixh, ysizes[size_ymin:size_ymax, size_xmin:size_xmax])
                    id_char = self.glyph_id[key2[1]]
ids[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, id_char, ids[size_ymin:size_ymax, size_xmin:size_xmax])
w = max(int(item['width'] / 128 * text_size), 1)
h = max(int(item['rows'] / 128 * text_size), 1)
l = int(np.clip(base_line2 + vertBearingX, 0, width - w))
t = int(np.clip(line_start2 + vertBearingY, 0, height - h))
im = np.asarray(Image.fromarray(item['image']).resize((w,h)))
images[t:t+h,l:l+w] = np.maximum(
images[t:t+h,l:l+w],
im)
line_start2 += int(vertAdvance)
if line_start2 != line_start2p:
draw.line(((base_line2 // scale, line_start2p // scale),
(base_line2 // scale, line_start2 // scale)), fill=255, width=3)
base_line -= line_space
if base_line - text_size2 / 2 < 0:
if block_no == 0:
sep_end = base_line + line_space
base_line = width - line_space + text_size2 // 2
block_no += 1
if block_no >= block_count:
break
line_start = int(max(0, 0 if block_count == 1 or block_no == 0 else line_break + break_space))
                    line_end = int(min(height, height if block_count == 1 or block_no == 1 else line_break - break_space))
temp_lineend = line_start
linebuf = []
if all(t > 1 for t in text_count):
l = max(1,line_break // scale)
right = (width - line_space + text_size2 // 2) // scale
left = sep_end // scale
seps[l-1:l+2, left:right] = 1
im = Image.fromarray(images).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x, pad_y))
lines = lines.rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
keymapim1 = Image.fromarray(keymap).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
keymapim2 = Image.fromarray(keymap).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
keymapim = np.maximum(keymapim1, keymapim2)
xsizeim = Image.fromarray(xsizes).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
ysizeim = Image.fromarray(ysizes).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
xoffsetim = Image.fromarray(offsetx).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
yoffsetim = Image.fromarray(offsety).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
sepim = Image.fromarray(seps).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
labels = np.stack([keymapim, xsizeim, ysizeim, xoffsetim, yoffsetim, np.asarray(lines) / 255., sepim], -1)
idsim = Image.fromarray(ids).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
images = np.asarray(im) / 255.
ids = np.asarray(idsim)
images = apply_random_filter(images)
return self.sub_constructimage(images, labels, ids, False)
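    # Horizontal text with furigana: main glyphs at text_size2 and furigana glyphs
    # drawn from types 3-4 at text_size, set on a second baseline above each main
    # line.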
def yokofurigana_images(self, keys, fonts, font_p):
max_count = 256
angle_max = 15.0
min_pixel = 12
max_pixel = 50
text_size = random.randint(min_pixel, max_pixel)
text_size2 = text_size * 2
line_space = int(text_size2 * random.uniform(1.45, 1.7))
block_count = 2
line_break = int(random.uniform(0.3,0.7) * width)
break_space = text_size2 * random.uniform(0.6, 1.5)
images = np.zeros([height, width], dtype=np.float32)
keymap = np.zeros([height // scale, width // scale], dtype=np.float32)
xsizes = np.zeros([height // scale, width // scale], dtype=np.float32)
ysizes = np.zeros([height // scale, width // scale], dtype=np.float32)
offsetx = np.zeros([height // scale, width // scale], dtype=np.float32)
offsety = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = Image.fromarray(lines)
draw = ImageDraw.Draw(lines)
seps = np.zeros([height // scale, width // scale], dtype=np.float32)
ids = np.zeros([height // scale, width // scale], dtype=np.int32)
angle = angle_max * np.random.normal() / 180 * np.pi
angle = np.clip(angle, -np.pi, np.pi)
pad_x = np.random.normal() * width / 20
pad_y = np.random.normal() * height / 20
select_font = random.choices(fonts, k=1, weights=font_p)[0]
probs = [1. if is_Font_match(key[0], select_font) else 0. for key in keys]
selection = [key for key in random.choices(keys, k=max_count, weights=probs)]
probs2 = [1. if is_Font_match(key[0], select_font) and self.glyph_type[self.glyph_id[key[1]]] in [3,4] else 0. for key in keys]
selection2 = iter([key for key in random.choices(keys, k=max_count*2, weights=probs2)])
base_line = line_space
block_no = 0
line_start = int(max(0, 0 if block_count == 1 or block_no == 0 else line_break + break_space))
line_end = int(min(width, width if block_count == 1 or block_no == 1 else line_break - break_space))
temp_lineend = line_start
linebuf = []
text_count = [0, 0]
sep_end = 0
for key in selection:
item = self.img_cache[key]['horizontal']
if item['width'] * item['rows'] == 0:
continue
w = item['width'] / 128 * text_size2
h = item['rows'] / 128 * text_size2
horiBearingX = item['horiBearingX'] / 128 * text_size2
horiBearingY = item['horiBearingY'] / 128 * text_size2
horiAdvance = item['horiAdvance'] / 128 * text_size2
if temp_lineend + horiAdvance < line_end:
linebuf.append((key, item))
temp_lineend += horiAdvance
else:
remain = line_end - temp_lineend
if block_no == 0:
line_start += remain
if len(linebuf) > 1:
draw.line(((line_start // scale, base_line // scale),
(line_end // scale, base_line // scale)), fill=255, width=3)
text_count[block_no] += len(linebuf)
for key, item in linebuf:
w = item['width'] / 128 * text_size2
h = item['rows'] / 128 * text_size2
horiBearingX = item['horiBearingX'] / 128 * text_size2
horiBearingY = item['horiBearingY'] / 128 * text_size2
horiAdvance = item['horiAdvance'] / 128 * text_size2
l = (line_start + horiBearingX) / width
t = (base_line - horiBearingY) / height
w = w / width
h = h / height
cx = l + w / 2
cy = t + h / 2
kernel_size = max(self.min_ker, int(max(w, h) / (2 * scale) * width))
std_x = min(self.max_std, max(self.min_ker, w / (2 * scale) * width) / 3)
std_y = min(self.max_std, max(self.min_ker, h / (2 * scale) * height) / 3)
center_kernel = gaussian_kernel(kernlen=kernel_size*2+1, xstd=std_x, ystd=std_y)
center_xmin = int(cx / scale * width) - kernel_size
center_xmax = int(cx / scale * width) + kernel_size + 1
center_ymin = int(cy / scale * height) - kernel_size
center_ymax = int(cy / scale * height) + kernel_size + 1
padx1 = max(0, 0 - center_xmin)
padx2 = max(0, center_xmax - width // scale)
pady1 = max(0, 0 - center_ymin)
pady2 = max(0, center_ymax - height // scale)
center_xmin += padx1
center_xmax -= padx2
center_ymin += pady1
center_ymax -= pady2
ker = kernel_size * 2 + 1
if center_ymax - center_ymin > 1 and center_xmax - center_xmin > 1:
keymap[center_ymin:center_ymax, center_xmin:center_xmax] = np.maximum(keymap[center_ymin:center_ymax, center_xmin:center_xmax], center_kernel[pady1:ker-pady2,padx1:ker-padx2])
size_xmin = np.clip(int((cx - w/2) * width / scale), 0, width // scale)
size_xmax = np.clip(int((cx + w/2) * width / scale) + 1, 0, width // scale)
size_ymin = np.clip(int((cy - h/2) * height / scale), 0, height // scale)
size_ymax = np.clip(int((cy + h/2) * height / scale) + 1, 0, height // scale)
size_mapx, size_mapy = np.meshgrid(np.arange(size_xmin, size_xmax) - cx * width / scale, np.arange(size_ymin, size_ymax) - cy * height / scale)
size_map = size_mapx ** 2 / max(w/2 * width / scale, 1) ** 2 + size_mapy ** 2 / max(h/2 * height / scale, 1) ** 2 < 1
center_x = int(cx / scale * width)
center_y = int(cy / scale * height)
offset_x = (cx * width % scale) / width * np.cos(angle)
offset_y = (cy * height % scale) / height * np.sin(angle + np.pi / 2)
offset_x += pad_x % scale
offset_y += pad_y % scale
offset_x = offset_x / scale - (np.arange(size_xmin, size_xmax) - center_x) * np.cos(angle)
offset_y = offset_y / scale - (np.arange(size_ymin, size_ymax) - center_y) * np.sin(angle + np.pi / 2)
offset_x = offset_x[np.newaxis,...] - np.linspace(-(size_ymax-size_ymin) * np.sin(angle) / 2, (size_ymax-size_ymin) * np.sin(angle) / 2, size_ymax-size_ymin)[...,np.newaxis]
offset_y = offset_y[...,np.newaxis] - np.linspace(-(size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, (size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, size_xmax-size_xmin)[np.newaxis,...]
offsetx[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_x, offsetx[size_ymin:size_ymax, size_xmin:size_xmax])
offsety[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_y, offsety[size_ymin:size_ymax, size_xmin:size_xmax])
fixw = w * np.abs(np.cos(angle)) + h * np.abs(np.sin(angle))
fixh = h * np.abs(np.cos(angle)) + w * np.abs(np.sin(angle))
fixw = np.log10(fixw * 10)
fixh = np.log10(fixh * 10)
xsizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixw, xsizes[size_ymin:size_ymax, size_xmin:size_xmax])
ysizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixh, ysizes[size_ymin:size_ymax, size_xmin:size_xmax])
id_char = self.glyph_id[key[1]]
ids[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, id_char, ids[size_ymin:size_ymax, size_xmin:size_xmax])
w = max(int(item['width'] / 128 * text_size2), 1)
h = max(int(item['rows'] / 128 * text_size2), 1)
top = int(np.clip(base_line - horiBearingY, 0, height - h))
left = int(np.clip(line_start + horiBearingX, 0, width - w))
im = np.asarray(Image.fromarray(item['image']).resize((w,h)))
images[top:top+h,left:left+w] = np.maximum(
images[top:top+h,left:left+w],
im)
line_start += int(horiAdvance)
                # Furigana (ruby) pass: add smaller kana above the line just rendered
base_line2 = base_line - text_size2
line_start2 = int(max(0, 0 if block_count == 1 or block_no == 0 else line_break + break_space))
if block_no == 0:
line_start2 += remain
line_end2 = line_start
line_start2p = line_start2
while line_start2 < line_end2:
key2 = next(selection2, None)
if key2 is None:
break
item = self.img_cache[key2]['horizontal']
if item['width'] * item['rows'] == 0:
continue
w = item['width'] / 128 * text_size
h = item['rows'] / 128 * text_size
horiBearingX = item['horiBearingX'] / 128 * text_size
horiBearingY = item['horiBearingY'] / 128 * text_size
horiAdvance = item['horiAdvance'] / 128 * text_size
if np.random.uniform() < 0.2:
# Leave this span empty (no furigana here)
if line_start2 != line_start2p:
draw.line(((line_start2p // scale, base_line // scale),
(line_start // scale, base_line // scale)), fill=255, width=3)
line_start2 += int(horiAdvance)
line_start2p = line_start2
continue
# Render furigana over this span
l = (line_start2 + horiBearingX) / width
t = (base_line2 - horiBearingY) / height
w = w / width
h = h / height
cx = l + w / 2
cy = t + h / 2
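# The block below writes this furigana glyph into the training targets: a Gaussian
# peak on the (1/scale) keymap at the glyph centre, per-pixel x/y offsets back to
# that centre, log10-encoded box sizes, and the glyph id in the `ids` map.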
kernel_size = max(self.min_ker, int(max(w, h) / (2 * scale) * width))
std_x = min(self.max_std, max(self.min_ker, w / (2 * scale) * width) / 3)
std_y = min(self.max_std, max(self.min_ker, h / (2 * scale) * height) / 3)
center_kernel = gaussian_kernel(kernlen=kernel_size*2+1, xstd=std_x, ystd=std_y)
center_xmin = int(cx / scale * width) - kernel_size
center_xmax = int(cx / scale * width) + kernel_size + 1
center_ymin = int(cy / scale * height) - kernel_size
center_ymax = int(cy / scale * height) + kernel_size + 1
padx1 = max(0, 0 - center_xmin)
padx2 = max(0, center_xmax - width // scale)
pady1 = max(0, 0 - center_ymin)
pady2 = max(0, center_ymax - height // scale)
center_xmin += padx1
center_xmax -= padx2
center_ymin += pady1
center_ymax -= pady2
ker = kernel_size * 2 + 1
if center_ymax - center_ymin > 1 and center_xmax - center_xmin > 1:
keymap[center_ymin:center_ymax, center_xmin:center_xmax] = np.maximum(keymap[center_ymin:center_ymax, center_xmin:center_xmax], center_kernel[pady1:ker-pady2,padx1:ker-padx2])
size_xmin = np.clip(int((cx - w/2) * width / scale), 0, width // scale)
size_xmax = np.clip(int((cx + w/2) * width / scale) + 1, 0, width // scale)
size_ymin = np.clip(int((cy - h/2) * height / scale), 0, height // scale)
size_ymax = np.clip(int((cy + h/2) * height / scale) + 1, 0, height // scale)
size_mapx, size_mapy = np.meshgrid(np.arange(size_xmin, size_xmax) - cx * width / scale, np.arange(size_ymin, size_ymax) - cy * height / scale)
size_map = size_mapx ** 2 / max(w/2 * width / scale, 1) ** 2 + size_mapy ** 2 / max(h/2 * height / scale, 1) ** 2 < 1
center_x = int(cx / scale * width)
center_y = int(cy / scale * height)
offset_x = (cx * width % scale) / width * np.cos(angle)
offset_y = (cy * height % scale) / height * np.sin(angle + np.pi / 2)
offset_x += pad_x % scale
offset_y += pad_y % scale
offset_x = offset_x / scale - (np.arange(size_xmin, size_xmax) - center_x) * np.cos(angle)
offset_y = offset_y / scale - (np.arange(size_ymin, size_ymax) - center_y) * np.sin(angle + np.pi / 2)
offset_x = offset_x[np.newaxis,...] - np.linspace(-(size_ymax-size_ymin) * np.sin(angle) / 2, (size_ymax-size_ymin) * np.sin(angle) / 2, size_ymax-size_ymin)[...,np.newaxis]
offset_y = offset_y[...,np.newaxis] - np.linspace(-(size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, (size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, size_xmax-size_xmin)[np.newaxis,...]
offsetx[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_x, offsetx[size_ymin:size_ymax, size_xmin:size_xmax])
offsety[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_y, offsety[size_ymin:size_ymax, size_xmin:size_xmax])
fixw = w * np.abs(np.cos(angle)) + h * np.abs(np.sin(angle))
fixh = h * np.abs(np.cos(angle)) + w * np.abs(np.sin(angle))
fixw = np.log10(fixw * 10)
fixh = np.log10(fixh * 10)
xsizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixw, xsizes[size_ymin:size_ymax, size_xmin:size_xmax])
ysizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixh, ysizes[size_ymin:size_ymax, size_xmin:size_xmax])
id_char = self.glyph_id[key[1]]
ids[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, id_char, ids[size_ymin:size_ymax, size_xmin:size_xmax])
w = max(int(item['width'] / 128 * text_size), 1)
h = max(int(item['rows'] / 128 * text_size), 1)
top = int(np.clip(base_line2 - horiBearingY, 0, height - h))
left = int(np.clip(line_start2 + horiBearingX, 0, width - w))
im = np.asarray(Image.fromarray(item['image']).resize((w,h)))
images[top:top+h,left:left+w] = np.maximum(
images[top:top+h,left:left+w],
im)
line_start2 += int(horiAdvance)
if line_start2 != line_start2p:
draw.line(((line_start2p // scale, base_line // scale),
(line_start // scale, base_line // scale)), fill=255, width=3)
base_line += line_space
if base_line + text_size2 >= height:
if block_no == 0:
sep_end = base_line - line_space
base_line = line_space
block_no += 1
if block_no >= block_count:
break
line_start = int(max(0, 0 if block_count == 1 or block_no == 0 else line_break + break_space))
line_end = int(min(width, width if block_count == 1 or block_no == 1 else line_break - break_space))
temp_lineend = line_start
linebuf = []
if all(t > 1 for t in text_count):
l = max(1,line_break // scale)
t = line_space // 2 // scale
b = sep_end // scale
seps[t:b, l-1:l+2] = 1
im = Image.fromarray(images).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x, pad_y))
lines = lines.rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
keymapim1 = Image.fromarray(keymap).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
keymapim2 = Image.fromarray(keymap).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
keymapim = np.maximum(keymapim1, keymapim2)
xsizeim = Image.fromarray(xsizes).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
ysizeim = Image.fromarray(ysizes).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
xoffsetim = Image.fromarray(offsetx).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
yoffsetim = Image.fromarray(offsety).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
sepim = Image.fromarray(seps).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
labels = np.stack([keymapim, xsizeim, ysizeim, xoffsetim, yoffsetim, np.asarray(lines) / 255., sepim], -1)
idsim = Image.fromarray(ids).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
images = np.asarray(im) / 255.
ids = np.asarray(idsim)
images = apply_random_filter(images)
return self.sub_constructimage(images, labels, ids, False)
def null_images(self):
images = np.zeros([height, width], dtype=np.float32)
keymap = np.zeros([height // scale, width // scale], dtype=np.float32)
xsizes = np.zeros([height // scale, width // scale], dtype=np.float32)
ysizes = np.zeros([height // scale, width // scale], dtype=np.float32)
offsetx = np.zeros([height // scale, width // scale], dtype=np.float32)
offsety = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = np.zeros([height // scale, width // scale], dtype=np.float32)
seps = np.zeros([height // scale, width // scale], dtype=np.float32)
labels = np.stack([keymap, xsizes, ysizes, offsetx, offsety, lines, seps], -1)
ids = np.zeros([height // scale, width // scale], dtype=np.int32)
images = apply_random_filter(images)
return self.sub_constructimage(images, labels, ids, len(self.random_background) > 0)
def load_random_line(self):
images = np.zeros([height, width], dtype=np.float32)
keymap = np.zeros([height // scale, width // scale], dtype=np.float32)
xsizes = np.zeros([height // scale, width // scale], dtype=np.float32)
ysizes = np.zeros([height // scale, width // scale], dtype=np.float32)
offsetx = np.zeros([height // scale, width // scale], dtype=np.float32)
offsety = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = np.zeros([height // scale, width // scale], dtype=np.float32)
seps = np.zeros([height // scale, width // scale], dtype=np.float32)
seps = Image.fromarray(seps)
draw1 = ImageDraw.Draw(seps)
images = Image.fromarray(images)
draw2 = ImageDraw.Draw(images)
linew = int(np.clip(np.random.uniform() * 20, scale, 20))
x1 = np.random.normal() * width / 2 + width / 2
y1 = np.random.normal() * height / 2 + height / 2
x2 = np.random.normal() * width / 2 + width / 2
y2 = np.random.normal() * height / 2 + height / 2
draw1.line(((x1 // scale, y1 // scale), (x2 // scale, y2 // scale)), fill=255, width=linew//scale+1)
draw2.line(((x1, y1), (x2, y2)), fill=255, width=linew)
labels = np.stack([keymap, xsizes, ysizes, offsetx, offsety, lines, np.asarray(seps) / 255.], -1)
ids = np.zeros([height // scale, width // scale], dtype=np.int32)
images = np.asarray(images) / 255.
images = apply_random_filter(images)
return self.sub_constructimage(images, labels, ids, len(self.random_background) > 0)
def load_images_random(self, keys, probs):
max_count = 64
angle_max = 15.0
images = np.zeros([height, width], dtype=np.float32)
keymap = np.zeros([height // scale, width // scale], dtype=np.float32)
xsizes = np.zeros([height // scale, width // scale], dtype=np.float32)
ysizes = np.zeros([height // scale, width // scale], dtype=np.float32)
offsetx = np.zeros([height // scale, width // scale], dtype=np.float32)
offsety = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = np.zeros([height // scale, width // scale], dtype=np.float32)
seps = np.zeros([height // scale, width // scale], dtype=np.float32)
ids = np.zeros([height // scale, width // scale], dtype=np.int32)
angle = angle_max * np.random.normal() / 180 * np.pi
angle = np.clip(angle, -np.pi, np.pi)
pad_x = np.random.normal() * width / 20
pad_y = np.random.normal() * height / 20
selection = [key for key in random.choices(keys, k=np.random.randint(2,max_count), weights=probs)]
i = 0
boxprev = np.zeros([0, 4])
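# With 10% probability, draw one solid rule touching an image edge: mark it in the
# `seps` map, burn it into the page image, and record an exclusion box (with margin)
# in boxprev so later glyphs are not placed on top of it.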
if random.random() < 0.1:
margin = 20
line_c = random.randint(0,3)
lw = random.randint(2, 10)
if line_c == 0:
x = random.randrange(width // 2, width)
y = random.randrange(0, height - lw)
px = x // scale
py = y // scale
seps[py:py+lw//scale+1, :px] = 1
images[y:y+lw, :x] = 255
boxprev = np.concatenate([boxprev, [[0, (x + margin)/width, (y - margin)/height, (y+lw + margin)/height]]])
elif line_c == 1:
x = random.randrange(0, width // 2)
y = random.randrange(0, height - lw)
px = x // scale
py = y // scale
seps[py:py+lw//scale+1, px:] = 1
images[y:y+lw, x:] = 255
boxprev = np.concatenate([boxprev, [[(x - margin)/width, 1, (y - margin)/height, (y+lw + margin)/height]]])
elif line_c == 2:
y = random.randrange(height // 2, height)
x = random.randrange(0, width - lw)
px = x // scale
py = y // scale
seps[:py, px:px+lw//scale+1] = 1
images[:y, x:x+lw] = 255
boxprev = np.concatenate([boxprev, [[(x - margin)/width, (x+lw + margin)/width, 0, (y + margin)/height]]])
elif line_c == 3:
y = random.randrange(0, height // 2)
x = random.randrange(0, width - lw)
px = x // scale
py = y // scale
seps[py:, px:px+lw//scale+1] = 1
images[y:, x:x+lw] = 255
boxprev = np.concatenate([boxprev, [[(x - margin)/width, (x+lw + margin)/width, (y - margin)/height, 1]]])
if random.random() < 0.5:
min_pixel = 20
max_pixel = width
else:
min_pixel = 20
max_pixel = width // 3
for key in selection:
item = self.img_cache[key]['horizontal']
if item['width'] * item['rows'] == 0:
continue
if random.random() < 0.5:
tile_size = random.randint(min_pixel, max_pixel)
else:
tile_size = int(np.exp(random.uniform(np.log(min_pixel), np.log(max_pixel))))
w = item['width'] / 128 * tile_size
h = item['rows'] / 128 * tile_size
aspects = np.clip(np.random.normal() * 0.1 + 1.0, 0.75, 1.25)
if random.random() < 0.5:
aspects = 1.0 / aspects
w *= aspects
h /= aspects
tile_left = random.randint(0, int(width - tile_size))
tile_top = random.randint(0, int(height - tile_size))
if tile_top + h >= height or tile_left + w >= width:
continue
left = tile_left / width
top = tile_top / height
w = w / width
h = h / height
cx = left + w/2
cy = top + h/2
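# Reject this glyph if its slightly inflated box overlaps anything placed so far;
# inverted glyphs reserve a larger 1.25x box because that whole box is later
# video-inverted (255 - pixel) to simulate white-on-black text.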
if np.random.uniform() < 0.1:
invert = True
x1 = cx - w/2 * 1.25
x2 = cx + w/2 * 1.25
y1 = cy - h/2 * 1.25
y2 = cy + h/2 * 1.25
inter_xmin = np.maximum(boxprev[:,0], x1)
inter_ymin = np.maximum(boxprev[:,2], y1)
inter_xmax = np.minimum(boxprev[:,1], x2)
inter_ymax = np.minimum(boxprev[:,3], y2)
else:
invert = False
inter_xmin = np.maximum(boxprev[:,0], cx - w/2 * 1.1)
inter_ymin = np.maximum(boxprev[:,2], cy - h/2 * 1.1)
inter_xmax = np.minimum(boxprev[:,1], cx + w/2 * 1.1)
inter_ymax = np.minimum(boxprev[:,3], cy + h/2 * 1.1)
inter_w = np.maximum(inter_xmax - inter_xmin, 0.)
inter_h = np.maximum(inter_ymax - inter_ymin, 0.)
inter_vol = inter_w * inter_h
if np.any(inter_vol > 0):
continue
kernel_size = max(self.min_ker, int(max(w, h) / (2 * scale) * width))
std_x = min(self.max_std, max(self.min_ker, w / (2 * scale) * width) / 3)
std_y = min(self.max_std, max(self.min_ker, h / (2 * scale) * height) / 3)
center_kernel = gaussian_kernel(kernlen=kernel_size*2+1, xstd=std_x, ystd=std_y)
center_xmin = int(cx / scale * width) - kernel_size
center_xmax = int(cx / scale * width) + kernel_size + 1
center_ymin = int(cy / scale * height) - kernel_size
center_ymax = int(cy / scale * height) + kernel_size + 1
padx1 = max(0, 0 - center_xmin)
padx2 = max(0, center_xmax - width // scale)
pady1 = max(0, 0 - center_ymin)
pady2 = max(0, center_ymax - height // scale)
center_xmin += padx1
center_xmax -= padx2
center_ymin += pady1
center_ymax -= pady2
ker = kernel_size * 2 + 1
keymap[center_ymin:center_ymax, center_xmin:center_xmax] = np.maximum(keymap[center_ymin:center_ymax, center_xmin:center_xmax], center_kernel[pady1:ker-pady2,padx1:ker-padx2])
size_xmin = np.clip(int((cx - w/2) * width / scale), 0, width // scale)
size_xmax = np.clip(int((cx + w/2) * width / scale) + 1, 0, width // scale)
size_ymin = np.clip(int((cy - h/2) * height / scale), 0, height // scale)
size_ymax = np.clip(int((cy + h/2) * height / scale) + 1, 0, height // scale)
size_mapx, size_mapy = np.meshgrid(np.arange(size_xmin, size_xmax) - cx * width / scale, np.arange(size_ymin, size_ymax) - cy * height / scale)
size_map = size_mapx ** 2 / max(w/2 * width / scale, 1) ** 2 + size_mapy ** 2 / max(h/2 * height / scale, 1) ** 2 < 1
center_x = int(cx / scale * width)
center_y = int(cy / scale * height)
offset_x = (cx * width % scale) / width * np.cos(angle)
offset_y = (cy * height % scale) / height * np.sin(angle + np.pi / 2)
offset_x += pad_x % scale
offset_y += pad_y % scale
offset_x = offset_x / scale - (np.arange(size_xmin, size_xmax) - center_x) * np.cos(angle)
offset_y = offset_y / scale - (np.arange(size_ymin, size_ymax) - center_y) * np.sin(angle + np.pi / 2)
offset_x = offset_x[np.newaxis,...] - np.linspace(-(size_ymax-size_ymin) * np.sin(angle) / 2, (size_ymax-size_ymin) * np.sin(angle) / 2, size_ymax-size_ymin)[...,np.newaxis]
offset_y = offset_y[...,np.newaxis] - np.linspace(-(size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, (size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, size_xmax-size_xmin)[np.newaxis,...]
offsetx[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_x, offsetx[size_ymin:size_ymax, size_xmin:size_xmax])
offsety[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_y, offsety[size_ymin:size_ymax, size_xmin:size_xmax])
fixw = w * np.abs(np.cos(angle)) + h * np.abs(np.sin(angle))
fixh = h * np.abs(np.cos(angle)) + w * np.abs(np.sin(angle))
fixw = np.log10(fixw * 10)
fixh = np.log10(fixh * 10)
xsizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixw, xsizes[size_ymin:size_ymax, size_xmin:size_xmax])
ysizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixh, ysizes[size_ymin:size_ymax, size_xmin:size_xmax])
id_char = self.glyph_id[key[1]]
ids[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, id_char, ids[size_ymin:size_ymax, size_xmin:size_xmax])
if invert:
boxprev = np.concatenate([boxprev, [[x1, x2, y1, y2]]])
else:
boxprev = np.concatenate([boxprev, [[cx - w/2, cx + w/2, cy - h/2, cy + h/2]]])
w = max(int(item['width'] / 128 * tile_size * aspects), 1)
h = max(int(item['rows'] / 128 * tile_size / aspects), 1)
im = np.asarray(Image.fromarray(item['image']).resize((w,h)))
images[tile_top:tile_top+h,tile_left:tile_left+w] = np.maximum(
images[tile_top:tile_top+h,tile_left:tile_left+w],
im)
if invert:
x1 = int(x1 * width)
x2 = int(x2 * width)
y1 = int(y1 * height)
y2 = int(y2 * height)
crop = images[y1:y2,x1:x2]
images[y1:y2,x1:x2] = 255 - crop
i += 1
im = Image.fromarray(images).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x, pad_y))
keymapim1 = Image.fromarray(keymap).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
keymapim2 = Image.fromarray(keymap).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
keymapim = np.maximum(keymapim1, keymapim2)
xsizeim = Image.fromarray(xsizes).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
ysizeim = Image.fromarray(ysizes).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
xoffsetim = Image.fromarray(offsetx).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
yoffsetim = Image.fromarray(offsety).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
sepim = Image.fromarray(seps).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
labels = np.stack([keymapim, xsizeim, ysizeim, xoffsetim, yoffsetim, lines, sepim], -1)
idsim = Image.fromarray(ids).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
images = np.asarray(im) / 255.
ids = np.asarray(idsim)
images = apply_random_filter(images)
return self.sub_constructimage(images, labels, ids, len(self.random_background) > 0)
def load_images_fill(self, keys, fonts):
max_count = 64
angle_max = 15.0
min_pixel = 24
max_pixel = 200
if random.random() < 0.5:
tile_size = random.randint(min_pixel, max_pixel)
else:
tile_size = int(np.exp(random.uniform(np.log(min_pixel), np.log(max_pixel))))
images = np.zeros([height, width], dtype=np.float32)
keymap = np.zeros([height // scale, width // scale], dtype=np.float32)
xsizes = np.zeros([height // scale, width // scale], dtype=np.float32)
ysizes = np.zeros([height // scale, width // scale], dtype=np.float32)
offsetx = np.zeros([height // scale, width // scale], dtype=np.float32)
offsety = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = Image.fromarray(lines)
draw = ImageDraw.Draw(lines)
seps = np.zeros([height // scale, width // scale], dtype=np.float32)
ids = np.zeros([height // scale, width // scale], dtype=np.int32)
tile_left = 0
tile_base = 0
angle = angle_max * np.random.normal() / 180 * np.pi
if np.random.rand() < 0.5:
angle -= np.pi / 2
angle = np.clip(angle, -np.pi, np.pi)
pad_x = np.random.normal() * width / 20
pad_y = np.random.normal() * height / 20
aspects = np.clip(np.random.normal() * 0.1 + 1.0, 0.75, 1.25)
if random.random() < 0.5:
aspects = 1.0 / aspects
select_font = random.choice(fonts)
probs = [1. if is_Font_match(key[0], select_font) else 0. for key in keys]
selection = [key for key in random.choices(keys, k=np.random.randint(2,max_count), weights=probs)]
boxprev = np.zeros([0, 4])
if random.random() < 0.1:
margin = 20
line_c = random.randint(0,3)
lw = random.randint(2, 10)
if line_c == 0:
x = random.randrange(width // 2, width)
y = random.randrange(0, height - lw)
px = x // scale
py = y // scale
seps[py:py+lw//scale+1, :px] = 1
images[y:y+lw, :x] = 255
boxprev = np.concatenate([boxprev, [[0, (x + margin)/width, (y - margin)/height, (y+lw + margin)/height]]])
elif line_c == 1:
x = random.randrange(0, width // 2)
y = random.randrange(0, height - lw)
px = x // scale
py = y // scale
seps[py:py+lw//scale+1, px:] = 1
images[y:y+lw, x:] = 255
boxprev = np.concatenate([boxprev, [[(x - margin)/width, 1, (y - margin)/height, (y+lw + margin)/height]]])
elif line_c == 2:
y = random.randrange(height // 2, height)
x = random.randrange(0, width - lw)
px = x // scale
py = y // scale
seps[:py, px:px+lw//scale+1] = 1
images[:y, x:x+lw] = 255
boxprev = np.concatenate([boxprev, [[(x - margin)/width, (x+lw + margin)/width, 0, (y + margin)/height]]])
elif line_c == 3:
y = random.randrange(0, height // 2)
x = random.randrange(0, width - lw)
px = x // scale
py = y // scale
seps[py:, px:px+lw//scale+1] = 1
images[y:, x:x+lw] = 255
boxprev = np.concatenate([boxprev, [[(x - margin)/width, (x+lw + margin)/width, (y - margin)/height, 1]]])
cont_n = random.randint(1,21)
cut_p = 10 ** (np.log10(0.5) / cont_n)
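# cut_p is chosen so that a run of cont_n consecutive glyphs survives every
# per-glyph cut test with overall probability 0.5 (cut_p ** cont_n == 0.5).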
i = 0
replace = True
prev = 0
prev_center = None
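# Run bookkeeping: `replace` starts a new run at a random position, `prev` counts
# (as a negative number) how many glyphs of the current run are already placed so
# they can be excluded from the overlap test, and `prev_center` is the previous
# glyph centre used to draw the text centre-line into `lines`.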
for key in selection:
item = self.img_cache[key]['horizontal']
if item['width'] * item['rows'] == 0:
continue
w = item['width'] / 128 * tile_size
h = item['rows'] / 128 * tile_size
w *= aspects
h /= aspects
horiBearingX = item['horiBearingX'] / 128 * tile_size * aspects
horiBearingY = item['horiBearingY'] / 128 * tile_size / aspects
horiAdvance = item['horiAdvance'] / 128 * tile_size * aspects
if replace:
invert = np.random.uniform() < 0.1
l = max(0,-int(horiBearingX))
tile_left = random.randint(l,
int(width - tile_size)) if int(width - tile_size) > l else l
tile_base = random.randint(tile_size,
int(height - tile_size)) if int(height - tile_size) > tile_size else tile_size
if tile_base - horiBearingY < 0 or tile_base - horiBearingY + h >= height or tile_left + horiBearingX < 0 or tile_left + horiBearingX + w >= width:
replace = True
prev = 0
prev_center = None
continue
l = (tile_left + horiBearingX) / width
t = (tile_base - horiBearingY) / height
w = w / width
h = h / height
cx = l + w / 2
cy = t + h / 2
if invert:
x1 = tile_left / width
x2 = (tile_left + horiAdvance) / width
y1 = cy - h/2 * 1.25
y2 = cy + h/2 * 1.25
if prev >= 0:
inter_xmin = np.maximum(boxprev[:,0], x1)
inter_ymin = np.maximum(boxprev[:,2], y1)
inter_xmax = np.minimum(boxprev[:,1], x2)
inter_ymax = np.minimum(boxprev[:,3], y2)
else:
# When glyphs are rendered as a consecutive run, they may overlap the previous character, so exclude it from the overlap check
inter_xmin = np.maximum(boxprev[:prev,0], x1)
inter_ymin = np.maximum(boxprev[:prev,2], y1)
inter_xmax = np.minimum(boxprev[:prev,1], x2)
inter_ymax = np.minimum(boxprev[:prev,3], y2)
else:
if prev >= 0:
inter_xmin = np.maximum(boxprev[:,0], cx - w/2)
inter_ymin = np.maximum(boxprev[:,2], cy - h/2)
inter_xmax = np.minimum(boxprev[:,1], cx + w/2)
inter_ymax = np.minimum(boxprev[:,3], cy + h/2)
else:
# When glyphs are rendered as a consecutive run, they may overlap the previous character, so exclude it from the overlap check
inter_xmin = np.maximum(boxprev[:prev,0], cx - w/2)
inter_ymin = np.maximum(boxprev[:prev,2], cy - h/2)
inter_xmax = np.minimum(boxprev[:prev,1], cx + w/2)
inter_ymax = np.minimum(boxprev[:prev,3], cy + h/2)
inter_w = np.maximum(inter_xmax - inter_xmin, 0.)
inter_h = np.maximum(inter_ymax - inter_ymin, 0.)
inter_vol = inter_w * inter_h
if np.any(inter_vol > 0):
replace = True
prev = 0
prev_center = None
continue
if prev < 0:
draw.line((prev_center, (cx * width // scale, tile_base // scale)), fill=255, width=3)
prev_center = (cx * width // scale, tile_base // scale)
kernel_size = max(self.min_ker, int(max(w, h) / (2 * scale) * width))
std_x = min(self.max_std, max(self.min_ker, w / (2 * scale) * width) / 3)
std_y = min(self.max_std, max(self.min_ker, h / (2 * scale) * height) / 3)
center_kernel = gaussian_kernel(kernlen=kernel_size*2+1, xstd=std_x, ystd=std_y)
center_xmin = int(cx / scale * width) - kernel_size
center_xmax = int(cx / scale * width) + kernel_size + 1
center_ymin = int(cy / scale * height) - kernel_size
center_ymax = int(cy / scale * height) + kernel_size + 1
padx1 = max(0, 0 - center_xmin)
padx2 = max(0, center_xmax - width // scale)
pady1 = max(0, 0 - center_ymin)
pady2 = max(0, center_ymax - height // scale)
center_xmin += padx1
center_xmax -= padx2
center_ymin += pady1
center_ymax -= pady2
ker = kernel_size * 2 + 1
keymap[center_ymin:center_ymax, center_xmin:center_xmax] = np.maximum(keymap[center_ymin:center_ymax, center_xmin:center_xmax], center_kernel[pady1:ker-pady2,padx1:ker-padx2])
size_xmin = np.clip(int((cx - w/2) * width / scale), 0, width // scale)
size_xmax = np.clip(int((cx + w/2) * width / scale) + 1, 0, width // scale)
size_ymin = np.clip(int((cy - h/2) * height / scale), 0, height // scale)
size_ymax = np.clip(int((cy + h/2) * height / scale) + 1, 0, height // scale)
size_mapx, size_mapy = np.meshgrid(np.arange(size_xmin, size_xmax) - cx * width / scale, np.arange(size_ymin, size_ymax) - cy * height / scale)
size_map = size_mapx ** 2 / max(w/2 * width / scale, 1) ** 2 + size_mapy ** 2 / max(h/2 * height / scale, 1) ** 2 < 1
center_x = int(cx / scale * width)
center_y = int(cy / scale * height)
if angle < -np.pi / 4:
offset_y = (cx * width % scale) / width * np.cos(-(angle + np.pi / 2))
offset_x = -(cy * height % scale) / height * np.sin(-(angle + np.pi / 2) + np.pi / 2)
offset_x += pad_x % scale
offset_y += pad_y % scale
offset_x = offset_x / scale + (np.arange(size_ymin, size_ymax) - center_y) * np.sin(-(angle + np.pi / 2) + np.pi / 2)
offset_y = offset_y / scale - (np.arange(size_xmin, size_xmax) - center_x) * np.cos(-(angle + np.pi / 2))
offset_y = offset_y[np.newaxis,...] + np.linspace(-(size_ymax-size_ymin) * np.sin(-(angle + np.pi / 2)) / 2, (size_ymax-size_ymin) * np.sin(-(angle + np.pi / 2)) / 2, size_ymax-size_ymin)[...,np.newaxis]
offset_x = offset_x[...,np.newaxis] - np.linspace(-(size_xmax-size_xmin) * np.cos(-(angle + np.pi/ 2) + np.pi / 2) / 2, (size_xmax-size_xmin) * np.cos(-(angle + np.pi/ 2) + np.pi / 2) / 2, size_xmax-size_xmin)[np.newaxis,...]
else:
offset_x = (cx * width % scale) / width * np.sin(angle)
offset_y = (cy * height % scale) / height * np.sin(angle + np.pi / 2)
offset_x += pad_x % scale
offset_y += pad_y % scale
offset_x = offset_x / scale - (np.arange(size_xmin, size_xmax) - center_x) * np.cos(angle)
offset_y = offset_y / scale - (np.arange(size_ymin, size_ymax) - center_y) * np.sin(angle + np.pi / 2)
offset_x = offset_x[np.newaxis,...] - np.linspace(-(size_ymax-size_ymin) * np.sin(angle) / 2, (size_ymax-size_ymin) * np.sin(angle) / 2, size_ymax-size_ymin)[...,np.newaxis]
offset_y = offset_y[...,np.newaxis] - np.linspace(-(size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, (size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, size_xmax-size_xmin)[np.newaxis,...]
offsetx[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_x, offsetx[size_ymin:size_ymax, size_xmin:size_xmax])
offsety[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_y, offsety[size_ymin:size_ymax, size_xmin:size_xmax])
fixw = w * np.abs(np.cos(angle)) + h * np.abs(np.sin(angle))
fixh = h * np.abs(np.cos(angle)) + w * np.abs(np.sin(angle))
fixw = np.log10(fixw * 10)
fixh = np.log10(fixh * 10)
xsizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixw, xsizes[size_ymin:size_ymax, size_xmin:size_xmax])
ysizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixh, ysizes[size_ymin:size_ymax, size_xmin:size_xmax])
id_char = self.glyph_id[key[1]]
ids[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, id_char, ids[size_ymin:size_ymax, size_xmin:size_xmax])
if invert:
boxprev = np.concatenate([boxprev, [[x1, x2, y1, y2]]])
else:
boxprev = np.concatenate([boxprev, [[cx - w/2, cx + w/2, cy - h/2, cy + h/2]]])
w = max(int(item['width'] / 128 * tile_size * aspects), 1)
h = max(int(item['rows'] / 128 * tile_size / aspects), 1)
tile_top = int(tile_base - horiBearingY)
l = int(tile_left + horiBearingX)
im = np.asarray(Image.fromarray(item['image']).resize((w,h)))
if invert:
im_inv = 255 - im
x1 = int(x1 * width)
x2 = int(x2 * width)
y1 = int(y1 * height)
y2 = int(y2 * height)
images[y1:y2,x1:x2] = 255
images[tile_top:tile_top+h,l:l+w] = np.minimum(
images[tile_top:tile_top+h,l:l+w],
im_inv)
else:
images[tile_top:tile_top+h,l:l+w] = np.maximum(
images[tile_top:tile_top+h,l:l+w],
im)
i += 1
if random.random() > cut_p:
replace = True
prev_center = None
prev = 0
else:
tile_left += int(horiAdvance)
replace = False
if tile_left >= width - tile_size:
replace = True
prev_center = None
prev = 0
else:
prev -= 1
im = Image.fromarray(images).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x, pad_y))
lines = lines.rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
keymapim1 = Image.fromarray(keymap).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
keymapim2 = Image.fromarray(keymap).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
keymapim = np.maximum(keymapim1, keymapim2)
xsizeim = Image.fromarray(xsizes).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
ysizeim = Image.fromarray(ysizes).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
xoffsetim = Image.fromarray(offsetx).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
yoffsetim = Image.fromarray(offsety).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
sepim = Image.fromarray(seps).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
labels = np.stack([keymapim, xsizeim, ysizeim, xoffsetim, yoffsetim, np.asarray(lines) / 255., sepim], -1)
idsim = Image.fromarray(ids).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
images = np.asarray(im) / 255.
ids = np.asarray(idsim)
images = apply_random_filter(images)
return self.sub_constructimage(images, labels, ids, len(self.random_background) > 0)
def load_images_randomline(self, keys, probs):
max_count = 64
angle_max = 15.0
min_pixel = 20
max_pixel = 250
images = np.zeros([height, width], dtype=np.float32)
keymap = np.zeros([height // scale, width // scale], dtype=np.float32)
xsizes = np.zeros([height // scale, width // scale], dtype=np.float32)
ysizes = np.zeros([height // scale, width // scale], dtype=np.float32)
offsetx = np.zeros([height // scale, width // scale], dtype=np.float32)
offsety = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = Image.fromarray(lines)
draw = ImageDraw.Draw(lines)
seps = np.zeros([height // scale, width // scale], dtype=np.float32)
ids = np.zeros([height // scale, width // scale], dtype=np.int32)
angle = angle_max * np.random.normal() / 180 * np.pi
angle = np.clip(angle, -np.pi, np.pi)
pad_x = np.random.normal() * width / 20
pad_y = np.random.normal() * height / 20
selection = [key for key in random.choices(keys, k=np.random.randint(2,max_count), weights=probs)]
boxprev = np.zeros([0, 4])
if random.random() < 0.1:
margin = 20
line_c = random.randint(0,3)
lw = random.randint(2, 10)
if line_c == 0:
x = random.randrange(width // 2, width)
y = random.randrange(0, height - lw)
px = x // scale
py = y // scale
seps[py:py+lw//scale+1, :px] = 1
images[y:y+lw, :x] = 255
boxprev = np.concatenate([boxprev, [[0, (x + margin)/width, (y - margin)/height, (y+lw + margin)/height]]])
elif line_c == 1:
x = random.randrange(0, width // 2)
y = random.randrange(0, height - lw)
px = x // scale
py = y // scale
seps[py:py+lw//scale+1, px:] = 1
images[y:y+lw, x:] = 255
boxprev = np.concatenate([boxprev, [[(x - margin)/width, 1, (y - margin)/height, (y+lw + margin)/height]]])
elif line_c == 2:
y = random.randrange(height // 2, height)
x = random.randrange(0, width - lw)
px = x // scale
py = y // scale
seps[:py, px:px+lw//scale+1] = 1
images[:y, x:x+lw] = 255
boxprev = np.concatenate([boxprev, [[(x - margin)/width, (x+lw + margin)/width, 0, (y + margin)/height]]])
elif line_c == 3:
y = random.randrange(0, height // 2)
x = random.randrange(0, width - lw)
px = x // scale
py = y // scale
seps[py:, px:px+lw//scale+1] = 1
images[y:, x:x+lw] = 255
boxprev = np.concatenate([boxprev, [[(x - margin)/width, (x+lw + margin)/width, (y - margin)/height, 1]]])
i = 0
replace = True
prev = 0
prev_center = None
for key in selection:
if replace:
invert = np.random.uniform() < 0.1
if self.glyph_type.get(self.glyph_id[key[1]],11) in [3,4,5,8,9,10,11]:
direction = random.randint(0,1) * 2
else:
p = random.random()
if p < 0.4:
direction = 0
elif p < 0.8:
direction = 2
else:
direction = 1
aspects = np.clip(np.random.normal() * 0.1 + 1.0, 0.75, 1.25)
if random.random() < 0.5:
aspects = 1.0 / aspects
if direction < 2:
item = self.img_cache[key]['horizontal']
else:
item = self.img_cache[key]['vertical']
if item['width'] * item['rows'] == 0:
continue
if self.glyph_type.get(self.glyph_id[key[1]],11) in [3,4,5,8,9,10,11]:
if direction == 1:
continue
if replace:
if random.random() < 0.5:
tile_size = random.randint(min_pixel, max_pixel)
else:
tile_size = int(np.exp(random.uniform(np.log(min_pixel), np.log(max_pixel))))
if direction < 2:
w = item['width'] / 128 * tile_size
h = item['rows'] / 128 * tile_size
w *= aspects
h /= aspects
horiBearingX = item['horiBearingX'] / 128 * tile_size * aspects
horiBearingY = item['horiBearingY'] / 128 * tile_size / aspects
horiAdvance = item['horiAdvance'] / 128 * tile_size * aspects
else:
w = item['width'] / 128 * tile_size
h = item['rows'] / 128 * tile_size
w *= aspects
h /= aspects
vertBearingX = item['vertBearingX'] / 128 * tile_size * aspects
vertBearingY = item['vertBearingY'] / 128 * tile_size / aspects
vertAdvance = item['vertAdvance'] / 128 * tile_size / aspects
if direction == 0:
if replace:
l = max(0,-int(horiBearingX))
tile_left = random.randint(l,
int(width - tile_size)) if int(width - tile_size) > l else l
tile_base = random.randint(tile_size,
int(height - tile_size)) if int(height - tile_size) > tile_size else tile_size
if tile_base - horiBearingY < 0 or tile_base - horiBearingY + h >= height or tile_left + horiBearingX < 0 or tile_left + horiBearingX + w >= width:
replace = True
prev_center = None
prev = 0
continue
l = (tile_left + horiBearingX) / width
t = (tile_base - horiBearingY) / height
w = w / width
h = h / height
cx = l + w / 2
cy = t + h / 2
if invert:
x1 = tile_left / width
x2 = (tile_left + horiAdvance) / width
y1 = cy - h/2 * 1.25
y2 = cy + h/2 * 1.25
if prev >= 0:
inter_xmin = np.maximum(boxprev[:,0], x1)
inter_ymin = np.maximum(boxprev[:,2], y1)
inter_xmax = np.minimum(boxprev[:,1], x2)
inter_ymax = np.minimum(boxprev[:,3], y2)
else:
# When glyphs are rendered as a consecutive run, they may overlap the previous character, so exclude it from the overlap check
inter_xmin = np.maximum(boxprev[:prev,0], x1)
inter_ymin = np.maximum(boxprev[:prev,2], y1)
inter_xmax = np.minimum(boxprev[:prev,1], x2)
inter_ymax = np.minimum(boxprev[:prev,3], y2)
else:
if prev >= 0:
inter_xmin = np.maximum(boxprev[:,0], cx - w/2 * 1.2)
inter_ymin = np.maximum(boxprev[:,2], cy - h/2 * 1.2)
inter_xmax = np.minimum(boxprev[:,1], cx + w/2 * 1.2)
inter_ymax = np.minimum(boxprev[:,3], cy + h/2 * 1.2)
else:
# When glyphs are rendered as a consecutive run, they may overlap the previous character, so exclude it from the overlap check
inter_xmin = np.maximum(boxprev[:prev,0], cx - w/2 * 1.2)
inter_ymin = np.maximum(boxprev[:prev,2], cy - h/2 * 1.2)
inter_xmax = np.minimum(boxprev[:prev,1], cx + w/2 * 1.2)
inter_ymax = np.minimum(boxprev[:prev,3], cy + h/2 * 1.2)
inter_w = np.maximum(inter_xmax - inter_xmin, 0.)
inter_h = np.maximum(inter_ymax - inter_ymin, 0.)
inter_vol = inter_w * inter_h
if np.any(inter_vol > 0):
replace = True
prev_center = None
prev = 0
continue
if prev < 0:
draw.line((prev_center, (cx * width // scale, tile_base // scale)), fill=255, width=3)
prev_center = (cx * width // scale, tile_base // scale)
kernel_size = max(self.min_ker, int(max(w, h) / (2 * scale) * width))
std_x = min(self.max_std, max(self.min_ker, w / (2 * scale) * width) / 3)
std_y = min(self.max_std, max(self.min_ker, h / (2 * scale) * height) / 3)
center_kernel = gaussian_kernel(kernlen=kernel_size*2+1, xstd=std_x, ystd=std_y)
center_xmin = int(cx / scale * width) - kernel_size
center_xmax = int(cx / scale * width) + kernel_size + 1
center_ymin = int(cy / scale * height) - kernel_size
center_ymax = int(cy / scale * height) + kernel_size + 1
padx1 = max(0, 0 - center_xmin)
padx2 = max(0, center_xmax - width // scale)
pady1 = max(0, 0 - center_ymin)
pady2 = max(0, center_ymax - height // scale)
center_xmin += padx1
center_xmax -= padx2
center_ymin += pady1
center_ymax -= pady2
ker = kernel_size * 2 + 1
keymap[center_ymin:center_ymax, center_xmin:center_xmax] = np.maximum(keymap[center_ymin:center_ymax, center_xmin:center_xmax], center_kernel[pady1:ker-pady2,padx1:ker-padx2])
size_xmin = np.clip(int((cx - w/2) * width / scale), 0, width // scale)
size_xmax = np.clip(int((cx + w/2) * width / scale) + 1, 0, width // scale)
size_ymin = np.clip(int((cy - h/2) * height / scale), 0, height // scale)
size_ymax = np.clip(int((cy + h/2) * height / scale) + 1, 0, height // scale)
size_mapx, size_mapy = np.meshgrid(
|
np.arange(size_xmin, size_xmax)
|
numpy.arange
|
import copy, os
import numpy as np
import torch
import torch.nn.functional as F
from torchvision import datasets, transforms
from options import args_parser
from Update import LocalUpdate
from Fed import *
import random, time, pickle, math
import pdb
import scipy.io
# parse args
args = args_parser()
args.device = 'cuda' if torch.cuda.is_available() else 'cpu'
if args.arch == 'resnet18':
from resnet import *
elif args.arch == 'shufflenet':
from shufflenetv2 import *
def cifar_noniid(dataset, num_users):
"""
Sample non-I.I.D. client data from the CIFAR dataset.
"""
lenRandom = 40000
num_items = int(lenRandom/num_users)
dict_users, all_idxs = {}, [i for i in range(len(dataset))]
for ii in range(num_users):
dict_users[ii] = set(np.random.choice(all_idxs, num_items, replace=False))
all_idxs = list(set(all_idxs) - dict_users[ii])
labels = np.array(dataset.targets)
labels = labels[all_idxs]
# sort labels
idxs = np.arange(len(labels))
idxs_labels =
|
np.vstack((idxs, labels))
|
numpy.vstack
|
# -*- coding: utf-8 -*-
# Author: <NAME> <<EMAIL>>
# License: BSD 3 clause
"""
Unitary tests for bigfish.stack.postprocess module.
"""
import pytest
import numpy as np
import bigfish.stack as stack
from numpy.testing import assert_array_equal
# TODO add test bigfish.stack.extract_cell
# TODO add test bigfish.stack.extract_spots_from_frame
# TODO add test bigfish.stack.center_mask_coord
# TODO add test bigfish.stack.from_boundaries_to_surface
# TODO add test bigfish.stack.from_surface_to_boundaries
# TODO add test bigfish.stack.from_binary_to_coord
# TODO add test bigfish.stack.complete_coord_boundaries
# TODO add test bigfish.stack.from_coord_to_frame
# TODO add test bigfish.stack.from_coord_to_surface
@pytest.mark.parametrize("ndim", [2, 3])
@pytest.mark.parametrize("mask_dtype", [
np.uint8, np.uint16, np.int64, bool])
@pytest.mark.parametrize("spot_dtype", [
np.int64, np.float64])
def test_identify_objects_in_region(ndim, mask_dtype, spot_dtype):
# simulate mask and coordinates
mask = np.zeros((10, 10), dtype=mask_dtype)
mask[1:4, 1:5] = np.ones((3, 4), dtype=mask_dtype)
spots_in = [[2, 1, 1], [5, 2, 2], [2, 3, 1], [9, 3, 4]]
spots_in = np.array(spots_in, dtype=spot_dtype)
spots_out = [[1, 0, 0], [3, 7, 2], [2, 1, 8], [7, 8, 8]]
spots_out = np.array(spots_out, dtype=spot_dtype)
if ndim == 2:
spots_in = spots_in[:, 1:]
spots_out = spots_out[:, 1:]
spots = np.concatenate((spots_in, spots_out))
# test
spots_in_, spots_out_ = stack.identify_objects_in_region(mask, spots, ndim)
assert_array_equal(spots_in_, spots_in)
assert_array_equal(spots_out_, spots_out)
assert spots_in_.dtype == spots_in.dtype
assert spots_out_.dtype == spots_out.dtype
@pytest.mark.parametrize("ndim", [2, 3])
@pytest.mark.parametrize("mask_dtype", [
np.uint8, np.uint16, np.int64, bool])
@pytest.mark.parametrize("spot_dtype", [
np.int64, np.float64])
def test_remove_transcription_site(ndim, mask_dtype, spot_dtype):
# simulate mask and coordinates
nuc_mask = np.zeros((10, 10), dtype=mask_dtype)
nuc_mask[1:4, 1:5] =
|
np.ones((3, 4), dtype=mask_dtype)
|
numpy.ones
|
"""
Common geometry classes.
Author: <NAME> <<EMAIL>>
"""
import math
import numpy as np
class Ray(object):
"""Ray defined by an origin point and a direction."""
def __init__(self, origin, direction):
"""Create a ray from on origion and direction.
Parameters:
origin: vector3 giving the ray origin.
direction: vector3 giving ray direction.
"""
self.origin = np.array(origin)
self.direction = np.array(direction)
def intersect(self, plane, epsilon=0.00001):
"""
Compute the intersection of a ray and a plane.
Parameters:
plane: Plane to intersect with.
epsilon: tolerance of floating point comparisons.
Return:
vector3 giving position of the ray/plane intersection, or None
if the ray does not intersect the plane.
"""
den = np.dot(self.direction, plane.normal)
if math.fabs(den) < epsilon:
return None
result = (-plane.distance -
|
np.dot(plane.normal, self.origin)
|
numpy.dot
|
import numpy as np
from femsolution import FEMSolution
from helpers import *
from bicycle_wheel import *
from rigidbody import *
EL_RIM = 1
EL_SPOKE = 2
N_RIM = 1
N_HUB = 2
N_REF = 3
class BicycleWheelFEM:
"""Finite-element solver for performing stress analysis bicycle wheels.
Creates a finite-element model from a BicycleWheel object and solves the
linear elasticity equations K*u = f subject to constraints and boundary
conditions.
"""
def get_node_pos(self, node_id):
'Return the [X,Y,Z] position of a node as a NumPy ndarray.'
return np.array([self.x_nodes[node_id],
self.y_nodes[node_id],
self.z_nodes[node_id]])
def get_rim_nodes(self):
'Return node IDs of all nodes on the rim.'
return np.where(self.type_nodes == N_RIM)[0]
def get_hub_nodes(self):
'Return node IDs of all nodes on the hub.'
return np.where(self.type_nodes == N_HUB)[0]
def get_rim_elements(self):
'Return element IDs of all rim elements.'
return np.where(self.el_type == EL_RIM)[0]
def get_spoke_elements(self):
'Return element IDs of all spoke elements.'
return np.where(self.el_type == EL_SPOKE)[0]
def calc_spoke_stiff(self, el_id, s):
'Calculate stiffness matrix for a single spoke.'
n2 = self.el_n2[el_id]
nip_pt = pol2rect(s.rim_pt) # spoke nipple
hub_pt = pol2rect(s.hub_pt) # hub eyelet
rim_pt = self.get_node_pos(n2) # point on rim centroid
# Beam coordinate system
e1 = hub_pt - nip_pt # tangent vector
l = np.sqrt(e1.dot(e1))
e1 = e1 / l
e2 = np.cross(e1, np.array([0, 0, 1])) # normal vector
e2 = e2 / np.sqrt(e2.dot(e2))
e3 = np.cross(e1, e2) # second normal vector
# axial stiffness (normal)
k_n = s.EA / l
# tension stiffness (transverse). No negative tension-stiffness
k_t = max(0.0, self.el_prestress[el_id] / l)
# bending stiffness (transverse)
# Generally, bending stiffness is negligible. It is only present for
# numerical stability in the case of radial spokes (vanishing torsional
# stiffness).
k_b = 3 * s.EA * (s.diameter**2 / 16) / l**3
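# Rough magnitudes (illustrative only, assuming a plain-gauge 2.0 mm steel spoke:
# EA ~ 200e9 * pi * (1e-3)**2 ~ 6.3e5 N, l = 0.3 m, tension T = 1000 N):
#   k_n ~ 2.1e6 N/m,  k_t = T/l ~ 3.3e3 N/m,  k_b ~ 17 N/m,
# so the bending term sits roughly five orders of magnitude below the axial term.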
# Bar element stiffness matrix (FORCES ONLY) in beam coordinates
k_spoke = np.matrix(np.zeros((12, 12)))
k_spoke[0::6, 0::6] = k_n * np.matrix([[1, -1], [-1, 1]])
k_spoke[1::6, 1::6] = (k_t + k_b) * np.matrix([[1, -1], [-1, 1]])
k_spoke[2::6, 2::6] = (k_t + k_b) * np.matrix([[1, -1], [-1, 1]])
# rotation matrix to global coordinates
Tg = np.matrix(np.zeros((3, 3)))
Tg[:, 0] = e1.reshape((3, 1))
Tg[:, 1] = e2.reshape((3, 1))
Tg[:, 2] = e3.reshape((3, 1))
# Apply rotation matrix to each sub matrix
for i in range(4):
for j in range(4):
k_spoke[3*i:3*(i+1), 3*j:3*(j+1)] = \
Tg * k_spoke[3*i:3*(i+1), 3*j:3*(j+1)] * Tg.T
# Transformation matrices to account for spoke offset
r = nip_pt - rim_pt
Omega_r = skew_symm(r)
# Right-multiply k_spoke by C to transform from u_nip -> u_rim
for i in range(4):
k_spoke[3*i:3*(i+1), 9::] = k_spoke[3*i:3*(i+1), 9::] - \
Omega_r * k_spoke[3*i:3*(i+1), 6:9]
# Left-multiply k_spoke by B to transform from f_nip -> f_rim
for i in range(4):
k_spoke[9::, 3*i:3*(i+1)] = k_spoke[9::, 3*i:3*(i+1)] - \
Omega_r * k_spoke[6:9, 3*i:3*(i+1)]
return k_spoke
def calc_rim_stiff(self, el_id):
'Calculate stiffness matrix for a single rim element.'
n1 = self.el_n1[el_id]
n2 = self.el_n2[el_id]
# For details, see <NAME>, <NAME>,
# Computers and Structures, 4(21), pp. 663-669, 1985.
node1_pos = self.get_node_pos(n1)
node2_pos = self.get_node_pos(n2)
ref = np.array([0, 0, 0]) # reference point at wheel center
d = node2_pos - node1_pos # beam orientation vector
r1 = node1_pos - ref # radial vector to node 1
R = np.sqrt(r1.dot(r1)) # radius of curvature
# angle subtended by arc segment
phi0 = 2*np.arcsin(np.sqrt(d.dot(d)) / (2*R))
# local coordinate system
e1 = d / np.sqrt(d.dot(d)) # radial vector
e3 = np.array([0, 0, -1]) # axial vector
e2 = np.cross(e3, e1) # tangential vector
# Material and section properties
# Beam warping is neglected
A = self.wheel.rim.area
E = self.wheel.rim.young_mod
G = self.wheel.rim.shear_mod
I11 = self.wheel.rim.I11
I22 = self.wheel.rim.I22
I33 = self.wheel.rim.I33
K2 = 0 # shear flexibility constant (0 = Euler-Bernoulli beam)
K3 = 0 # shear flexibility constant (0 = Euler-Bernoulli beam)
# Constants
N = phi0 + np.sin(2*phi0)/2
B = phi0 - np.sin(2*phi0)/2
C = 3*phi0 + np.sin(2*phi0)/2 - 4*np.sin(phi0)
S = 0.75 - np.cos(phi0) + np.cos(2*phi0)/4
F = np.sin(phi0) - phi0
H = np.cos(phi0) - 1
V = 2*np.sin(phi0) - phi0 - np.sin(2*phi0)/2
D = np.cos(2*phi0)/2 - 0.5
# Initialize stiffness matrix
k_r = np.matrix(np.zeros((12, 12)))
# Flexibility matrix for node 1 DOFs
a = np.matrix(np.zeros((6, 6)))
a[0, 0] = R*N/(2*E*A) + K2*R*B/(2*G*A) + C*R**3/(2*E*I33)
a[0, 1] = R*D/(2*E*A) - K2*R*D/(2*G*A) + S*R**3/(E*I33)
a[1, 0] = a[0, 1]
a[1, 1] = B*R/(2*E*A) + K2*R*N/(2*G*A) + B*R**3/(2*E*I33)
a[0, 5] = F*R**2/(E*I33)
a[5, 0] = F*R**2/(E*I33)
a[1, 5] = H*R**2/(E*I33)
a[5, 1] = a[1, 5]
a[2, 2] = K3*R*phi0/(G*A) + C*R**3/(2*G*I11) + B*R**3/(2*E*I22)
a[2, 3] = R**2/2*(B/(E*I22) - V/(G*I11))
a[3, 2] = a[2, 3]
a[2, 4] = R**2/2*(2*S/(G*I11) - D/(E*I22))
a[4, 2] = a[2, 4]
a[3, 3] = R/2*(N/(G*I11) + B/(E*I22))
a[3, 4] = D*R/2*(1/(G*I11) - 1/(E*I22))
a[4, 3] = a[3, 4]
a[4, 4] = R/2*(B/(G*I11) + N/(E*I22))
a[5, 5] = R*phi0/(E*I33)
# Flexibility matrix for node 2 DOFs
b = a.copy()
b[0, 1] = -a[0, 1]
b[1, 0] = -a[1, 0]
b[1, 5] = -a[1, 5]
b[5, 1] = -a[5, 1]
b[2, 4] = -a[2, 4]
b[4, 2] = -a[4, 2]
b[3, 4] = -a[3, 4]
b[4, 3] = -a[4, 3]
# Transformation matrix from node 1 -> node 2
al = np.cos(phi0)
bt = np.sin(phi0)
Tbar = np.matrix([[-al, bt, 0, 0, 0, 0],
[-bt, -al, 0, 0, 0, 0],
[0, 0, -1, 0, 0, 0],
[0, 0, R*(1-al), -al, bt, 0],
[0, 0, -R*bt, -bt, -al, 0],
[R*(1-al), R*bt, 0, 0, 0, -1]])
# Transformation matrix from node 1 -> beam coordinates
Tb = np.matrix(np.zeros((6, 6)))
Tb[:3:, :3:] = np.matrix([[np.cos(phi0/2), -np.sin(phi0/2), 0],
[np.sin(phi0/2), np.cos(phi0/2), 0],
[0, 0, 1]])
Tb[3::, 3::] = Tb[:3:, :3:]
# Transformation matrix from beam coordinates to global coordinates
Tg = np.matrix(np.zeros((6, 6)))
Tg[:3:, :3:] = np.matrix(np.vstack((e1, e2, e3)).T)
Tg[3::, 3::] = Tg[:3:, :3:]
# Assemble submatrices
k_r[:6:, :6:] = np.linalg.inv(a) # K_II
k_r[6::, 6::] = np.linalg.inv(b) # K_JJ
k_r[6::, :6:] = Tbar * k_r[:6:, :6:] # K_JI
k_r[:6:, :6:] = Tg*Tb * k_r[:6:, :6:] * Tb.T*Tg.T
k_r[6::, 6::] = Tg*Tb.T * k_r[6::, 6::] * Tb *Tg.T
k_r[6::, :6:] = Tg*Tb.T * k_r[6::, :6:] * Tb.T*Tg.T
k_r[:6:, 6::] = k_r[6::,:6:].T # K_IJ (symm.)
return k_r
def calc_global_stiff(self):
'Calculate global stiffness matrix by element scatter algorithm.'
if self.verbose:
print('# Calculating global stiffness matrix ---')
print('# -- Nodes: {:d}'.format(self.n_nodes))
print('# -- DOFs : {:d}'.format(6*self.n_nodes))
print('# ---------------------------------------')
print('')
# Initialize empty stiffness matrix
self.k_global = np.matrix(np.zeros((6*self.n_nodes, 6*self.n_nodes)))
# Loop over all elements and scatter to global K matrix
for el in range(len(self.el_type)):
n1 = self.el_n1[el]
n2 = self.el_n2[el]
dofs = np.concatenate((6*n1 + np.arange(6), 6*n2 + np.arange(6)))
if self.el_type[el] == EL_RIM:
k_el = self.calc_rim_stiff(el)
elif self.el_type[el] == EL_SPOKE:
s = self.wheel.spokes[n1 - self.n_rim_nodes]
k_el = self.calc_spoke_stiff(el, s)
# Scatter to global matrix
self.k_global[np.ix_(dofs, dofs)] = self.k_global[dofs][:, dofs] +\
k_el
def calc_spoke_stress(self, el_id, u):
'Calculate tension in a spoke element.'
n1 = self.el_n1[el_id]
n2 = self.el_n2[el_id]
s_num = self.el_s_num[el_id]
s = self.wheel.spokes[s_num] # spoke object
# spoke vector
nip_pt = pol2rect(s.rim_pt) # spoke nipple
hub_pt = pol2rect(s.hub_pt) # hub eyelet
e1 = nip_pt - hub_pt
e1 = e1 / np.sqrt(e1.dot(e1))
dofs = np.concatenate((6*n1 + np.arange(6), 6*n2 + np.arange(6)))
k_el = self.calc_spoke_stiff(el_id, s)
u_el = u[dofs]
f_el = np.array(k_el.dot(u_el)).flatten()
# Generalized stress tuple:
# Tension
return (e1.dot(f_el[6:9]), )
def calc_rim_stress(self, el_id, u):
"""Calculate internal forces in a rim element.
Returns the internal forces at the first node of the rim element. The
internal forces are defined at the nodes (not integration points)
because the stiffness matrix is obtained by Castigliano's method.
Returns:
tuple:
0: axial force
1: transverse force (in-plane shear)
2: transverse force (out-of-plane shear)
3: twisting moment
4: bending moment (out-of-plane)
5: bending moment (in-plane)
"""
n1 = self.el_n1[el_id]
n2 = self.el_n2[el_id]
# Local coordinates system at node 1
n1_pos = self.get_node_pos(n1)
n2_pos = self.get_node_pos(n2)
e3_1 = np.cross(n1_pos, n2_pos) /\
np.sqrt(n1_pos.dot(n1_pos) * n2_pos.dot(n2_pos))
e1_1 = np.cross(n1_pos, e3_1) / np.sqrt(n1_pos.dot(n1_pos))
e2_1 = np.cross(e3_1, e1_1)
# Nodal degrees of freedom
dofs = np.concatenate((6*n1 + np.arange(6), 6*n2 + np.arange(6)))
# Calculate nodal forces
k_el = self.calc_rim_stiff(el_id)
u_el = u[dofs]
f_el = np.array(k_el.dot(u_el)).flatten()
return (e1_1.dot(f_el[0:3]), e2_1.dot(f_el[0:3]), e3_1.dot(f_el[0:3]),
e1_1.dot(f_el[3:6]), e2_1.dot(f_el[3:6]), e3_1.dot(f_el[3:6]))
def add_rigid_body(self, rigid_body):
'Add a rigid body defined by the arg rigid_body.'
# Check that nodes are not already assigned to rigid bodies
for rig in self.rigid:
in_rig = [i in rig.nodes for i in rigid_body.nodes]
if any(in_rig):
print('*** Nodes cannot belong to multiple rigid bodies')
print('*** -- Node {:d}\n'.format(rigid_body.nodes[in_rig.index(True)]))
return
# Add new rigid body
self.rigid.append(rigid_body)
# Create a new node to reference the rigid body to
self.n_nodes += 1
rigid_body.node_id = self.n_nodes - 1
self.x_nodes = np.append(self.x_nodes, rigid_body.pos[0])
self.y_nodes = np.append(self.y_nodes, rigid_body.pos[1])
self.z_nodes = np.append(self.z_nodes, rigid_body.pos[2])
self.type_nodes = np.append(self.type_nodes, N_REF)
self.bc_const.extend(6*[False])
self.bc_force.extend(6*[False])
self.bc_u = np.append(self.bc_u, 6*[0])
self.bc_f = np.append(self.bc_f, 6*[0])
if self.verbose:
print('# Adding new rigid body: {:s}'.format(rigid_body.name))
print('# -- Reference node {:d}\n'.format(rigid_body.node_id))
# Recalculate reduction matrices
self.calc_reduction_matrices()
def calc_reduction_matrices(self):
"""Calculate matrices which encode rigid body constraints.
Convert stiffness equation into reduced stiffness equation:
U = C * U_reduced
F_reduced = B * F
K_reduced = (B * K * C)
"""
if not self.rigid: # if there are no rigid bodies
self.B = 1
self.C = 1
self.node_r_id = range(self.n_nodes)
return
# Re-calculate B and C matrices
n_c = np.sum([r.n_nodes for r in self.rigid])
self.C = np.mat(np.zeros((6*self.n_nodes, 6*(self.n_nodes - n_c))))
self.B = np.mat(np.zeros((6*(self.n_nodes - n_c), 6*self.n_nodes)))
self.node_r_id = [-1] * self.n_nodes
for rig_id in range(len(self.rigid)):
self.node_r_id[self.rigid[rig_id].node_id] = self.n_nodes - len(self.rigid) - n_c + rig_id
n_r_n = 0
for n in range(self.n_nodes):
in_rigid = [n in r.nodes for r in self.rigid] # list of logicals
dof_n = 6*n + np.arange(6) # IDs of DOFs associated with this node
if not any(in_rigid):
# new re-numbered node ID
self.node_r_id[n] = n_r_n
dof_r_n = 6*n_r_n + np.arange(6)
n_r_n += 1
self.C[dof_n, dof_r_n] = 1 # identity matrix
self.B[dof_r_n, dof_n] = 1
else:
rig_i = in_rigid.index(True) # Index of rigid body
n_r_r = self.node_r_id[self.rigid[rig_i].node_id] # Reduced index of rigid body node
dof_r_r = 6*n_r_r + np.arange(6)
r_c = self.get_node_pos(n) - self.rigid[rig_i].pos
R = skew_symm(r_c)
self.C[dof_n, dof_r_r] = 1
self.C[np.ix_(dof_n[:3:], dof_r_r[3::])] = R
self.B[dof_r_r, dof_n] = 1
self.B[np.ix_(dof_r_r[3::], dof_n[:3:])] = R
self.soln_updated = False
def remove_rigid_body(self, rigid_body):
'Remove a rigid body constraint.'
# Confirm that the rigid body belongs to this model
if rigid_body not in self.rigid:
print('*** This rigid body does not exist in this model.')
return
# Remove from rigid bodies list
self.rigid.remove(rigid_body)
# Delete the reference node
n = rigid_body.node_id
self.n_nodes -= 1
self.x_nodes = np.delete(self.x_nodes, n)
self.y_nodes = np.delete(self.y_nodes, n)
self.z_nodes = np.delete(self.z_nodes, n)
self.type_nodes = np.delete(self.type_nodes, n)
self.bc_u = np.delete(self.bc_u, 6*n + np.arange(6))
self.bc_f = np.delete(self.bc_f, 6*n + np.arange(6))
for _ in range(6):
self.bc_const.pop(n)
self.bc_force.pop(n)
# Shift the node id for any subsequent rigid bodies down
for r in self.rigid:
if r.node_id > n:
r.node_id -= 1
# Unset reference node
rigid_body.node_id = None
# Recalculate reduction matrices
self.calc_reduction_matrices()
def add_constraint(self, node_id, dof, u=0):
'Add a displacement constraint (Dirichlet boundary condition).'
# Allow array input for node_id and/or dof
if not hasattr(node_id, '__iter__'):
node_id = [node_id]
if not hasattr(dof, '__iter__'):
dof = [dof]
for n in node_id:
for d in dof:
dof_r = 6*n + d
if not self.bc_force[dof_r]:
if not any([n in r.nodes for r in self.rigid]):
self.bc_const[dof_r] = True
self.bc_u[dof_r] = u
self.soln_updated = False
else:
print('\n*** Node {:d}: Cannot assign a constraint to a node in a rigid body\n'.format(n))
else:
print('\n*** Node {:d}, DOF {:d}: Cannot assign a constraint and force simultaneously\n'.format(n, d))
def add_force(self, node_id, dof, f):
'Add a concentrated force or moment (Neumann boundary condition).'
dof_r = 6*node_id + dof
if not self.bc_const[dof_r]:
if not any([node_id in r.nodes for r in self.rigid]):
self.bc_force[dof_r] = True
self.bc_f[dof_r] = f
self.soln_updated = False
else:
print('\n*** Node {:d}: Cannot assign a force to a node in a rigid body\n'.format(node_id))
else:
print('\n*** Node {:d}: Cannot assign a constraint and force simultaneously\n'.format(node_id))
def remove_bc(self, node_id, dof):
'Remove one or more boundary conditions.'
if not hasattr(node_id, '__iter__'):
node_id = [node_id]
if not hasattr(dof, '__iter__'):
dof = [dof]
for n in node_id:
for d in dof:
dof_r = 6*n + d
self.bc_const[dof_r] = False
self.bc_force[dof_r] = False
self.bc_u[dof_r] = 0
self.bc_f[dof_r] = 0
self.soln_updated = False
def solve_iteration(self):
'Solve elasticity equations for nodal displacements.'
# Form augmented, reduced stiffness matrix
self.calc_global_stiff()
if len(self.rigid) == 0:
# No rigid bodies. Reduced node IDs are equal to node IDs
self.node_r_id = np.arange(self.n_nodes, dtype=np.int16)
k_red = self.B * self.k_global * self.C
k_aug = k_red.copy()
f_aug = np.zeros(k_aug.shape[0])
# Apply constraints to nodes
for dof_c in [d for d in range(6*self.n_nodes) if self.bc_const[d]]:
dof_r = 6*self.node_r_id[dof_c // 6] + dof_c % 6
k_aug[dof_r] = 0
k_aug[dof_r, dof_r] = 1
f_aug[dof_r] = self.bc_u[dof_c]
# Apply forces to nodes
for dof_c in [d for d in range(6*self.n_nodes) if self.bc_force[d]]:
dof_r = 6*self.node_r_id[dof_c // 6] + dof_c % 6
f_aug[dof_r] = self.bc_f[dof_c]
# Solve for reduced nodal displacements
if self.verbose:
print('# Solving for nodal displacements -------')
print('# -- Reduced system of equations:')
print('# Reduced DOFs: {:d}'.format(k_aug.shape[0]))
print('# Rank = {:d} / ({:d})'.format(np.linalg.matrix_rank(k_aug), k_aug.shape[0]))
try:
u_red = np.linalg.solve(k_aug, f_aug)
except np.linalg.LinAlgError as e:
print('\n*** ERROR: {:s}. Did you properly constrain all DOFs?'.format(str(e)))
# TODO: Give user meaningful info about missing constraints
return False
self.soln_updated = True
# Create solution object
soln = FEMSolution(self)
# nodal displacements
if self.rigid:
u = np.array(self.C.dot(u_red)).flatten()
else:
u = u_red.flatten()
soln.nodal_disp = np.zeros((self.n_nodes, 6))
for d in range(6):
soln.nodal_disp[:, d] = u[d::6]
# nodal reaction forces
rxn_red = np.array(k_red.dot(u_red) - f_aug).flatten()
dof_rxn_red = [6*self.node_r_id[i // 6] + i % 6
for i in range(6*self.n_nodes)
if self.bc_const[i]]
dof_rxn = np.where(self.bc_const)[0]
rxn = rxn_red[dof_rxn_red]
soln.nodal_rxn = np.zeros((self.n_nodes, 6))
        soln.nodal_rxn[dof_rxn // 6, dof_rxn % 6] = rxn
# TODO Calculate element stresses
soln.el_prestress = self.el_prestress
for el in range(len(self.el_type)):
if self.el_type[el] == EL_SPOKE:
soln.el_stress.append(self.calc_spoke_stress(el, u))
else:
soln.el_stress.append(self.calc_rim_stress(el, u))
if self.verbose:
print('# ---------------------------------------')
return soln
def solve(self, pretension=None, verbose=True):
"""Solve elasticity equations including the effect of prestress.
        Since the spoke stiffness depends on spoke tension, the elasticity
        equations are technically non-linear. If the pretension keyword is
        used, the solve method first initializes the spoke tensions and then
        solves the linear stiffness equation by calling solve_iteration().
        The resulting changes in spoke tension are used to update the spoke
        tensions, and solve_iteration() is called a second time with the
        updated values. Only two iterations are required for convergence
        because the axial stiffness is orthogonal to the tension stiffness.
"""
self.verbose = verbose
if pretension is not None:
# set initial pretension
for e in self.get_spoke_elements():
self.el_prestress[e] = pretension
# solve
soln1 = self.solve_iteration()
# update spoke tensions
for e in self.get_spoke_elements():
self.el_prestress[e] = self.el_prestress[e] +\
soln1.el_stress[e][0]
else:
pretension = 0.0
# solve with updated element tensions
soln_2 = self.solve_iteration()
# reset spoke prestress to initial prestress
for e in self.get_spoke_elements():
self.el_prestress[e] = pretension
return soln_2
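    # Hypothetical usage sketch (instance name and node numbers are assumed,
    # not part of this file): a typical prestressed solve looks like
    #
    #   fem.add_constraint(node_id=0, dof=range(6))   # fully clamp one node
    #   fem.add_force(node_id=12, dof=1, f=500.0)     # 500 N load on DOF 1
    #   soln = fem.solve(pretension=800.0)            # 800 N initial spoke tension
    #   print(soln.nodal_disp[12])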
def __init__(self, wheel, verbose=False):
self.verbose = verbose
self.wheel = wheel
# Create a rim node at each unique spoke attachment point
theta_rim_nodes = set()
for s in self.wheel.spokes:
theta_rim_nodes.add(s.rim_pt[1])
theta_rim_nodes = sorted(list(theta_rim_nodes))
# Rim nodes
self.x_nodes = wheel.rim.radius * np.sin(theta_rim_nodes)
self.y_nodes = -wheel.rim.radius * np.cos(theta_rim_nodes)
self.z_nodes = np.zeros(len(self.x_nodes))
self.type_nodes = N_RIM * np.ones(len(self.x_nodes))
self.n_rim_nodes = len(self.type_nodes)
# Hub nodes
for s in self.wheel.spokes:
r_h = s.hub_pt[0]
theta_h = s.hub_pt[1]
z_h = s.hub_pt[2]
self.x_nodes = np.append(self.x_nodes, r_h*np.sin(theta_h))
self.y_nodes = np.append(self.y_nodes, -r_h*np.cos(theta_h))
self.z_nodes = np.append(self.z_nodes, z_h)
            self.type_nodes = np.append(self.type_nodes, N_HUB)
from matplotlib import pyplot as plt
import numpy as np
import math
import random
import matplotlib.ticker as plticker
from PIL import Image
from PIL import ImageColor
from PIL import ImageOps
import pickle
import pandas
with open('guitarShape.pkl','rb') as inFile:
guitarShape = pickle.load(inFile)
def getClosestColor(colors, r,g,b):
"""
    Given a list of colors in tuple form (r, g, b) and three values for r, g, and b respectively,
    this function returns the closest color in the list to those r, g, and b values.
    In this program it is used to map the raw RGB values of an image pixel onto the list of
    Lego tile colors, i.e. to find the closest Lego color for that part of the image.
    This is very helpful when making a Lego mosaic.
"""
    closestColor = None
    closestNum = float('inf')
for color in colors:
colorr = color[0]
colorg = color[1]
colorb = color[2]
distance = ( (r - colorr)**2 +(g - colorg)**2 +(b - colorb)**2) **.5
#print(distance)
if distance < closestNum:
closestColor = color
closestNum = distance
return closestColor
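# A vectorized numpy sketch of the same lookup (added for illustration, not used
# by the script): broadcasting the squared RGB distances removes the Python loop
# over the palette.
def getClosestColorNumpy(colors, r, g, b):
    palette = np.asarray(colors, dtype=float)                       # shape (N, 3)
    dists = np.sum((palette - np.array([r, g, b], dtype=float)) ** 2, axis=1)
    return tuple(int(c) for c in palette[np.argmin(dists)])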
def plotGuitarShape():
"""
Plots the Les Paul guitar shape with matplotlib
"""
with open('guitarShape.pkl','rb') as inFile:
guitarShape = pickle.load(inFile)
for index, row in enumerate(guitarShape):
print(list(row).count(1))
if index == 10:
print('=======================================')
plt.imshow(guitarShape, cmap='binary')
plt.show()
def plotImageOnGuitar(imagePath):
"""
Takes imagePath as input
Plots the Les Paul Guitar shape made out of legos
Colors the guitar to a pixellated version of the imagePath image only using real lego colors
"""
colors = pandas.read_csv('legoColors.csv',header=None)
colors = np.array(colors)
colorList = []
colorNameList = []
for index, line in enumerate(colors):
if line[3] == 'f':
color = line[2]
colorList.append(ImageColor.getcolor('#' + str(color),'RGB'))
colorNameList.append(line[1])
filename = imagePath
img = Image.open(filename)
imgSmall = img.resize(guitarShape.shape,resample=Image.BILINEAR)
imgSmall = imgSmall.rotate(90, Image.NEAREST, expand = 1)
imgSmall = ImageOps.flip(imgSmall)
imgSmall = np.array(imgSmall)
# plt.imshow(imgSmall)
# plt.show()
for i in range(len(imgSmall)):
for j in range(len(imgSmall[i])):
currentColor = imgSmall[i][j]
curR = currentColor[0]
curG = currentColor[1]
curB = currentColor[2]
closestColor = getClosestColor(colorList,curR,curG,curB)
imgSmall[i][j][0] = closestColor[0]
imgSmall[i][j][1] = closestColor[1]
imgSmall[i][j][2] = closestColor[2]
try:
imgSmall[i][j][3] = 255
except:
pass
# plt.imshow(imgSmall)
# plt.show()
imgSmall[guitarShape == 0] = 0
plt.imshow(imgSmall)
plt.show()
def getDataFromRows(rows):
toReturn = []
for row in rows:
toReturn.append(len(row))
return toReturn
def getRowsFromData(data):
toReturn = []
for dataVal in data:
toReturn.append([1]*dataVal)
return toReturn
def getArrayFromRows(rows):
numRows = len(rows)
longestRow = max(getDataFromRows(rows))
array = list(np.zeros((numRows,longestRow)))
for y in range(numRows):
padding = int((longestRow-len(rows[y]))/2)
#print(padding)
for j in range(padding,padding+len(rows[y])):
array[y][j] = 1
picture = []
for row in reversed(array):
picture.append(row)
return picture
def getArrayFromData(data):
rows = getRowsFromData(data)
return getArrayFromRows(rows)
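# Small demonstration (an addition, not part of the original script): each width
# in the data list becomes a centred row of ones, and the rows are reversed
# before being returned.
def demoArrayFromData():
    for row in getArrayFromData([1, 3, 5]):
        print(row)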
def monteCarloBottomShape():
"""
    This function and the four functions above are from an earlier attempt at randomly generating guitar shapes. I do not recommend using these.
"""
numRows = 22
for iteration in range(25):
successfulGuitar = False
while not successfulGuitar:
rows = []
longestRow = -1
initialWidth = 41
rows.append([1] * initialWidth)
previousIncrease = 999
previousWidth = initialWidth
minIncrease = -10
for i in range(numRows - 1):
satisfied = False
while not satisfied:
tryWidth = random.randrange(25,43,2)
increase = tryWidth - previousWidth
if increase <= 0 and increase <= previousIncrease and increase >= minIncrease:
rows.append([1] * tryWidth)
previousIncrease = increase
previousWidth = tryWidth
satisfied = True
if longestRow == 25:
successfulGuitar = True
print('iteration:',getDataFromRows(rows))
array = getArrayFromRows(rows)
plt.subplot(iteration + 1)
plt.title(str(iteration + 1))
plt.imshow(array)
def sin(x):
return np.sin(x)
def arctan(x):
return np.arctan(x)
#The following 3 functions use equations found here http://www.mnealon.eosc.edu/The_Nealon_Equation.html
def guitarCurve(x,A=-.06393446,B=-.7410887,C=1.180973,D=-1.24886,E=.3187446,F=-.8305975,G=2.352912,H=-.1870003,I=3.40192,J=-.01303915,K=1.349344,L=4.32767,M=5.228206,N=-.4099881,O=-.000250234,P=.0007021002,Q=0,R=18.26765,S=18.1965,BL=19.35):
    return (A * np.sin(B*x + C)
import unittest
from yauber_algo.errors import *
class RangeCloseTestCase(unittest.TestCase):
def test_rangeclose(self):
import yauber_algo.sanitychecks as sc
from numpy import array, nan, inf
import os
import sys
import pandas as pd
import numpy as np
from yauber_algo.algo import rangeclose
import yauber_algo.algo as a
#
# Function settings
#
algo = 'rangeclose'
func = rangeclose
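        # Note on expected behaviour (inferred from the checks below, not stated
        # in the library itself): rangeclose(h, l, c, period) appears to return
        #     (c - LLV(l, period)) / (HHV(h, period) - LLV(l, period)),
        # i.e. the position of the close inside the highest-high / lowest-low
        # range of the last `period` bars, in [0, 1]. NaN/Inf values inside the
        # lookback window seem to be skipped, a NaN on the current bar (or an
        # incomplete window) yields NaN, and a zero range (flat candle) maps to 0.5.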
with sc.SanityChecker(algo) as s:
#
# Check regular algorithm logic
#
s.check_regular(
array([0.8]),
func,
(
array([10]), # h
array([ 0]), # l
array([ 8]), # c
1
),
suffix='uptrend'
)
s.check_regular(
array([0.8]),
func,
(
array([10]), # h
array([0]), # l
array([8]), # c
0
),
suffix='period_zero',
exception=YaUberAlgoArgumentError
)
s.check_regular(
array([0.4]),
func,
(
array([10]), # h
array([0]), # l
array([8]), # c
-1
),
suffix='period_negative',
exception=YaUberAlgoArgumentError
)
s.check_regular(
array([0.4]),
func,
(
array([10]), # h
array([0]), # l
array([11]), # c
1
),
suffix='close_gt_hi',
exception=YaUberAlgoInternalError
)
s.check_regular(
array([0.4]),
func,
(
array([10]), # h
array([0]), # l
array([-1]), # c
1
),
suffix='close_lt_lo',
exception=YaUberAlgoInternalError
)
s.check_regular(
array([0.2]),
func,
(
array([10]), # h
array([0]), # l
array([2]), # c
1
),
suffix='dn_trend'
)
s.check_regular(
array([0.5]),
func,
(
array([10]), # h
array([0]), # l
array([5]), # c
1
),
suffix='neutral_trend'
)
s.check_regular(
array([0.4]),
func,
(
array([0]), # h
array([10]), # l
array([2]), # c
1
),
suffix='sanity_h_less_l',
exception=YaUberAlgoInternalError
)
s.check_regular(
array([0.5]),
func,
(
array([10]), # h
array([0]), # l
array([5]), # c
1
),
suffix='doji'
)
s.check_regular(
array([0.5]),
func,
(
array([10]), # h
array([0]), # l
array([5]), # c
1
),
suffix='doji_h_eq_l'
)
s.check_regular(
array([1.0]),
func,
(
array([10]), # h
array([0]), # l
array([10]), # c
1
),
suffix='ideal_trend_up'
)
s.check_regular(
array([0.0]),
func,
(
array([10]), # h
array([0]), # l
array([0]), # c
1
),
suffix='ideal_trend_dn'
)
s.check_regular(
array([0.5]),
func,
(
array([10]), # h
array([10]), # l
array([10]), # c
1
),
suffix='flat_candle'
)
s.check_regular(
array([nan]),
func,
(
array([nan]), # h
array([10]), # l
array([10]), # c
1
),
suffix='nan_h'
)
s.check_regular(
array([nan]),
func,
(
array([10]), # h
array([nan]), # l
array([10]), # c
1
),
suffix='nan_l'
)
s.check_regular(
array([nan]),
func,
(
array([10]), # h
array([10]), # l
array([nan]), # c
1
),
suffix='nan_c'
)
s.check_regular(
array([nan, 0.5]),
func,
(
array([6, 10]), # h
array([0, 3]), # l
array([0, 5]), # c
2
),
suffix='period2'
)
s.check_regular(
array([nan, 1.0]),
func,
(
array([10, 10]), # h
array([0, 3]), # l
array([0, 10]), # c
2
),
suffix='period2_alt'
)
s.check_regular(
array([nan, 1.0, (7-0.0)/(20-0)]),
func,
(
array([10, 10, 20]), # h
array([0, 3, 0]), # l
array([0, 10, 7]), # c
2
),
suffix='period2_alt_n3'
)
s.check_regular(
array([nan, nan, (7-0)/(20-0)]),
func,
(
array([10, 10, 20]), # h
array([0, nan, 0]), # l
array([0, 10, 7]), # c
2
),
suffix='period2_alt_n3_llv_nan'
)
s.check_regular(
array([nan, (5-0)/(10-0), (2-0) / (5-0)]),
func,
(
array([10, 5, 2]), # h
array([0, 3, 0]), # l
array([0, 5, 2]), # c
2
),
suffix='period2_alt_n3_hhv'
)
s.check_regular(
array([nan, nan, 0.7]),
func,
(
array([10, nan, 10]), # h
array([0, 3, 0]), # l
array([0, 10, 7]), # c
2
),
suffix='period2_alt_n3_hhv_with_nan'
)
s.check_naninf(
array([nan, 0.5]),
func,
(
array([nan, 10]), # h
array([inf, 0]), # l
array([nan, 5]), # c
2
),
suffix='period2_nan_ignored',
)
s.check_series(
pd.Series(array([nan, 0.5])),
func,
(
pd.Series(array([6, 10])), # h
pd.Series(array([0, 3])), # l
pd.Series(array([0, 5])), # c
2
),
suffix='series'
)
s.check_dtype_float(
                array([0.8], dtype=np.float64),
func,
(
                    array([10], dtype=np.float64), # h
                    array([0], dtype=np.float64), # l
                    array([8], dtype=np.float64), # c
1
),
)
s.check_dtype_int(
                array([0.8], dtype=np.float64),
func,
(
array([10], dtype=np.int32), # h
                    array([0], dtype=np.int32), # l
import numpy as np
def gelman_rubin(data):
"""
Apply Gelman-Rubin convergence diagnostic to a bunch of chains.
:param data: np.array of shape (Nchains, Nsamples, Npars)
"""
Nchains, Nsamples, Npars = data.shape
B_on_n = data.mean(axis=1).var(axis=0) # variance of in-chain means
W = data.var(axis=1).mean(axis=0) # mean of in-chain variances
#print(B_on_n, ' B_on_n mean')
#print(W, ' W variance ')
# simple version, as in Obsidian
sig2 = (Nsamples/(Nsamples-1))*W + B_on_n
Vhat = sig2 + B_on_n/Nchains
Rhat = Vhat/W
# advanced version that accounts for ndof
    m, n = float(Nchains), float(Nsamples)
si2 = data.var(axis=1)
xi_bar = data.mean(axis=1)
xi2_bar = data.mean(axis=1)**2
var_si2 = data.var(axis=1).var(axis=0)
allmean = data.mean(axis=1).mean(axis=0)
cov_term1 = np.array([np.cov(si2[:,i], xi2_bar[:,i])[0,1]
for i in range(Npars)])
cov_term2 = np.array([-2*allmean[i]*(np.cov(si2[:,i], xi_bar[:,i])[0,1])
for i in range(Npars)])
var_Vhat = ( ((n-1)/n)**2 * 1.0/m * var_si2
+ ((m+1)/m)**2 * 2.0/(m-1) * B_on_n**2
+ 2.0*(m+1)*(n-1)/(m*n**2)
* n/m * (cov_term1 + cov_term2))
df = 2*Vhat**2 / var_Vhat
#print(df, ' df ')
#print(var_Vhat, ' var_Vhat')
#print "gelman_rubin(): var_Vhat = {}, df = {}".format(var_Vhat, df)
Rhat *= df/(df-2)
print(Rhat, ' Rhat')
return Rhat
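# Quick self-check (an addition, not part of the original script): independent,
# well-mixed chains drawn from the same normal distribution should give Rhat
# values close to 1 for every parameter.
def gelman_rubin_selfcheck(nchains=4, nsamples=2000, npars=3, seed=0):
    rng = np.random.default_rng(seed)
    fake_chains = rng.normal(size=(nchains, nsamples, npars))
    return gelman_rubin(fake_chains)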
def main():
# pos
proposal = ['rw', 'adap-rw']
m = 0
problem = ['ptmcmc', 'surrogate-pt']
i = 0
for i in range(0,2) :
for m in range(0,2):
print(proposal[m], problem[i])
pos_run1 = np.loadtxt('data/'+proposal[m]+'/'+problem[i]+'/pos_1.txt')
pos_run2 = np.loadtxt('data/'+proposal[m]+'/'+problem[i]+'/pos_2.txt')
pos_run3 = np.loadtxt('data/'+proposal[m]+'/'+problem[i]+'/pos_3.txt')
            pos_run4 = np.loadtxt('data/'+proposal[m]+'/'+problem[i]+'/pos_4.txt')
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 27 09:50:53 2019
@author: craig
"""
#import sys
#from IPython import get_ipython
#get_ipython().magic('reset -sf')
import sys
from PyQt5.QtWidgets import QApplication, QMainWindow, QMenu, QVBoxLayout, QSizePolicy, QMessageBox, QWidget, QPushButton
from PyQt5.QtCore import QTimer
#******************************************************************
import matplotlib
#import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
matplotlib.use("Qt5Agg")
import matplotlib.gridspec as gridspec
#******************************************************************
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
#import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from numpy import diff
import math
import xlrd
from colorama import Fore, Style
# =============================================================================
# # ===========================================================================
# # #--------------------------------------------------------------------------
# # # Read Reference Data Spreadsheet
# # # Read mapinv_reference_data_carbonates_calculatedMode_Rosetta data
# # # which is the Rosetta Stone reference data per Clerke
# # #--------------------------------------------------------------------------
# # ===========================================================================
# =============================================================================
#Thomeer_core_data = pd.read_csv('mapinv_reference_data_carbonates_calculatedMode_Rosetta.csv')
#read the file
file = r'mapinv_reference_data_carbonates_calculatedMode_Rosetta.xlsx'
Thomeer_core_data = pd.read_excel(file,index_col=False)
# ------------------------------------------------------------------
# read in maininv_reference2 data which is just Rosetta Stone
# this is reference data
# DATA WILL BE LOADED
# ------------------------------------------------------------------
Permeability = Thomeer_core_data['PERMEABILITY']
Porosity = Thomeer_core_data['POROSITY']
G1 = Thomeer_core_data['G1']
PD1 = Thomeer_core_data['PD1']
BV1 = Thomeer_core_data['BV1']
G2 = Thomeer_core_data['G2']
PD2 = Thomeer_core_data['PD2']
BV2 = Thomeer_core_data['BV2']
Rock_Index = Thomeer_core_data['ROCK_INDEX']
Perm_max = 4
Perm_min = -4
Por_max = .35
Por_min = 0
x4=np.array(Porosity)
y4=np.array(Permeability)
# =============================================================================
# # ===========================================================================
# # #------------------------------------------------------------
# # #
# # # End of reading in mapinv reference data
# # #------------------------------------------------------------
# # ===========================================================================
# =============================================================================
# =============================================================================
# # ===========================================================================
# # #--------------------------------------------------------------------------
# ##
# ## Graphical Input of User Porosity and Permeability
# ##
# # #--------------------------------------------------------------------------
# # ===========================================================================
# =============================================================================
def tellme(s):
print(s)
plt.title(s, fontsize=16, color = 'blue')
plt.draw()
plt.clf() #clear plot of other things
#tellme('You will define a triangle, click to begin')
plt.figure(1,figsize=(6, 5))
#plt.ion()
plt.semilogy(x4, y4 , 'b-*', linewidth=0, label='RSW Core Data' )
plt.xlim(0.0,.4)
plt.ylim(0.0001, 10000)
plt.grid(True)
#plt.waitforbuttonpress()
#Use pts array to store selected points
pts = []
while len(pts) < 1:
tellme('Select Poro-Perm point with mouse')
pts = np.asarray(plt.ginput(1, timeout=3))
# if len(pts) < 3:
# tellme('Too few points, starting over')
# time.sleep(1) # Wait a second
Por = pts.item(0)
Perm = pts.item(1)
print()
print('Porosity =', Por, ' and Permeability =', Perm)
print()
plt.close('all')
# =============================================================================
# # ===========================================================================
# # #--------------------------------------------------------------------------
# ##
# ## This is the beginning of KNN
# ##
# # #--------------------------------------------------------------------------
# # ===========================================================================
# =============================================================================
#normalized Porosity and Permeability inputs
por = (Por-Por_min)/(Por_max - Por_min)
perm= (math.log10(Perm) - Perm_min)/(Perm_max - Perm_min)
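# Worked example (hypothetical input): Por = 0.20 and Perm = 100 mD give
# por = (0.20 - 0)/(0.35 - 0) ≈ 0.571 and perm = (log10(100) - (-4))/(4 - (-4)) = 0.75.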
# =============================================================================
# #----------------------------------------------------------------------------
# #
# # ESTIMATE THOMEER PARAMETERS FOR BOTH PORE SYSTEMS
# #
# #----------------------------------------------------------------------------
# =============================================================================
dist_inv = []
dist_phi = []
dist_lperm = []
G1_weight = []
PD1_weight = []
BV1_weight = []
G2_weight = []
PD2_weight = []
BV2_weight = []
#Rock_Index_weight = []
dist_inv_total=0
G1_total = 0
PD1_total = 0
BV1_total = 0
G2_total = 0
PD2_total = 0
BV2_total = 0
inv_dist_array = []
#distance_knn_array = [rows,columns]
distance_knn_array = []
#------------------------------------------------------------------
# Estimate all Thomeer Parameters for *_est values
#-------------------------------------------------------------------
Porosity_norm = []
Permeability_norm = []
#this is the mapinv_reference_data being used
for i in range(0,len(Thomeer_core_data),1):
Porosity_norm.append((Porosity[i] - Por_min) / (Por_max - Por_min))
Permeability_norm.append((math.log10(Permeability[i]) - Perm_min) / (Perm_max - Perm_min))
#Euclidian Distance
dist_phi.append( abs(por - Porosity_norm[i] ))
dist_lperm.append( abs(perm - Permeability_norm[i]))
dist_inv.append( 1 / math.sqrt( dist_phi[i]**2 + dist_lperm[i]**2) )
    # calculate weights for each parameter
G1_weight.append(dist_inv[i] * G1[i])
PD1_weight.append(dist_inv[i] * PD1[i])
BV1_weight.append(dist_inv[i] * BV1[i])
G2_weight.append(dist_inv[i] * G2[i])
PD2_weight.append(dist_inv[i] * PD2[i])
BV2_weight.append(dist_inv[i] * BV2[i])
#Rock_Index_weight.append(dist_inv[i] * Rock_Index[i])
inv_dist_array.append(dist_inv[i]); # add items
# =============================================================================
### KNN Array
# # ===========================================================================
# # #--------------------------------------------------------------------------
distance_knn_array = [dist_inv, G1_weight, PD1_weight, BV1_weight, G2_weight, PD2_weight, BV2_weight,Rock_Index]
# distance_knn_array = [Permeability, Porosity, G1, PD1, BV1, G2, PD2, BV2]
# # #--------------------------------------------------------------------------
# # ===========================================================================
# =============================================================================
xnorm=np.array(Porosity_norm)
ynorm=np.array(Permeability_norm)
# =============================================================================
# # ===========================================================================
# # #--------------------------------------------------------------------------
# # #
# # # Transpose and Sort new kNN array
# # #
# # #--------------------------------------------------------------------------
# # ===========================================================================
# =============================================================================
#knn_array = np.transpose array
knn_array = np.transpose(distance_knn_array)
#print(knn_array)
#Sort array from large to low by column 0 which is dist_inv
#xknn=np.array(knn_array)
# sort: x[x[:, column].argsort()[::-1]], where [::-1] gives descending order
mat_sort = knn_array[knn_array[:,0].argsort()[::-1]] # first column, descending sort
#mat_sort = x[x[:,1].argsort()[::-1]]
#mat_sort = x[x[:,2].argsort()[::-1]
# =============================================================================
# # ===========================================================================
# # #--------------------------------------------------------------------------
# # #
# # # Calculate knn Thomeer Parameters
# # #
# # #--------------------------------------------------------------------------
# # ===========================================================================
# =============================================================================
#------------------------------------------------------------------------------
# Number of nearest Neighbors
#------------------------------------------------------------------------------
n_neighbors = 3
#------------------------------------------------------------------------------
dist_inv_total_knn = 0
G1_total_knn = 0
PD1_total_knn = 0
BV1_total_knn = 0
G2_total_knn = 0
PD2_total_knn = 0
BV2_total_knn = 0
#kNN Estimates for first 3 rows
#dist_inv_total = mat_sort[0][0] + mat_sort[1][0] + mat_sort[2][0]
for i in range(0,n_neighbors,1):
dist_inv_total_knn = dist_inv_total_knn + mat_sort[i][0]
G1_total_knn = G1_total_knn + mat_sort[i][1]
PD1_total_knn = PD1_total_knn + mat_sort[i][2]
BV1_total_knn = BV1_total_knn + mat_sort[i][3]
G2_total_knn = G2_total_knn + mat_sort[i][4]
PD2_total_knn = PD2_total_knn + mat_sort[i][5]
BV2_total_knn = BV2_total_knn + mat_sort[i][6]
#back to k values and calculate estimations now
G1_est_knn = G1_total_knn / dist_inv_total_knn
PD1_est_knn = PD1_total_knn / dist_inv_total_knn
BV1_est_knn = (BV1_total_knn / dist_inv_total_knn)
G2_est_knn = G2_total_knn / dist_inv_total_knn
PD2_est_knn = PD2_total_knn / dist_inv_total_knn
BV2_est_knn = (BV2_total_knn / dist_inv_total_knn)
print()
print(Fore.GREEN + 'Estimated Thomeer Parameters from KNN =', n_neighbors, ' on normalized Poro-Perm data')
print(Fore.GREEN + ' G1 =',G1_est_knn, ', Pd1 =',PD1_est_knn, ', BV1(%) =',BV1_est_knn)
print(Fore.GREEN + ' G2 =',G2_est_knn, ', Pd2 =',PD2_est_knn, ', BV2(%) =',BV2_est_knn)
#print(Style.RESET_ALL)
#print('back to normal now')
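# Generic restatement of the estimator used above (an illustrative sketch, not
# called by this script): with inverse-distance weights w_i = 1/d_i, the kNN
# estimate of a property v is sum(w_i * v_i) / sum(w_i) over the k nearest
# (largest-weight) neighbors.
def idw_knn_estimate(inv_dist, values, k=3):
    inv_dist = np.asarray(inv_dist, dtype=float)
    values = np.asarray(values, dtype=float)
    nearest = np.argsort(inv_dist)[::-1][:k]    # k largest inverse distances
    w = inv_dist[nearest]
    return np.sum(w * values[nearest]) / np.sum(w)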
# =============================================================================
# # ===========================================================================
# # #--------------------------------------------------------------------------
# # #
# # # Find Closest distance Pc curve in poro-perm space
# # #--------------------------------------------------------------------------
# # ===========================================================================
# =============================================================================
a = np.array(inv_dist_array)
# -*- coding: utf-8 -*-
import copy
import itertools
import random
import networkx as nx
import numpy as np
import bionev.OpenNE.graph as og
import bionev.struc2vec.graph as sg
def read_for_OpenNE(filename, weighted=False):
graph = og.Graph()
print("Loading training graph for learning embedding...")
graph.read_edgelist(filename=filename, weighted=weighted)
print("Graph Loaded...")
return graph
def read_for_struc2vec(filename):
print("Loading training graph for learning embedding...")
graph = sg.load_edgelist(filename, undirected=True)
print("Graph Loaded...")
return graph
def read_for_gae(filename, weighted=False):
print("Loading training graph for learning embedding...")
edgelist = np.loadtxt(filename, dtype='float')
if weighted:
edgelist = [(int(edgelist[idx, 0]), int(edgelist[idx, 1])) for idx in range(edgelist.shape[0]) if
edgelist[idx, 2] > 0]
else:
edgelist = [(int(edgelist[idx, 0]), int(edgelist[idx, 1])) for idx in range(edgelist.shape[0])]
min_idx = min([x[0] for x in edgelist] + [x[1] for x in edgelist])
max_idx = max([x[0] for x in edgelist] + [x[1] for x in edgelist])
adj = nx.adjacency_matrix(nx.from_edgelist(edgelist), nodelist=list(range(min_idx, max_idx + 1)))
print(adj)
print("Graph Loaded...")
print(adj.shape)
return adj
def read_for_SVD(filename, weighted=False):
if weighted:
graph = nx.read_weighted_edgelist(filename)
else:
graph = nx.read_edgelist(filename)
return graph
def read_graph(edgelist, weighted=False):
if weighted:
graph = nx.read_weighted_edgelist(edgelist)
else:
graph = nx.read_edgelist(edgelist)
return graph
def train_test_graph(training_edgelist, testing_edgelist, weighted=False):
g_train = read_graph(training_edgelist, weighted=weighted)
g_test = nx.read_edgelist(testing_edgelist)
testing_pos_edges = g_test.edges
node_num1, edge_num1 = len(g_train.nodes), len(g_train.edges)
print('Training Graph: nodes:', node_num1, 'edges:', edge_num1)
return g_train, testing_pos_edges, training_edgelist
def split_train_test_graph(*, input_graph, testing_ratio=0.2, weighted=False):
node_num1, edge_num1 = len(input_graph.nodes), len(input_graph.edges)
print('Original Graph: nodes:', node_num1, 'edges:', edge_num1)
testing_edges_num = int(len(input_graph.edges) * testing_ratio)
    testing_pos_edges = random.sample(list(input_graph.edges), testing_edges_num)
g_train = copy.deepcopy(input_graph)
for edge in testing_pos_edges:
node_u, node_v = edge
if g_train.degree(node_u) > 1 and g_train.degree(node_v) > 1:
g_train.remove_edge(node_u, node_v)
train_graph_filename = 'graph_train.edgelist'
if weighted:
nx.write_weighted_edgelist(g_train, train_graph_filename)
else:
nx.write_edgelist(g_train, train_graph_filename, data=False)
node_num1, edge_num1 = len(g_train.nodes), len(g_train.edges)
print('Training Graph: nodes:', node_num1, 'edges:', edge_num1)
return input_graph, g_train, testing_pos_edges, train_graph_filename
def generate_neg_edges(graph: nx.Graph, m: int):
"""Get m samples from the edges in the graph that don't exist."""
negative_edges = [
(source, target)
for source, target in itertools.combinations(graph, 2)
if not graph.has_edge(source, target)
]
return random.sample(negative_edges, m)
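# Illustrative helper (an addition, not part of the original module): draw a
# handful of negative edges from a small built-in graph to show the intended
# call pattern of generate_neg_edges.
def example_generate_neg_edges(m: int = 10):
    graph = nx.karate_club_graph()
    return generate_neg_edges(graph, m)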
def load_embedding(embedding_file_name, node_list=None):
with open(embedding_file_name) as f:
node_num, _ = f.readline().split()
embedding_look_up = {}
if node_list:
for line in f:
vec = line.strip().split()
node_id = vec[0]
if node_id in node_list:
emb = [float(x) for x in vec[1:]]
embedding_look_up[node_id] = list(emb)
assert len(node_list) == len(embedding_look_up)
else:
for line in f:
vec = line.strip().split()
node_id = vec[0]
emb = [float(x) for x in vec[1:]]
embedding_look_up[node_id] = list(emb)
assert int(node_num) == len(embedding_look_up)
f.close()
return embedding_look_up
def read_node_labels(filename):
fin = open(filename, 'r')
node_list = []
labels = []
while 1:
l = fin.readline()
if l == '':
break
vec = l.strip().split()
node_list.append(vec[0])
labels.append(vec[1:])
fin.close()
print('Nodes with labels: %s' % len(node_list))
return node_list, labels
def split_train_test_classify(embedding_look_up, x, y, testing_ratio: float = 0.2):
training_ratio = 1 - testing_ratio
training_size = int(training_ratio * len(x))
shuffle_indices = np.random.permutation(np.arange(len(x)))
x_train = [embedding_look_up[x[shuffle_indices[i]]] for i in range(training_size)]
y_train = [y[shuffle_indices[i]] for i in range(training_size)]
x_test = [embedding_look_up[x[shuffle_indices[i]]] for i in range(training_size, len(x))]
y_test = [y[shuffle_indices[i]] for i in range(training_size, len(x))]
x_train = np.array(x_train).ravel()
y_train = np.array(y_train).ravel()
x_test = np.array(x_test).ravel()
y_test = np.array(y_test).ravel()
return x_train, y_train, x_test, y_test
def get_y_pred(y_test, y_pred_prob):
y_pred = np.zeros(y_pred_prob.shape)
sort_index = np.flip(np.argsort(y_pred_prob, axis=1), 1)
for i in range(y_test.shape[0]):
num = np.sum(y_test[i])
for j in range(num):
y_pred[i][sort_index[i][j]] = 1
return y_pred
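# Small worked example (an addition, not part of the original module): with two
# true labels in a row, get_y_pred marks the two highest-probability classes.
def example_get_y_pred():
    y_true = np.array([[1, 0, 1]])
    y_prob = np.array([[0.2, 0.5, 0.9]])
    return get_y_pred(y_true, y_prob)  # -> array([[0., 1., 1.]])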
def get_xy_sets(embeddings, graph_edges, neg_edges):
x = []
y = []
for edge in graph_edges:
        node_u_emb = np.array(embeddings[edge[0]])
import matplotlib.image as mpimg
import numpy as np
def get_batches_multi_dir(directories, batch_size):
# get batches from multiple (a list of) directories
i = 0
batch_ctr = 0
sw_angles = []
speed = []
throttle = []
brake_input = []
images = []
for directory in directories:
print('opening data in ', directory)
for l in open(directory + "/driving_log.csv", 'r'):
data = l.split(",")
if i == batch_size:
i = 0
batch_ctr = batch_ctr + 1
print('----GEN_DBG: ')
print('------batch_ctr: ', batch_ctr)
yield batch_ctr, np.array(images), np.array(sw_angles), np.array(throttle), np.array(brake_input), np.array(speed)
sw_angles = []
speed = []
throttle = []
brake_input = []
images = []
sw_angles.append(float(data[3]))
throttle.append(float(data[4]))
brake_input.append(float(data[5]))
speed.append(float(data[6]))
images.append([mpimg.imread(data[0]), mpimg.imread(data[1]), mpimg.imread(data[2])])
# print('----GEN_DBG: ')
# print('------i: ', i)
i = i + 1
yield batch_ctr, np.array(images), np.array(sw_angles), np.array(throttle), np.array(brake_input), np.array(speed)
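# Illustrative helper (an addition, not part of the original file): iterate over
# the generator above and print the batch shapes; each directory passed in is
# assumed to contain a driving_log.csv plus the image files it references.
def preview_batches(directories, batch_size=64):
    for ctr, imgs, angles, throttle, brake, speed in get_batches_multi_dir(directories, batch_size):
        print('batch', ctr, 'images', imgs.shape, 'angles', angles.shape)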
def get_batches(directory, batch_size):
# get batches from a single directory
i = 0
batch_ctr = 0
sw_angles = []
speed = []
throttle = []
brake_input = []
images = []
print('opening data in ', directory)
for l in open(directory + "/driving_log.csv", 'r'):
data = l.split(",")
if i == batch_size:
i = 0
batch_ctr = batch_ctr + 1
print('----GEN_DBG: ')
print('------batch_ctr: ', batch_ctr)
yield batch_ctr, np.array(images), np.array(sw_angles), np.array(throttle), np.array(brake_input), np.array(speed)
sw_angles = []
speed = []
throttle = []
brake_input = []
images = []
sw_angles.append(float(data[3]))
throttle.append(float(data[4]))
brake_input.append(float(data[5]))
speed.append(float(data[6]))
images.append([mpimg.imread(data[0]), mpimg.imread(data[1]), mpimg.imread(data[2])])
# print('----GEN_DBG: ')
# print('------i: ', i)
i = i + 1
yield batch_ctr, np.array(images), np.array(sw_angles), np.array(throttle), np.array(brake_input), np.array(speed)
def multi_dir_data_gen(dirs, batch_size, train_fraction, mode="TRAIN"):
# same idea as get_batches_multi_dir, but augments the training data
# and has the option to perform training/validation split, but without
# total shuffling for disk access speed reasons
train_mode = mode == "TRAIN" #else assume should return validation data
valid_mode = mode == "VALIDATION"
all_mode = mode == "ALL"
if not(train_mode) and not(valid_mode) and not(all_mode):
assert mode == "VALIDATION", "mode must be either TRAIN or VALIDATION or ALL"
for batch_ctr, images, sw_angles, throttle, brake_input, speeds in get_batches_multi_dir(dirs, 128):
data_len = len(sw_angles)
num_for_train = int( float( data_len ) * train_fraction )
num_for_valid = data_len - num_for_train
#data augmentation generates 6 times the number of images
#perform data augmentation
features_center = images[:,0,:,:]
labels_center = sw_angles
features_center_rev = np.flip(features_center, 2)
labels_center_rev = sw_angles * -1.0
features_left = images[:,1,:,:]
labels_left = sw_angles + 0.5 # was 1.0
        features_left_rev = np.flip(features_left, 2)
import numpy as np
import cv2
import pickle
def order_points(pts):
rect = np.zeros((4, 2), dtype = "float32")
s = pts.sum(axis = 1)
    rect[0] = pts[np.argmin(s)]
# SPDX-FileCopyrightText: 2021 <NAME> <<EMAIL>>
#
# SPDX-License-Identifier: MIT
import copy
import dataclasses
import math
from dataclasses import dataclass, field
import numpy as np
EPS = np.finfo(float).eps
@dataclass
class PyVQFParams:
tauAcc: float = 3.0
tauMag: float = 9.0
motionBiasEstEnabled: bool = True
restBiasEstEnabled: bool = True
magDistRejectionEnabled: bool = True
biasSigmaInit: float = 0.5
biasForgettingTime: float = 100.0
biasClip: float = 2.0
biasSigmaMotion: float = 0.1
biasVerticalForgettingFactor: float = 0.0001
biasSigmaRest: float = 0.03
restMinT: float = 1.5
restFilterTau: float = 0.5
restThGyr: float = 2.0
restThAcc: float = 0.5
restThMag: float = 0.1
magCurrentTau: float = 0.05
magRefTau: float = 20.0
magNormTh: float = 0.1
magDipTh: float = 10.0
magNewTime: float = 20.0
magNewFirstTime: float = 5.0
magNewMinGyr: float = 20.0
magMinUndisturbedTime: float = 0.5
magMaxRejectionTime: float = 60.0
magRejectionFactor: float = 2.0
@dataclass
class PyVQFState:
gyrQuat: np.ndarray = field(default_factory=lambda: np.array([1, 0, 0, 0], float))
accQuat: np.ndarray = field(default_factory=lambda: np.array([1, 0, 0, 0], float))
delta: float = 0.0
restDetected: bool = False
magDistDetected: bool = True
lastAccLp: np.ndarray = field(default_factory=lambda: np.zeros(3, float))
accLpState: np.ndarray = field(default_factory=lambda: np.full((2, 3), np.nan, float))
kMagInit: float = 1.0
lastMagDisAngle: float = 0.0
lastMagCorrAngularRate: float = 0.0
bias: np.ndarray = field(default_factory=lambda: np.zeros(3, float))
biasP: np.ndarray = field(default_factory=lambda: np.full((3, 3), np.nan, float))
motionBiasEstRLpState: np.ndarray = field(default_factory=lambda: np.full((2, 9), np.nan, float))
motionBiasEstBiasLpState: np.ndarray = field(default_factory=lambda: np.full((2, 2), np.nan, float))
restLastSquaredDeviations: np.ndarray = field(default_factory=lambda: np.zeros(3, float))
restT: float = 0.0
restLastGyrLp: np.ndarray = field(default_factory=lambda: np.zeros(3, float))
restGyrLpState: np.ndarray = field(default_factory=lambda: np.full((2, 3), np.nan, float))
restLastAccLp: np.ndarray = field(default_factory=lambda: np.zeros(3, float))
restAccLpState: np.ndarray = field(default_factory=lambda: np.full((2, 3), np.nan, float))
restLastMagLp: np.ndarray = field(default_factory=lambda: np.zeros(3, float))
restMagLpState: np.ndarray = field(default_factory=lambda: np.full((2, 3), np.nan, float))
magRefNorm: float = 0.0
magRefDip: float = 0.0
magUndisturbedT: float = 0.0
magRejectT: float = -1.0
magCandidateNorm: float = -1.0
magCandidateDip: float = 0.0
magCandidateT: float = 0.0
magNormDip: np.ndarray = field(default_factory=lambda: np.zeros(2, float))
magNormDipLpState: np.ndarray = field(default_factory=lambda: np.full((2, 2), np.nan, float))
@dataclass
class PyVQFCoefficients:
gyrTs: float
accTs: float
magTs: float
accLpB: np.ndarray = field(default_factory=lambda: np.full(3, np.nan, float))
accLpA: np.ndarray = field(default_factory=lambda: np.full(2, np.nan, float))
kMag: float = -1.0
biasP0: float = -1.0
biasV: float = -1.0
biasMotionW: float = -1.0
biasVerticalW: float = -1.0
biasRestW: float = -1.0
restGyrLpB: np.ndarray = field(default_factory=lambda: np.full(3, np.nan, float))
restGyrLpA: np.ndarray = field(default_factory=lambda: np.full(2, np.nan, float))
restAccLpB: np.ndarray = field(default_factory=lambda: np.full(3, np.nan, float))
restAccLpA: np.ndarray = field(default_factory=lambda: np.full(2, np.nan, float))
restMagLpB: np.ndarray = field(default_factory=lambda: np.full(3, np.nan, float))
restMagLpA: np.ndarray = field(default_factory=lambda: np.full(2, np.nan, float))
kMagRef: float = -1.0
magNormDipLpB: np.ndarray = field(default_factory=lambda: np.full(3, np.nan, float))
magNormDipLpA: np.ndarray = field(default_factory=lambda: np.full(2, np.nan, float))
class PyVQF:
"""A Versatile Quaternion-based Filter for IMU Orientation Estimation.
This class implements the orientation estimation filter described in the following publication:
<NAME>, <NAME>. "VQF: Highly Accurate IMU Orientation Estimation with Bias Estimation and Magnetic
Disturbance Rejection." arXiv preprint, 2022. `arXiv:2203.17024 <https://arxiv.org/abs/2203.17024>`_.
The filter can perform simultaneous 6D (magnetometer-free) and 9D (gyr+acc+mag) sensor fusion and can also be used
without magnetometer data. It performs rest detection, gyroscope bias estimation during rest and motion, and
magnetic disturbance detection and rejection. Different sampling rates for gyroscopes, accelerometers, and
magnetometers are supported as well. While in most cases, the defaults will be reasonable, the algorithm can be
influenced via a number of tuning parameters.
To use this class for online (sample-by-sample) processing,
    1. create an instance of the class and provide the sampling time and, optionally, parameters
2. for every sample, call one of the update functions to feed the algorithm with IMU data
3. access the estimation results with :meth:`getQuat6D`, :meth:`getQuat9D` and the other getter methods.
If the full data is available in numpy arrays, you can use :meth:`updateBatch`.
This class is a pure Python implementation of the algorithm that only depends on the Python standard library and
`numpy <https://numpy.org/>`_. Note that the wrapper :class:`vqf.VQF` for the C++ implementation :cpp:class:`VQF`
is much faster than this pure Python implementation. Depending on use case and programming language of choice,
the following alternatives might be useful:
    +------------------------+----------------------------+--------------------------+---------------------------+
    |                        | Full Version               | Basic Version            | Offline Version           |
    |                        |                            |                          |                           |
    +========================+============================+==========================+===========================+
    | **C++**                | :cpp:class:`VQF`           | :cpp:class:`BasicVQF`    | :cpp:func:`offlineVQF`    |
    +------------------------+----------------------------+--------------------------+---------------------------+
    | **Python/C++ (fast)**  | :py:class:`vqf.VQF`        | :py:class:`vqf.BasicVQF` | :py:meth:`vqf.offlineVQF` |
    +------------------------+----------------------------+--------------------------+---------------------------+
    | **Pure Python (slow)** | **vqf.PyVQF (this class)** | --                       | --                        |
    +------------------------+----------------------------+--------------------------+---------------------------+
    | **Pure Matlab (slow)** | :mat:class:`VQF.m <VQF>`   | --                       | --                        |
    +------------------------+----------------------------+--------------------------+---------------------------+
    In the most common case (using the default parameters and all data being sampled with the same frequency), create the
class like this:
.. code-block::
from vqf import PyVQF
vqf = PyVQF(0.01) # 0.01 s sampling time, i.e. 100 Hz
Example code to create an object with magnetic disturbance rejection disabled (use the `**-operator
<https://docs.python.org/3/tutorial/controlflow.html#unpacking-argument-lists>`_ to pass parameters from a dict):
.. code-block::
from vqf import PyVQF
vqf = PyVQF(0.01, magDistRejectionEnabled=False) # 0.01 s sampling time, i.e. 100 Hz
To use this class as a replacement for the basic version BasicVQF, pass the following parameters:
.. code-block::
from vqf import PyVQF
vqf = PyVQF(0.01, motionBiasEstEnabled=False, restBiasEstEnabled=False, magDistRejectionEnabled=False)
See :cpp:struct:`VQFParams` for a detailed description of all parameters.
"""
def __init__(self, gyrTs, accTs=-1.0, magTs=-1.0, **params):
"""
:param gyrTs: sampling time of the gyroscope measurements in seconds
:param accTs: sampling time of the accelerometer measurements in seconds
(the value of `gyrTs` is used if set to -1)
:param magTs: sampling time of the magnetometer measurements in seconds
(the value of `gyrTs` is used if set to -1)
:param (params): optional parameters to override the defaults
(see :cpp:struct:`VQFParams` for a full list and detailed descriptions)
"""
accTs = accTs if accTs > 0 else gyrTs
magTs = magTs if magTs > 0 else gyrTs
self._params = PyVQFParams(**params)
self._state = PyVQFState()
self._coeffs = PyVQFCoefficients(gyrTs=gyrTs, accTs=accTs, magTs=magTs)
self._setup()
def updateGyr(self, gyr):
"""Performs gyroscope update step.
It is only necessary to call this function directly if gyroscope, accelerometers and magnetometers have
different sampling rates. Otherwise, simply use :meth:`update`.
:param gyr: gyroscope measurement in rad/s -- numpy array with shape (3,)
:return: None
"""
assert gyr.shape == (3,)
# rest detection
if self._params.restBiasEstEnabled or self._params.magDistRejectionEnabled:
gyrLp = self.filterVec(gyr, self._params.restFilterTau, self._coeffs.gyrTs, self._coeffs.restGyrLpB,
self._coeffs.restGyrLpA, self._state.restGyrLpState)
deviation = gyr - gyrLp
squaredDeviation = deviation.dot(deviation)
biasClip = self._params.biasClip*np.pi/180.0
if squaredDeviation >= (self._params.restThGyr*np.pi/180.0)**2 or np.max(np.abs(gyrLp)) > biasClip:
self._state.restT = 0.0
self._state.restDetected = False
self._state.restLastGyrLp = gyrLp
self._state.restLastSquaredDeviations[0] = squaredDeviation
# remove estimated gyro bias
gyrNoBias = gyr - self._state.bias
# gyroscope prediction step
gyrNorm = math.sqrt(gyrNoBias.dot(gyrNoBias))
angle = gyrNorm * self._coeffs.gyrTs
if gyrNorm > EPS:
c = np.cos(angle/2)
s = np.sin(angle/2)/gyrNorm
gyrStepQuat = np.array([c, s*gyrNoBias[0], s*gyrNoBias[1], s*gyrNoBias[2]], float)
self._state.gyrQuat = self.quatMultiply(self._state.gyrQuat, gyrStepQuat)
self.normalize(self._state.gyrQuat)
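        # The prediction step above integrates the bias-corrected rate w = gyr - bias
        # over one sample: with angle = |w|*Ts, the step quaternion is
        # [cos(angle/2), sin(angle/2)*w/|w|], which is right-multiplied onto gyrQuat.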
def updateAcc(self, acc):
"""Performs accelerometer update step.
It is only necessary to call this function directly if gyroscope, accelerometers and magnetometers have
different sampling rates. Otherwise, simply use :meth:`update`.
Should be called after :meth:`updateGyr` and before :meth:`updateMag`.
:param acc: accelerometer measurement in m/s² -- numpy array with shape (3,)
:return: None
"""
assert acc.shape == (3,)
# ignore [0 0 0] samples
if acc[0] == 0.0 and acc[1] == 0.0 and acc[2] == 0.0:
return
accTs = self._coeffs.accTs
# rest detection
if self._params.restBiasEstEnabled:
accLp = self.filterVec(acc, self._params.restFilterTau, accTs, self._coeffs.restAccLpB,
self._coeffs.restAccLpA, self._state.restAccLpState)
deviation = acc - accLp
squaredDeviation = deviation.dot(deviation)
if squaredDeviation >= self._params.restThAcc**2:
self._state.restT = 0.0
self._state.restDetected = False
else:
self._state.restT += accTs
if self._state.restT >= self._params.restMinT:
self._state.restDetected = True
self._state.restLastAccLp = accLp
self._state.restLastSquaredDeviations[1] = squaredDeviation
# filter acc in inertial frame
accEarth = self.quatRotate(self._state.gyrQuat, acc)
self._state.lastAccLp = self.filterVec(accEarth, self._params.tauAcc, accTs,
self._coeffs.accLpB, self._coeffs.accLpA, self._state.accLpState)
# transform to 6D earth frame and normalize
accEarth = self.quatRotate(self._state.accQuat, self._state.lastAccLp)
self.normalize(accEarth)
# inclination correction
q_w = math.sqrt((accEarth[2]+1)/2)
if q_w > 1e-6:
accCorrQuat = np.array([q_w, 0.5*accEarth[1]/q_w, -0.5*accEarth[0]/q_w, 0], float)
else:
accCorrQuat = np.array([0, 1, 0, 0], float)
self._state.accQuat = self.quatMultiply(accCorrQuat, self._state.accQuat)
self.normalize(self._state.accQuat)
# calculate correction angular rate to facilitate debugging
self._state.lastAccCorrAngularRate = math.acos(accEarth[2])/self._coeffs.accTs
# bias estimation
if self._params.motionBiasEstEnabled or self._params.restBiasEstEnabled:
biasClip = self._params.biasClip*np.pi/180.0
bias = self._state.bias
# get rotation matrix corresponding to accGyrQuat
accGyrQuat = self.getQuat6D()
R = np.array([
1 - 2*accGyrQuat[2]**2 - 2*accGyrQuat[3]**2, # r11
2*(accGyrQuat[2]*accGyrQuat[1] - accGyrQuat[0]*accGyrQuat[3]), # r12
2*(accGyrQuat[0]*accGyrQuat[2] + accGyrQuat[3]*accGyrQuat[1]), # r13
2*(accGyrQuat[0]*accGyrQuat[3] + accGyrQuat[2]*accGyrQuat[1]), # r21
1 - 2*accGyrQuat[1]**2 - 2*accGyrQuat[3]**2, # r22
2*(accGyrQuat[2]*accGyrQuat[3] - accGyrQuat[1]*accGyrQuat[0]), # r23
2*(accGyrQuat[3]*accGyrQuat[1] - accGyrQuat[0]*accGyrQuat[2]), # r31
2*(accGyrQuat[0]*accGyrQuat[1] + accGyrQuat[3]*accGyrQuat[2]), # r32
1 - 2*accGyrQuat[1]**2 - 2*accGyrQuat[2]**2, # r33
], float)
# calculate R*b_hat (only the x and y component, as z is not needed)
biasLp = np.array([
R[0]*bias[0] + R[1]*bias[1] + R[2]*bias[2],
R[3]*bias[0] + R[4]*bias[1] + R[5]*bias[2],
], float)
# low-pass filter R and R*b_hat
R = self.filterVec(R, self._params.tauAcc, accTs, self._coeffs.accLpB, self._coeffs.accLpA,
self._state.motionBiasEstRLpState)
biasLp = self.filterVec(biasLp, self._params.tauAcc, accTs, self._coeffs.accLpB,
self._coeffs.accLpA, self._state.motionBiasEstBiasLpState)
# set measurement error and covariance for the respective Kalman filter update
if self._state.restDetected and self._params.restBiasEstEnabled:
e = self._state.restLastGyrLp - bias
R = np.eye(3)
w = np.full(3, self._coeffs.biasRestW)
elif self._params.motionBiasEstEnabled:
e = np.array([
-accEarth[1]/accTs + biasLp[0] - R[0]*bias[0] - R[1]*bias[1] - R[2]*bias[2],
accEarth[0]/accTs + biasLp[1] - R[3]*bias[0] - R[4]*bias[1] - R[5]*bias[2],
- R[6]*bias[0] - R[7]*bias[1] - R[8]*bias[2],
], float)
R.shape = (3, 3)
w = np.array([self._coeffs.biasMotionW, self._coeffs.biasMotionW, self._coeffs.biasVerticalW], float)
else:
w = None
e = None
# Kalman filter update
# step 1: P = P + V (also increase covariance if there is no measurement update!)
if self._state.biasP[0, 0] < self._coeffs.biasP0:
self._state.biasP[0, 0] += self._coeffs.biasV
if self._state.biasP[1, 1] < self._coeffs.biasP0:
self._state.biasP[1, 1] += self._coeffs.biasV
if self._state.biasP[2, 2] < self._coeffs.biasP0:
self._state.biasP[2, 2] += self._coeffs.biasV
if w is not None:
# clip disagreement to -2..2 °/s
# (this also effectively limits the harm done by the first inclination correction step)
e = np.clip(e, -biasClip, biasClip)
# step 2: K = P R^T inv(W + R P R^T)
K = self._state.biasP @ R.T @ np.linalg.inv(np.diag(w) + R @ self._state.biasP @ R.T)
# step 3: bias = bias + K (y - R bias) = bias + K e
bias += K @ e
# step 4: P = P - K R P
self._state.biasP -= K @ R @ self._state.biasP
# clip bias estimate to -2..2 °/s
bias[:] = np.clip(bias, -biasClip, biasClip)
def updateMag(self, mag):
"""Performs magnetometer update step.
It is only necessary to call this function directly if gyroscope, accelerometers and magnetometers have
different sampling rates. Otherwise, simply use :meth:`update`.
Should be called after :meth:`updateAcc`.
:param mag: magnetometer measurement in arbitrary units -- numpy array with shape (3,)
:return: None
"""
assert mag.shape == (3,)
# ignore [0 0 0] samples
if mag[0] == 0.0 and mag[1] == 0.0 and mag[2] == 0.0:
return
magTs = self._coeffs.magTs
# rest detection
if self._params.restBiasEstEnabled:
magLp = self.filterVec(mag, self._params.restFilterTau, magTs, self._coeffs.restMagLpB,
self._coeffs.restMagLpA, self._state.restMagLpState)
deviation = mag - magLp
squaredDeviation = deviation.dot(deviation)
magNormSquared = magLp.dot(magLp)
if squaredDeviation >= self._params.restThMag**2*magNormSquared:
self._state.restT = 0.0
self._state.restDetected = False
self._state.restLastMagLp = magLp
self._state.restLastSquaredDeviations[2] = squaredDeviation
# bring magnetometer measurement into 6D earth frame
magEarth = self.quatRotate(self.getQuat6D(), mag)
if self._params.magDistRejectionEnabled:
magNormDip = self._state.magNormDip
magNormDip[0] = math.sqrt(magEarth.dot(magEarth))
magNormDip[1] = -math.asin(magEarth[2]/magNormDip[0])
if self._params.magCurrentTau > 0:
magNormDip[:] = self.filterVec(magNormDip, self._params.magCurrentTau, magTs,
self._coeffs.magNormDipLpB, self._coeffs.magNormDipLpA,
self._state.magNormDipLpState)
# magnetic disturbance detection
if abs(magNormDip[0] - self._state.magRefNorm) < self._params.magNormTh*self._state.magRefNorm and \
abs(magNormDip[1] - self._state.magRefDip) < self._params.magDipTh*np.pi/180.0:
self._state.magUndisturbedT += magTs
if self._state.magUndisturbedT >= self._params.magMinUndisturbedTime:
self._state.magDistDetected = False
self._state.magRefNorm += self._coeffs.kMagRef*(magNormDip[0] - self._state.magRefNorm)
self._state.magRefDip += self._coeffs.kMagRef*(magNormDip[1] - self._state.magRefDip)
else:
self._state.magUndisturbedT = 0.0
self._state.magDistDetected = True
# new magnetic field acceptance
if abs(magNormDip[0] - self._state.magCandidateNorm) < self._params.magNormTh*self._state.magCandidateNorm \
and abs(magNormDip[1] - self._state.magCandidateDip) < self._params.magDipTh*np.pi/180.0:
gyrNorm = math.sqrt(self._state.restLastGyrLp.dot(self._state.restLastGyrLp))
if gyrNorm >= self._params.magNewMinGyr*np.pi/180.0:
self._state.magCandidateT += magTs
self._state.magCandidateNorm += self._coeffs.kMagRef*(magNormDip[0] - self._state.magCandidateNorm)
self._state.magCandidateDip += self._coeffs.kMagRef*(magNormDip[1] - self._state.magCandidateDip)
if self._state.magDistDetected and (self._state.magCandidateT >= self._params.magNewTime or (
self._state.magRefNorm == 0.0 and self._state.magCandidateT >= self._params.magNewFirstTime)):
self._state.magRefNorm = self._state.magCandidateNorm
self._state.magRefDip = self._state.magCandidateDip
self._state.magDistDetected = False
self._state.magUndisturbedT = self._params.magMinUndisturbedTime
else:
self._state.magCandidateT = 0.0
self._state.magCandidateNorm = magNormDip[0]
self._state.magCandidateDip = magNormDip[1]
# calculate disagreement angle based on current magnetometer measurement
self._state.lastMagDisAngle = math.atan2(magEarth[0], magEarth[1]) - self._state.delta
# make sure the disagreement angle is in the range [-pi, pi]
if self._state.lastMagDisAngle > np.pi:
self._state.lastMagDisAngle -= 2*np.pi
elif self._state.lastMagDisAngle < -np.pi:
self._state.lastMagDisAngle += 2*np.pi
k = self._coeffs.kMag
if self._params.magDistRejectionEnabled:
# magnetic disturbance rejection
if self._state.magDistDetected:
if self._state.magRejectT <= self._params.magMaxRejectionTime:
self._state.magRejectT += magTs
k = 0
else:
k /= self._params.magRejectionFactor
else:
self._state.magRejectT = max(self._state.magRejectT - self._params.magRejectionFactor*magTs, 0.0)
# ensure fast initial convergence
if self._state.kMagInit != 0.0:
# make sure that the gain k is at least 1/N, N=1,2,3,... in the first few samples
if k < self._state.kMagInit:
k = self._state.kMagInit
# iterative expression to calculate 1/N
self._state.kMagInit = self._state.kMagInit/(self._state.kMagInit+1)
# disable if t > tauMag
if self._state.kMagInit*self._params.tauMag < self._coeffs.magTs:
self._state.kMagInit = 0.0
# first-order filter step
self._state.delta += k*self._state.lastMagDisAngle
# calculate correction angular rate to facilitate debugging
self._state.lastMagCorrAngularRate = k*self._state.lastMagDisAngle/self._coeffs.magTs
# make sure delta is in the range [-pi, pi]
if self._state.delta > np.pi:
self._state.delta -= 2*np.pi
elif self._state.delta < -np.pi:
self._state.delta += 2*np.pi
def update(self, gyr, acc, mag=None):
"""Performs filter update step for one sample.
        :param gyr: gyroscope measurement in rad/s -- numpy array with shape (3,)
        :param acc: accelerometer measurement in m/s² -- numpy array with shape (3,)
        :param mag: optional magnetometer measurement in arbitrary units -- numpy array with shape (3,)
:return: None
"""
self.updateGyr(gyr)
self.updateAcc(acc)
if mag is not None:
self.updateMag(mag)
def updateBatch(self, gyr, acc, mag=None):
"""Performs batch update for multiple samples at once.
In order to use this function, all input data must have the same sampling rate and be provided as a
numpy array. The output is a dictionary containing
- **quat6D** -- the 6D quaternion -- numpy array with shape (N, 4)
- **bias** -- gyroscope bias estimate in rad/s -- numpy array with shape (N, 3)
- **biasSigma** -- uncertainty of gyroscope bias estimate in rad/s -- numpy array with shape (N,)
- **restDetected** -- rest detection state -- boolean numpy array with shape (N,)
in all cases and if magnetometer data is provided additionally
- **quat9D** -- the 9D quaternion -- numpy array with shape (N, 4)
- **delta** -- heading difference angle between 6D and 9D quaternion in rad -- numpy array with shape (N,)
- **magDistDetected** -- magnetic disturbance detection state -- boolean numpy array with shape (N,)
:param gyr: gyroscope measurement in rad/s -- numpy array with shape (N,3)
:param acc: accelerometer measurement in m/s² -- numpy array with shape (N,3)
:param mag: optional magnetometer measurement in arbitrary units -- numpy array with shape (N,3)
:return: dict with entries as described above
"""
N = gyr.shape[0]
assert acc.shape == gyr.shape
assert gyr.shape == (N, 3)
out6D = np.empty((N, 4))
outBias = np.empty((N, 3))
outBiasSigma = np.empty((N,))
outRest = np.empty(N, dtype=bool)
if mag is not None:
assert mag.shape == gyr.shape
out9D = np.empty((N, 4))
outDelta = np.empty((N,))
outMagDist = np.empty(N, dtype=bool)
for i in range(N):
self.update(gyr[i], acc[i], mag[i])
out6D[i] = self.getQuat6D()
out9D[i] = self.getQuat9D()
outDelta[i] = self._state.delta
outBias[i], outBiasSigma[i] = self.getBiasEstimate()
outRest[i] = self._state.restDetected
outMagDist[i] = self._state.magDistDetected
return dict(quat6D=out6D, quat9D=out9D, delta=outDelta, bias=outBias, biasSigma=outBiasSigma,
restDetected=outRest, magDistDetected=outMagDist)
else:
for i in range(N):
self.update(gyr[i], acc[i])
out6D[i] = self.getQuat6D()
outBias[i], outBiasSigma[i] = self.getBiasEstimate()
outRest[i] = self._state.restDetected
return dict(quat6D=out6D, bias=outBias, biasSigma=outBiasSigma, restDetected=outRest)
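    # Hypothetical usage sketch of updateBatch (synthetic data, not part of the
    # original file):
    #
    #   vqf = PyVQF(0.01)                                  # 100 Hz sampling
    #   gyr = np.zeros((1000, 3))
    #   acc = np.tile(np.array([0.0, 0.0, 9.81]), (1000, 1))
    #   out = vqf.updateBatch(gyr, acc)
    #   print(out['quat6D'][-1], out['restDetected'][-1])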
def getQuat3D(self):
r"""Returns the angular velocity strapdown integration quaternion
:math:`^{\mathcal{S}_i}_{\mathcal{I}_i}\mathbf{q}`.
:return: quaternion as numpy array with shape (4,)
"""
return self._state.gyrQuat.copy()
def getQuat6D(self):
r"""Returns the 6D (magnetometer-free) orientation quaternion
:math:`^{\mathcal{S}_i}_{\mathcal{E}_i}\mathbf{q}`.
:return: quaternion as numpy array with shape (4,)
"""
return self.quatMultiply(self._state.accQuat, self._state.gyrQuat)
def getQuat9D(self):
r"""Returns the 9D (with magnetometers) orientation quaternion
:math:`^{\mathcal{S}_i}_{\mathcal{E}}\mathbf{q}`.
:return: quaternion as numpy array with shape (4,)
"""
return self.quatApplyDelta(self.quatMultiply(self._state.accQuat, self._state.gyrQuat), self._state.delta)
def getDelta(self):
r""" Returns the heading difference :math:`\delta` between :math:`\mathcal{E}_i` and :math:`\mathcal{E}`.
:math:`^{\mathcal{E}_i}_{\mathcal{E}}\mathbf{q} = \begin{bmatrix}\cos\frac{\delta}{2} & 0 & 0 &
\sin\frac{\delta}{2}\end{bmatrix}^T`.
:return: delta angle in rad (:cpp:member:`VQFState::delta`)
"""
return self._state.delta
def getBiasEstimate(self):
"""Returns the current gyroscope bias estimate and the uncertainty.
The returned standard deviation sigma represents the estimation uncertainty in the worst direction and is based
on an upper bound of the largest eigenvalue of the covariance matrix.
:return: gyroscope bias estimate (rad/s) as (3,) numpy array and standard deviation sigma of the estimation
uncertainty (rad/s)
"""
# use largest absolute row sum as upper bound estimate for largest eigenvalue (Gershgorin circle theorem)
# and clip output to biasSigmaInit
P = min(np.max(np.sum(np.abs(self._state.biasP), axis=1)), self._coeffs.biasP0)
sigma = np.sqrt(P)*np.pi/100.0/180.0
return self._state.bias.copy(), sigma
def setBiasEstimate(self, bias, sigma):
"""Sets the current gyroscope bias estimate and the uncertainty.
If a value for the uncertainty sigma is given, the covariance matrix is set to a corresponding scaled identity
matrix.
:param bias: gyroscope bias estimate (rad/s)
:param sigma: standard deviation of the estimation uncertainty (rad/s) - set to -1 (default) in order to not
change the estimation covariance matrix
"""
assert bias.shape == (3,)
self._state.bias[:] = bias
if sigma > 0:
self._state.biasP = (sigma*180.0*100.0/np.pi)**2 * np.eye(3)
def getRestDetected(self):
"""Returns true if rest was detected."""
return self._state.restDetected
def getMagDistDetected(self):
"""Returns true if a disturbed magnetic field was detected."""
return self._state.magDistDetected
def getRelativeRestDeviations(self):
"""Returns the relative deviations used in rest detection.
Looking at those values can be useful to understand how rest detection is working and which thresholds are
suitable. The output array is filled with the last values for gyroscope, accelerometer, and magnetometer,
relative to the threshold. In order for rest to be detected, all values must stay below 1.
:return: relative rest deviations as (3,) numpy array
"""
magNormSquared = self._state.restLastMagLp.dot(self._state.restLastMagLp)
return np.array([
np.sqrt(self._state.restLastSquaredDeviations[0]) / (self._params.restThGyr * np.pi / 180.0),
np.sqrt(self._state.restLastSquaredDeviations[1]) / self._params.restThAcc,
np.sqrt(self._state.restLastSquaredDeviations[2] / magNormSquared) / self._params.restThMag,
], float)
def getMagRefNorm(self):
"""Returns the norm of the currently accepted magnetic field reference."""
return self._state.magRefNorm
def getMagRefDip(self):
"""Returns the dip angle of the currently accepted magnetic field reference."""
return self._state.magRefDip
def setMagRef(self, norm, dip):
"""Overwrites the current magnetic field reference.
:param norm: norm of the magnetic field reference
:param dip: dip angle of the magnetic field reference
"""
self._state.magRefNorm = norm
self._state.magRefDip = dip
def setTauAcc(self, tauAcc):
r"""Sets the time constant for accelerometer low-pass filtering.
For more details, see :cpp:member:`VQFParams::tauAcc`.
:param tauAcc: time constant :math:`\tau_\mathrm{acc}` in seconds
"""
if self._params.tauAcc == tauAcc:
return
self._params.tauAcc = tauAcc
newB, newA = self.filterCoeffs(self._params.tauAcc, self._coeffs.accTs)
self.filterAdaptStateForCoeffChange(self._state.lastAccLp, self._coeffs.accLpB, self._coeffs.accLpA,
newB, newA, self._state.accLpState)
# For R and biasLP, the last value is not saved in the state.
# Since b0 is small (at reasonable settings), the last output is close to state[0].
self.filterAdaptStateForCoeffChange(self._state.motionBiasEstRLpState[0].copy(), self._coeffs.accLpB,
self._coeffs.accLpA, newB, newA, self._state.motionBiasEstRLpState)
self.filterAdaptStateForCoeffChange(self._state.motionBiasEstBiasLpState[0].copy(), self._coeffs.accLpB,
self._coeffs.accLpA, newB, newA, self._state.motionBiasEstBiasLpState)
self._coeffs.accLpB = newB
self._coeffs.accLpA = newA
def setTauMag(self, tauMag):
r"""Sets the time constant for the magnetometer update.
For more details, see :cpp:member:`VQFParams::tauMag`.
:param tauMag: time constant :math:`\tau_\mathrm{mag}` in seconds
"""
self._params.tauMag = tauMag
self._coeffs.kMag = self.gainFromTau(self._params.tauMag, self._coeffs.magTs)
def setMotionBiasEstEnabled(self, enabled):
"""Enables/disabled gyroscope bias estimation during motion."""
if self._params.motionBiasEstEnabled == enabled:
return
self._params.motionBiasEstEnabled = enabled
self._state.motionBiasEstRLpState = np.full((2, 9), np.nan, float)
self._state.motionBiasEstBiasLpState = np.full((2, 2), np.nan, float)
def setRestBiasEstEnabled(self, enabled):
"""Enables/disables rest detection and bias estimation during rest."""
if self._params.restBiasEstEnabled == enabled:
return
self._params.restBiasEstEnabled = enabled
self._state.restDetected = False
self._state.restLastSquaredDeviations = np.zeros(3, float)
self._state.restT = 0.0
self._state.restLastGyrLp = np.zeros(3, float)
self._state.restGyrLpState = np.full((2, 3), np.nan, float)
self._state.restLastAccLp = np.zeros(3, float)
self._state.restAccLpState = np.full((2, 3), np.nan, float)
self._state.restLastMagLp = np.zeros(3, float)
self._state.restMagLpState = np.full((2, 3), np.nan, float)
def setMagDistRejectionEnabled(self, enabled):
"""Enables/disables magnetic disturbance detection and rejection."""
if self._params.magDistRejectionEnabled == enabled:
return
self._params.magDistRejectionEnabled = enabled
self._state.magDistDetected = True
self._state.magRefNorm = 0.0
self._state.magRefDip = 0.0
self._state.magUndisturbedT = 0.0
self._state.magRejectT = self._params.magMaxRejectionTime
self._state.magCandidateNorm = -1.0
self._state.magCandidateDip = 0.0
self._state.magCandidateT = 0.0
self._state.magNormDip = np.zeros(2, float)
self._state.magNormDipLpState = np.full((2, 2), np.nan, float)
def setRestDetectionThresholds(self, thGyr, thAcc, thMag):
"""Sets the current thresholds for rest detection.
:param thGyr: new value for :cpp:member:`VQFParams::restThGyr`
:param thAcc: new value for :cpp:member:`VQFParams::restThAcc`
:param thMag: new value for :cpp:member:`VQFParams::restThMag`
"""
self._params.restThGyr = thGyr
self._params.restThAcc = thAcc
self._params.restThMag = thMag
@property
def params(self):
"""Read-only property to access the current parameters.
:return: dict with entries corresponding to :cpp:struct:`VQFParams`
"""
return copy.deepcopy(self._params)
@property
def coeffs(self):
"""Read-only property to access the coefficients used by the algorithm.
:return: dict with entries corresponding to :cpp:struct:`VQFCoefficients`
"""
return copy.deepcopy(self._coeffs)
@property
def state(self):
"""Property to access the current state.
This property can be written to in order to set a completely arbitrary filter state, which is intended for
debugging purposes. However, note that the returned dict is a copy of the state and changing elements of this
dict will not influence the actual state. In order to modify the state, access the state, change some elements
and then replace the whole state with the modified copy, e.g.
.. code-block::
            # does not work: vqf.state['delta'] = 0
state = vqf.state
state['delta'] = 0
vqf.state = state
:return: dict with entries corresponding to :cpp:struct:`VQFState`
"""
return dataclasses.asdict(self._state)
@state.setter
def state(self, state):
assert state.keys() == {f.name for f in dataclasses.fields(PyVQFState)}
for k in state:
assert isinstance(state[k], type(getattr(self._state, k)))
if isinstance(state[k], np.ndarray):
assert state[k].dtype == getattr(self._state, k).dtype
assert state[k].shape == getattr(self._state, k).shape
self._state = PyVQFState(**copy.deepcopy(state))
def resetState(self):
"""Resets the state to the default values at initialization.
Resetting the state is equivalent to creating a new instance of this class.
"""
self._state = PyVQFState()
self._state.biasP = self._coeffs.biasP0*np.eye(3)
self._state.magRejectT = self._params.magMaxRejectionTime
@staticmethod
def quatMultiply(q1, q2):
r"""Performs quaternion multiplication (:math:`\mathbf{q}_\mathrm{out} = \mathbf{q}_1 \otimes \mathbf{q}_2`).
:param q1: input quaternion 1 -- numpy array with shape (4,)
:param q2: input quaternion 2 -- numpy array with shape (4,)
:return: output quaternion -- numpy array with shape (4,)
"""
assert q1.shape == (4,)
assert q2.shape == (4,)
q10, q11, q12, q13 = q1.tolist()
q20, q21, q22, q23 = q2.tolist()
w = q10 * q20 - q11 * q21 - q12 * q22 - q13 * q23
x = q10 * q21 + q11 * q20 + q12 * q23 - q13 * q22
y = q10 * q22 - q11 * q23 + q12 * q20 + q13 * q21
z = q10 * q23 + q11 * q22 - q12 * q21 + q13 * q20
return np.array([w, x, y, z], float)
@staticmethod
def quatConj(q):
r"""Calculates the quaternion conjugate (:math:`\mathbf{q}_\mathrm{out} = \mathbf{q}^*`).
:param q: input quaternion -- numpy array with shape (4,)
:return: output quaternion -- numpy array with shape (4,)
"""
assert q.shape == (4,)
return np.array([q[0], -q[1], -q[2], -q[3]], float)
@staticmethod
def quatApplyDelta(q, delta):
r""" Applies a heading rotation by the angle delta (in rad) to a quaternion.
:math:`\mathbf{q}_\mathrm{out} = \begin{bmatrix}\cos\frac{\delta}{2} & 0 & 0 &
\sin\frac{\delta}{2}\end{bmatrix} \otimes \mathbf{q}`
:param q: input quaternion -- numpy array with shape (4,)
:param delta: heading rotation angle in rad
:return: output quaternion -- numpy array with shape (4,)
"""
assert q.shape == (4,)
c = np.cos(delta/2)
s =
|
np.sin(delta/2)
|
numpy.sin
|
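A minimal usage sketch of the batch interface documented above (updateBatch and the getQuat6D/getBiasEstimate accessors). The filter class itself and its constructor are assumed to exist elsewhere; only the method names and the keys of the returned dict are taken from the code above.
import numpy as np
def run_batch_6d(vqf_filter, n=100):
    # stationary IMU: zero angular rate, gravity along +z (units as in the docstrings above)
    gyr = np.zeros((n, 3))                    # rad/s
    acc = np.tile([0.0, 0.0, 9.81], (n, 1))   # m/s^2
    out = vqf_filter.updateBatch(gyr, acc)    # 6D mode, no magnetometer
    return out["quat6D"], out["bias"], out["restDetected"]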
import numpy as np
import pandas as pd
def Outputs(data):
return np.round(1.-(1./(1.+np.exp(-data))))
def GeneticFunction(data):
return ((np.minimum( ((((0.058823499828577 + data["Sex"]) - np.cos((data["Pclass"] / 2.0))) * 2.0)), ((0.885868))) * 2.0) +
np.maximum( ((data["SibSp"] - 2.409090042114258)), ( -(np.minimum( (data["Sex"]), (np.sin(data["Parch"]))) * data["Pclass"]))) +
(0.138462007045746 * ((np.minimum( (data["Sex"]), (((data["Parch"] / 2.0) / 2.0))) * data["Age"]) - data["Cabin"])) +
np.minimum( ((np.sin((data["Parch"] * ((data["Fare"] - 0.720430016517639) * 2.0))) * 2.0)), ((data["SibSp"] / 2.0))) +
np.maximum( (np.minimum( ( -np.cos(data["Embarked"])), (0.138462007045746))), (np.sin(((data["Cabin"] - data["Fare"]) * 2.0)))) +
-np.minimum( ((((data["Age"] * data["Parch"]) * data["Embarked"]) + data["Parch"])), (np.sin(data["Pclass"]))) +
np.minimum( (data["Sex"]), ((np.sin( -(data["Fare"] * np.cos((data["Fare"] * 1.630429983139038)))) / 2.0))) +
np.minimum( ((0.230145)), (np.sin(np.minimum( (((67.0 / 2.0) * np.sin(data["Fare"]))), (0.31830988618379069))))) +
np.sin((np.sin(data["Cabin"]) * (np.sin((12.6275)) * np.maximum( (data["Age"]), (data["Fare"]))))) +
np.sin(((np.minimum( (data["Fare"]), ((data["Cabin"] * data["Embarked"]))) / 2.0) * -data["Fare"])) +
np.minimum( (((2.675679922103882 * data["SibSp"]) * np.sin(((96) * np.sin(data["Cabin"]))))), (data["Parch"])) +
np.sin(np.sin((np.maximum( (np.minimum( (data["Age"]), (data["Cabin"]))), ((data["Fare"] * 0.31830988618379069))) * data["Cabin"]))) +
np.maximum( (np.sin(((12.4148) * (data["Age"] / 2.0)))), (np.sin((-3.0 * data["Cabin"])))) +
(np.minimum( (np.sin((((np.sin(((data["Fare"] * 2.0) * 2.0)) * 2.0) * 2.0) * 2.0))), (data["SibSp"])) / 2.0) +
((data["Sex"] - data["SibSp"]) * (np.cos(((data["Embarked"] - 0.730768978595734) + data["Age"])) / 2.0)) +
((np.sin(data["Cabin"]) / 2.0) - (np.cos(np.minimum( (data["Age"]), (data["Embarked"]))) * np.sin(data["Embarked"]))) +
np.minimum( (0.31830988618379069), ((data["Sex"] * (2.212120056152344 * (0.720430016517639 - np.sin((data["Age"] * 2.0))))))) +
(np.minimum( (np.cos(data["Fare"])), (np.maximum( (np.sin(data["Age"])), (data["Parch"])))) * np.cos((data["Fare"] / 2.0))) +
np.sin((data["Parch"] * np.minimum( ((data["Age"] - 1.5707963267948966)), ((np.cos((data["Pclass"] * 2.0)) / 2.0))))) +
(data["Parch"] * (np.sin(((data["Fare"] * (0.623655974864960 * data["Age"])) * 2.0)) / 2.0)) +
(0.31830988618379069 * np.cos(np.maximum( ((0.602940976619720 * data["Fare"])), ((np.sin(0.720430016517639) * data["Age"]))))) +
(np.minimum( ((data["SibSp"] / 2.0)), (np.sin(((data["Pclass"] - data["Fare"]) * data["SibSp"])))) * data["SibSp"]) +
np.tanh((data["Sex"] * np.sin((5.199999809265137 * np.sin((data["Cabin"] * np.cos(data["Fare"]))))))) +
(np.minimum( (data["Parch"]), (data["Sex"])) * np.cos(np.maximum( ((np.cos(data["Parch"]) + data["Age"])), (3.1415926535897931)))) +
(np.minimum( (np.tanh(((data["Cabin"] / 2.0) + data["Parch"]))), ((data["Sex"] + np.cos(data["Age"])))) / 2.0) +
(np.sin((np.sin(data["Sex"]) * (np.sin((data["Age"] * data["Pclass"])) * data["Pclass"]))) / 2.0) +
(data["Sex"] * (np.cos(((data["Sex"] + data["Fare"]) * ((8.48635) * (63)))) / 2.0)) +
np.minimum( (data["Sex"]), ((np.cos((data["Age"] * np.tanh(np.sin(np.cos(data["Fare"]))))) / 2.0))) +
(np.tanh(np.tanh( -np.cos((np.maximum( (np.cos(data["Fare"])), (0.094339601695538)) * data["Age"])))) / 2.0) +
(np.tanh(np.cos((np.cos(data["Age"]) + (data["Age"] + np.minimum( (data["Fare"]), (data["Age"])))))) / 2.0) +
(np.tanh(np.cos((data["Age"] * ((-2.0 + np.sin(data["SibSp"])) + data["Fare"])))) / 2.0) +
(np.minimum( (((281) - data["Fare"])), (np.sin((np.maximum( ((176)), (data["Fare"])) * data["SibSp"])))) * 2.0) +
np.sin(((np.maximum( (data["Embarked"]), (data["Age"])) * 2.0) * (((785) * 3.1415926535897931) * data["Age"]))) +
np.minimum( (data["Sex"]), (np.sin( -(np.minimum( ((data["Cabin"] / 2.0)), (data["SibSp"])) * (data["Fare"] / 2.0))))) +
np.sin(np.sin((data["Cabin"] * (data["Embarked"] + (np.tanh( -data["Age"]) + data["Fare"]))))) +
(np.cos(np.cos(data["Fare"])) * (np.sin((data["Embarked"] - ((734) * data["Fare"]))) / 2.0)) +
((np.minimum( (data["SibSp"]), (np.cos(data["Fare"]))) * np.cos(data["SibSp"])) * np.sin((data["Age"] / 2.0))) +
(np.sin((np.sin((data["SibSp"] * np.cos((data["Fare"] * 2.0)))) + (data["Cabin"] * 2.0))) / 2.0) +
(((data["Sex"] * data["SibSp"]) * np.sin(np.sin( -(data["Fare"] * data["Cabin"])))) * 2.0) +
(np.sin((data["SibSp"] * ((((5.428569793701172 + 67.0) * 2.0) / 2.0) * data["Age"]))) / 2.0) +
(data["Pclass"] * (np.sin(((data["Embarked"] * data["Cabin"]) * (data["Age"] - (1.07241)))) / 2.0)) +
(np.cos((((( -data["SibSp"] + data["Age"]) + data["Parch"]) * data["Embarked"]) / 2.0)) / 2.0) +
(0.31830988618379069 * np.sin(((data["Age"] * ((data["Embarked"] * np.sin(data["Fare"])) * 2.0)) * 2.0))) +
((np.minimum( ((data["Age"] * 0.058823499828577)), (data["Sex"])) - 0.63661977236758138) * np.tanh(np.sin(data["Pclass"]))) +
-np.minimum( ((np.cos(((727) * ((data["Fare"] + data["Parch"]) * 2.0))) / 2.0)), (data["Fare"])) +
(np.minimum( (np.cos(data["Fare"])), (data["SibSp"])) * np.minimum( (np.sin(data["Parch"])), (np.cos((data["Embarked"] * 2.0))))) +
(np.minimum( (((data["Fare"] / 2.0) - 2.675679922103882)), (0.138462007045746)) * np.sin((1.5707963267948966 * data["Age"]))) +
np.minimum( ((0.0821533)), (((
|
np.sin(data["Fare"])
|
numpy.sin
|
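A short sketch of how the evolved expression above would typically be applied. GeneticFunction and Outputs are the functions defined in the snippet; the assumption (not stated above) is that the DataFrame columns Sex, Pclass, Age, SibSp, Parch, Fare, Cabin and Embarked have already been encoded numerically.
import numpy as np
import pandas as pd
def predict_survival(df):
    # raw score from the evolved expression, then a 0/1 label via round(1 - sigmoid(raw))
    raw = GeneticFunction(df)
    return Outputs(raw)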
import dgl
import time
import numpy as np
from multiprocessing import Process
from scipy import sparse as spsp
import mxnet as mx
import backend as F
import unittest
num_nodes = 100
num_edges = int(num_nodes * num_nodes * 0.1)
def worker_func(worker_id):
time.sleep(3)
print("worker starts")
np.random.seed(0)
csr = (spsp.random(num_nodes, num_nodes, density=0.1, format='csr') != 0).astype(np.int64)
g = dgl.contrib.graph_store.create_graph_from_store("test_graph5", "shared_mem")
# Verify the graph structure loaded from the shared memory.
src, dst = g.all_edges()
coo = csr.tocoo()
assert F.array_equal(dst, F.tensor(coo.row))
assert F.array_equal(src, F.tensor(coo.col))
assert F.array_equal(g.ndata['feat'][0], F.tensor(np.arange(10), dtype=np.float32))
assert F.array_equal(g.edata['feat'][0], F.tensor(np.arange(10), dtype=np.float32))
g.ndata['test4'] = mx.nd.zeros((g.number_of_nodes(), 10))
g.edata['test4'] = mx.nd.zeros((g.number_of_edges(), 10))
if worker_id == 0:
time.sleep(3)
g.ndata['test4'][0] = 1
g.edata['test4'][0] = 2
else:
time.sleep(5)
assert np.all(g.ndata['test4'][0].asnumpy() == 1)
assert np.all(g.edata['test4'][0].asnumpy() == 2)
g.destroy()
def server_func(num_workers):
print("server starts")
|
np.random.seed(0)
|
numpy.random.seed
|
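A sketch of how the worker and server functions above are usually driven: one server process creates the shared-memory graph store while several workers attach to it. The body of server_func is cut off above, so only the process orchestration with the standard multiprocessing API is shown here.
def run_shared_mem_test(num_workers=2):
    serv_p = Process(target=server_func, args=(num_workers,))
    work_ps = [Process(target=worker_func, args=(i,)) for i in range(num_workers)]
    serv_p.start()
    for p in work_ps:
        p.start()
    for p in work_ps:
        p.join()
    serv_p.join()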
from __future__ import division, absolute_import, print_function
import sys
import warnings
from decimal import Decimal
import numpy as np
from numpy.testing import *
class TestEinSum(TestCase):
def test_einsum_errors(self):
# Need enough arguments
assert_raises(ValueError, np.einsum)
assert_raises(ValueError, np.einsum, "")
# subscripts must be a string
assert_raises(TypeError, np.einsum, 0, 0)
# out parameter must be an array
assert_raises(TypeError, np.einsum, "", 0, out='test')
# order parameter must be a valid order
assert_raises(TypeError, np.einsum, "", 0, order='W')
# casting parameter must be a valid casting
assert_raises(ValueError, np.einsum, "", 0, casting='blah')
# dtype parameter must be a valid dtype
assert_raises(TypeError, np.einsum, "", 0, dtype='bad_data_type')
# other keyword arguments are rejected
assert_raises(TypeError, np.einsum, "", 0, bad_arg=0)
# issue 4528 revealed a segfault with this call
assert_raises(TypeError, np.einsum, *(None,)*63)
# number of operands must match count in subscripts string
assert_raises(ValueError, np.einsum, "", 0, 0)
assert_raises(ValueError, np.einsum, ",", 0, [0], [0])
assert_raises(ValueError, np.einsum, ",", [0])
# can't have more subscripts than dimensions in the operand
assert_raises(ValueError, np.einsum, "i", 0)
assert_raises(ValueError, np.einsum, "ij", [0, 0])
assert_raises(ValueError, np.einsum, "...i", 0)
assert_raises(ValueError, np.einsum, "i...j", [0, 0])
assert_raises(ValueError, np.einsum, "i...", 0)
assert_raises(ValueError, np.einsum, "ij...", [0, 0])
# invalid ellipsis
assert_raises(ValueError, np.einsum, "i..", [0, 0])
assert_raises(ValueError, np.einsum, ".i...", [0, 0])
assert_raises(ValueError, np.einsum, "j->..j", [0, 0])
assert_raises(ValueError, np.einsum, "j->.j...", [0, 0])
# invalid subscript character
assert_raises(ValueError, np.einsum, "i%...", [0, 0])
assert_raises(ValueError, np.einsum, "...j$", [0, 0])
assert_raises(ValueError, np.einsum, "i->&", [0, 0])
# output subscripts must appear in input
assert_raises(ValueError, np.einsum, "i->ij", [0, 0])
# output subscripts may only be specified once
assert_raises(ValueError, np.einsum, "ij->jij", [[0, 0], [0, 0]])
        # dimensions must match when being collapsed
assert_raises(ValueError, np.einsum, "ii", np.arange(6).reshape(2, 3))
assert_raises(ValueError, np.einsum, "ii->i", np.arange(6).reshape(2, 3))
# broadcasting to new dimensions must be enabled explicitly
assert_raises(ValueError, np.einsum, "i", np.arange(6).reshape(2, 3))
assert_raises(ValueError, np.einsum, "i->i", [[0, 1], [0, 1]],
out=np.arange(4).reshape(2, 2))
def test_einsum_views(self):
# pass-through
a = np.arange(6)
a.shape = (2, 3)
b = np.einsum("...", a)
assert_(b.base is a)
b = np.einsum(a, [Ellipsis])
assert_(b.base is a)
b = np.einsum("ij", a)
assert_(b.base is a)
assert_equal(b, a)
b = np.einsum(a, [0, 1])
assert_(b.base is a)
assert_equal(b, a)
# transpose
a = np.arange(6)
a.shape = (2, 3)
b = np.einsum("ji", a)
assert_(b.base is a)
assert_equal(b, a.T)
b = np.einsum(a, [1, 0])
assert_(b.base is a)
assert_equal(b, a.T)
# diagonal
a = np.arange(9)
a.shape = (3, 3)
b = np.einsum("ii->i", a)
assert_(b.base is a)
assert_equal(b, [a[i, i] for i in range(3)])
b = np.einsum(a, [0, 0], [0])
assert_(b.base is a)
assert_equal(b, [a[i, i] for i in range(3)])
# diagonal with various ways of broadcasting an additional dimension
a = np.arange(27)
a.shape = (3, 3, 3)
b = np.einsum("...ii->...i", a)
assert_(b.base is a)
assert_equal(b, [[x[i, i] for i in range(3)] for x in a])
b = np.einsum(a, [Ellipsis, 0, 0], [Ellipsis, 0])
assert_(b.base is a)
assert_equal(b, [[x[i, i] for i in range(3)] for x in a])
b = np.einsum("ii...->...i", a)
assert_(b.base is a)
assert_equal(b, [[x[i, i] for i in range(3)]
for x in a.transpose(2, 0, 1)])
b = np.einsum(a, [0, 0, Ellipsis], [Ellipsis, 0])
assert_(b.base is a)
assert_equal(b, [[x[i, i] for i in range(3)]
for x in a.transpose(2, 0, 1)])
b = np.einsum("...ii->i...", a)
assert_(b.base is a)
assert_equal(b, [a[:, i, i] for i in range(3)])
b = np.einsum(a, [Ellipsis, 0, 0], [0, Ellipsis])
assert_(b.base is a)
assert_equal(b, [a[:, i, i] for i in range(3)])
b = np.einsum("jii->ij", a)
assert_(b.base is a)
assert_equal(b, [a[:, i, i] for i in range(3)])
b = np.einsum(a, [1, 0, 0], [0, 1])
assert_(b.base is a)
assert_equal(b, [a[:, i, i] for i in range(3)])
b = np.einsum("ii...->i...", a)
assert_(b.base is a)
assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)])
b = np.einsum(a, [0, 0, Ellipsis], [0, Ellipsis])
assert_(b.base is a)
assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)])
b = np.einsum("i...i->i...", a)
assert_(b.base is a)
assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)])
b = np.einsum(a, [0, Ellipsis, 0], [0, Ellipsis])
assert_(b.base is a)
assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)])
b = np.einsum("i...i->...i", a)
assert_(b.base is a)
assert_equal(b, [[x[i, i] for i in range(3)]
for x in a.transpose(1, 0, 2)])
b = np.einsum(a, [0, Ellipsis, 0], [Ellipsis, 0])
assert_(b.base is a)
assert_equal(b, [[x[i, i] for i in range(3)]
for x in a.transpose(1, 0, 2)])
# triple diagonal
a = np.arange(27)
a.shape = (3, 3, 3)
b = np.einsum("iii->i", a)
assert_(b.base is a)
assert_equal(b, [a[i, i, i] for i in range(3)])
b = np.einsum(a, [0, 0, 0], [0])
assert_(b.base is a)
assert_equal(b, [a[i, i, i] for i in range(3)])
# swap axes
a = np.arange(24)
a.shape = (2, 3, 4)
b = np.einsum("ijk->jik", a)
assert_(b.base is a)
assert_equal(b, a.swapaxes(0, 1))
b = np.einsum(a, [0, 1, 2], [1, 0, 2])
assert_(b.base is a)
assert_equal(b, a.swapaxes(0, 1))
def check_einsum_sums(self, dtype):
# Check various sums. Does many sizes to exercise unrolled loops.
# sum(a, axis=-1)
for n in range(1, 17):
a = np.arange(n, dtype=dtype)
assert_equal(np.einsum("i->", a), np.sum(a, axis=-1).astype(dtype))
assert_equal(np.einsum(a, [0], []),
np.sum(a, axis=-1).astype(dtype))
for n in range(1, 17):
a = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n)
assert_equal(np.einsum("...i->...", a),
np.sum(a, axis=-1).astype(dtype))
assert_equal(np.einsum(a, [Ellipsis, 0], [Ellipsis]),
np.sum(a, axis=-1).astype(dtype))
# sum(a, axis=0)
for n in range(1, 17):
a = np.arange(2*n, dtype=dtype).reshape(2, n)
assert_equal(np.einsum("i...->...", a),
np.sum(a, axis=0).astype(dtype))
assert_equal(np.einsum(a, [0, Ellipsis], [Ellipsis]),
np.sum(a, axis=0).astype(dtype))
for n in range(1, 17):
a = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n)
assert_equal(np.einsum("i...->...", a),
np.sum(a, axis=0).astype(dtype))
assert_equal(np.einsum(a, [0, Ellipsis], [Ellipsis]),
np.sum(a, axis=0).astype(dtype))
# trace(a)
for n in range(1, 17):
a = np.arange(n*n, dtype=dtype).reshape(n, n)
assert_equal(np.einsum("ii", a), np.trace(a).astype(dtype))
assert_equal(np.einsum(a, [0, 0]), np.trace(a).astype(dtype))
# multiply(a, b)
assert_equal(np.einsum("..., ...", 3, 4), 12) # scalar case
for n in range(1, 17):
a = np.arange(3*n, dtype=dtype).reshape(3, n)
b = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n)
assert_equal(np.einsum("..., ...", a, b), np.multiply(a, b))
assert_equal(np.einsum(a, [Ellipsis], b, [Ellipsis]),
np.multiply(a, b))
# inner(a,b)
for n in range(1, 17):
a = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n)
b = np.arange(n, dtype=dtype)
assert_equal(np.einsum("...i, ...i", a, b), np.inner(a, b))
assert_equal(np.einsum(a, [Ellipsis, 0], b, [Ellipsis, 0]),
np.inner(a, b))
for n in range(1, 11):
a = np.arange(n*3*2, dtype=dtype).reshape(n, 3, 2)
b = np.arange(n, dtype=dtype)
assert_equal(np.einsum("i..., i...", a, b), np.inner(a.T, b.T).T)
assert_equal(np.einsum(a, [0, Ellipsis], b, [0, Ellipsis]),
np.inner(a.T, b.T).T)
# outer(a,b)
for n in range(1, 17):
a = np.arange(3, dtype=dtype)+1
b = np.arange(n, dtype=dtype)+1
assert_equal(np.einsum("i,j", a, b), np.outer(a, b))
assert_equal(np.einsum(a, [0], b, [1]), np.outer(a, b))
# Suppress the complex warnings for the 'as f8' tests
with warnings.catch_warnings():
warnings.simplefilter('ignore', np.ComplexWarning)
# matvec(a,b) / a.dot(b) where a is matrix, b is vector
for n in range(1, 17):
a = np.arange(4*n, dtype=dtype).reshape(4, n)
b = np.arange(n, dtype=dtype)
assert_equal(np.einsum("ij, j", a, b), np.dot(a, b))
assert_equal(np.einsum(a, [0, 1], b, [1]), np.dot(a, b))
c = np.arange(4, dtype=dtype)
np.einsum("ij,j", a, b, out=c,
dtype='f8', casting='unsafe')
assert_equal(c,
np.dot(a.astype('f8'),
b.astype('f8')).astype(dtype))
c[...] = 0
np.einsum(a, [0, 1], b, [1], out=c,
dtype='f8', casting='unsafe')
assert_equal(c,
np.dot(a.astype('f8'),
b.astype('f8')).astype(dtype))
for n in range(1, 17):
a = np.arange(4*n, dtype=dtype).reshape(4, n)
b = np.arange(n, dtype=dtype)
assert_equal(np.einsum("ji,j", a.T, b.T), np.dot(b.T, a.T))
assert_equal(np.einsum(a.T, [1, 0], b.T, [1]), np.dot(b.T, a.T))
c = np.arange(4, dtype=dtype)
np.einsum("ji,j", a.T, b.T, out=c, dtype='f8', casting='unsafe')
assert_equal(c,
np.dot(b.T.astype('f8'),
a.T.astype('f8')).astype(dtype))
c[...] = 0
np.einsum(a.T, [1, 0], b.T, [1], out=c,
dtype='f8', casting='unsafe')
assert_equal(c,
np.dot(b.T.astype('f8'),
a.T.astype('f8')).astype(dtype))
# matmat(a,b) / a.dot(b) where a is matrix, b is matrix
for n in range(1, 17):
if n < 8 or dtype != 'f2':
a = np.arange(4*n, dtype=dtype).reshape(4, n)
b = np.arange(n*6, dtype=dtype).reshape(n, 6)
assert_equal(np.einsum("ij,jk", a, b), np.dot(a, b))
assert_equal(np.einsum(a, [0, 1], b, [1, 2]), np.dot(a, b))
for n in range(1, 17):
a = np.arange(4*n, dtype=dtype).reshape(4, n)
b = np.arange(n*6, dtype=dtype).reshape(n, 6)
c = np.arange(24, dtype=dtype).reshape(4, 6)
np.einsum("ij,jk", a, b, out=c, dtype='f8', casting='unsafe')
assert_equal(c,
np.dot(a.astype('f8'),
b.astype('f8')).astype(dtype))
c[...] = 0
np.einsum(a, [0, 1], b, [1, 2], out=c,
dtype='f8', casting='unsafe')
assert_equal(c,
np.dot(a.astype('f8'),
b.astype('f8')).astype(dtype))
# matrix triple product (note this is not currently an efficient
# way to multiply 3 matrices)
a = np.arange(12, dtype=dtype).reshape(3, 4)
b = np.arange(20, dtype=dtype).reshape(4, 5)
c = np.arange(30, dtype=dtype).reshape(5, 6)
if dtype != 'f2':
assert_equal(np.einsum("ij,jk,kl", a, b, c),
a.dot(b).dot(c))
assert_equal(np.einsum(a, [0, 1], b, [1, 2], c, [2, 3]),
a.dot(b).dot(c))
d = np.arange(18, dtype=dtype).reshape(3, 6)
np.einsum("ij,jk,kl", a, b, c, out=d,
dtype='f8', casting='unsafe')
assert_equal(d, a.astype('f8').dot(b.astype('f8')
).dot(c.astype('f8')).astype(dtype))
d[...] = 0
np.einsum(a, [0, 1], b, [1, 2], c, [2, 3], out=d,
dtype='f8', casting='unsafe')
assert_equal(d, a.astype('f8').dot(b.astype('f8')
).dot(c.astype('f8')).astype(dtype))
# tensordot(a, b)
if np.dtype(dtype) != np.dtype('f2'):
a = np.arange(60, dtype=dtype).reshape(3, 4, 5)
b = np.arange(24, dtype=dtype).reshape(4, 3, 2)
assert_equal(np.einsum("ijk, jil -> kl", a, b),
np.tensordot(a, b, axes=([1, 0], [0, 1])))
assert_equal(np.einsum(a, [0, 1, 2], b, [1, 0, 3], [2, 3]),
np.tensordot(a, b, axes=([1, 0], [0, 1])))
c = np.arange(10, dtype=dtype).reshape(5, 2)
np.einsum("ijk,jil->kl", a, b, out=c,
dtype='f8', casting='unsafe')
assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'),
axes=([1, 0], [0, 1])).astype(dtype))
c[...] = 0
np.einsum(a, [0, 1, 2], b, [1, 0, 3], [2, 3], out=c,
dtype='f8', casting='unsafe')
assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'),
axes=([1, 0], [0, 1])).astype(dtype))
# logical_and(logical_and(a!=0, b!=0), c!=0)
a = np.array([1, 3, -2, 0, 12, 13, 0, 1], dtype=dtype)
b = np.array([0, 3.5, 0., -2, 0, 1, 3, 12], dtype=dtype)
c = np.array([True, True, False, True, True, False, True, True])
assert_equal(np.einsum("i,i,i->i", a, b, c,
dtype='?', casting='unsafe'),
np.logical_and(np.logical_and(a!=0, b!=0), c!=0))
assert_equal(np.einsum(a, [0], b, [0], c, [0], [0],
dtype='?', casting='unsafe'),
np.logical_and(np.logical_and(a!=0, b!=0), c!=0))
a = np.arange(9, dtype=dtype)
assert_equal(np.einsum(",i->", 3, a), 3*np.sum(a))
assert_equal(np.einsum(3, [], a, [0], []), 3*np.sum(a))
assert_equal(np.einsum("i,->", a, 3), 3*np.sum(a))
assert_equal(np.einsum(a, [0], 3, [], []), 3*np.sum(a))
# Various stride0, contiguous, and SSE aligned variants
for n in range(1, 25):
a = np.arange(n, dtype=dtype)
if np.dtype(dtype).itemsize > 1:
assert_equal(np.einsum("...,...", a, a), np.multiply(a, a))
assert_equal(np.einsum("i,i", a, a), np.dot(a, a))
assert_equal(np.einsum("i,->i", a, 2), 2*a)
assert_equal(np.einsum(",i->i", 2, a), 2*a)
assert_equal(np.einsum("i,->", a, 2), 2*np.sum(a))
assert_equal(np.einsum(",i->", 2, a), 2*np.sum(a))
assert_equal(np.einsum("...,...", a[1:], a[:-1]),
np.multiply(a[1:], a[:-1]))
assert_equal(np.einsum("i,i", a[1:], a[:-1]),
np.dot(a[1:], a[:-1]))
assert_equal(np.einsum("i,->i", a[1:], 2), 2*a[1:])
assert_equal(np.einsum(",i->i", 2, a[1:]), 2*a[1:])
assert_equal(np.einsum("i,->", a[1:], 2), 2*
|
np.sum(a[1:])
|
numpy.sum
|
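For reference, the basic equivalences exercised by the test class above can be summarised in a few standalone lines (not part of the original test file):
import numpy as np
a = np.arange(12).reshape(3, 4)
b = np.arange(20).reshape(4, 5)
assert np.array_equal(np.einsum("ij,jk", a, b), a.dot(b))                  # matrix product
assert np.einsum("ii", np.eye(3)) == np.trace(np.eye(3))                   # trace
assert np.array_equal(np.einsum("ij->ji", a), a.T)                         # transpose (returns a view)
assert np.array_equal(np.einsum("i,j", a[0], b[0]), np.outer(a[0], b[0]))  # outer product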
import cv2
from imageai.Detection.keras_retinanet.models.resnet import resnet50_retinanet
from imageai.Detection.keras_retinanet.utils.image import read_image_bgr, read_image_array, read_image_stream, \
preprocess_image, resize_image
from imageai.Detection.keras_retinanet.utils.visualization import draw_box, draw_caption
from imageai.Detection.keras_retinanet.utils.colors import label_color
import matplotlib.pyplot as plt
import matplotlib.image as pltimage
import numpy as np
import tensorflow as tf
import os
from keras import backend as K
from keras.layers import Input
from PIL import Image
import colorsys
from imageai.Detection.YOLOv3.models import yolo_main, tiny_yolo_main
from imageai.Detection.YOLOv3.utils import letterbox_image, yolo_eval
def get_session():
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
return tf.Session(config=config)
class ObjectDetection:
"""
             This is the object detection class for images in the ImageAI library. It provides support for RetinaNet,
             YOLOv3 and TinyYOLOv3 object detection networks. After instantiating this class, you can set its properties and
             make object detections using its pre-defined functions.
The following functions are required to be called before object detection can be made
* setModelPath()
             * At least one of the following, and it must correspond to the model set in the setModelPath()
[setModelTypeAsRetinaNet(), setModelTypeAsYOLOv3(), setModelTypeAsTinyYOLOv3()]
* loadModel() [This must be called once only before performing object detection]
Once the above functions have been called, you can call the detectObjectsFromImage() function of
             the object detection instance object at any time to obtain observable objects in any image.
"""
def __init__(self):
self.__modelType = ""
self.modelPath = ""
self.__modelPathAdded = False
self.__modelLoaded = False
self.__model_collection = []
# Instance variables for RetinaNet Model
self.__input_image_min = 1333
self.__input_image_max = 800
self.numbers_to_names = {0: 'person', 1: 'bicycle', 2: 'car', 3: 'motorcycle', 4: 'airplane', 5: 'bus',
6: 'train',
7: 'truck', 8: 'boat', 9: 'traffic light', 10: 'fire hydrant', 11: 'stop sign',
12: 'parking meter',
13: 'bench', 14: 'bird', 15: 'cat', 16: 'dog', 17: 'horse', 18: 'sheep', 19: 'cow',
20: 'elephant',
21: 'bear', 22: 'zebra', 23: 'giraffe', 24: 'backpack', 25: 'umbrella', 26: 'handbag',
27: 'tie',
28: 'suitcase', 29: 'frisbee', 30: 'skis', 31: 'snowboard', 32: 'sports ball',
33: 'kite',
34: 'baseball bat', 35: 'baseball glove', 36: 'skateboard', 37: 'surfboard',
38: 'tennis racket',
39: 'bottle', 40: 'wine glass', 41: 'cup', 42: 'fork', 43: 'knife', 44: 'spoon',
45: 'bowl',
46: 'banana', 47: 'apple', 48: 'sandwich', 49: 'orange', 50: 'broccoli', 51: 'carrot',
52: 'hot dog',
53: 'pizza', 54: 'donut', 55: 'cake', 56: 'chair', 57: 'couch', 58: 'potted plant',
59: 'bed',
60: 'dining table', 61: 'toilet', 62: 'tv', 63: 'laptop', 64: 'mouse', 65: 'remote',
66: 'keyboard',
67: 'cell phone', 68: 'microwave', 69: 'oven', 70: 'toaster', 71: 'sink',
72: 'refrigerator',
73: 'book', 74: 'clock', 75: 'vase', 76: 'scissors', 77: 'teddy bear',
78: 'hair dryer',
79: 'toothbrush'}
# Unique instance variables for YOLOv3 and TinyYOLOv3 model
self.__yolo_iou = 0.45
self.__yolo_score = 0.1
self.__yolo_anchors = np.array(
[[10., 13.], [16., 30.], [33., 23.], [30., 61.], [62., 45.], [59., 119.], [116., 90.], [156., 198.],
[373., 326.]])
self.__yolo_model_image_size = (416, 416)
self.__yolo_boxes, self.__yolo_scores, self.__yolo_classes = "", "", ""
self.sess = K.get_session()
# Unique instance variables for TinyYOLOv3.
self.__tiny_yolo_anchors = np.array(
[[10., 14.], [23., 27.], [37., 58.], [81., 82.], [135., 169.], [344., 319.]])
def setModelTypeAsRetinaNet(self):
"""
'setModelTypeAsRetinaNet()' is used to set the model type to the RetinaNet model
                 for the object detection instance object.
:return:
"""
self.__modelType = "retinanet"
def setModelTypeAsYOLOv3(self):
"""
'setModelTypeAsYOLOv3()' is used to set the model type to the YOLOv3 model
                 for the object detection instance object.
:return:
"""
self.__modelType = "yolov3"
def setModelTypeAsTinyYOLOv3(self):
"""
'setModelTypeAsTinyYOLOv3()' is used to set the model type to the TinyYOLOv3 model
                 for the object detection instance object.
:return:
"""
self.__modelType = "tinyyolov3"
def setModelPath(self, model_path):
"""
                 'setModelPath()' function is required and is used to set the file path to the object detection model
                  (RetinaNet, YOLOv3 or TinyYOLOv3) trained on the COCO dataset.
:param model_path:
:return:
"""
if (self.__modelPathAdded == False):
self.modelPath = model_path
self.__modelPathAdded = True
def loadModel(self, detection_speed="normal"):
"""
'loadModel()' function is required and is used to load the model structure into the program from the file path defined
in the setModelPath() function. This function receives an optional value which is "detection_speed".
                 The value is used to reduce the time it takes to detect objects in an image, down to about 10% of the normal time,
                 with just a slight reduction in the number of objects detected.
                 * detection_speed (optional); Acceptable values are "normal", "fast", "faster", "fastest" and "flash"
:param detection_speed:
:return:
"""
if (self.__modelType == "retinanet"):
if (detection_speed == "normal"):
self.__input_image_min = 800
self.__input_image_max = 1333
elif (detection_speed == "fast"):
self.__input_image_min = 400
self.__input_image_max = 700
elif (detection_speed == "faster"):
self.__input_image_min = 300
self.__input_image_max = 500
elif (detection_speed == "fastest"):
self.__input_image_min = 200
self.__input_image_max = 350
elif (detection_speed == "flash"):
self.__input_image_min = 100
self.__input_image_max = 250
elif (self.__modelType == "yolov3"):
if (detection_speed == "normal"):
self.__yolo_model_image_size = (416, 416)
elif (detection_speed == "fast"):
self.__yolo_model_image_size = (320, 320)
elif (detection_speed == "faster"):
self.__yolo_model_image_size = (208, 208)
elif (detection_speed == "fastest"):
self.__yolo_model_image_size = (128, 128)
elif (detection_speed == "flash"):
self.__yolo_model_image_size = (96, 96)
elif (self.__modelType == "tinyyolov3"):
if (detection_speed == "normal"):
self.__yolo_model_image_size = (832, 832)
elif (detection_speed == "fast"):
self.__yolo_model_image_size = (576, 576)
elif (detection_speed == "faster"):
self.__yolo_model_image_size = (416, 416)
elif (detection_speed == "fastest"):
self.__yolo_model_image_size = (320, 320)
elif (detection_speed == "flash"):
self.__yolo_model_image_size = (272, 272)
if (self.__modelLoaded == False):
if (self.__modelType == ""):
raise ValueError("You must set a valid model type before loading the model.")
elif (self.__modelType == "retinanet"):
model = resnet50_retinanet(num_classes=80)
model.load_weights(self.modelPath)
self.__model_collection.append(model)
self.__modelLoaded = True
elif (self.__modelType == "yolov3"):
model = yolo_main(Input(shape=(None, None, 3)), len(self.__yolo_anchors) // 3,
len(self.numbers_to_names))
model.load_weights(self.modelPath)
hsv_tuples = [(x / len(self.numbers_to_names), 1., 1.)
for x in range(len(self.numbers_to_names))]
self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
self.colors = list(
map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
self.colors))
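                # fixed seed so the generated box colors are reproducible; the RNG is reset afterwards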
np.random.seed(10101)
np.random.shuffle(self.colors)
np.random.seed(None)
self.__yolo_input_image_shape = K.placeholder(shape=(2,))
self.__yolo_boxes, self.__yolo_scores, self.__yolo_classes = yolo_eval(model.output,
self.__yolo_anchors,
len(self.numbers_to_names),
self.__yolo_input_image_shape,
score_threshold=self.__yolo_score,
iou_threshold=self.__yolo_iou)
self.__model_collection.append(model)
self.__modelLoaded = True
elif (self.__modelType == "tinyyolov3"):
model = tiny_yolo_main(Input(shape=(None, None, 3)), len(self.__tiny_yolo_anchors) // 2,
len(self.numbers_to_names))
model.load_weights(self.modelPath)
hsv_tuples = [(x / len(self.numbers_to_names), 1., 1.)
for x in range(len(self.numbers_to_names))]
self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
self.colors = list(
map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
self.colors))
np.random.seed(10101)
np.random.shuffle(self.colors)
np.random.seed(None)
self.__yolo_input_image_shape = K.placeholder(shape=(2,))
self.__yolo_boxes, self.__yolo_scores, self.__yolo_classes = yolo_eval(model.output,
self.__tiny_yolo_anchors,
len(self.numbers_to_names),
self.__yolo_input_image_shape,
score_threshold=self.__yolo_score,
iou_threshold=self.__yolo_iou)
self.__model_collection.append(model)
self.__modelLoaded = True
def detectObjectsFromImage(self, input_image="", output_image_path="", input_type="file", output_type="file",
extract_detected_objects=False, minimum_percentage_probability=50,
display_percentage_probability=True, display_object_name=True, thread_safe=False):
"""
'detectObjectsFromImage()' function is used to detect objects observable in the given image path:
* input_image , which can be a filepath, image numpy array or image file stream
* output_image_path (only if output_type = file) , file path to the output image that will contain the detection boxes and label, if output_type="file"
* input_type (optional) , file path/numpy array/image file stream of the image. Acceptable values are "file", "array" and "stream"
* output_type (optional) , file path/numpy array/image file stream of the image. Acceptable values are "file" and "array"
* extract_detected_objects (optional) , option to save each object detected individually as an image and return an array of the objects' image path.
* minimum_percentage_probability (optional, 50 by default) , option to set the minimum percentage probability for nominating a detected object for output.
* display_percentage_probability (optional, True by default), option to show or hide the percentage probability of each object in the saved/returned detected image
                            * display_object_name (optional, True by default), option to show or hide the name of each object in the saved/returned detected image
                            * thread_safe (optional, False by default), ensures the loaded detection model works across all threads if set to True, made possible by forcing all Tensorflow inference to run on the default graph.
                    The values returned by this function depend on the parameters passed. The possible return values
                    are stated below
                    - If extract_detected_objects = False or at its default value and output_type = 'file' or
                    at its default value, you must pass in the 'output_image_path' as a string to the path you want
the detected image to be saved. Then the function will return:
1. an array of dictionaries, with each dictionary corresponding to the objects
detected in the image. Each dictionary contains the following property:
* name (string)
* percentage_probability (float)
* box_points (list of x1,y1,x2 and y2 coordinates)
- If extract_detected_objects = False or at its default value and output_type = 'array' ,
Then the function will return:
1. a numpy array of the detected image
2. an array of dictionaries, with each dictionary corresponding to the objects
detected in the image. Each dictionary contains the following property:
* name (string)
* percentage_probability (float)
* box_points (list of x1,y1,x2 and y2 coordinates)
- If extract_detected_objects = True and output_type = 'file' or
                    at its default value, you must pass in the 'output_image_path' as a string to the path you want
the detected image to be saved. Then the function will return:
1. an array of dictionaries, with each dictionary corresponding to the objects
detected in the image. Each dictionary contains the following property:
* name (string)
* percentage_probability (float)
* box_points (list of x1,y1,x2 and y2 coordinates)
2. an array of string paths to the image of each object extracted from the image
                    - If extract_detected_objects = True and output_type = 'array', then the function will return:
1. a numpy array of the detected image
2. an array of dictionaries, with each dictionary corresponding to the objects
detected in the image. Each dictionary contains the following property:
* name (string)
* percentage_probability (float)
* box_points (list of x1,y1,x2 and y2 coordinates)
3. an array of numpy arrays of each object detected in the image
:param input_image:
:param output_image_path:
:param input_type:
:param output_type:
:param extract_detected_objects:
:param minimum_percentage_probability:
:param display_percentage_probability:
:param display_object_name:
:param thread_safe:
:return image_frame:
:return output_objects_array:
:return detected_objects_image_array:
"""
if (self.__modelLoaded == False):
raise ValueError("You must call the loadModel() function before making object detection.")
elif (self.__modelLoaded == True):
try:
if (self.__modelType == "retinanet"):
output_objects_array = []
detected_objects_image_array = []
if (input_type == "file"):
image = read_image_bgr(input_image)
elif (input_type == "array"):
image = read_image_array(input_image)
elif (input_type == "stream"):
image = read_image_stream(input_image)
detected_copy = image.copy()
detected_copy = cv2.cvtColor(detected_copy, cv2.COLOR_BGR2RGB)
detected_copy2 = image.copy()
detected_copy2 = cv2.cvtColor(detected_copy2, cv2.COLOR_BGR2RGB)
image = preprocess_image(image)
image, scale = resize_image(image, min_side=self.__input_image_min, max_side=self.__input_image_max)
model = self.__model_collection[0]
if thread_safe == True:
with self.sess.graph.as_default():
_, _, detections = model.predict_on_batch(np.expand_dims(image, axis=0))
else:
_, _, detections = model.predict_on_batch(np.expand_dims(image, axis=0))
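                    # detections layout: columns 0-3 are the box coordinates, columns 4 onward are per-class scores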
predicted_numbers = np.argmax(detections[0, :, 4:], axis=1)
scores = detections[0, np.arange(detections.shape[1]), 4 + predicted_numbers]
detections[0, :, :4] /= scale
min_probability = minimum_percentage_probability / 100
counting = 0
for index, (label, score), in enumerate(zip(predicted_numbers, scores)):
if score < min_probability:
continue
counting += 1
objects_dir = output_image_path + "-objects"
if (extract_detected_objects == True and output_type == "file"):
if (os.path.exists(objects_dir) == False):
os.mkdir(objects_dir)
color = label_color(label)
detection_details = detections[0, index, :4].astype(int)
draw_box(detected_copy, detection_details, color=color)
if (display_object_name == True and display_percentage_probability == True):
caption = "{} {:.3f}".format(self.numbers_to_names[label], (score * 100))
draw_caption(detected_copy, detection_details, caption)
elif (display_object_name == True):
caption = "{} ".format(self.numbers_to_names[label])
draw_caption(detected_copy, detection_details, caption)
elif (display_percentage_probability == True):
caption = " {:.3f}".format((score * 100))
draw_caption(detected_copy, detection_details, caption)
each_object_details = {}
each_object_details["name"] = self.numbers_to_names[label]
each_object_details["percentage_probability"] = score * 100
each_object_details["box_points"] = detection_details.tolist()
output_objects_array.append(each_object_details)
if (extract_detected_objects == True):
splitted_copy = detected_copy2.copy()[detection_details[1]:detection_details[3],
detection_details[0]:detection_details[2]]
if (output_type == "file"):
splitted_image_path = os.path.join(objects_dir,
self.numbers_to_names[label] + "-" + str(
counting) + ".jpg")
pltimage.imsave(splitted_image_path, splitted_copy)
detected_objects_image_array.append(splitted_image_path)
elif (output_type == "array"):
detected_objects_image_array.append(splitted_copy)
if (output_type == "file"):
pltimage.imsave(output_image_path, detected_copy)
if (extract_detected_objects == True):
if (output_type == "file"):
return output_objects_array, detected_objects_image_array
elif (output_type == "array"):
return detected_copy, output_objects_array, detected_objects_image_array
else:
if (output_type == "file"):
return output_objects_array
elif (output_type == "array"):
return detected_copy, output_objects_array
elif (self.__modelType == "yolov3" or self.__modelType == "tinyyolov3"):
output_objects_array = []
detected_objects_image_array = []
if (input_type == "file"):
image = Image.open(input_image)
input_image = read_image_bgr(input_image)
elif (input_type == "array"):
image = Image.fromarray(np.uint8(input_image))
input_image = read_image_array(input_image)
elif (input_type == "stream"):
image = Image.open(input_image)
input_image = read_image_stream(input_image)
detected_copy = input_image
detected_copy = cv2.cvtColor(detected_copy, cv2.COLOR_BGR2RGB)
detected_copy2 = input_image
detected_copy2 = cv2.cvtColor(detected_copy2, cv2.COLOR_BGR2RGB)
new_image_size = (self.__yolo_model_image_size[0] - (self.__yolo_model_image_size[0] % 32),
self.__yolo_model_image_size[1] - (self.__yolo_model_image_size[1] % 32))
boxed_image = letterbox_image(image, new_image_size)
image_data = np.array(boxed_image, dtype="float32")
image_data /= 255.
image_data = np.expand_dims(image_data, 0)
model = self.__model_collection[0]
if thread_safe == True:
with self.sess.graph.as_default():
out_boxes, out_scores, out_classes = self.sess.run(
[self.__yolo_boxes, self.__yolo_scores, self.__yolo_classes],
feed_dict={
model.input: image_data,
self.__yolo_input_image_shape: [image.size[1], image.size[0]],
K.learning_phase(): 0
})
else:
out_boxes, out_scores, out_classes = self.sess.run(
[self.__yolo_boxes, self.__yolo_scores, self.__yolo_classes],
feed_dict={
model.input: image_data,
self.__yolo_input_image_shape: [image.size[1], image.size[0]],
K.learning_phase(): 0
})
min_probability = minimum_percentage_probability / 100
counting = 0
for a, b in reversed(list(enumerate(out_classes))):
predicted_class = self.numbers_to_names[b]
box = out_boxes[a]
score = out_scores[a]
if score < min_probability:
continue
counting += 1
objects_dir = output_image_path + "-objects"
if (extract_detected_objects == True and output_type == "file"):
if (os.path.exists(objects_dir) == False):
os.mkdir(objects_dir)
label = "{} {:.2f}".format(predicted_class, score)
top, left, bottom, right = box
top = max(0, np.floor(top + 0.5).astype('int32'))
left = max(0, np.floor(left + 0.5).astype('int32'))
bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
try:
color = label_color(b)
except:
color = (255, 0, 0)
detection_details = [left, top, right, bottom]
draw_box(detected_copy, detection_details, color=color)
if (display_object_name == True and display_percentage_probability == True):
draw_caption(detected_copy, detection_details, label)
elif (display_object_name == True):
draw_caption(detected_copy, detection_details, predicted_class)
elif (display_percentage_probability == True):
draw_caption(detected_copy, detection_details, str(score * 100))
each_object_details = {}
each_object_details["name"] = predicted_class
each_object_details["percentage_probability"] = score * 100
each_object_details["box_points"] = detection_details
output_objects_array.append(each_object_details)
if (extract_detected_objects == True):
splitted_copy = detected_copy2.copy()[detection_details[1]:detection_details[3],
detection_details[0]:detection_details[2]]
if (output_type == "file"):
splitted_image_path = os.path.join(objects_dir,
predicted_class + "-" + str(
counting) + ".jpg")
pltimage.imsave(splitted_image_path, splitted_copy)
detected_objects_image_array.append(splitted_image_path)
elif (output_type == "array"):
detected_objects_image_array.append(splitted_copy)
if (output_type == "file"):
pltimage.imsave(output_image_path, detected_copy)
if (extract_detected_objects == True):
if (output_type == "file"):
return output_objects_array, detected_objects_image_array
elif (output_type == "array"):
return detected_copy, output_objects_array, detected_objects_image_array
else:
if (output_type == "file"):
return output_objects_array
elif (output_type == "array"):
return detected_copy, output_objects_array
except:
raise ValueError(
"Ensure you specified correct input image, input type, output type and/or output image path ")
def CustomObjects(self, person=False, bicycle=False, car=False, motorcycle=False, airplane=False,
bus=False, train=False, truck=False, boat=False, traffic_light=False, fire_hydrant=False,
stop_sign=False,
parking_meter=False, bench=False, bird=False, cat=False, dog=False, horse=False, sheep=False,
cow=False, elephant=False, bear=False, zebra=False,
giraffe=False, backpack=False, umbrella=False, handbag=False, tie=False, suitcase=False,
frisbee=False, skis=False, snowboard=False,
sports_ball=False, kite=False, baseball_bat=False, baseball_glove=False, skateboard=False,
surfboard=False, tennis_racket=False,
bottle=False, wine_glass=False, cup=False, fork=False, knife=False, spoon=False, bowl=False,
banana=False, apple=False, sandwich=False, orange=False,
broccoli=False, carrot=False, hot_dog=False, pizza=False, donut=False, cake=False, chair=False,
couch=False, potted_plant=False, bed=False,
dining_table=False, toilet=False, tv=False, laptop=False, mouse=False, remote=False,
keyboard=False, cell_phone=False, microwave=False,
oven=False, toaster=False, sink=False, refrigerator=False, book=False, clock=False, vase=False,
scissors=False, teddy_bear=False, hair_dryer=False,
toothbrush=False):
"""
The 'CustomObjects()' function allows you to handpick the type of objects you want to detect
from an image. The objects are pre-initiated in the function variables and predefined as 'False',
                    which you can easily set to True for any number of objects available. This function
                    returns a dictionary which must be passed into the 'detectCustomObjectsFromImage()' function. Detecting
custom objects only happens when you call the function 'detectCustomObjectsFromImage()'
                    * true_values_of_objects (array); Acceptable values are 'True' and 'False' for all object values present
:param boolean_values:
:return: custom_objects_dict
"""
custom_objects_dict = {}
input_values = [person, bicycle, car, motorcycle, airplane,
bus, train, truck, boat, traffic_light, fire_hydrant, stop_sign,
parking_meter, bench, bird, cat, dog, horse, sheep, cow, elephant, bear, zebra,
giraffe, backpack, umbrella, handbag, tie, suitcase, frisbee, skis, snowboard,
sports_ball, kite, baseball_bat, baseball_glove, skateboard, surfboard, tennis_racket,
bottle, wine_glass, cup, fork, knife, spoon, bowl, banana, apple, sandwich, orange,
broccoli, carrot, hot_dog, pizza, donut, cake, chair, couch, potted_plant, bed,
dining_table, toilet, tv, laptop, mouse, remote, keyboard, cell_phone, microwave,
oven, toaster, sink, refrigerator, book, clock, vase, scissors, teddy_bear, hair_dryer,
toothbrush]
actual_labels = ["person", "bicycle", "car", "motorcycle", "airplane",
"bus", "train", "truck", "boat", "traffic light", "fire hydrant", "stop sign",
"parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear",
"zebra",
"giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis",
"snowboard",
"sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard",
"tennis racket",
"bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich",
"orange",
"broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch", "potted plant",
"bed",
"dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone",
"microwave",
"oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear",
"hair dryer",
"toothbrush"]
for input_value, actual_label in zip(input_values, actual_labels):
if (input_value == True):
custom_objects_dict[actual_label] = "valid"
else:
custom_objects_dict[actual_label] = "invalid"
return custom_objects_dict
def detectCustomObjectsFromImage(self, custom_objects=None, input_image="", output_image_path="", input_type="file",
output_type="file", extract_detected_objects=False,
minimum_percentage_probability=50, display_percentage_probability=True,
display_object_name=True, thread_safe=False):
"""
'detectCustomObjectsFromImage()' function is used to detect predefined objects observable in the given image path:
                            * custom_objects , the dictionary returned by the CustomObjects() function, used to filter which objects to detect
                            * input_image , which can be a file path, image numpy array or image file stream
* output_image_path , file path to the output image that will contain the detection boxes and label, if output_type="file"
* input_type (optional) , file path/numpy array/image file stream of the image. Acceptable values are "file", "array" and "stream"
* output_type (optional) , file path/numpy array/image file stream of the image. Acceptable values are "file" and "array"
* extract_detected_objects (optional, False by default) , option to save each object detected individually as an image and return an array of the objects' image path.
* minimum_percentage_probability (optional, 50 by default) , option to set the minimum percentage probability for nominating a detected object for output.
* display_percentage_probability (optional, True by default), option to show or hide the percentage probability of each object in the saved/returned detected image
                            * display_object_name (optional, True by default), option to show or hide the name of each object in the saved/returned detected image
                            * thread_safe (optional, False by default), ensures the loaded detection model works across all threads if set to True, made possible by forcing all Tensorflow inference to run on the default graph.
                    The values returned by this function depend on the parameters passed. The possible return values
                    are stated below
                    - If extract_detected_objects = False or at its default value and output_type = 'file' or
                    at its default value, you must pass in the 'output_image_path' as a string to the path you want
the detected image to be saved. Then the function will return:
1. an array of dictionaries, with each dictionary corresponding to the objects
detected in the image. Each dictionary contains the following property:
* name (string)
* percentage_probability (float)
* box_points (list of x1,y1,x2 and y2 coordinates)
- If extract_detected_objects = False or at its default value and output_type = 'array' ,
Then the function will return:
1. a numpy array of the detected image
2. an array of dictionaries, with each dictionary corresponding to the objects
detected in the image. Each dictionary contains the following property:
* name (string)
* percentage_probability (float)
* box_points (list of x1,y1,x2 and y2 coordinates)
- If extract_detected_objects = True and output_type = 'file' or
                    at its default value, you must pass in the 'output_image_path' as a string to the path you want
the detected image to be saved. Then the function will return:
1. an array of dictionaries, with each dictionary corresponding to the objects
detected in the image. Each dictionary contains the following property:
* name (string)
* percentage_probability (float)
* box_points (list of x1,y1,x2 and y2 coordinates)
2. an array of string paths to the image of each object extracted from the image
                    - If extract_detected_objects = True and output_type = 'array', then the function will return:
1. a numpy array of the detected image
2. an array of dictionaries, with each dictionary corresponding to the objects
detected in the image. Each dictionary contains the following property:
* name (string)
* percentage_probability (float)
* box_points (list of x1,y1,x2 and y2 coordinates)
3. an array of numpy arrays of each object detected in the image
:param input_image:
:param output_image_path:
:param input_type:
:param output_type:
:param extract_detected_objects:
:param minimum_percentage_probability:
:return output_objects_array:
:param display_percentage_probability:
            :param display_object_name:
            :return detected_copy:
            :return detected_objects_image_array:
"""
if (self.__modelLoaded == False):
raise ValueError("You must call the loadModel() function before making object detection.")
elif (self.__modelLoaded == True):
try:
if (self.__modelType == "retinanet"):
output_objects_array = []
detected_objects_image_array = []
if (input_type == "file"):
image = read_image_bgr(input_image)
elif (input_type == "array"):
image = read_image_array(input_image)
elif (input_type == "stream"):
image = read_image_stream(input_image)
detected_copy = image.copy()
detected_copy = cv2.cvtColor(detected_copy, cv2.COLOR_BGR2RGB)
detected_copy2 = image.copy()
detected_copy2 = cv2.cvtColor(detected_copy2, cv2.COLOR_BGR2RGB)
image = preprocess_image(image)
image, scale = resize_image(image, min_side=self.__input_image_min, max_side=self.__input_image_max)
model = self.__model_collection[0]
if thread_safe == True:
with self.sess.graph.as_default():
_, _, detections = model.predict_on_batch(np.expand_dims(image, axis=0))
else:
_, _, detections = model.predict_on_batch(np.expand_dims(image, axis=0))
predicted_numbers = np.argmax(detections[0, :, 4:], axis=1)
scores = detections[0, np.arange(detections.shape[1]), 4 + predicted_numbers]
detections[0, :, :4] /= scale
min_probability = minimum_percentage_probability / 100
counting = 0
for index, (label, score), in enumerate(zip(predicted_numbers, scores)):
if score < min_probability:
continue
if (custom_objects != None):
check_name = self.numbers_to_names[label]
if (custom_objects[check_name] == "invalid"):
continue
counting += 1
objects_dir = output_image_path + "-objects"
if (extract_detected_objects == True and output_type == "file"):
if (os.path.exists(objects_dir) == False):
os.mkdir(objects_dir)
color = label_color(label)
detection_details = detections[0, index, :4].astype(int)
draw_box(detected_copy, detection_details, color=color)
if (display_object_name == True and display_percentage_probability == True):
caption = "{} {:.3f}".format(self.numbers_to_names[label], (score * 100))
draw_caption(detected_copy, detection_details, caption)
elif (display_object_name == True):
caption = "{} ".format(self.numbers_to_names[label])
draw_caption(detected_copy, detection_details, caption)
elif (display_percentage_probability == True):
caption = " {:.3f}".format((score * 100))
draw_caption(detected_copy, detection_details, caption)
each_object_details = {}
each_object_details["name"] = self.numbers_to_names[label]
each_object_details["percentage_probability"] = score * 100
each_object_details["box_points"] = detection_details.tolist()
output_objects_array.append(each_object_details)
if (extract_detected_objects == True):
splitted_copy = detected_copy2.copy()[detection_details[1]:detection_details[3],
detection_details[0]:detection_details[2]]
if (output_type == "file"):
splitted_image_path = os.path.join(objects_dir,
self.numbers_to_names[label] + "-" + str(
counting) + ".jpg")
pltimage.imsave(splitted_image_path, splitted_copy)
detected_objects_image_array.append(splitted_image_path)
elif (output_type == "array"):
detected_objects_image_array.append(splitted_copy)
if (output_type == "file"):
pltimage.imsave(output_image_path, detected_copy)
if (extract_detected_objects == True):
if (output_type == "file"):
return output_objects_array, detected_objects_image_array
elif (output_type == "array"):
return detected_copy, output_objects_array, detected_objects_image_array
else:
if (output_type == "file"):
return output_objects_array
elif (output_type == "array"):
return detected_copy, output_objects_array
elif (self.__modelType == "yolov3" or self.__modelType == "tinyyolov3"):
output_objects_array = []
detected_objects_image_array = []
if (input_type == "file"):
image = Image.open(input_image)
input_image = read_image_bgr(input_image)
elif (input_type == "array"):
image = Image.fromarray(np.uint8(input_image))
input_image = read_image_array(input_image)
elif (input_type == "stream"):
image = Image.open(input_image)
input_image = read_image_stream(input_image)
detected_copy = input_image
detected_copy = cv2.cvtColor(detected_copy, cv2.COLOR_BGR2RGB)
detected_copy2 = input_image
detected_copy2 = cv2.cvtColor(detected_copy2, cv2.COLOR_BGR2RGB)
new_image_size = (self.__yolo_model_image_size[0] - (self.__yolo_model_image_size[0] % 32),
self.__yolo_model_image_size[1] - (self.__yolo_model_image_size[1] % 32))
boxed_image = letterbox_image(image, new_image_size)
image_data = np.array(boxed_image, dtype="float32")
image_data /= 255.
image_data =
|
np.expand_dims(image_data, 0)
|
numpy.expand_dims
|
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
#
# Based on:
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>
# --------------------------------------------------------
"""blob helper functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from six.moves import cPickle as pickle
import numpy as np
import cv2
import math
import numpy.random as npr
import utils.segms as segm_utils
import utils.boxes_3d as box_utils_3d
from core.config import cfg
def get_image_blob(im):
"""Convert an image into a network input.
Arguments:
im (ndarray): a gray scale image
Returns:
blob (ndarray): a data blob holding an image pyramid
im_scale (float): image scale (target size) / (original size)
im_info (ndarray)
"""
processed_im, im_scale = prep_im_for_blob(im, entry = None, phase = 'test')
blob = im_list_to_blob(processed_im)
slices, height, width = blob.shape[2], blob.shape[3], blob.shape[4]
im_info = np.hstack((slices, height, width, im_scale))[np.newaxis, :]
return blob, im_scale, im_info.astype(np.float32)
def im_list_to_blob(ims):
"""Convert a list of images into a network input. Assumes images were
prepared using prep_im_for_blob or equivalent
Output is a 5D NCSHW tensor of the images concatenated along axis 0, i.e. with
shape (batch_size, channel, max_slices, max_height, max_width).
"""
if not isinstance(ims, list):
ims = [ims]
max_shape = get_max_shape([im.shape[:3] for im in ims]) # np array [max_s, max_h, max_w]
num_images = len(ims)
blob = np.zeros(
(num_images, max_shape[0], max_shape[1], max_shape[2], 1), dtype=np.float32)
for i in range(num_images):
im = ims[i]
blob[i, 0:im.shape[0], 0:im.shape[1], 0:im.shape[2], 0] = im
# Move channels (axis 4) to axis 1
# Axis order will become: (batch elem, channel, slices, height, width)
channel_swap = (0, 4, 1, 2, 3)
blob = blob.transpose(channel_swap)
return blob
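# A minimal, self-contained sketch of the blob layout built above (shapes are
# illustrative; the cfg/FPN stride padding is left out):
import numpy as np

_vols = [np.random.rand(8, 64, 64), np.random.rand(10, 48, 80)]      # (S, H, W) volumes
_max_s, _max_h, _max_w = np.array([v.shape for v in _vols]).max(axis=0)
_blob = np.zeros((len(_vols), _max_s, _max_h, _max_w, 1), dtype=np.float32)
for _i, _v in enumerate(_vols):
    _blob[_i, :_v.shape[0], :_v.shape[1], :_v.shape[2], 0] = _v      # zero-pad to max shape
_blob = _blob.transpose((0, 4, 1, 2, 3))                             # -> (N, C, S, H, W)
assert _blob.shape == (2, 1, 10, 64, 80)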
def get_max_shape(im_shapes):
"""Calculate max spatial size (s, h, w) for batching given a list of image shapes
"""
max_shape = np.array(im_shapes).max(axis=0)
assert max_shape.size == 3
# Pad the images so their dimensions are divisible by the stride
if cfg.FPN.FPN_ON:
stride = float(cfg.FPN.COARSEST_STRIDE)
max_shape[0] = int(np.ceil(max_shape[0] / stride) * stride)
max_shape[1] = int(np.ceil(max_shape[1] / stride) * stride)
max_shape[2] = int(np.ceil(max_shape[2] / stride) * stride)
return max_shape
def crop_data_3d(im, entry):
#randomly select the cropping start index and crop with half-overlap
#select the cropped block containing the most positive voxels, because of the sparsity
data_slices, data_height, data_width = map(int, im.shape[:])
boxes = entry['boxes'].copy()
segms = entry['segms'].copy()
ss = np.array(cfg.TRAIN.IN_SIZE, dtype=np.int16)
x_min = math.floor(np.min(boxes[:, 0]))
y_min = math.floor(np.min(boxes[:, 1]))
z_min = math.floor(np.min(boxes[:, 2]))
x_s_min = 0
x_s_max = min(x_min, data_width - ss[2])
y_s_min = 0
y_s_max = min(y_min, data_height - ss[1])
z_s_min = 0
z_s_max = min(z_min, data_slices - ss[0])
x_s = x_s_min if x_s_min == x_s_max else \
npr.choice(range(x_s_min, x_s_max + 1))
y_s = y_s_min if y_s_min == y_s_max else \
npr.choice(range(y_s_min, y_s_max + 1))
z_s = z_s_min if z_s_min == z_s_max else \
npr.choice(range(z_s_min, z_s_max + 1))
s_list = list(range(z_s, data_slices - ss[0], int(ss[0] / 2)))
h_list = list(range(y_s, data_height - ss[1], int(ss[1] / 2)))
w_list = list(range(x_s, data_width - ss[2], int(ss[2] / 2)))
s_list.append(data_slices - ss[0])
h_list.append(data_height - ss[1])
w_list.append(data_width - ss[2])
max_pos_num = 0
posit = []
for z in s_list:
for y in h_list:
for x in w_list:
boxes[:, 0::3] -= x
boxes[:, 1::3] -= y
boxes[:, 2::3] -= z
np.clip(boxes[:, 0::3], 0, ss[2] - 1, out=boxes[:, 0::3])
np.clip(boxes[:, 1::3], 0, ss[1] - 1, out=boxes[:, 1::3])
np.clip(boxes[:, 2::3], 0, ss[0] - 1, out=boxes[:, 2::3])
invalid = (boxes[:, 0] == boxes[:, 3]) | (boxes[:, 1] == boxes[:, 4]) | (boxes[:, 2] == boxes[:, 5])
valid_inds = np.nonzero(~ invalid)[0]
pos_box_volumes, _ = box_utils_3d.boxes_volume(boxes[valid_inds, :])
tmp_pos_num = np.sum(pos_box_volumes)
if tmp_pos_num > max_pos_num:
max_pos_num = tmp_pos_num
posit = [x, y, z]
boxes = entry['boxes'].copy()
x, y, z = posit[:]
im = im[z: z+ss[0], y: y+ss[1], x: x+ss[2]]
boxes[:, 0::3] -= x
boxes[:, 1::3] -= y
boxes[:, 2::3] -= z
segms[:, 0] -= x
segms[:, 1] -= y
segms[:, 2] -= z
np.clip(boxes[:, 0::3], 0, ss[2] - 1, out=boxes[:, 0::3])
np.clip(boxes[:, 1::3], 0, ss[1] - 1, out=boxes[:, 1::3])
np.clip(boxes[:, 2::3], 0, ss[0] - 1, out=boxes[:, 2::3])
np.clip(segms[:, 0], 0, ss[2] - 1, out=segms[:, 0])
np.clip(segms[:, 1], 0, ss[1] - 1, out=segms[:, 1])
np.clip(segms[:, 2], 0, ss[0] - 1, out=segms[:, 2])
entry['boxes'] = boxes
entry['segms'] = segms
entry['slices'] = ss[0]
entry['height'] = ss[1]
entry['width'] = ss[2]
return im
def prep_im_for_blob(im, entry, phase):
"""Prepare an image for use as a network input blob. Specially:
- Subtract per-channel pixel mean
- Convert to float32
- Rescale to each of the specified target sizes (capped at max_size)
- Crop if needed
Returns a list of transformed images, one for each target size. Also returns
the scale factors that were used to compute each returned image.
"""
im = im.astype(np.float32, copy=False)
if cfg.PP_METHOD == 'norm1':
mask = im > 0
mean_val = np.mean(im[mask])
std_val = np.std(im[mask])
im = (im - mean_val) / std_val
elif cfg.PP_METHOD == 'norm2':
im = im/65535.
if phase == 'train' and cfg.TRAIN.NEED_CROP:
im = crop_data_3d(im, entry)
# Check bounding box
boxes = entry['boxes']
invalid = (boxes[:, 0] == boxes[:, 3]) | (boxes[:, 1] == boxes[:, 4]) | (boxes[:, 2] == boxes[:, 5])
valid_inds = np.nonzero(~ invalid)[0]
if len(valid_inds) < len(boxes):
for key in ['boxes', 'segms', 'gt_classes', 'seg_volumes', 'gt_overlaps', 'is_crowd',
'gt_keypoints', 'max_classes', 'max_overlaps', 'bbox_targets']:
if key in entry:
entry[key] = entry[key][valid_inds]
entry['box_to_gt_ind_map'] = np.array(list(range(len(valid_inds))))
im_scales = [1.0]
ims = [im]
return ims, im_scales
def get_im_blob_sizes(im_shape, target_sizes, max_size):
"""Calculate im blob size for multiple target_sizes given original im shape
"""
im_size_min = np.min(im_shape)
im_size_max = np.max(im_shape)
im_sizes = []
for target_size in target_sizes:
im_scale = get_target_scale(im_size_min, im_size_max, target_size, max_size)
im_sizes.append(np.round(im_shape * im_scale))
return np.array(im_sizes)
def get_target_scale(im_size_min, im_size_max, target_size, max_size):
"""Calculate target resize scale
"""
im_scale = float(target_size) / float(im_size_min)
# Prevent the biggest axis from being more than max_size
if np.round(im_scale * im_size_max) > max_size:
im_scale = float(max_size) / float(im_size_max)
return im_scale
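# Worked example of the scaling rule above (numbers are illustrative): a 512 x 2048
# image with target_size=800 would get scale 800/512 = 1.5625, but 1.5625 * 2048 =
# 3200 > max_size=1333, so the scale is capped at 1333/2048 ~= 0.651.
assert abs(get_target_scale(512, 2048, target_size=800, max_size=1333) - 1333.0 / 2048.0) < 1e-9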
def zeros(shape, int32=False):
"""Return a blob of all zeros of the given shape with the correct float or
int data type.
"""
return
|
np.zeros(shape, dtype=np.int32 if int32 else np.float32)
|
numpy.zeros
|
import cv2
import numpy as np
import matplotlib.pyplot as plt
import os
def load_data(data_dir,group):
losses = np.load(os.path.join(data_dir,'{}_losses.npy'.format(group)))
#images = np.load(os.path.join(data_dir,'{}_inputs.npy'.format(group)))
#recs = np.load(os.path.join(data_dir,'recs.npy'))
names = np.load(os.path.join(data_dir,'{}_files.npy'.format(group)))
#names = [os.path.basename(n)[:-4] for n in names]
if 'abnormal' in group:
labels = [1]*losses.shape[0]
elif 'entering' in group:
labels = [1]*losses.shape[0]
else:
labels = [0]*losses.shape[0]
return losses, np.array(labels), np.array(names)
def evaluate(cfg, dataset='test', groupings=['normal','abnormal'], threshold=0.0):
experiment_dir = os.path.join("experiments/",cfg['experiment'],dataset)
plt.figure(figsize=(8,6))
plt.ylabel('reconstruction error (MSE)', fontsize=12)
file = open(os.path.join(experiment_dir,'extreems.txt'),"w")
losses, labels, names = np.array([]), np.array([]), np.array([])
for group in groupings:
loss, lab, name = load_data(experiment_dir,group)
# find extremes
indices = np.argsort(loss)
file.write("{}\n".format(dataset))
#print(indices[-3:])
for l, n in zip(loss[indices[:3]],name[indices[:3]]):
file.write("loss {}, file {}\n".format(l, n))
middle_idx = len(indices)//2
for l, n in zip(loss[indices[middle_idx-1:middle_idx+1]],name[indices[middle_idx-1:middle_idx+1]]):
file.write("loss {}, file {}\n".format(l, n))
for l, n in zip(loss[indices[-3:]],name[indices[-3:]]):
file.write("loss {}, file {}\n".format(l, n))
x = np.random.random_sample(loss.shape[0])
if 'abnormal' in group:
plt.plot(x,loss,'.',color='r', label='Abnormal')
elif 'entering' in group:
plt.plot(x,loss,'.',color='y', label='Entering')
#plt.plot(x,loss,'.',color='r')
else:
plt.plot(x,loss,'.',color='g', label='Normal')
losses = np.concatenate((losses,loss), axis=0)
labels =
|
np.concatenate((labels,lab), axis=0)
|
numpy.concatenate
|
""" Setup PBjam sessions and perform mode ID and peakbagging
This module contains the input layer for setting up PBjam sessions for
peakbagging solar-like oscillators. This is the easiest way to handle targets
in PBjam.
It's possible to manually initiate star class instances and do all the fitting
that way, but it's simpler to just use the session class, which handles
everything, including formatting of the inputs.
A PBjam session is started by initializing the session class instance with a
target ID, $\nu_{max}$, a large separation, effective temperature and Gaia bp_rp
color. The class instance is then called to run through all the peakbagging
steps automatically. See the Session class documentation for an example.
Lists of the above can be provided for multiple targets, but it's often simpler
to just provide PBjam with a dictionary or Pandas dataframe. See mytgts.csv
for a template.
Custom timeseries or periodograms can be provided as either file pathnames,
`numpy' arrays, or lightkurve.LightCurve/lightkurve.periodogram objects. If
nothing is provided PBjam will download the data automatically using
`LightKurve'.
Specific quarters, campaigns or sectors can be requested with the relevant
keyword (i.e., 'quarter' for KIC, etc.). If none of these are provided, PBjam
will download all available data, using the long cadence versions by default.
Once initialized, the session class contains a list of star class instances
for each requested target, with associated spectra for each.
The next step is to perform a mode ID on the spectra. At the moment PBjam
only supports use of the asymptotic relation mode ID method. Additional methods
can be added in future.
Finally the peakbagging method takes the output from the modeID and performs
a proper HMC peakbagging run to get the unparameterized mode frequencies.
Plotting the results of each stage is also possible.
Note
----
For automatic download the long cadence data set is used by default, so set
the cadence to `short' for main-sequence targets.
"""
import lightkurve as lk
from lightkurve.periodogram import Periodogram
import numpy as np
import astropy.units as units
import pandas as pd
import os, pickle, warnings
from .star import star, _format_name
from datetime import datetime
from .jar import references
def _organize_sess_dataframe(vardf):
""" Takes input dataframe and tidies it up.
Checks to see if required columns are present in the input dataframe,
and adds optional columns if they don't exists, containing None values.
Parameters
----------
vardf : Pandas.DataFrame
Input dataframe
"""
keys = ['ID', 'numax', 'dnu', 'numax_err', 'dnu_err']
if any(x not in vardf.keys() for x in keys):
    raise KeyError('Some of the required keywords were missing.')
N = len(vardf)
singles = ['exptime', 'campaign', 'sector', 'month', 'quarter', 'mission']
doubles = ['teff', 'bp_rp']
for key in singles:
if key not in vardf.keys():
vardf[key] = np.array([None]*N)
for key in doubles:
if key not in vardf.keys():
vardf[key] = np.array([None]*N)
vardf[key+'_err'] = np.array([None]*N)
if 'timeseries' not in vardf.keys():
_format_col(vardf, None, 'timeseries')
if 'spectrum' not in vardf.keys():
_format_col(vardf, None, 'spectrum')
def _organize_sess_input(**vardct):
""" Takes input and organizes them in a dataframe.
Checks to see if required inputs are present and inserts them into a
dataframe. Any optional columns that are not included in the input are
added as empty columns.
Parameters
----------
vardct : objects
Variable inputs to Session class to be arranged into a dataframe
Returns
-------
vardf : Pandas.DataFrame
Dataframe containing the inputs from Session class call.
"""
vardf = pd.DataFrame({'ID': np.array(vardct['ID']).reshape((-1, 1)).flatten()})
N = len(vardf)
singles = ['exptime', 'campaign', 'sector', 'month', 'quarter', 'mission']
doubles = ['numax', 'dnu', 'teff', 'bp_rp']
for key in singles:
if not vardct[key]:
vardf[key] = np.array([None]*N)
else:
vardf[key] = vardct[key]
for key in doubles:
if not vardct[key]:
vardf[key] = np.array([None]*N)
vardf[key+'_err'] = np.array([None]*N)
else:
vardf[key] = np.array(vardct[key]).reshape((-1, 2))[:, 0].flatten()
vardf[key+'_err'] = np.array(vardct[key]).reshape((-1, 2))[:, 1].flatten()
return vardf
def _sort_lc(lc):
""" Sort a lightcurve in Lightkurve object.
Lightkurve lightcurves are not necessarily sorted in time, which causes
an error in periodogram.
Parameters
----------
lc : Lightkurve.LightCurve instance
Lightkurve object to be modified
Returns
-------
lc : Lightkurve.LightCurve instance
The sorted Lightkurve object
"""
sidx = np.argsort(lc.time)
lc.time = lc.time[sidx]
lc.flux = lc.flux[sidx]
return lc
def _query_lightkurve(ID, download_dir, use_cached, lkwargs):
""" Get time series using LightKurve
Performs a search for available fits files on MAST and then downloads them
if necessary.
The search results are cached with an expiration of 30 days. If a search
result is found, the fits file cache is searched for a matching file list
which is then used.
Parameters
----------
ID : str
ID string of the target
download_dir : str
Directory for fits file and search results caches.
use_cached : bool, optional
Whether or not to use the cached time series. Default is True.
lkwargs : dict
Dictionary to be passed to LightKurve
Returns
-------
lc : Lightkurve.LightCurve instance
The concatenated time series for the target.
"""
ID = _format_name(ID)
_set_mission(ID, lkwargs)
ID = _getMASTidentifier(ID, lkwargs)
search = _perform_search(ID, lkwargs, use_cached)
fitsFiles = _check_lc_cache(search, lkwargs['mission'])
lc = _load_fits(fitsFiles, lkwargs['mission'])
lc = _clean_lc(lc)
return lc
def _arr_to_lk(x, y, name, typ):
""" LightKurve object from input.
Creates either a lightkurve.LightCurve or lightkurve.periodogram object
from the input arrays.
Parameters
----------
x : list-like
First column of timeseries or periodogram (time/frequency).
y : list-like
Second column of timeseries or periodogram (flux/power).
name : string
Target ID
typ : string
Either timeseries or periodogram.
Returns
-------
lkobj : object
Either lightkurve.LightCurve or lightkurve.periodogram object
depending on typ.
"""
if typ == 'timeseries':
return lk.LightCurve(time=x, flux=y, targetid=name)
elif typ == 'spectrum':
return Periodogram(x*units.microhertz,
units.Quantity(y, None),
targetid=name)
else:
raise KeyError("Don't modify anything but spectrum and timeseries cols")
def _format_col(vardf, col, key):
""" Add timeseries or spectrum column to dataframe based on input
Based on the contents of col, will try to format col and add it as a column
to vardf with column name key. col can be many things, so the decision is
based mainly on the dimensionality of col.
If dim = 0, it's assumed that col is either None, or a string, (for the
latter it assumes there is then only one target).
If dim = 1, it's assumed that col is a list-like object, consisting of
either None or strings, these are passed along without modification.
If dim = 2, col is assumed to be either a time series or power spectrum
of shape (2,M), with time/frequency in 1st row and flux/power in the
second.
If dim = 3, it is assumed to be list of (2,M) arrays.
In both of the latter cases col is converted to Lightkurve object(s).
Parameters
----------
vardf : pandas.DataFrame instance
Pandas Dataframe instance to which either a timeseries or spectrum column
will be added.
col : object
Input from Session call, corresponding to key
key : str
Name of column to add to vardf
"""
N = np.shape(vardf['ID'])[0]
col = np.array(col, dtype=object)
# If dim = 0, it's either none or a string
if col.ndim == 0:
if not col:
# If none, then multiply up to length of ID
vardf[key] = np.array([None]*N)
else:
# If string, then single target
vardf[key] = np.array(col).reshape((-1, 1)).flatten()
# If dim = 1, it's either a list of nones, strings or lightkurve objects
elif col.ndim == 1:
vardf[key] = col
# if dim = 2, it's an array or tuple, with time and flux
elif col.ndim == 2:
x =
|
np.array(col[0, :], dtype=float)
|
numpy.array
|
import numpy as np
import MITgcmutils as mit
#import matplotlib.pyplot as plt
import os
import gc
from scipy.interpolate import griddata
from scipy.interpolate import interp1d
from multiprocessing import Pool
#plt.ion()
#-- directories --
dir_grd12 = '/glade/p/univ/ufsu0011/runs/gridMIT_update1/'
dir_grd50 = '/glade/p/univ/ufsu0011/runs/chao50/gridMIT/'
dir_in = '/glade/p/univ/ufsu0011/runs/orar/memb00'
dir_out = '/glade/p/univ/ufsu0011/data_in/bound_cond_50'
#-- some parameters --
nt = 73 #also need earlier and past year for interpolation,
#for now it is coded in a 'normal year' fashion
nproc = 36 #number of processors used for parallelization
#-- time param for loading --
dt = 200 #1/12 model time step
spy = 86400*365
dump = 5*86400 #5-d dumps
d_iter = dump/dt
nDump = spy/dump;
#-- grid params --
# hz grid: defined in lat/lon for indexing,
# but in [m] with co-localized origin for interpolation
rSphere = 6370000.0
#- 1/12 -
rC12 = mit.rdmds(dir_grd12 + 'RC')
rF12 = mit.rdmds(dir_grd12 + 'RF')
[nr12] = rC12[:, 0, 0].shape
xx12 = mit.rdmds(dir_grd12 + 'XC')
[ny12, nx12] = xx12.shape
zzz12 = np.zeros([nr12+2])
zzz12[1:-1] = rC12[:, 0, 0]
zzz12[-1] = rF12[-1, 0, 0]
#- 1/50 -
rC50 = mit.rdmds(dir_grd50 + 'RC')
[nr50] = rC50[:, 0, 0].shape
xx50 = mit.rdmds(dir_grd50 + 'XC')
[ny50, nx50] = xx50.shape
#-- variables and boundaries --
varN = ['t', 's', 'uE', 'vN']
nvar = len(varN)
varOrder = [0, 1, 2, 3] #Ordering of variables in diagnostic used to make obcs
bbdy = ['south', 'north', 'east', 'west']
nnbdy = len(bbdy)
#-----------------------------------------------------
# Define horizontal and vertical interpolation methods
#-----------------------------------------------------
def interp_obcs(tttt):
#print("-- tt=%02i || Interpolate %s at bdy %s --" % (tttt, varN[ivar], bbdy[ibdy]))
tmpvarin = var12[tttt, :, :, :].reshape([nr12, ny2*nx2])
#- hz interp -
tmpvar = np.zeros([nr12+2, nxy50])
for kkk in range(nr12):
tmpvar[kkk+1] = griddata(xy12, tmpvarin[kkk, :], (xx50, yy50), method=mmeth)
tmpvar[0, :] = tmpvar[1, :]
tmpvar[-1, :] = tmpvar[-2, :]
#- vert interp -
tmpvar2 = np.zeros([nr50, nxy50])
for ij in range(nxy50):
# FOR TRACER ONLY
# find last wet point and repeat it downward for constant interpolation
if varN[ivar] == 't' or varN[ivar] == 's':
tmpk = np.where( tmpvar[:, ij] == 0.0 )[0]
if tmpk.size > 0:
if (tmpk[0] > 0 and tmpk[0] < (nr12+1) ):
tmpvar[tmpk[0]:, ij] = tmpvar[tmpk[0]-1, ij]
#
f = interp1d(zzz12, tmpvar[:, ij])
tmpvar2[:, ij] = f(rC50[:, 0, 0])
return tmpvar2
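# A stand-alone sketch of the tracer trick used above: the first dry (zero) level is
# repeated downward so interp1d extrapolates a constant value instead of ramping to
# zero (the depths and tracer values below are illustrative).
import numpy as np
from scipy.interpolate import interp1d

_z_src = np.array([0., 10., 50., 200., 500.])     # source depth levels
_col = np.array([20., 20., 18., 0., 0.])          # tracer column; zeros below the sea floor
_dry = np.where(_col == 0.0)[0]
if _dry.size > 0 and _dry[0] > 0:
    _col[_dry[0]:] = _col[_dry[0] - 1]            # repeat the last wet value downward
print(interp1d(_z_src, _col)(np.array([5., 100., 400.])))   # [20. 18. 18.]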
#------------------------------------------------------------------
# Make Boundary conditions from our previous 1/12 runs
#------------------------------------------------------------------
iper = 2003
offset = int((iper-1958)*spy/dt)
iters = np.arange(d_iter, (nDump+1)*d_iter, d_iter, dtype='int') + offset
tmpdir = str('%s/%04i' % (dir_out, iper))
if not os.path.isdir(tmpdir):
os.makedirs( tmpdir )
#-- Loop over variables and boundaries --
for ivar in range(nvar):
if varN[ivar] == 'uE':
flag_repz = False #to repeat grid points downward for vert. interp
mmeth = 'cubic' #interpolation method
# 1/50
x50deg = mit.rdmds(dir_grd50 + 'XG')
y50deg = mit.rdmds(dir_grd50 + 'YC')
hFac50 = mit.rdmds(dir_grd50 + 'hFacW')
# 1/12
x12deg = mit.rdmds(dir_grd12 + 'XG')
y12deg = mit.rdmds(dir_grd12 + 'YC')
elif varN[ivar] == 'vN':
flag_repz = False #to repeat grid points downward for vert. interp
mmeth = 'cubic' #interpolation method
# 1/50
x50deg = mit.rdmds(dir_grd50 + 'XC')
y50deg = mit.rdmds(dir_grd50 + 'YG')
hFac50 = mit.rdmds(dir_grd50 + 'hFacS')
# 1/12
x12deg = mit.rdmds(dir_grd12 + 'XC')
y12deg = mit.rdmds(dir_grd12 + 'YG')
else: # tracers
flag_repz = True #to repeat grid points downward for vert. interp
mmeth = 'linear' #interpolation method
# 1/50
x50deg = mit.rdmds(dir_grd50 + 'XC')
y50deg = mit.rdmds(dir_grd50 + 'YC')
hFac50 = mit.rdmds(dir_grd50 + 'hFacC')
# 1/12
x12deg = mit.rdmds(dir_grd12 + 'XC')
y12deg = mit.rdmds(dir_grd12 + 'YC')
# define associated grid in [m] with co-localized origin
tmpx50 = np.radians(x50deg - x50deg[0,0]) * rSphere * np.cos(np.radians(y50deg))
tmpy50 = np.radians(y50deg - y50deg[0,0]) * rSphere
tmpx12 = np.radians(x12deg - x50deg[0,0]) * rSphere * np.cos(np.radians(y12deg))
tmpy12 = np.radians(y12deg - y50deg[0,0]) * rSphere
# need to be consistent with ordering in bbdy
[sbdy, nbdy, wbdy, ebdy] = [y50deg.min(), y50deg.max(), x50deg.min(), x50deg.max()]
#
for ibdy in range(nnbdy):
print("== Deal with %s at bdy %s ==" % (varN[ivar], bbdy[ibdy]) )
#
#-- boundary parameters --
deltaxy = 3 #+/- deltaxy grid points (on the 1/12 grid) around boundary
if bbdy[ibdy] == 'south':
#- 1/50 -
if varN[ivar] == 'vN': #obcs are applied at inner grid points
msk50 = hFac50[:, 1, :]
xx50 = tmpx50[1, :]
yy50 = tmpy50[1, :]
else:
msk50 = hFac50[:, 0, :]
xx50 = tmpx50[0, :]
yy50 = tmpy50[0, :]
nxy50 = nx50
#- associated subdomain on the 1/12 grid -
iiw = np.where(x12deg[0,:]>wbdy)[0][0] - deltaxy
iie = np.where(x12deg[0,:]>ebdy)[0][0] + deltaxy
jjs =
|
np.where(y12deg[:,0]>sbdy)
|
numpy.where
|
#!/usr/bin/python
# Wflow is Free software, see below:
#
# Copyright (c) <NAME>/Deltares 2005-2014
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Run the wflow_sbm hydrological model..
usage
::
wflow_sbm [-h][-v level][-F runinfofile][-L logfile][-C casename][-R runId]
[-c configfile][-T last_step][-S first_step][-s seconds][-W][-E][-N][-U discharge]
[-P parameter multiplication][-X][-f][-I][-i tbl_dir][-x subcatchId][-u updatecols]
[-p inputparameter multiplication][-l loglevel]
-X: save state at the end of the run over the initial conditions at the start
-f: Force overwrite of existing results
-T: Set end time of the run: yyyy-mm-dd hh:mm:ss
-S: Set start time of the run: yyyy-mm-dd hh:mm:ss
-s: Set the model timesteps in seconds
-I: re-initialize the initial model conditions with default
-i: Set input table directory (default is intbl)
-x: Apply multipliers (-P/-p ) for subcatchment only (e.g. -x 1)
-C: set the name of the case (directory) to run
-R: set the name runId within the current case
-L: set the logfile
-E: Switch on reinfiltration of overland flow
-c: name of wflow the configuration file (default: Casename/wflow_sbm.ini).
-h: print usage information
-W: If set, this flag indicates that an ldd is created for the water level
for each timestep. If not the water is assumed to flow according to the
DEM. Wflow will run a lot slower with this option. Most of the time
(shallow soil, steep topography) you do not need this option. Also, if you
need it you might actually need another model.
-U: The argument to this option should be a .tss file with measured discharge in
[m^3/s] which the program will use to update the internal state to match
the measured flow. The number of columns in this file should match the
number of gauges in the wflow_gauges.map file.
-u: list of gauges/columns to use in update. Format:
-u [1 , 4 ,13]
The above example uses column 1, 4 and 13
-P: set parameter change string (e.g: -P "self.FC = self.FC * 1.6") for non-dynamic variables
-p: set parameter change string (e.g: -P "self.Precipitation = self.Precipitation * 1.11") for
dynamic variables
-l: loglevel (must be one of DEBUG, WARNING, ERROR)
"""
import os.path
import numpy as np
import pcraster.framework
from wflow.wf_DynamicFramework import *
from wflow.wflow_adapt import *
from wflow.wflow_funcs import *
import pcraster as pcr
import pdb
import math
from numba import jit
wflow = "wflow_sbm: "
updateCols = []
def usage(*args):
sys.stdout = sys.stderr
"""Way"""
for msg in args:
print(msg)
print(__doc__)
sys.exit(0)
def estimate_iterations_kin_wave(Q, Beta, alpha, timestepsecs, dx, mv):
celerity = pcr.ifthen(Q > 0.0, 1.0 / (alpha * Beta * Q**(Beta-1)))
courant = (timestepsecs / dx) * celerity
np_courant = pcr.pcr2numpy(courant, mv)
np_courant[np_courant==mv] = np.nan
try:
it_kin = int(np.ceil(1.25*(np.nanpercentile(np_courant,95))))
except:
it_kin = 1
return it_kin
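# A plain-numpy sketch of the Courant-number estimate above (no pcraster maps; alpha
# and the grid spacing are treated as scalars and the values are illustrative):
def estimate_iterations_kin_wave_np(Q, Beta, alpha, timestepsecs, dx):
    Q = np.asarray(Q, dtype=float)
    celerity = np.full_like(Q, np.nan)
    wet = Q > 0.0
    celerity[wet] = 1.0 / (alpha * Beta * Q[wet] ** (Beta - 1.0))   # kinematic wave celerity
    courant = (timestepsecs / dx) * celerity
    try:
        return int(np.ceil(1.25 * np.nanpercentile(courant, 95)))
    except (ValueError, OverflowError):
        return 1
# e.g. estimate_iterations_kin_wave_np([0.0, 0.5, 2.0], Beta=0.6, alpha=2.5,
#                                      timestepsecs=86400, dx=1000.0)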
@jit(nopython=True)
def _sCurve(X, a=0.0, b=1.0, c=1.0):
"""
sCurve function:
Input:
- X input map
- C determines the steepness or "stepwiseness" of the curve.
The higher C the sharper the function. A negative C reverses the function.
- b determines the amplitude of the curve
- a determines the centre level (default = 0)
Output:
- result
"""
s = 1.0 / (b + np.exp(-c * (X - a)))
return s
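# Quick illustration of the S-curve above: with b=1 it runs from ~0 to ~1 around the
# centre a, and a larger c gives a sharper transition (values rounded):
#   _sCurve(np.array([-2., 0., 2.]), a=0.0, b=1.0, c=1.0)  -> [0.12, 0.50, 0.88]
#   _sCurve(np.array([-2., 0., 2.]), a=0.0, b=1.0, c=8.0)  -> [0.00, 0.50, 1.00]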
@jit(nopython=True)
def actEvap_unsat_SBM(
RootingDepth,
UStoreDepth,
UStoreLayerThickness,
sumLayer,
RestPotEvap,
sumActEvapUStore,
c,
L,
thetaS,
thetaR,
ust=0,
):
"""
Actual evaporation function:
- first try to get demand from the saturated zone, using the rootingdepth as a limiting factor
- secondly try to get the remaining water from the unsaturated store
- it uses an S-Curve to make sure roots get wet/dry gradually (basically
representing a root-depth distribution)
if ust is True, all unsaturated storage is deemed to be available to the roots
Input:
- RootingDepth, UStoreDepth, FirstZoneDepth, PotTrans, smoothpar
Output:
- ActEvap, FirstZoneDepth, UStoreDepth ActEvapUStore
"""
# AvailCap is fraction of unsat zone containing roots
if ust >= 1:
AvailCap = UStoreDepth * 0.99
else:
if L > 0:
AvailCap = min(1.0, max(0.0, (RootingDepth - sumLayer) / L))
else:
AvailCap = 0.0
MaxExtr = AvailCap * UStoreDepth
# Calculate the reduction of RestPotEvap due to differences in rooting density in the soil column
# The used model is based on Vrugt et al. (2001) and uses as input parameters for z* and Pz the
# values of Hoffman and van Genuchten (z* = 0.20 and Pz = 1.00)
# Next step is to make use of the Feddes curve in order to decrease ActEvapUstore when soil moisture values
# occur above or below ideal plant growing conditions (see also Feddes et al., 1978). h1-h4 values are
# actually negative, but all values are made positive for simplicity.
hb = 1 # cm (pF 1 for atmospheric pressure)
h1 = 1 # cm
h2 = 100 # cm (pF 2 for field capacity)
h3 = 400 # cm (pF 3, critical pF value)
h4 = 15849 # cm (pF 4.2, wilting point)
# According to Brooks-Corey
par_lambda = 2 / (c - 3)
if L > 0.0:
vwc = UStoreDepth / L
else:
vwc = 0.0
vwc = max(vwc, 0.0000001)
head = hb / (
((vwc) / (thetaS - thetaR)) ** (1 / par_lambda)
) # Note that in the original formula, thetaR is subtracted from vwc, but thetaR is not part of the numerical vwc calculation here
head = max(head,hb)
# Transform h to a reduction coefficient value according to Feddes et al. (1978).
if(head <= h1):
alpha = 0
elif(head >= h4):
alpha = 0
elif((head < h2) & (head > h1)):
alpha = (head - h1) / (h2 - h1)
elif((head > h3) & (head < h4)):
alpha = 1 - (head - h3) / (h4 - h3)
else:
alpha = 1
ActEvapUStore = (min(MaxExtr, RestPotEvap, UStoreDepth)) * alpha
UStoreDepth = UStoreDepth - ActEvapUStore
RestPotEvap = RestPotEvap - ActEvapUStore
sumActEvapUStore = ActEvapUStore + sumActEvapUStore
return UStoreDepth, sumActEvapUStore, RestPotEvap
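# Worked example of the Brooks-Corey / Feddes reduction used above (illustrative
# numbers): with c = 7, thetaS = 0.45, thetaR = 0.05 and UStoreDepth/L = 0.2
# (half of thetaS - thetaR):
#   par_lambda = 2 / (7 - 3) = 0.5
#   head = hb / ((0.2 / (0.45 - 0.05)) ** (1 / 0.5)) = 1 / 0.5**2 = 4 cm
# which lies between h1 = 1 cm and h2 = 100 cm, so
#   alpha = (4 - 1) / (100 - 1) ~= 0.03
# i.e. transpiration from this very wet layer is strongly reduced.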
@jit(nopython=True)
def infiltration(AvailableForInfiltration, PathFrac, cf_soil, TSoil,InfiltCapSoil,InfiltCapPath, UStoreCapacity, modelSnow, soilInfReduction):
SoilInf = AvailableForInfiltration * (1 - PathFrac)
PathInf = AvailableForInfiltration * PathFrac
if modelSnow & soilInfReduction:
bb = 1.0 / (1.0 - cf_soil)
soilInfRedu = _sCurve(TSoil, a=0.0, b=bb, c=8.0)
else:
soilInfRedu = 1.0
MaxInfiltSoil = min(InfiltCapSoil * soilInfRedu, SoilInf)
MaxInfiltPath = min(InfiltCapPath * soilInfRedu, PathInf)
InfiltSoilPath = min(MaxInfiltPath + MaxInfiltSoil, max(0.0, UStoreCapacity))
return InfiltSoilPath
@jit(nopython=True)
def unsatzone_flow(UStoreLayerDepth, InfiltSoilPath, L, z, KsatVerFrac, c, KsatVer, f, thetaS, thetaR, SoilWaterCapacity, SWDold, shape_layer, TransferMethod):
m = 0
UStoreLayerDepth[m] = UStoreLayerDepth[m] + InfiltSoilPath
if L[m] > 0.0:
#sbm option for vertical transfer (only for 1 layer)
if (TransferMethod == 1 and shape_layer == 1):
Sd = SoilWaterCapacity - SWDold
if Sd <= 0.00001:
st = 0.0
else:
st = KsatVerFrac[m] * KsatVer * (min(UStoreLayerDepth[m],L[m]*(thetaS-thetaR))/Sd)
else:
st = KsatVerFrac[m] * KsatVer * np.exp(-f * z[m]) * min((UStoreLayerDepth[m]/(L[m] * (thetaS-thetaR)))**c[m],1.0)
ast = min(st,UStoreLayerDepth[m])
UStoreLayerDepth[m] = UStoreLayerDepth[m] - ast
else:
ast = 0.0
for m in range(1,len(L)):
UStoreLayerDepth[m] = UStoreLayerDepth[m] + ast
if L[m] > 0.0:
st = KsatVerFrac[m] * KsatVer * np.exp(-f* z[m]) * min((UStoreLayerDepth[m]/(L[m] * (thetaS-thetaR)))**c[m],1.0)
ast = min(st,UStoreLayerDepth[m])
else:
ast = 0.0
UStoreLayerDepth[m] = UStoreLayerDepth[m] - ast
return ast, UStoreLayerDepth
@jit(nopython=True)
def sbm_cell(nodes, nodes_up, ldd, layer, static, dyn, modelSnow, soilInfReduction, timestepsecs, basetimestep, deltaT, nrpaddyirri, shape, TransferMethod, it_kinL=1, ust=0):
shape_layer = layer['UStoreLayerThickness'].shape
# flat new state
ssf_new = np.zeros(dyn['ssf'].size, dtype=dyn['ssf'].dtype)
qo_new = np.zeros(dyn['LandRunoff'].size, dtype=dyn['LandRunoff'].dtype)
qo_new = np.concatenate((qo_new, np.array([0], dtype=dyn['LandRunoff'].dtype)))
# append zero to end to deal with nodata (-1) in indices
ssf_new = np.concatenate((ssf_new, np.array([0], dtype=dyn['ssf'].dtype)))
ldd_ = np.concatenate((ldd, np.array([0], dtype=ldd.dtype)))
slope_ = np.concatenate((static['slope'], np.array([0], dtype=static['slope'].dtype)))
SWDold = np.zeros(dyn['ssf'].size, dtype=dyn['ssf'].dtype)
sumUSold = np.zeros(dyn['ssf'].size, dtype=dyn['ssf'].dtype)
for i in range(len(nodes)):
for j in range(len(nodes[i])):
idx = nodes[i][j]
nbs = nodes_up[i][j]
sumlayer = np.unique(layer['UStoreLayerThickness'][:,idx].cumsum())
sumlayer_0 = np.concatenate((np.array([0.0]), sumlayer))
SWDold[idx] = dyn['SatWaterDepth'][idx]
sumUSold[idx] = layer['UStoreLayerDepth'][:,idx].sum()
n = np.where(dyn['zi'][idx] > sumlayer_0)[0]
if len(n) > 1:
L = np.concatenate((layer['UStoreLayerThickness'][n[0:-1],idx], np.array([dyn['zi'][idx] - sumlayer_0[n[-1]]]))).astype(np.float64)
else:
L = np.array([dyn['zi'][idx]]).astype(np.float64)
z = L.cumsum()
dyn['ActEvapUStore'][idx] = 0.0
if static['River'][idx]:
ind = np.where(ldd_[nbs] != ldd_[idx])
chanperc = np.zeros(ldd_[nbs].size)
chanperc[ind] = slope_[nbs][ind]/(slope_[idx]+slope_[nbs][ind])
ssf_in = np.sum((1-chanperc)*ssf_new[nbs])
dyn['ssf_toriver'][idx] = np.sum((chanperc)*ssf_new[nbs])/(1000*1000*1000)/timestepsecs
else:
ssf_in = np.sum(ssf_new[nbs])
dyn['CellInFlow'][idx] = ssf_in
UStoreCapacity = static['SoilWaterCapacity'][idx] - dyn['SatWaterDepth'][idx] - layer['UStoreLayerDepth'][n,idx].sum()
InfiltSoilPath = infiltration(dyn['AvailableForInfiltration'][idx], static['PathFrac'][idx], static['cf_soil'][idx],
dyn['TSoil'][idx],static['InfiltCapSoil'][idx],static['InfiltCapPath'][idx],UStoreCapacity, modelSnow, soilInfReduction)
dyn['InfiltSoilPath'][idx] = InfiltSoilPath
# unsat fluxes first
ast, layer['UStoreLayerDepth'][:,idx] = unsatzone_flow(layer['UStoreLayerDepth'][:,idx], InfiltSoilPath, L, z, layer['KsatVerFrac'][:,idx], layer['c'][:,idx], static['KsatVer'][idx], static['f'][idx],
static['thetaS'][idx], static['thetaR'][idx], static['SoilWaterCapacity'][idx], SWDold[idx], shape_layer[0], TransferMethod)
dyn['Transfer'][idx] = ast
# then evaporation from layers
for k in range(len(L)):
if k==0:
SaturationDeficit = static['SoilWaterCapacity'][idx] - dyn['SatWaterDepth'][idx]
if shape_layer[0] == 1:
soilevapunsat = dyn['restEvap'][idx] * min(1.0, SaturationDeficit / static['SoilWaterCapacity'][idx])
else:
if len(L) == 1:
if dyn['zi'][idx] > 0:
soilevapunsat = dyn['restEvap'][idx] * min(1.0, layer['UStoreLayerDepth'][k,idx]/dyn['zi'][idx])
else:
soilevapunsat = 0.0
else:
soilevapunsat = dyn['restEvap'][idx] * min(1.0, layer['UStoreLayerDepth'][k,idx]/(layer['UStoreLayerThickness'][k,idx]*(static['thetaS'][idx]-static['thetaR'][idx])))
soilevapunsat = min(soilevapunsat, layer['UStoreLayerDepth'][k,idx])
dyn['restEvap'][idx] = dyn['restEvap'][idx] - soilevapunsat
layer['UStoreLayerDepth'][k,idx] = layer['UStoreLayerDepth'][k,idx] - soilevapunsat
if shape_layer[0] == 1:
soilevapsat = 0.0
else:
if len(L) == 1:
soilevapsat = dyn['restEvap'][idx] * min(1.0, (layer['UStoreLayerThickness'][k,idx] - dyn['zi'][idx])/ layer['UStoreLayerThickness'][k,idx])
soilevapsat = min(soilevapsat, (layer['UStoreLayerThickness'][k,idx] - dyn['zi'][idx]) * (static['thetaS'][idx] - static['thetaR'][idx]))
else:
soilevapsat = 0.0
dyn['soilevap'][idx] = soilevapunsat + soilevapsat
dyn['SatWaterDepth'][idx] = dyn['SatWaterDepth'][idx] - soilevapsat
# evaporation available for transpiration
PotTrans = dyn['PotTransSoil'][idx] - dyn['soilevap'][idx] - dyn['ActEvapOpenWaterLand'][idx]
# evaporation from saturated store
wetroots = _sCurve(dyn['zi'][idx], a=static['ActRootingDepth'][idx], c=static['rootdistpar'][idx])
dyn['ActEvapSat'][idx] = min(PotTrans * wetroots, dyn['SatWaterDepth'][idx])
dyn['SatWaterDepth'][idx] = dyn['SatWaterDepth'][idx] - dyn['ActEvapSat'][idx]
RestPotEvap = PotTrans - dyn['ActEvapSat'][idx]
# actual evaporation from UStore
layer['UStoreLayerDepth'][k,idx], dyn['ActEvapUStore'][idx], RestPotEvap = actEvap_unsat_SBM(static['ActRootingDepth'][idx], layer['UStoreLayerDepth'][k,idx], layer['UStoreLayerThickness'][k,idx],
sumlayer[k], RestPotEvap, dyn['ActEvapUStore'][idx], layer['c'][k,idx], L[k], static['thetaS'][idx], static['thetaR'][idx], ust)
else:
# actual evaporation from UStore
layer['UStoreLayerDepth'][k,idx], dyn['ActEvapUStore'][idx], RestPotEvap = actEvap_unsat_SBM(static['ActRootingDepth'][idx], layer['UStoreLayerDepth'][k,idx], layer['UStoreLayerThickness'][k,idx],
sumlayer[k], RestPotEvap, dyn['ActEvapUStore'][idx], layer['c'][k,idx], L[k], static['thetaS'][idx], static['thetaR'][idx], ust)
#check soil moisture balance per layer
du = 0.0
for k in range(L.size-1,-1,-1):
du = max(0,layer['UStoreLayerDepth'][k,idx] - L[k]*(static['thetaS'][idx]-static['thetaR'][idx]))
layer['UStoreLayerDepth'][k,idx] = layer['UStoreLayerDepth'][k,idx] - du
if k > 0:
layer['UStoreLayerDepth'][k-1,idx] = layer['UStoreLayerDepth'][k-1,idx] + du
Ksat = layer['KsatVerFrac'][len(L)-1,idx] * static['KsatVer'][idx] * np.exp(-static['f'][idx] * dyn['zi'][idx])
UStoreCapacity = static['SoilWaterCapacity'][idx] - dyn['SatWaterDepth'][idx] - layer['UStoreLayerDepth'][n,idx].sum()
MaxCapFlux = max(0.0, min(Ksat, dyn['ActEvapUStore'][idx], UStoreCapacity, dyn['SatWaterDepth'][idx]))
if dyn['zi'][idx] > static['ActRootingDepth'][idx]:
CapFluxScale = static['CapScale'][idx] / (static['CapScale'][idx] + dyn['zi'][idx] - static['ActRootingDepth'][idx]) * timestepsecs / basetimestep
else:
CapFluxScale = 0.0
CapFlux = MaxCapFlux * CapFluxScale
netCapflux = CapFlux
actCapFlux = 0.0
for k in range(L.size-1,-1,-1):
toadd = min(netCapflux, max(L[k]*(static['thetaS'][idx]-static['thetaR'][idx]) - layer['UStoreLayerDepth'][k,idx], 0.0))
layer['UStoreLayerDepth'][k,idx] = layer['UStoreLayerDepth'][k,idx] + toadd
netCapflux = netCapflux - toadd
actCapFlux = actCapFlux + toadd
dyn['CapFlux'][idx] = actCapFlux
DeepKsat = static['KsatVer'][idx] * np.exp(-static['f'][idx] * static['SoilThickness'][idx])
DeepTransfer = min(dyn['SatWaterDepth'][idx], DeepKsat)
dyn['ActLeakage'][idx] = max(0.0, min(static['MaxLeakage'][idx], DeepTransfer))
r = (ast - actCapFlux - dyn['ActLeakage'][idx] - dyn['ActEvapSat'][idx] - soilevapsat) * static['DW'][idx]*1000
ssf_new[idx], dyn['zi'][idx], ExfiltSatWater = kinematic_wave_ssf(ssf_in, dyn['ssf'][idx], dyn['zi'][idx], r, static['KsatHorFrac'][idx],
static['KsatVer'][idx], static['slope'][idx], static['neff'][idx], static['f'][idx],
static['SoilThickness'][idx], deltaT, static['DL'][idx]*1000, static['DW'][idx]*1000, static['ssfmax'][idx])
dyn['zi'][idx] = min(dyn['zi'][idx], static['SoilThickness'][idx])
dyn['SatWaterDepth'][idx] = (static['SoilThickness'][idx] - dyn['zi'][idx]) * (static['thetaS'][idx] - static['thetaR'][idx])
n_new = np.where(dyn['zi'][idx] > sumlayer_0)[0]
if len(n_new) > 1:
L_new = np.concatenate((layer['UStoreLayerThickness'][n_new[0:-1],idx], np.array([dyn['zi'][idx] - sumlayer_0[n_new[-1]]]))).astype(np.float64)
else:
L_new = np.array([dyn['zi'][idx]]).astype(np.float64)
ExfiltFromUstore = 0.0
for k in range(L.size-1,-1,-1):
if (np.where(n_new == k))[0].size > 0:
ExfiltFromUstore = max(0,layer['UStoreLayerDepth'][k,idx] - L_new[k]*(static['thetaS'][idx]-static['thetaR'][idx]))
else:
ExfiltFromUstore = layer['UStoreLayerDepth'][k,idx]
layer['UStoreLayerDepth'][k,idx] = layer['UStoreLayerDepth'][k,idx] - ExfiltFromUstore
if k > 0:
layer['UStoreLayerDepth'][k-1,idx] = layer['UStoreLayerDepth'][k-1,idx] + ExfiltFromUstore
dyn['ExfiltWater'][idx] = ExfiltSatWater + ExfiltFromUstore
dyn['ExcessWater'][idx] = dyn['AvailableForInfiltration'][idx] - InfiltSoilPath + du
dyn['ActInfilt'][idx] = InfiltSoilPath - du
ponding_add = 0
if nrpaddyirri > 0:
if static['h_p'][idx] > 0:
ponding_add = min(dyn['ExfiltWater'][idx] + dyn['ExcessWater'][idx], static['h_p'][idx] - dyn['PondingDepth'][idx])
dyn['PondingDepth'][idx] = dyn['PondingDepth'][idx] + ponding_add
dyn['InwaterO'][idx] = max(dyn['ExfiltWater'][idx] + dyn['ExcessWater'][idx] + dyn['RunoffLandCells'][idx] - dyn['ActEvapOpenWaterLand'][idx] - ponding_add, 0.0) * (static['xl'][idx] * static['yl'][idx]) * 0.001 / timestepsecs
dyn['sumUStoreLayerDepth'][idx] = layer['UStoreLayerDepth'][:,idx].sum()
# volumetric water contents per soil layer and root zone
for k in range(layer['UStoreLayerThickness'][:,idx].size):
if (np.where(n_new == k))[0].size > 0:
if layer['UStoreLayerThickness'][k,idx] > 0:
layer['vwc'][k,idx] = (layer['UStoreLayerDepth'][k,idx] + (layer['UStoreLayerThickness'][k,idx] - L_new[k]) * (static['thetaS'][idx] - static['thetaR'][idx])) / layer['UStoreLayerThickness'][k,idx] + static['thetaR'][idx]
else:
layer['vwc'][k,idx] = static['thetaS'][idx]
layer['vwc_perc'][k,idx] = (layer['vwc'][k,idx]/static['thetaS'][idx]) * 100.0
rootStore_unsat = 0
for k in range(L_new.size):
if L_new[k] > 0:
rootStore_unsat = rootStore_unsat + (max(0.0, static['ActRootingDepth'][idx] - sumlayer_0[k])/L_new[k]) * layer['UStoreLayerDepth'][k,idx]
dyn['RootStore_unsat'][idx] = rootStore_unsat
acc_flow = np.zeros(dyn['LandRunoff'].size, dtype=dyn['LandRunoff'].dtype)
acc_flow = np.concatenate((acc_flow, np.array([0], dtype=dyn['LandRunoff'].dtype)))
qo_toriver_acc = np.copy(acc_flow)
q = dyn['InwaterO'] / static['DL']
for v in range(0,it_kinL):
qo_new = np.zeros(dyn['LandRunoff'].size, dtype=dyn['LandRunoff'].dtype)
qo_new = np.concatenate((qo_new, np.array([0], dtype=dyn['LandRunoff'].dtype)))
for i in range(len(nodes)):
for j in range(len(nodes[i])):
idx = nodes[i][j]
nbs = nodes_up[i][j]
if static['River'][idx]:
ind = np.where(ldd_[nbs] != ldd_[idx])
chanperc = np.zeros(ldd_[nbs].size)
chanperc[ind] = slope_[nbs][ind]/(slope_[idx]+slope_[nbs][ind])
if static['SW'][idx] > 0.0:
qo_in = np.sum((1-chanperc)*qo_new[nbs])
qo_toriver_vol = np.sum(chanperc*qo_new[nbs]) * (timestepsecs/it_kinL)
else:
qo_in = 0.0
qo_toriver_vol = np.sum(qo_new[nbs]) * (timestepsecs/it_kinL)
else:
qo_in = np.sum(qo_new[nbs])
qo_toriver_vol = 0.0
qo_new[idx] = kinematic_wave(qo_in, dyn['LandRunoff'][idx], q[idx], dyn['AlphaL'][idx], static['Beta'][idx], timestepsecs/it_kinL, static['DL'][idx])
acc_flow[idx] = acc_flow[idx] + qo_new[idx] * (timestepsecs/it_kinL)
dyn['Qo_in'][idx] = dyn['Qo_in'][idx] + qo_in * (timestepsecs/it_kinL)
qo_toriver_acc[idx] = qo_toriver_acc[idx] + qo_toriver_vol
if static['SW'][idx] > 0:
WaterLevelL = (dyn['AlphaL'][idx] * np.power(qo_new[idx], static['Beta'][idx])) / static['SW'][idx]
Pl = static['SW'][idx] + (2.0 * WaterLevelL)
dyn['AlphaL'][idx] = static['AlpTermR'][idx] * np.power(Pl, static['AlpPow'][idx])
dyn['LandRunoff'][idx]= qo_new[idx]
qo_new = acc_flow/timestepsecs
dyn['qo_toriver'][:] = qo_toriver_acc[:-1]/timestepsecs
dyn['Qo_in'][:] = dyn['Qo_in'][:] / timestepsecs
dyn['SoilWatbal'][:] = (dyn['ActInfilt'][:] - ((dyn['SatWaterDepth'][:] + dyn['sumUStoreLayerDepth'][:]) - (sumUSold[:] + SWDold[:])) +
(dyn['CellInFlow'][:]-ssf_new[:-1])/(static['DW'][:]*static['DL'][:]*1000*1000) - dyn['ExfiltWater'][:] - dyn['soilevap'][:] - dyn['ActEvapUStore'][:] -
dyn['ActEvapSat'][:] - dyn['ActLeakage'][:])
return ssf_new[:-1], qo_new[:-1], dyn, layer
def SnowPackHBV(Snow, SnowWater, Precipitation, Temperature, TTI, TT, TTM, Cfmax, WHC):
"""
HBV Type snowpack modelling using a Temperature degree factor. All correction
factors (RFCF and SFCF) are set to 1. The refreezing efficiency factor is set to 0.05.
:param Snow:
:param SnowWater:
:param Precipitation:
:param Temperature:
:param TTI:
:param TT:
:param TTM:
:param Cfmax:
:param WHC:
:return: Snow, SnowWater, SnowMelt, RainFall, SnowFall
"""
RFCF = 1.0 # correction factor for rainfall
CFR = 0.05000 # refreezing efficiency constant for refreezing of free water in snow
SFCF = 1.0 # correction factor for snowfall
RainFrac = pcr.ifthenelse(
1.0 * TTI == 0.0,
pcr.ifthenelse(Temperature <= TT, pcr.scalar(0.0), pcr.scalar(1.0)),
pcr.min((Temperature - (TT - TTI / 2)) / TTI, pcr.scalar(1.0)),
)
RainFrac = pcr.max(
RainFrac, pcr.scalar(0.0)
) # fraction of precipitation which falls as rain
SnowFrac = 1 - RainFrac # fraction of precipitation which falls as snow
Precipitation = (
SFCF * SnowFrac * Precipitation + RFCF * RainFrac * Precipitation
) # different correction for rainfall and snowfall
SnowFall = SnowFrac * Precipitation # snowfall depth
RainFall = RainFrac * Precipitation # rainfall depth
PotSnowMelt = pcr.ifthenelse(
Temperature > TTM, Cfmax * (Temperature - TTM), pcr.scalar(0.0)
) # Potential snow melt, based on temperature
PotRefreezing = pcr.ifthenelse(
Temperature < TTM, Cfmax * CFR * (TTM - Temperature), 0.0
) # Potential refreezing, based on temperature
Refreezing = pcr.ifthenelse(
Temperature < TTM, pcr.min(PotRefreezing, SnowWater), 0.0
) # actual refreezing
# No landuse correction here
SnowMelt = pcr.min(PotSnowMelt, Snow) # actual snow melt
Snow = Snow + SnowFall + Refreezing - SnowMelt # dry snow content
SnowWater = SnowWater - Refreezing # free water content in snow
MaxSnowWater = Snow * WHC # Max water in the snow
SnowWater = (
SnowWater + SnowMelt + RainFall
) # Add all water and potentially supersaturate the snowpack
RainFall = pcr.max(SnowWater - MaxSnowWater, 0.0) # rain + surpluss snowwater
SnowWater = SnowWater - RainFall
return Snow, SnowWater, SnowMelt, RainFall, SnowFall
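# Scalar illustration of the degree-day partitioning above (numbers are made up):
# with TT = 0.0, TTI = 2.0, TTM = 0.0, Cfmax = 3.75 and Temperature = 0.5 oC,
#   RainFrac    = (0.5 - (0.0 - 2.0/2)) / 2.0 = 0.75   -> 75% of precipitation falls as rain
#   SnowFrac    = 0.25
#   PotSnowMelt = 3.75 * (0.5 - 0.0) = 1.875 mm per timestep
# so a 10 mm precipitation event adds 2.5 mm of snowfall to the dry snow pack while
# up to 1.875 mm of existing snow can melt.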
class WflowModel(pcraster.framework.DynamicModel):
"""
.. versionchanged:: 0.91
- Calculation of GWScale moved to resume() to allow fitting.
.. versionadded:: 0.91
- added S-curve for freezing soil infiltration reduction calculations
.. todo::
- add slope based quick-runoff -> less percolation on hillslopes...
"""
def __init__(self, cloneMap, Dir, RunDir, configfile):
pcraster.framework.DynamicModel.__init__(self)
self.UStoreLayerDepth = []
self.caseName = os.path.abspath(Dir)
self.clonemappath = os.path.join(os.path.abspath(Dir), "staticmaps", cloneMap)
pcr.setclone(self.clonemappath)
self.runId = RunDir
self.Dir = os.path.abspath(Dir)
self.configfile = configfile
self.SaveDir = os.path.join(self.Dir, self.runId)
def irrigationdemand(self, pottrans, acttrans, irareas):
"""
Determine irrigation water demand from the difference between potential
transpiration and actual transpiration.
:param pottrans: potential transpiration (epot minus interception and soil/open water evaporation)
:param acttrans: actual transpiration
:param ir_areas: maps of irrigation areas
:return: demand
"""
Et_diff = pcr.areaaverage(pottrans - acttrans, pcr.nominal(irareas))
# Now determine demand in m^3/s for each area
sqmarea = pcr.areatotal(self.reallength * self.reallength, pcr.nominal(irareas))
m3sec = Et_diff * sqmarea / 1000.0 / self.timestepsecs
return Et_diff, m3sec
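# Unit check for the conversion above (illustrative numbers): a transpiration deficit
# of 1 mm over a 1 km2 irrigation area with a daily timestep gives
#   1 mm * 1e6 m2 / 1000 / 86400 s  ~=  0.0116 m3/s
# i.e. Et_diff [mm] * area [m2] / 1000 yields m3 per timestep, and dividing by
# timestepsecs turns it into a flow rate.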
def updateRunOff(self):
"""
Updates the kinematic wave reservoir. Should be run after updates to Q
"""
self.WaterLevelR = (self.AlphaR * pow(self.RiverRunoff, self.Beta)) / self.Bw
# wetted perimeter (m)
Pr = self.Bw + (2 * self.WaterLevelR)
# Alpha
self.AlphaR = self.AlpTermR * pow(Pr, self.AlpPow)
self.OldKinWaveVolumeR = self.KinWaveVolumeR
self.KinWaveVolumeR = self.WaterLevelR * self.Bw * self.DCL
self.dyn['AlphaR'] = pcr.pcr2numpy(self.AlphaR,self.mv).ravel()
self.WaterLevelL = pcr.ifthenelse( self.SW > 0, (self.AlphaL * pow(self.LandRunoff, self.Beta)) / self.SW, 0.0)
Pl = self.SW + (2 * self.WaterLevelL)
# Alpha
self.AlphaL = self.AlpTermL * pow(Pl, self.AlpPow)
self.OldKinWaveVolumeL = self.KinWaveVolumeL
self.KinWaveVolumeL = self.WaterLevelL * self.SW * self.DL
self.dyn['AlphaL'] = pcr.pcr2numpy(self.AlphaL,self.mv).ravel()
def stateVariables(self):
"""
returns a list of state variables that are essential to the model.
This list is essential for the resume and suspend functions to work.
This function is specific for each model and **must** be present.
:var self.RiverRunoff: Surface runoff in the kin-wave reservoir [m^3/s]
:var self.LandRunoff: Surface runoff in the kin-wave reservoir [m^3/s]
:var self.SurfaceRunoffDyn: Surface runoff in the dyn-wave reservoir [m^3/s]
:var self.WaterLevelR: Water level in the river kin-wave reservoir [m]
:var self.WaterLevelL: Water level in the land kin-wave reservoir [m]
:var self.WaterLevelDyn: Water level in the dyn-wave reservoir [m]
:var self.Snow: Snow pack [mm]
:var self.SnowWater: Snow pack water [mm]
:var self.TSoil: Top soil temperature [oC]
:var self.UStoreDepth: Water in the Unsaturated Store [mm]
:var self.SatWaterDepth: Water in the saturated store [mm]
:var self.CanopyStorage: Amount of water on the Canopy [mm]
:var self.ReservoirVolume: Volume of each reservoir [m^3]
:var self.GlacierStore: Thickness of the Glacier in a gridcell [mm]
"""
states = [
"RiverRunoff",
"WaterLevelR",
"LandRunoff",
"WaterLevelL",
"SatWaterDepth",
"Snow",
"TSoil",
"UStoreLayerDepth",
"SnowWater",
"CanopyStorage",
"SubsurfaceFlow",
]
if hasattr(self, "GlacierFrac"):
states.append("GlacierStore")
if hasattr(self, "ReserVoirSimpleLocs"):
states.append("ReservoirVolume")
if hasattr(self, "ReserVoirComplexLocs"):
states.append("ReservoirWaterLevel")
if hasattr(self, "nrpaddyirri"):
if self.nrpaddyirri > 0:
states.append("PondingDepth")
return states
def supplyCurrentTime(self):
"""
gets the current time in seconds after the start of the run
"""
return self.currentTimeStep() * self.timestepsecs
def suspend(self):
self.logger.info("Saving initial conditions...")
self.wf_suspend(os.path.join(self.SaveDir, "outstate"))
if self.OverWriteInit:
self.logger.info("Saving initial conditions over start conditions...")
self.wf_suspend(self.SaveDir + "/instate/")
def parameters(self):
"""
Define all model parameters here that the framework should handle for the model
See wf_updateparameters and the parameters section of the ini file
If you use this, make sure to call wf_updateparameters at the start of the dynamic section
and at the start/end of the initial section
"""
modelparameters = []
# Static model parameters e.g.
# modelparameters.append(self.ParamType(name="RunoffGeneratingGWPerc",stack="intbl/RunoffGeneratingGWPerc.tbl",type="static",default=0.1))
# 3: Input time series ###################################################
self.P_mapstack = self.Dir + configget(
self.config, "inputmapstacks", "Precipitation", "/inmaps/P"
) # timeseries for rainfall
self.PET_mapstack = self.Dir + configget(
self.config, "inputmapstacks", "EvapoTranspiration", "/inmaps/PET"
) # timeseries for rainfall"/inmaps/PET" # potential evapotranspiration
self.TEMP_mapstack = self.Dir + configget(
self.config, "inputmapstacks", "Temperature", "/inmaps/TEMP"
) # timeseries for rainfall "/inmaps/TEMP" # global radiation
self.Inflow_mapstack = self.Dir + configget(
self.config, "inputmapstacks", "Inflow", "/inmaps/IF"
) # timeseries for rainfall "/inmaps/IF" # in/outflow locations (abstractions)
# Meteo and other forcing
modelparameters.append(
self.ParamType(
name="Precipitation",
stack=self.P_mapstack,
type="timeseries",
default=0.0,
verbose=True,
lookupmaps=[],
)
)
modelparameters.append(
self.ParamType(
name="PotenEvap",
stack=self.PET_mapstack,
type="timeseries",
default=0.0,
verbose=True,
lookupmaps=[],
)
)
modelparameters.append(
self.ParamType(
name="Temperature",
stack=self.TEMP_mapstack,
type="timeseries",
default=10.0,
verbose=True,
lookupmaps=[],
)
)
modelparameters.append(
self.ParamType(
name="Inflow",
stack=self.Inflow_mapstack,
type="timeseries",
default=0.0,
verbose=False,
lookupmaps=[],
)
)
modelparameters.append(
self.ParamType(
name="IrrigationAreas",
stack="staticmaps/wflow_irrigationareas.map",
type="staticmap",
default=0.0,
verbose=False,
lookupmaps=[],
)
)
modelparameters.append(
self.ParamType(
name="IrrigationSurfaceIntakes",
stack="staticmaps/wflow_irrisurfaceintake.map",
type="staticmap",
default=0.0,
verbose=False,
lookupmaps=[],
)
)
modelparameters.append(
self.ParamType(
name="IrrigationPaddyAreas",
stack="staticmaps/wflow_irrigationpaddyareas.map",
type="staticmap",
default=0.0,
verbose=False,
lookupmaps=[],
)
)
modelparameters.append(
self.ParamType(
name="IrrigationSurfaceReturn",
stack="staticmaps/wflow_irrisurfacereturns.map",
type="staticmap",
default=0.0,
verbose=False,
lookupmaps=[],
)
)
modelparameters.append(
self.ParamType(
name="h_max",
stack="staticmaps/wflow_hmax.map",
type="staticmap",
default=0.0,
verbose=False,
lookupmaps=[],
)
)
modelparameters.append(
self.ParamType(
name="h_min",
stack="staticmaps/wflow_hmin.map",
type="staticmap",
default=0.0,
verbose=False,
lookupmaps=[],
)
)
modelparameters.append(
self.ParamType(
name="h_p",
stack="staticmaps/wflow_hp.map",
type="staticmap",
default=0.0,
verbose=False,
lookupmaps=[],
)
)
return modelparameters
def initial(self):
"""
Initial part of the model, executed only once. Reads all static data from disk
*Soil*
:var M.tbl: M parameter in the SBM model. Governs the decay of Ksat with depth [-]
:var thetaR.tbl: Residual water content [mm/mm]
:var thetaS.tbl: Saturated water content (porosity) [mm/mm]
:var KsatVer.tbl: Saturated conductivity [mm/d]
:var PathFrac.tbl: Fraction of compacted area per grid cell [-]
:var InfiltCapSoil.tbl: Soil infiltration capacity [mm/d]
:var InfiltCapPath.tbl: Infiltration capacity of the compacted areas [mm/d]
:var SoilMinThickness.tbl: Minimum depth of the soil [mm]
:var SoilThickness.tbl: Maximum depth of the soil [mm]
:var RootingDepth.tbl: Depth of the roots [mm]
:var MaxLeakage.tbl: Maximum leakage out of the soil profile [mm/d]
:var CapScale.tbl: Scaling factor in the capillary rise calculations (100) [mm/d]
:var RunoffGeneratingGWPerc: Fraction of the soil depth that contributes to subcell runoff (0.1) [-]
:var rootdistpar.tbl: Determine how roots are linked to water table. The number
should be negative. A more negative number means that all roots are wet if the water
table is above the lowest part of the roots.
A less negative number smooths this. [mm] (default = -80000)
*Canopy*
:var CanopyGapFraction.tbl: Fraction of precipitation that does not hit the canopy directly [-]
:var MaxCanopyStorage.tbl: Canopy interception storage capacity [mm]
:var EoverR.tbl: Ratio of average wet canopy evaporation rate over rainfall rate [-]
*Surface water*
:var N.tbl: Manning's N parameter
:var N_river.tbl: Manning's N parameter for cells marked as river
*Snow and frozen soil modelling parameters*
:var cf_soil.tbl: Soil infiltration reduction factor when soil is frozen [-] (< 1.0)
:var TTI.tbl: critical temperature for snowmelt and refreezing (1.000) [oC]
:var TT.tbl: defines interval in which precipitation falls as rainfall and snowfall (-1.41934) [oC]
:var Cfmax.tbl: meltconstant in temperature-index ( 3.75653) [-]
:var WHC.tbl: fraction of Snowvolume that can store water (0.1) [-]
:var w_soil.tbl: Soil temperature smooth factor. Given for daily timesteps. (0.1125) [-] Wigmosta, <NAME>., <NAME>, <NAME>, and <NAME> (2009).
"""
global statistics
global multpars
global updateCols
self.thestep = pcr.scalar(0)
self.basetimestep = 86400
self.SSSF = False
pcr.setglobaloption("unittrue")
self.mv = -999
self.count = 0
self.logger.info("running for " + str(self.nrTimeSteps()) + " timesteps")
# Set and get defaults from ConfigFile here ###################################
self.Tslice = int(configget(self.config, "model", "Tslice", "1"))
self.reinit = int(configget(self.config, "run", "reinit", "0"))
self.OverWriteInit = int(configget(self.config, "model", "OverWriteInit", "0"))
self.updating = int(configget(self.config, "model", "updating", "0"))
self.updateFile = configget(self.config, "model", "updateFile", "no_set")
self.TransferMethod = int(
configget(self.config, "model", "transfermethod", "0")
)
self.maxitsupply = int(configget(self.config, "model", "maxitsupply", "5"))
self.UST = int(configget(self.config, "model", "Whole_UST_Avail", "0"))
self.NRiverMethod = int(configget(self.config, "model", "nrivermethod", "1"))
self.kinwaveIters = int(configget(self.config, "model", "kinwaveIters", "0"))
if self.kinwaveIters == 1:
self.logger.info(
"Using sub timestep for kinematic wave (iterate)"
)
if self.TransferMethod == 1:
self.logger.info(
"Applying the original topog_sbm vertical transfer formulation"
)
self.sCatch = int(configget(self.config, "model", "sCatch", "0"))
self.intbl = configget(self.config, "model", "intbl", "intbl")
self.modelSnow = int(configget(self.config, "model", "ModelSnow", "1"))
self.soilInfReduction = int(configget(self.config, "model", "soilInfRedu", "1"))
sizeinmetres = int(configget(self.config, "layout", "sizeinmetres", "0"))
alf = float(configget(self.config, "model", "Alpha", "60"))
# TODO: make this into a list for all gauges or a map
Qmax = float(configget(self.config, "model", "AnnualDischarge", "300"))
self.UpdMaxDist = float(configget(self.config, "model", "UpdMaxDist", "100"))
self.MaxUpdMult = float(configget(self.config, "model", "MaxUpdMult", "1.3"))
self.MinUpdMult = float(configget(self.config, "model", "MinUpdMult", "0.7"))
self.UpFrac = float(configget(self.config, "model", "UpFrac", "0.8"))
# self.ExternalQbase=int(configget(self.config,'model','ExternalQbase','0'))
self.waterdem = int(configget(self.config, "model", "waterdem", "0"))
WIMaxScale = float(configget(self.config, "model", "WIMaxScale", "0.8"))
self.MassWasting = int(configget(self.config, "model", "MassWasting", "0"))
self.nrLayers = int(configget(self.config, "model", "nrLayers", "1"))
# static maps to use (normally default)
wflow_subcatch = configget(
self.config, "model", "wflow_subcatch", "staticmaps/wflow_subcatch.map"
)
wflow_dem = configget(
self.config, "model", "wflow_dem", "staticmaps/wflow_dem.map"
)
wflow_ldd = configget(
self.config, "model", "wflow_ldd", "staticmaps/wflow_ldd.map"
)
wflow_river = configget(
self.config, "model", "wflow_river", "staticmaps/wflow_river.map"
)
wflow_riverlength = configget(
self.config,
"model",
"wflow_riverlength",
"staticmaps/wflow_riverlength.map",
)
wflow_riverlength_fact = configget(
self.config,
"model",
"wflow_riverlength_fact",
"staticmaps/wflow_riverlength_fact.map",
)
wflow_landuse = configget(
self.config, "model", "wflow_landuse", "staticmaps/wflow_landuse.map"
)
wflow_soil = configget(
self.config, "model", "wflow_soil", "staticmaps/wflow_soil.map"
)
wflow_gauges = configget(
self.config, "model", "wflow_gauges", "staticmaps/wflow_gauges.map"
)
wflow_inflow = configget(
self.config, "model", "wflow_inflow", "staticmaps/wflow_inflow.map"
)
wflow_riverwidth = configget(
self.config, "model", "wflow_riverwidth", "staticmaps/wflow_riverwidth.map"
)
wflow_streamorder = configget(
self.config,
"model",
"wflow_streamorder",
"staticmaps/wflow_streamorder.map",
)
# 2: Input base maps ########################################################
subcatch = pcr.ordinal(
self.wf_readmap(os.path.join(self.Dir, wflow_subcatch), 0.0, fail=True)
) # Determines the area of calculations (all cells > 0)
subcatch = pcr.ifthen(subcatch > 0, subcatch)
self.Altitude = self.wf_readmap(
os.path.join(self.Dir, wflow_dem), 0.0, fail=True
) # * pcr.scalar(pcr.defined(subcatch)) # DEM
self.TopoLdd = pcr.ldd(
self.wf_readmap(os.path.join(self.Dir, wflow_ldd), 0.0, fail=True)
        )  # Local drainage direction (ldd) map
self.TopoId = pcr.ordinal(
self.wf_readmap(os.path.join(self.Dir, wflow_subcatch), 0.0, fail=True)
) # area map
self.River = pcr.cover(
pcr.boolean(
self.wf_readmap(os.path.join(self.Dir, wflow_river), 0.0, fail=True)
),
0,
)
self.RiverLength = pcr.cover(
self.wf_readmap(os.path.join(self.Dir, wflow_riverlength), 0.0), 0.0
)
# Factor to multiply riverlength with (defaults to 1.0)
self.RiverLengthFac = self.wf_readmap(
os.path.join(self.Dir, wflow_riverlength_fact), 1.0
)
# read landuse and soilmap and make sure there are no missing points related to the
        # subcatchment map. Currently sets the lu and soil type to 1
self.LandUse = pcr.ordinal(
self.wf_readmap(os.path.join(self.Dir, wflow_landuse), 0.0, fail=True)
)
self.LandUse = pcr.cover(self.LandUse, pcr.ordinal(subcatch > 0))
self.Soil = pcr.ordinal(
self.wf_readmap(os.path.join(self.Dir, wflow_soil), 0.0, fail=True)
)
self.Soil = pcr.cover(self.Soil, pcr.ordinal(subcatch > 0))
self.OutputLoc = pcr.ordinal(
self.wf_readmap(os.path.join(self.Dir, wflow_gauges), 0.0, fail=True)
) # location of output gauge(s)
self.InflowLoc = pcr.ordinal(
self.wf_readmap(os.path.join(self.Dir, wflow_inflow), 0.0)
) # location abstractions/inflows.
self.RiverWidth = self.wf_readmap(os.path.join(self.Dir, wflow_riverwidth), 0.0)
# Experimental
self.RunoffGenSigmaFunction = int(
configget(self.config, "model", "RunoffGenSigmaFunction", "0")
)
self.SubCatchFlowOnly = int(
configget(self.config, "model", "SubCatchFlowOnly", "0")
)
self.OutputId = pcr.ordinal(
self.wf_readmap(os.path.join(self.Dir, wflow_subcatch), 0.0, fail=True)
) # location of subcatchment
        # Temperature correction per cell to add
        self.TempCor = self.wf_readmap(
            os.path.join(
                self.Dir,
                configget(
                    self.config,
                    "model",
                    "TemperatureCorrectionMap",
                    "staticmaps/wflow_tempcor.map",
                ),
            ),
            0.0,
        )
self.ZeroMap = 0.0 * pcr.scalar(subcatch) # map with only zero's
# Set static initial values here #########################################
self.pi = 3.1416
self.e = 2.7183
self.SScale = 100.0
self.Latitude = pcr.ycoordinate(pcr.boolean(self.Altitude))
self.Longitude = pcr.xcoordinate(pcr.boolean(self.Altitude))
# Read parameters NEW Method
self.logger.info("Linking parameters to landuse, catchment and soil...")
self.wf_updateparameters()
self.RunoffGeneratingGWPerc = self.readtblDefault(
self.Dir + "/" + self.intbl + "/RunoffGeneratingGWPerc.tbl",
self.LandUse,
subcatch,
self.Soil,
0.1,
)
if hasattr(self, "LAI"):
# Sl must also be defined
if not hasattr(self, "Sl"):
logging.error(
"Sl (specific leaf storage) not defined! Needed becausee LAI is defined."
)
logging.error("Please add it to the modelparameters section. e.g.:")
logging.error(
"Sl=inmaps/clim/LCtoSpecificLeafStorage.tbl,tbl,0.5,1,inmaps/clim/LC.map"
)
if not hasattr(self, "Kext"):
logging.error(
"Kext (canopy extinction coefficient) not defined! Needed becausee LAI is defined."
)
logging.error("Please add it to the modelparameters section. e.g.:")
logging.error(
"Kext=inmaps/clim/LCtoExtinctionCoefficient.tbl,tbl,0.5,1,inmaps/clim/LC.map"
)
if not hasattr(self, "Swood"):
logging.error(
"Swood wood (branches, trunks) canopy storage not defined! Needed becausee LAI is defined."
)
logging.error("Please add it to the modelparameters section. e.g.:")
logging.error(
"Swood=inmaps/clim/LCtoBranchTrunkStorage.tbl,tbl,0.5,1,inmaps/clim/LC.map"
)
self.Cmax = self.Sl * self.LAI + self.Swood
self.CanopyGapFraction = pcr.exp(-self.Kext * self.LAI)
self.np_CanopyGapFraction = pcr.pcr2numpy(self.CanopyGapFraction,self.mv)
# TODO: Add MAXLAI and CWf lookup
else:
self.Cmax = self.readtblDefault(
self.Dir + "/" + self.intbl + "/MaxCanopyStorage.tbl",
self.LandUse,
subcatch,
self.Soil,
1.0,
)
self.CanopyGapFraction = self.readtblDefault(
self.Dir + "/" + self.intbl + "/CanopyGapFraction.tbl",
self.LandUse,
subcatch,
self.Soil,
0.1,
)
self.EoverR = self.readtblDefault(
self.Dir + "/" + self.intbl + "/EoverR.tbl",
self.LandUse,
subcatch,
self.Soil,
0.1,
)
if not hasattr(self, "DemandReturnFlowFraction"):
self.DemandReturnFlowFraction = self.ZeroMap
self.RootingDepth = self.readtblDefault(
self.Dir + "/" + self.intbl + "/RootingDepth.tbl",
self.LandUse,
subcatch,
self.Soil,
750.0,
) # rooting depth
        #: rootdistpar determines how roots are linked to the water table.
self.rootdistpar = self.readtblDefault(
self.Dir + "/" + self.intbl + "/rootdistpar.tbl",
self.LandUse,
subcatch,
self.Soil,
-8000,
        )  # rootdistpar
# Soil parameters
        # infiltration capacity of the soil [mm/day]
self.InfiltCapSoil = (
self.readtblDefault(
self.Dir + "/" + self.intbl + "/InfiltCapSoil.tbl",
self.LandUse,
subcatch,
self.Soil,
100.0,
)
* self.timestepsecs
/ self.basetimestep
)
self.CapScale = self.readtblDefault(
self.Dir + "/" + self.intbl + "/CapScale.tbl",
self.LandUse,
subcatch,
self.Soil,
100.0,
        )
        # infiltration capacity of the compacted areas (paths) [mm/day]
self.InfiltCapPath = (
self.readtblDefault(
self.Dir + "/" + self.intbl + "/InfiltCapPath.tbl",
self.LandUse,
subcatch,
self.Soil,
10.0,
)
* self.timestepsecs
/ self.basetimestep
)
self.MaxLeakage = (
self.readtblDefault(
self.Dir + "/" + self.intbl + "/MaxLeakage.tbl",
self.LandUse,
subcatch,
self.Soil,
0.0,
)
* self.timestepsecs
/ self.basetimestep
)
self.MaxPercolation = (
self.readtblDefault(
self.Dir + "/" + self.intbl + "/MaxPercolation.tbl",
self.LandUse,
subcatch,
self.Soil,
0.0,
)
* self.timestepsecs
/ self.basetimestep
)
        # Fraction of the cell area with compacted soil (paths etc.) [-]
self.PathFrac = self.readtblDefault(
self.Dir + "/" + self.intbl + "/PathFrac.tbl",
self.LandUse,
subcatch,
self.Soil,
0.01,
)
# thickness of the soil
self.SoilThickness = self.readtblDefault(
self.Dir + "/" + self.intbl + "/SoilThickness.tbl",
self.LandUse,
subcatch,
self.Soil,
2000.0,
)
self.thetaR = self.readtblDefault(
self.Dir + "/" + self.intbl + "/thetaR.tbl",
self.LandUse,
subcatch,
self.Soil,
0.01,
)
self.thetaS = self.readtblDefault(
self.Dir + "/" + self.intbl + "/thetaS.tbl",
self.LandUse,
subcatch,
self.Soil,
0.6,
)
        # minimum thickness of the soil [mm]
self.SoilMinThickness = self.readtblDefault(
self.Dir + "/" + self.intbl + "/SoilMinThickness.tbl",
self.LandUse,
subcatch,
self.Soil,
500.0,
)
# KsatVer = $2\inmaps\KsatVer.map
self.KsatVer = (
self.readtblDefault(
self.Dir + "/" + self.intbl + "/KsatVer.tbl",
self.LandUse,
subcatch,
self.Soil,
3000.0,
)
* self.timestepsecs
/ self.basetimestep
)
self.MporeFrac = self.readtblDefault(
self.Dir + "/" + self.intbl + "/MporeFrac.tbl",
self.LandUse,
subcatch,
self.Soil,
0.0,
)
self.KsatHorFrac = self.readtblDefault(
self.Dir + "/" + self.intbl + "/KsatHorFrac.tbl",
self.LandUse,
subcatch,
self.Soil,
1.0,
)
# Check if we have irrigation areas
tt = pcr.pcr2numpy(self.IrrigationAreas, 0.0)
self.nrirri = tt.max()
        # Check if we have paddy irrigation areas
tt = pcr.pcr2numpy(self.IrrigationPaddyAreas, 0.0)
self.nrpaddyirri = tt.max()
self.Beta = pcr.scalar(0.6) # For sheetflow
self.M = self.readtblDefault(
self.Dir + "/" + self.intbl + "/M.tbl",
self.LandUse,
subcatch,
self.Soil,
300.0,
) # Decay parameter in Topog_sbm
self.N = self.readtblDefault(
self.Dir + "/" + self.intbl + "/N.tbl",
self.LandUse,
subcatch,
self.Soil,
0.072,
) # Manning overland flow
if self.NRiverMethod == 1:
self.NRiver = self.readtblDefault(
self.Dir + "/" + self.intbl + "/N_River.tbl",
self.LandUse,
subcatch,
self.Soil,
0.036,
) # Manning river
if self.NRiverMethod == 2:
self.NRiver = self.readtblFlexDefault(
self.Dir + "/" + self.intbl + "/N_River.tbl", 0.036, wflow_streamorder
)
self.WaterFrac = self.readtblDefault(
self.Dir + "/" + self.intbl + "/WaterFrac.tbl",
self.LandUse,
subcatch,
self.Soil,
0.0,
) # Fraction Open water
self.et_RefToPot = self.readtblDefault(
self.Dir + "/" + self.intbl + "/et_reftopot.tbl",
self.LandUse,
subcatch,
self.Soil,
1.0,
        )  # Multiplication factor to convert reference ET to potential ET
if self.modelSnow:
# HBV Snow parameters
# critical temperature for snowmelt and refreezing: TTI= 1.000
self.TTI = self.readtblDefault(
self.Dir + "/" + self.intbl + "/TTI.tbl",
self.LandUse,
subcatch,
self.Soil,
1.0,
)
# TT = -1.41934 # defines interval in which precipitation falls as rainfall and snowfall
self.TT = self.readtblDefault(
self.Dir + "/" + self.intbl + "/TT.tbl",
self.LandUse,
subcatch,
self.Soil,
-1.41934,
)
self.TTM = self.readtblDefault(
self.Dir + "/" + self.intbl + "/TTM.tbl",
self.LandUse,
subcatch,
self.Soil,
-1.41934,
)
# Cfmax = 3.75653 # meltconstant in temperature-index
self.Cfmax = self.readtblDefault(
self.Dir + "/" + self.intbl + "/Cfmax.tbl",
self.LandUse,
subcatch,
self.Soil,
3.75653,
)
# WHC= 0.10000 # fraction of Snowvolume that can store water
self.WHC = self.readtblDefault(
self.Dir + "/" + self.intbl + "/WHC.tbl",
self.LandUse,
subcatch,
self.Soil,
0.1,
)
# Wigmosta, <NAME>., <NAME>, <NAME>, and <NAME> (2009).
self.w_soil = (
self.readtblDefault(
self.Dir + "/" + self.intbl + "/w_soil.tbl",
self.LandUse,
subcatch,
self.Soil,
0.9 * 3.0 / 24.0,
)
* self.timestepsecs
/ self.basetimestep
)
if self.soilInfReduction:
self.cf_soil = pcr.min(
0.99,
self.readtblDefault(
self.Dir + "/" + self.intbl + "/cf_soil.tbl",
self.LandUse,
subcatch,
self.Soil,
0.038,
),
            )  # reduction factor for infiltration when the soil is frozen
        # We are modelling glaciers
# Determine real slope and cell length
self.xl, self.yl, self.reallength = pcrut.detRealCellLength(
self.ZeroMap, sizeinmetres
)
self.Slope = pcr.slope(self.Altitude)
# self.Slope=pcr.ifthen(pcr.boolean(self.TopoId),pcr.max(0.001,self.Slope*celllength()/self.reallength))
self.Slope = pcr.max(0.00001, self.Slope * pcr.celllength() / self.reallength)
Terrain_angle = pcr.scalar(pcr.atan(self.Slope))
#self.N = pcr.ifthenelse(self.River, self.NRiver, self.N)
if hasattr(self, "ReserVoirSimpleLocs") or hasattr(
self, "ReserVoirComplexLocs"
):
self.ReserVoirLocs = self.ZeroMap
self.filter_P_PET = self.ZeroMap + 1.0
if hasattr(self, "ReserVoirSimpleLocs"):
            # Check if we have simple and/or complex reservoirs
self.ReserVoirSimpleLocs = pcr.nominal(self.ReserVoirSimpleLocs)
self.ReservoirSimpleAreas = pcr.nominal(self.ReservoirSimpleAreas)
tt_simple = pcr.pcr2numpy(self.ReserVoirSimpleLocs, 0.0)
self.nrresSimple = tt_simple.max()
self.ReserVoirLocs = self.ReserVoirLocs + pcr.cover(
pcr.scalar(self.ReserVoirSimpleLocs)
)
areamap = self.reallength * self.reallength
res_area = pcr.areatotal(pcr.spatial(areamap), self.ReservoirSimpleAreas)
resarea_pnt = pcr.ifthen(pcr.boolean(self.ReserVoirSimpleLocs), res_area)
self.ResSimpleArea = pcr.ifthenelse(
pcr.cover(self.ResSimpleArea, pcr.scalar(0.0)) > 0,
self.ResSimpleArea,
pcr.cover(resarea_pnt, pcr.scalar(0.0)),
)
self.filter_P_PET = pcr.ifthenelse(
pcr.boolean(pcr.cover(res_area, pcr.scalar(0.0))),
res_area * 0.0,
self.filter_P_PET,
)
else:
self.nrresSimple = 0
if hasattr(self, "ReserVoirComplexLocs"):
self.ReservoirComplexAreas = pcr.nominal(self.ReservoirComplexAreas)
self.ReserVoirComplexLocs = pcr.nominal(self.ReserVoirComplexLocs)
tt_complex = pcr.pcr2numpy(self.ReserVoirComplexLocs, 0.0)
self.nrresComplex = tt_complex.max()
self.ReserVoirLocs = self.ReserVoirLocs + pcr.cover(
pcr.scalar(self.ReserVoirComplexLocs)
)
res_area = pcr.cover(pcr.scalar(self.ReservoirComplexAreas), 0.0)
self.filter_P_PET = pcr.ifthenelse(
res_area > 0, res_area * 0.0, self.filter_P_PET
)
# read files
self.sh = {}
res_ids = pcr.ifthen(self.ResStorFunc == 2, self.ReserVoirComplexLocs)
np_res_ids = pcr.pcr2numpy(res_ids, 0)
np_res_ids_u = np.unique(np_res_ids[
|
np.nonzero(np_res_ids)
|
numpy.nonzero
|
import numpy as np
import phi.math as math
def spatial_rank(tensor_or_mac):
"""
Returns the number of spatial dimensions.
Arrays are expected to be of the shape (batch size, spatial dimensions..., component size)
The number of spatial dimensions is equal to the tensor rank minus two.
:param tensor_or_mac: a tensor or StaggeredGrid instance
:return: the number of spatial dimensions as an integer
"""
if isinstance(tensor_or_mac, StaggeredGrid):
return tensor_or_mac.spatial_rank
else:
return len(tensor_or_mac.shape) - 2
def indices_tensor(tensor, dtype=np.float32):
"""
Returns an index tensor of the same spatial shape as the given tensor.
Each index denotes the location within the tensor starting from zero.
Indices are encoded as vectors in the index tensor.
:param tensor: a tensor of shape (batch size, spatial dimensions..., component size)
:param dtype: a numpy data type (default float32)
:return: an index tensor of shape (1, spatial dimensions..., spatial rank)
"""
spatial_dimensions = list(tensor.shape[1:-1])
idx_zyx = np.meshgrid(*[range(dim) for dim in spatial_dimensions], indexing="ij")
idx = np.stack(idx_zyx, axis=-1).reshape([1, ] + spatial_dimensions + [len(spatial_dimensions)])
return idx.astype(dtype)
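# Minimal usage sketch for indices_tensor (array values chosen for illustration only):
#   field = np.zeros((1, 4, 3, 1), np.float32)   # (batch, y, x, components)
#   idx = indices_tensor(field)                  # shape (1, 4, 3, 2)
#   idx[0, 2, 1] -> [2.0, 1.0]                   # each entry stores its own (y, x) position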
def normalize_to(target, source=1):
"""
Multiplies the target so that its total content matches the source.
:param target: a tensor
:param source: a tensor or number
:return: normalized tensor of the same shape as target
"""
return target * (math.sum(source) / math.sum(target))
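# Hedged sketch of normalize_to, assuming NumPy-backed tensors behind phi.math:
#   a = np.ones((1, 4, 4, 1))        # total content math.sum(a) == 16
#   b = normalize_to(a, 4.0)         # rescaled so that math.sum(b) == 4.0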
def l1_loss(tensor, batch_norm=True, reduce_batches=True):
if isinstance(tensor, StaggeredGrid):
tensor = tensor.staggered
if reduce_batches:
total_loss = math.sum(math.abs(tensor))
else:
total_loss = math.sum(math.abs(tensor), axis=list(range(1, len(tensor.shape))))
if batch_norm and reduce_batches:
batch_size = math.shape(tensor)[0]
return total_loss / math.to_float(batch_size)
else:
return total_loss
def l2_loss(tensor, batch_norm=True, reduce_batches=True):
return l_n_loss(tensor, 2, batch_norm=batch_norm, reduce_batches=reduce_batches)
def l_n_loss(tensor, n, batch_norm=True, reduce_batches=True):
if isinstance(tensor, StaggeredGrid):
tensor = tensor.staggered
if reduce_batches:
total_loss = math.sum(tensor ** n) / n
else:
total_loss = math.sum(tensor ** n, axis=list(range(1, len(tensor.shape)))) / n
if batch_norm:
batch_size = math.shape(tensor)[0]
return total_loss / math.to_float(batch_size)
else:
return total_loss
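# Relationship between the losses above: l2_loss(t) is l_n_loss(t, 2), i.e. sum(t ** 2) / 2,
# divided by the batch size when batch_norm=True; l1_loss uses abs() instead of a power and
# only applies the batch normalisation when the batch dimension is actually reduced.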
def at_centers(field):
if isinstance(field, StaggeredGrid):
return field.at_centers()
else:
return field
# Divergence
def divergence(vel, dx=1, difference="central"):
"""
Computes the spatial divergence of a vector field from finite differences.
:param vel: tensor of shape (batch size, spatial dimensions..., spatial rank) or StaggeredGrid
:param dx: distance between adjacent grid points (default 1)
    :param difference: type of difference, one of ('forward', 'central') (default 'central')
:return: tensor of shape (batch size, spatial dimensions..., 1)
"""
if isinstance(vel, StaggeredGrid):
return vel.divergence()
assert difference in ('central', 'forward')
rank = spatial_rank(vel)
if difference == "forward":
return _forward_divergence_nd(vel) / dx ** rank
else:
return _central_divergence_nd(vel) / (2 * dx) ** rank
def _forward_divergence_nd(field):
rank = spatial_rank(field)
dims = range(rank)
components = []
for dimension in dims:
vq = field[...,rank-dimension-1]
upper_slices = [(slice(1, None) if i == dimension else slice(None)) for i in dims]
lower_slices = [(slice(-1) if i == dimension else slice(None)) for i in dims]
diff = vq[[slice(None)]+upper_slices] - vq[[slice(None)]+lower_slices]
padded = math.pad(diff, [[0,0]] + [([0,1] if i==dimension else [0,0]) for i in dims])
components.append(padded)
return math.expand_dims(math.add(components), -1)
def _central_divergence_nd(tensor):
rank = spatial_rank(tensor)
dims = range(rank)
components = []
tensor = math.pad(tensor, [[0, 0]] + [[1, 1]]*rank + [[0, 0]])
for dimension in dims:
upper_slices = [(slice(2, None) if i == dimension else slice(1, -1)) for i in dims]
lower_slices = [(slice(-2) if i == dimension else slice(1, -1)) for i in dims]
diff = tensor[[slice(None)] + upper_slices + [rank - dimension - 1]] - \
tensor[[slice(None)] + lower_slices + [rank - dimension - 1]]
components.append(diff)
return math.expand_dims(math.add(components), -1)
# Gradient
def gradient(tensor, dx=1, difference="forward"):
"""
Calculates the gradient of a scalar field from finite differences.
The gradient vectors are in reverse order, lowest dimension first.
:param tensor: field with shape (batch_size, spatial_dimensions..., 1)
:param dx: physical distance between grid points (default 1)
:param difference: type of difference, one of ('forward', 'backward', 'central') (default 'forward')
:return: tensor of shape (batch_size, spatial_dimensions..., spatial rank)
"""
if tensor.shape[-1] != 1: raise ValueError("Gradient requires a scalar field as input")
dims = range(spatial_rank(tensor))
field = tensor[...,0]
if 1 in field.shape[1:]:
raise ValueError("All spatial dimensions must have size larger than 1, got {}".format(tensor.shape))
if difference.lower() == "central":
return _central_diff_nd(tensor, dims) / (dx * 2)
elif difference.lower() == "forward":
return _forward_diff_nd(field, dims) / dx
elif difference.lower() == "backward":
return _backward_diff_nd(field, dims) / dx
else:
raise ValueError("Invalid difference type: {}. Can be CENTRAL or FORWARD".format(difference))
def _backward_diff_nd(field, dims):
df_dq = []
for dimension in dims:
upper_slices = [(slice(1, None) if i==dimension else slice(None)) for i in dims]
lower_slices = [(slice(-1) if i==dimension else slice(None)) for i in dims]
diff = field[[slice(None)]+upper_slices] - field[[slice(None)]+lower_slices]
padded = math.pad(diff, [[0,0]]+[([1,0] if i == dimension else [0,0]) for i in dims])
df_dq.append(padded)
return math.stack(df_dq[::-1], axis=-1)
def _forward_diff_nd(field, dims):
df_dq = []
for dimension in dims:
upper_slices = [(slice(1, None) if i==dimension else slice(None)) for i in dims]
lower_slices = [(slice(-1) if i==dimension else slice(None)) for i in dims]
diff = field[[slice(None)]+upper_slices] - field[[slice(None)]+lower_slices]
padded = math.pad(diff, [[0,0]]+[([0,1] if i == dimension else [0,0]) for i in dims])
df_dq.append(padded)
return math.stack(df_dq[::-1], axis=-1)
def _central_diff_nd(field, dims):
field = math.pad(field, [[0,0]] + [[1,1]]*spatial_rank(field) + [[0, 0]], "symmetric")
df_dq = []
for dimension in dims:
upper_slices = [(slice(2, None) if i==dimension else slice(1,-1)) for i in dims]
lower_slices = [(slice(-2) if i==dimension else slice(1,-1)) for i in dims]
diff = field[[slice(None)] + upper_slices + [0]] - field[[slice(None)] + lower_slices + [0]]
df_dq.append(diff)
return math.stack(df_dq[::-1], axis=-1)
# Laplace
def laplace(tensor, weights=None, padding="symmetric"):
if tensor.shape[-1] != 1:
raise ValueError("Laplace operator requires a scalar field as input")
rank = spatial_rank(tensor)
if padding.lower() != "valid":
tensor = math.pad(tensor, [[0,0]] + [[1,1]] * rank + [[0,0]], padding)
if weights is not None:
return _weighted_sliced_laplace_nd(tensor, weights)
if rank == 2:
return _conv_laplace_2d(tensor)
elif rank == 3:
return _conv_laplace_3d(tensor)
else:
return _sliced_laplace_nd(tensor)
def _conv_laplace_2d(tensor):
kernel = np.zeros((3, 3, 1, 1), np.float32)
kernel[1,1,0,0] = -4
kernel[(0,1,1,2),(1,0,2,1),0,0] = 1
return math.conv(tensor, kernel, padding="VALID")
def _conv_laplace_3d(tensor):
kernel = np.zeros((3, 3, 3, 1, 1), np.float32)
kernel[1,1,1,0,0] = -6
kernel[(0,1,1,1,1,2), (1,0,2,1,1,1), (1,1,1,0,2,1), 0,0] = 1
return math.conv(tensor, kernel, padding="VALID")
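# The two convolution kernels above encode the standard finite-difference Laplacian stencils:
# in 2D the 5-point stencil
#      0  1  0
#      1 -4  1
#      0  1  0
# and in 3D the analogous 7-point stencil (-6 at the centre, +1 at the six face neighbours).
# Both use "VALID" padding because laplace() has already padded the input by one cell on
# every spatial side (unless padding="valid" was requested).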
def _sliced_laplace_nd(tensor):
# Laplace code for n dimensions
dims = range(spatial_rank(tensor))
components = []
for dimension in dims:
center_slices = [(slice(1, -1) if i == dimension else slice(1,-1)) for i in dims]
upper_slices = [(slice(2, None) if i == dimension else slice(1,-1)) for i in dims]
lower_slices = [(slice(-2) if i == dimension else slice(1,-1)) for i in dims]
diff = tensor[[slice(None)] + upper_slices + [slice(None)]] \
+ tensor[[slice(None)] + lower_slices + [slice(None)]] \
- 2 * tensor[[slice(None)] + center_slices + [slice(None)]]
components.append(diff)
return math.add(components)
def _weighted_sliced_laplace_nd(tensor, weights):
if tensor.shape[-1] != 1: raise ValueError("Laplace operator requires a scalar field as input")
dims = range(spatial_rank(tensor))
components = []
for dimension in dims:
center_slices = [(slice(1, -1) if i == dimension else slice(1,-1)) for i in dims]
upper_slices = [(slice(2, None) if i == dimension else slice(1,-1)) for i in dims]
lower_slices = [(slice(-2) if i == dimension else slice(1,-1)) for i in dims]
lower_weights = weights[[slice(None)] + lower_slices + [slice(None)]] * weights[[slice(None)] + center_slices + [slice(None)]]
upper_weights = weights[[slice(None)] + upper_slices + [slice(None)]] * weights[[slice(None)] + center_slices + [slice(None)]]
center_weights = - lower_weights - upper_weights
lower_values = tensor[[slice(None)] + lower_slices + [slice(None)]]
upper_values = tensor[[slice(None)] + upper_slices + [slice(None)]]
center_values = tensor[[slice(None)] + center_slices + [slice(None)]]
diff = upper_values * upper_weights + lower_values * lower_weights + center_values * center_weights
components.append(diff)
return math.add(components)
# Downsample / Upsample
def downsample2x(tensor, interpolation="LINEAR"):
if interpolation.lower() != "linear":
raise ValueError("Only linear interpolation supported")
dims = range(spatial_rank(tensor))
tensor = math.pad(tensor, [[0,0]]+
[([0, 1] if (dim % 2) != 0 else [0,0]) for dim in tensor.shape[1:-1]]
+ [[0,0]], "SYMMETRIC")
for dimension in dims:
upper_slices = [(slice(1, None, 2) if i==dimension else slice(None)) for i in dims]
lower_slices = [(slice(0, None, 2) if i==dimension else slice(None)) for i in dims]
sum = tensor[[slice(None)]+upper_slices+[slice(None)]] + tensor[[slice(None)]+lower_slices+[slice(None)]]
tensor = sum / 2
return tensor
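# Shape sketch for downsample2x (sizes chosen for illustration): a (1, 64, 64, 2) tensor
# becomes (1, 32, 32, 2); odd spatial sizes are first symmetrically padded by one cell,
# so (1, 65, 64, 2) comes out as (1, 33, 32, 2).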
def upsample2x(tensor, interpolation="LINEAR"):
if interpolation.lower() != "linear":
raise ValueError("Only linear interpolation supported")
dims = range(spatial_rank(tensor))
vlen = tensor.shape[-1]
spatial_dims = tensor.shape[1:-1]
tensor = math.pad(tensor, [[0, 0]] + [[1, 1]]*spatial_rank(tensor) + [[0, 0]], "SYMMETRIC")
for dim in dims:
left_slices_1 = [(slice(2, None) if i==dim else slice(None)) for i in dims]
left_slices_2 = [(slice(1,-1) if i==dim else slice(None)) for i in dims]
right_slices_1 = [(slice(1, -1) if i==dim else slice(None)) for i in dims]
right_slices_2 = [(slice(-2) if i==dim else slice(None)) for i in dims]
left = 0.75 * tensor[[slice(None)]+left_slices_2+[slice(None)]] + 0.25 * tensor[[slice(None)]+left_slices_1+[slice(None)]]
right = 0.25 * tensor[[slice(None)]+right_slices_2+[slice(None)]] + 0.75 * tensor[[slice(None)]+right_slices_1+[slice(None)]]
combined = math.stack([right, left], axis=2+dim)
tensor = math.reshape(combined, [-1] + [spatial_dims[dim] * 2 if i == dim else tensor.shape[i+1] for i in dims] + [vlen])
return tensor
def spatial_sum(tensor):
if isinstance(tensor, StaggeredGrid):
tensor = tensor.staggered
summed = math.sum(tensor, axis=math.dimrange(tensor))
for i in math.dimrange(tensor):
summed = math.expand_dims(summed, i)
return summed
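# spatial_sum reduces over the spatial axes but keeps them as size-1 dimensions, e.g. a
# (batch, y, x, components) tensor of shape (2, 8, 8, 1) comes back as (2, 1, 1, 1)
# (assuming math.dimrange enumerates the axes between batch and components).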
class StaggeredGrid:
"""
MACGrids represent a staggered vector field in which each vector component is sampled at the
face centers of centered hypercubes.
Going in the direction of a vector component, the first entry samples the lower face of the first cube and the
last entry the upper face of the last cube.
Therefore staggered grids contain one more entry in each spatial dimension than a centered field.
    This results in oversampling in the other directions; there, the highest element lies outside the grid.
Attributes:
shape (tuple Tensorshape): the shape of the staggered field
staggered (tensor): array or tensor holding the staggered field
"""
def __init__(self, staggered):
self.staggered = staggered
def __repr__(self):
return "StaggeredGrid(shape=%s)" % self.shape
def at_centers(self):
rank = self.spatial_rank
dims = range(rank)
df_dq = []
for d in dims: # z,y,x
upper_slices = [(slice(1, None) if i == d else slice(-1)) for i in dims]
lower_slices = [(slice(-1) if i == d else slice(-1)) for i in dims]
sum = self.staggered[[slice(None)] + upper_slices + [rank - d - 1]] +\
self.staggered[[slice(None)] + lower_slices + [rank - d - 1]]
df_dq.append(sum / rank)
return math.stack(df_dq[::-1], axis=-1)
def at_faces(self, face_dimension_xyz):
dims = range(self.spatial_rank)
face_dimension_zyx = len(dims) - face_dimension_xyz - 1 # 0=Z, 1=Y, 2=X, etc.
components = []
for d in dims: # z,y,x
if d == face_dimension_zyx:
components.append(self.staggered[..., len(dims) - d - 1])
else:
# Interpolate other components
vq = self.staggered[..., len(dims) - d - 1]
t = vq
for d2 in dims: # z,y,x
slices1 = [(slice(1, None) if i == d2 else slice(None)) for i in dims]
slices2 = [(slice(-1) if i == d2 else slice(None)) for i in dims]
t = t[[slice(None)] + slices1] + t[[slice(None)] + slices2]
if d2 == d:
t = math.pad(t, [[0, 0]] + [([0, 1] if i == d2 else [0, 0]) for i in dims]) / 2
else:
t = math.pad(t, [[0, 0]] + [([1, 0] if i == d2 else [0, 0]) for i in dims]) / 2
components.append(t)
return math.stack(components[::-1], axis=-1)
def divergence(self):
dims = range(self.spatial_rank)
components = []
for dimension in dims:
comp = self.spatial_rank - dimension - 1
upper_slices = [(slice(1, None) if i == dimension else slice(-1)) for i in dims]
lower_slices = [(slice(-1) if i == dimension else slice(-1)) for i in dims]
diff = self.staggered[[slice(None)] + upper_slices + [comp]] - \
self.staggered[[slice(None)] + lower_slices + [comp]]
components.append(diff)
return math.expand_dims(math.add(components), -1)
def abs(self):
return StaggeredGrid(math.abs(self.staggered))
def length_squared(self):
centered = self.at_centers()
scalar = math.sum(centered ** 2, axis=-1)
return math.expand_dims(scalar, axis=-1)
def soft_sqrt(self):
return StaggeredGrid(math.sqrt(math.maximum(self.staggered, 1e-20)))
def normalize(self):
v_length = math.sqrt(math.add([self.staggered[..., i] ** 2 for i in range(self.shape[-1])]))
global_mean = math.mean(v_length, axis=range(1, self.spatial_rank+1))
for i in range(self.spatial_rank+1):
global_mean = math.expand_dims(global_mean, -1)
return StaggeredGrid(self.staggered / global_mean)
def total(self):
v_length = math.sqrt(math.add([self.staggered[..., i] ** 2 for i in range(self.shape[-1])]))
total = math.sum(v_length, axis=range(1, self.spatial_rank+1))
for i in range(self.spatial_rank+1):
total = math.expand_dims(total, -1)
return total
def batch_div(self, tensor):
return StaggeredGrid(self.staggered / tensor)
def advect(self, field, interpolation="LINEAR", dt=1):
"""
Performs a semi-Lagrangian advection step, propagating the field through the velocity field.
        A backwards Euler step is performed and the sampling is done according to the specified interpolation.
        The velocity used for the advection is this StaggeredGrid itself.
        :param field: scalar or vector field to propagate
        :param dt: time increment of the advection step (default 1)
        :param interpolation: LINEAR, BSPLINE, IDW (default is LINEAR)
:return: the advected field
"""
if isinstance(field, StaggeredGrid):
return self.multi_advect([field], interpolation=interpolation, dt=dt)[0]
else:
return self._advect_centered_field(field, dt, interpolation)
def _advect_centered_field(self, field, dt, interpolation):
idx = indices_tensor(field)
centered_velocity = self.at_centers()[..., ::-1] # assume right number of components
sample_coords = idx - centered_velocity * dt
result = math.resample(field, sample_coords, interpolation=interpolation, boundary="REPLICATE")
return result
def _advect_mac(self, field_mac, dt, interpolation):
# resample in each dimension
idx = indices_tensor(self.staggered)
advected_component_fields = []
dims = range(len(self.staggered.shape) - 2)
for d in dims: # z,y,x
velocity_at_staggered_points = self.at_faces(len(dims) - d - 1)[..., ::-1]
sample_coords = idx - velocity_at_staggered_points * dt
d_comp = len(dims) - d - 1
advected = math.resample(field_mac[..., d_comp:d_comp + 1], sample_coords, interpolation=interpolation,
boundary="REPLICATE")
advected_component_fields.append(advected)
all_advected = math.concat(advected_component_fields[::-1], axis=-1)
return all_advected
def multi_advect(self, fields, interpolation="LINEAR", dt=1):
assert isinstance(fields, (list, tuple)), "first parameter must be either a tuple or list"
inputs_lists = []
coords_lists = []
value_generators = []
for field in fields:
if isinstance(field, StaggeredGrid):
i, c, v = self._mac_block_advection(field.staggered, dt)
else:
i, c, v = self._centered_block_advection(field, dt)
inputs_lists.append(i)
coords_lists.append(c)
value_generators.append(v)
inputs = math.concat(sum(inputs_lists, []), 0)
coords = math.concat(sum(coords_lists, []), 0)
all_advected = math.resample(inputs, coords, interpolation=interpolation, boundary="REPLICATE")
all_advected = math.reshape(all_advected, [self.spatial_rank, -1] + list(all_advected.shape[1:]))
all_advected = math.unstack(all_advected)
results = []
abs_i = 0
for i in range(len(inputs_lists)):
n = len(inputs_lists[0])
assigned_advected = all_advected[abs_i:abs_i+n]
results.append(value_generators[i](assigned_advected))
abs_i += n
return results
def _mac_block_advection(self, field_mac, dt):
# resample in each dimension
idx = indices_tensor(self.staggered)
dims = range(len(self.staggered.shape) - 2)
inputs_list = []
coords_list = []
for d in dims: # z,y,x
velocity_at_staggered_points = self.at_faces(len(dims) - d - 1)[..., ::-1]
sample_coords = idx - velocity_at_staggered_points * dt
d_comp = len(dims) - d - 1
coords_list.append(sample_coords)
inputs_list.append(field_mac[..., d_comp:d_comp + 1])
def post_advection(advected_list):
return StaggeredGrid(math.concat(advected_list[::-1], axis=-1))
return inputs_list, coords_list, post_advection
def _centered_block_advection(self, field, dt):
idx = indices_tensor(field)
centered_velocity = self.at_centers()[..., ::-1] # assume right number of components
sample_coords = idx - centered_velocity * dt
return [field], [sample_coords], lambda list: list[0]
def curl(self):
rank = spatial_rank(self.staggered)
if rank == 3:
return self._staggered_curl_3d()
elif rank == 2:
return self._staggered_curl_2d()
else:
raise ValueError("Curl requires a two or three-dimensional vector field")
def pad(self, lower, upper=None, mode="symmetric"):
upper = upper if upper is not None else lower
padded = math.pad(self.staggered, [[0,0]] + [[lower,upper]]*self.spatial_rank + [[0,0]], mode)
return StaggeredGrid(padded)
def _staggered_curl_3d(self):
"""
Calculates the curl operator on a staggered three-dimensional field.
The resulting vector field is a staggered grid.
If the velocities of the vector potential were sampled at the lower faces of a cube, the resulting velocities
are sampled at the centers of the upper edges.
:param vector_potential: three-dimensional vector potential
:return: three-dimensional staggered vector field
"""
kernel = np.zeros((2, 2, 2, 3, 3), np.float32)
derivative = np.array([-1, 1])
# x-component: dz/dy - dy/dz
kernel[0, :, 0, 2, 0] = derivative
kernel[:, 0, 0, 1, 0] = -derivative
# y-component: dx/dz - dz/dx
kernel[:, 0, 0, 0, 1] = derivative
kernel[0, 0, :, 2, 1] = -derivative
# z-component: dy/dx - dx/dy
kernel[0, 0, :, 1, 2] = derivative
kernel[0, :, 0, 0, 2] = -derivative
vector_potential = math.pad(self.staggered, [[0, 0], [0, 1], [0, 1], [0, 1], [0, 0]], "SYMMETRIC")
vector_field = math.conv(vector_potential, kernel, padding="VALID")
return StaggeredGrid(vector_field)
def _staggered_curl_2d(self):
kernel =
|
np.zeros((2, 2, 1, 2), np.float32)
|
numpy.zeros
|
import cv2
import os
import math
import random
import numpy as np
import numpy.random as npr
import torch
import torchvision.transforms as transforms
from utils.bbox import rbox_2_quad
def init_seeds(seed=0):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# Remove randomness (may be slower on Tesla GPUs) # https://pytorch.org/docs/stable/notes/randomness.html
if seed == 0:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def hyp_parse(hyp_path):
hyp = {}
keys = []
with open(hyp_path,'r') as f:
for line in f:
if line.startswith('#') or len(line.strip())==0 : continue
v = line.strip().split(':')
try:
hyp[v[0]] = float(v[1].strip().split(' ')[0])
except:
hyp[v[0]] = eval(v[1].strip().split(' ')[0])
keys.append(v[0])
f.close()
return hyp
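# hyp_parse expects a plain "key: value" text file; a hedged example of what it accepts:
#   lr: 0.001        # learning rate
#   warmup: 2
#   anchors: [8,16,32]
# Values parse to float where possible; anything float() rejects is passed to eval(), so
# list-like values such as the hypothetical "anchors" entry come back as Python lists
# (note: no spaces inside the brackets, since only the first whitespace-separated token is kept).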
def model_info(model, report='summary'):
    # Prints a line-by-line description of a PyTorch model
n_p = sum(x.numel() for x in model.parameters()) # number parameters
n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients
    if report == 'full':
print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))
for i, (name, p) in enumerate(model.named_parameters()):
name = name.replace('module_list.', '')
print('%5g %40s %9s %12g %20s %10.3g %10.3g' %
(i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))
print('Model Summary: %g layers, %g parameters, %g gradients' % (len(list(model.parameters())), n_p, n_g))
def curriculum_factor(init, final, step=1, mode='suspend_cosine'):
if mode == 'cosine':
sequence = [(0.5 - 0.5 * math.cos(math.pi * i / final)) * (final - init) + init \
for i in range(init, final+step, step)]
elif mode == 'suspend_cosine':
suspend_ratio = 0.1
suspend_interval = (final - init)*suspend_ratio
start = suspend_interval + init if suspend_interval > step else init
sequence = [(0.5 - 0.5 * math.cos(math.pi * i / final)) * (final - init) + init \
if i>start else init for i in range(init, final+step, step)]
# vis
import matplotlib.pylab as plt
import numpy as np
plt.scatter(np.array([x for x in range(init, final+step, step)]),np.array(sequence))
plt.show()
def plot_gt(img, bboxes, im_path, mode='xyxyxyxy'):
if not os.path.exists('temp'):
os.mkdir('temp')
if mode == 'xywha':
bboxes = rbox_2_quad(bboxes,mode=mode)
if mode == 'xyxya':
bboxes = rbox_2_quad(bboxes,mode=mode)
for box in bboxes:
img = cv2.polylines(cv2.UMat(img),[box.reshape(-1,2).astype(np.int32)],True,(0,0,255),2)
cv2.imwrite(os.path.join('temp','augment_%s' % (os.path.split(im_path)[1])),img)
print('Check augmentation results in `temp` folder!!!')
if __name__ == '__main__':
curriculum_factor(836, 6400, 32)
def sort_corners(quads):
sorted = np.zeros(quads.shape, dtype=np.float32)
for i, corners in enumerate(quads):
corners = corners.reshape(4, 2)
centers = np.mean(corners, axis=0)
corners = corners - centers
cosine = corners[:, 0] / np.sqrt(corners[:, 0] ** 2 + corners[:, 1] ** 2)
cosine = np.minimum(np.maximum(cosine, -1.0), 1.0)
thetas = np.arccos(cosine) / np.pi * 180.0
indice =
|
np.where(corners[:, 1] > 0)
|
numpy.where
|
"""
This is memory error safe version of lowess.
Checks both, array from x!tandem and dta files
3rd ver
Jan 8, 2010
"""
from numpy import array, ceil, sort, transpose, clip, zeros, ones, median
from numpy import sum as summa
from numpy.linalg import solve
from numpy.linalg.linalg import LinAlgError
from copy import deepcopy
import math
def lowess(x, y, X2, f=2. / 3., itr=3):
# X2 are x values to estimate Y2 on
# Uncomment to show the first 101 values of x, y, and X2
'''
print('\n\n\n Debugging lowess')
print('Values for x')
for i in list(x)[:101]:
print(i,end=',',flush=True)
print('\n\nValues for y')
for i in list(y)[:101]:
print(i,end=',',flush=True)
print('\n\nValues for X2')
for i in list(X2)[:101]:
print(i,end=',',flush=True)
print ('\n')
'''
x = array(x, 'float32')
y = array(y, 'float32')
X2 = array(X2, 'float32')
n = len(x) # total number of points
r = int(ceil(f * n)) # the number of NN
    h = [sort(abs(x - x[i]))[r] for i in range(n)]  # the bandwidth in x, given NN for each point
# 111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111
# figure out the chunk size safe for handling in the RAM
chunkSize_1 = n # whole thing
attempt = 0
while True:
if attempt >= 8:
break
try:
test1 = [x[0:chunkSize_1]] - transpose([x]) # VLAD. Potential MemoryError
test2 = [x[0:chunkSize_1]] - transpose([x]) # VLAD. Potential MemoryError
test3 = [x[0:chunkSize_1]] - transpose([x]) # VLAD. Potential MemoryError
test4 = [x[0:chunkSize_1]] - transpose([x]) # VLAD. Potential MemoryError
test5 = [x[0:chunkSize_1]] - transpose([x]) # VLAD. Potential MemoryError
test6 = [x[0:chunkSize_1]] - transpose([x]) # VLAD. Potential MemoryError
test7 = [x[0:chunkSize_1]] - transpose([x]) # VLAD. Potential MemoryError
test8 = [x[0:chunkSize_1]] - transpose([x]) # VLAD. Potential MemoryError
break
except MemoryError:
chunkSize_1 = int(chunkSize_1 / 2) # split size into half
attempt += 1
try:
del test1, test2, test3, test4, test5, test6, test7, test8
except NameError:
pass
# chunk sizes, number, and left-overs
numChunks_1 = math.floor(n / chunkSize_1)
leftChunkSize_1 = n - numChunks_1 * chunkSize_1
# compute the indices
chunkList_1 = [] # that list of tuples (start, end) how the big dataset is going to be split
for i in range(numChunks_1):
start = chunkSize_1 * i
end = chunkSize_1 * (i + 1)
chunkList_1.append((start, end))
if leftChunkSize_1 != 0:
chunkList_1.append((end, n))
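    # Illustrative chunking example (numbers assumed): with n = 10 and chunkSize_1 = 4 the
    # code above gives numChunks_1 = 2, leftChunkSize_1 = 2 and
    # chunkList_1 = [(0, 4), (4, 8), (8, 10)].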
# 111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111
N2 = len(X2)
    H2 = [sort(abs(x - X2[i]))[r] for i in range(N2)]  # the bandwidth in x, given NN for each point
# 222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222
# figure out the chunk size safe for handling in the RAM
chunkSize = N2 # whole thing
while True:
try:
test1 = [X2[0:chunkSize]] - transpose([x]) # VLAD. Potential MemoryError
test2 = [X2[0:chunkSize]] - transpose([x]) # VLAD. Potential MemoryError
test3 = [X2[0:chunkSize]] - transpose([x]) # VLAD. Potential MemoryError
test4 = [X2[0:chunkSize]] - transpose([x]) # VLAD. Potential MemoryError
test5 = [X2[0:chunkSize]] - transpose([x]) # VLAD. Potential MemoryError
test6 = [X2[0:chunkSize]] - transpose([x]) # VLAD. Potential MemoryError
test7 = [X2[0:chunkSize]] - transpose([x]) # VLAD. Potential MemoryError
test8 = [X2[0:chunkSize]] - transpose([x]) # VLAD. Potential MemoryError
break
except (MemoryError, ValueError):
            chunkSize = int(chunkSize / 2)  # split size in half (keep it an integer for slicing)
try:
del test1, test2, test3, test4, test5, test6, test7, test8
except NameError:
pass
# chunk sizes, number, and left-overs
numChunks = math.floor(N2 / chunkSize)
leftChunkSize = N2 - numChunks * chunkSize
# compute the indices
chunkList = [] # that list of tuples (start, end) how the big dataset is going to be split
for i in range(numChunks):
start = chunkSize * i
end = chunkSize * (i + 1)
chunkList.append((start, end))
if leftChunkSize != 0:
chunkList.append((end, N2))
# 222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222
# ---- Main Iteration Loop--------------
yest = zeros(n, 'd')
delta = ones(n, 'd') # weights characterizing how well y fits the trend
Y2EST = zeros(N2, 'd')
Y2RETURN = deepcopy(Y2EST)
for iteration in range(itr):
try:
# 1111111111111111111111111111111111111111111111111111111111111
for chunk_1 in chunkList_1:
d = [x[chunk_1[0]:chunk_1[1]]] - transpose([x])
dh = abs(d) / h[chunk_1[0]:chunk_1[1]]
del d
w = clip(dh, 0.0, 1.0)
del dh
w = (1 - w ** 3) ** 3
# estimate for each point
for i in range(n)[chunk_1[0]:chunk_1[1]]:
weights = delta * w[:, i - chunk_1[0]]
b = array([summa(weights * y), summa(weights * y * x)])
A = array([[summa(weights), summa(weights * x)],
[summa(weights * x), summa(weights * x * x)]])
beta = solve(A, b) # Ax=b
yest[i] = beta[0] + beta[1] * x[i]
del weights, b, A
# 1111111111111111111111111111111111111111111111111111111111111
# 22222222222222222222222222222222222222222222222222222222222222
            # go through chunks if we cannot hold the entire NxM array in memory
for chunk in chunkList:
D2 = [X2[chunk[0]:chunk[1]]] - transpose([x])
DH2 = abs(D2) / H2[chunk[0]:chunk[1]]
del D2
W2 = clip(DH2, 0.0, 1.0)
del DH2
W2 = (1 - W2 ** 3) ** 3
# estimate for each point
for i in range(N2)[chunk[0]:chunk[1]]:
WEIGHTS2 = delta * W2[:, i - chunk[0]]
b = array([summa(WEIGHTS2 * y), summa(WEIGHTS2 * y * x)])
A = array([[summa(WEIGHTS2), summa(WEIGHTS2 * x)],
[
|
summa(WEIGHTS2 * x)
|
numpy.sum
|
""" Connections for reservoir neural networks """
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import numpy as np
from . import conn_utils
# Based on https://github.com/nschaetti/EchoTorch/blob/master/echotorch/nn/ESNCell.py
def create_gaussian_matrix(name, width, mean=0.0, std=1.0, sparsity=None, is_sparse=False):
"""
Creates a random square matrix with Gaussian distribution according to
parameters for evodynamic.connection.WeightedConnection.
Parameters
----------
name : str
Name of the Tensor.
width : int
Width of the adjacency matrix.
mean : float
Mean for the Gaussian distribution.
std : float
Standard deviation for the Gaussian distribution.
sparsity : float between 0 and 1
Percentage of zeros in the matrix.
is_sparse : Boolean
        Determines whether the returned Tensor is sparse or not.
Returns
-------
out : Tensor
Random adjacency matrix for TensorFlow.
"""
nodes = width
size = (width, width)
if is_sparse:
indices = []
values = []
if sparsity is None:
values = np.random.normal(loc=mean, scale=std, size=nodes*nodes)
for i in range(nodes):
for j in range(nodes):
indices.append([i,j])
else:
for i in range(nodes):
for j in range(nodes):
if sparsity < np.random.random():
indices.append([i,j])
values.append(np.random.normal(loc=mean, scale=std))
initial = tf.cast(tf.SparseTensor(indices=indices, values=values,\
dense_shape=[nodes, nodes]), tf.float64)
else:
if sparsity is None:
conn_matrix =
|
np.random.normal(loc=mean, scale=std, size=size)
|
numpy.random.normal
|
import matplotlib
import matplotlib.pyplot as plt
from time import sleep
from subprocess import Popen, PIPE, STDOUT
from IPython.display import clear_output, display
import urllib.parse
from io import BytesIO
from zipfile import ZipFile
import urllib.request
import datetime
import os
from subprocess import Popen, PIPE, STDOUT
import pandas
import numpy
from IPython.display import display, HTML
def readDataAndGenerateCSV(scenario):
if scenario == 1 or scenario == 2:
zNormalized = 1
if scenario == 3:
zNormalized = 0
p = Popen(['java', '-jar', 'StockDataGenerateCSV-1.0-jar-with-dependencies.jar', 'properties_scenario' + str(scenario) + '.conf'], stdout=PIPE, stderr=STDOUT)
for line in p.stdout:
if line[2] == 115:
print(str(line)[2:-3])
sleep(2)
if line[2] == 101:
clear_output(wait=True)
print(str(line)[2:-3])
else:
print(str(line)[2:-3])
if zNormalized == 1:
stocks_file = open("stockTS.csvzNorm.csv")
else:
stocks_file = open("stockTS.csv")
stocks_lines = stocks_file.readlines()
stocks = {}
for s in stocks_lines:
stock_name = s.split(",")[0]
stock_values_str = s.split("\n")[0].split(",")[3:264]
stock_values = []
for st in stock_values_str:
stock_values.append(float(st))
stocks[stock_name] = stock_values
return stocks
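# Shape of the returned data, for orientation (tickers and prices are illustrative only):
#   stocks == {"ACME": [12.3, 12.4, ...], "FOO": [101.0, 100.7, ...]}
# i.e. one list of up to 261 close values (working days of the year) per stock name.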
def discoverBundles(scenario):
if scenario > 0:
p = Popen(['java', '-jar', 'BundleDiscovery-1.0-jar-with-dependencies.jar', 'properties_scenario' + str(scenario) + '.conf'], stdout=PIPE, stderr=STDOUT)
for line in p.stdout:
if line[2] == 115:
print(str(line)[2:-3])
sleep(2)
if line[2] == 101:
clear_output(wait=True)
print(str(line)[2:-3])
else:
print(str(line)[2:-3])
else:
p = Popen(['java', '-jar', './GET_DB_DATA.jar'], stdout=PIPE, stderr=STDOUT)
for line in p.stdout:
if line[2] == 115:
print(str(line)[2:-3])
sleep(2)
if line[2] == 101:
clear_output(wait=True)
print(str(line)[2:-3])
else:
print(str(line)[2:-3])
def plotAllData(stock_market, year, stocks):
xlim_1 = 0
xlim_2 = 261
plt.rcParams['figure.figsize'] = [28, 14]
plt.title('Stocks of ' + stock_market + '\'s Stock Market', fontsize=35)
plt.ylabel('Close Value', fontsize=35)
plt.xlabel('Working Day of Year ' + year, fontsize=35)
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
axes = plt.gca()
axes.set_xlim([xlim_1, xlim_2])
plt.grid(True)
for k, v in stocks.items():
plt.plot(v)
#return bundles_members, bundles_duration
def plotAllBundles(stock_market, year, stocks):
results_file = open("results.txt","r")
results_lines = results_file.readlines()
xlim_1 = 0
xlim_2 = 261
bundles_members = {}
bundles_duration = {}
for b in results_lines:
bundle_name = b.split(";")[0]
duration = b.split(";")[2]
#members = b.split("\n")[0].split(":")[1].split(",")
members = b.split(";")[1].split(",")
bundles_members[bundle_name] = members
bundles_duration[bundle_name] = duration
for k, v in bundles_members.items():
duration = bundles_duration[k]
x1 = int(duration.split("-")[0][1:])
x2 = int(duration.split("-")[1][:-2])
minimum = []
maximum = []
for i in list(range(x2-x1)):
minimum.append(10000000000)
maximum.append(-1)
plt.rcParams['figure.figsize'] = [28, 14]
plt.title('Discovered Bundles of Stocks (' + stock_market + '\'s Stock Market)', fontsize=35)
plt.ylabel('Close Value', fontsize=35)
plt.xlabel('Working Day of Year ' + year, fontsize=35)
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
axes = plt.gca()
axes.set_xlim([xlim_1, xlim_2])
for member in bundles_members[k]:
ts = stocks[member]
idx = 0
for t in list(range(x1, x2)):
if ts[t] < minimum[idx]:
minimum[idx] = ts[t]
if ts[t] > maximum[idx]:
maximum[idx] = ts[t]
idx += 1
p = plt.plot(list(range(x1, x2)), maximum, linewidth=5.0)
plt.plot(list(range(x1, x2)), minimum, color=p[0].get_color(), linewidth=5.0)
plt.grid(True)
plt.fill_between(list(range(x1, x2)), minimum, maximum, color=p[0].get_color(), alpha=0.15)
def plotSelectedBundle(scenario, bundle_to_visualize, stock_market, year, stocks):
results_file = open("results.txt","r")
results_lines = results_file.readlines()
xlim_1 = 0
xlim_2 = 261
bundles_members = {}
bundles_duration = {}
for b in results_lines:
bundle_name = b.split(";")[0]
duration = b.split(";")[2]
#members = b.split("\n")[0].split(":")[1].split(",")
members = b.split(";")[1].split(",")
bundles_members[bundle_name] = members
bundles_duration[bundle_name] = duration
if bundle_to_visualize == -1:
if scenario == 1 or scenario == 2:
bundle_to_visualize = 'Bundle_0'
if scenario == 3:
bundle_to_visualize = 'Bundle_100'
else:
bundle_to_visualize = bundle_to_visualize
duration = bundles_duration[bundle_to_visualize]
x1 = int(duration.split("-")[0][1:])
x2 = int(duration.split("-")[1][:-2])
minimum = []
maximum = []
for i in list(range(x2-x1)):
minimum.append(10000000000)
maximum.append(-1)
plt.rcParams['figure.figsize'] = [28, 14]
plt.title('Discovered Bundles of ' + bundle_to_visualize + ' (' + stock_market + '\'s Stock Market)', fontsize=35)
plt.ylabel('Close Value', fontsize=35)
plt.xlabel('Working Day of Year ' + year, fontsize=35)
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
axes = plt.gca()
axes.set_xlim([xlim_1, xlim_2])
print('BUNDLE MEMBERS:')
for member in bundles_members[bundle_to_visualize]:
ts = stocks[member]
idx = 0
for t in list(range(x1, x2)):
if ts[t] < minimum[idx]:
minimum[idx] = ts[t]
if ts[t] > maximum[idx]:
maximum[idx] = ts[t]
idx += 1
plt.plot(ts)
print(member)
plt.axvline(x = x1)
plt.axvline(x = x2)
plt.plot(list(range(x1, x2)), maximum, color='#539ecd', linewidth=5.0)
plt.plot(list(range(x1, x2)), minimum, color='#539ecd', linewidth=5.0)
plt.grid(True)
plt.fill_between(list(range(x1, x2)), minimum, maximum, color='#539ecd', alpha=0.25)
def segmentData(year, interval, symbol, zNormalize):
if os.path.exists("singleStockTS.csv"):
os.remove("singleStockTS.csv")
count1 = 1
count2 = 0
string_to_write = ""
for i in range(1,12):
month = str(i)
if i < 10:
month = "0" + month
link = "http://5.175.24.176/Qualimaster/history/" + str(interval) + "/" + year + month + "_" + symbol + ".zip"
link = urllib.parse.urlsplit(link)
link = list(link)
link[2] = urllib.parse.quote(link[2])
link = urllib.parse.urlunsplit(link)
url = urllib.request.urlopen(link)
with ZipFile(BytesIO(url.read())) as my_zip_file:
for contained_file in my_zip_file.namelist():
for line in my_zip_file.open(contained_file).readlines():
line = line.decode("utf-8")
date = line.split(",")[0]
day_of_week = datetime.datetime.strptime(date, '%m/%d/%Y').strftime('%a')
if day_of_week == "Mon" and prev_day_of_week == "Fri":
if count2 == 45:
with open('singleStockTS.csv', 'a') as the_file:
the_file.write(string_to_write + "\n")
count1 += 1
string_to_write = "Week_" + str(count1) + ",X,Y"
count2 = 0
if count1>1 and count1<52:
string_to_write += "," + str(float(line.split(",")[5]))
count2 += 1
prev_day_of_week = day_of_week
p = Popen(['java', '-jar', 'StockDataGenerateCSV-1.0-jar-with-dependencies.jar', str(zNormalize), 'singleStockTS.csv', str(45)], stdout=PIPE, stderr=STDOUT)
for line in p.stdout:
if line[2] == 115:
print(str(line)[2:-3])
sleep(2)
if line[2] == 101:
clear_output(wait=True)
print(str(line)[2:-3])
else:
print(str(line)[2:-3])
if zNormalize == 1:
stocks_file = open("singleStockTS.csvzNorm.csv")
else:
stocks_file = open("singleStockTS.csv")
stocks_lines = stocks_file.readlines()
stocks = {}
for s in stocks_lines:
stock_name = s.split(",")[0]
stock_values_str = s.split("\n")[0].split(",")[3:264]
stock_values = []
for st in stock_values_str:
stock_values.append(float(st))
stocks[stock_name] = stock_values
return stocks
def plotAllData2(symbol, year, stocks):
xlim_1 = 0
xlim_2 = 44
plt.rcParams['figure.figsize'] = [28, 14]
plt.title(symbol, fontsize=35)
plt.ylabel('Close Value', fontsize=35)
plt.xlabel('Working Hour of Week for Year ' + year, fontsize=35)
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
axes = plt.gca()
axes.set_xlim([xlim_1, xlim_2])
plt.grid(True)
for k, v in stocks.items():
plt.plot(v)
def plotAllBundles2(symbol, year, stocks):
results_file = open("results.txt","r")
results_lines = results_file.readlines()
xlim_1 = 0
xlim_2 = 44
bundles_members = {}
bundles_duration = {}
for b in results_lines:
bundle_name = b.split(";")[0]
duration = b.split(";")[2]
#members = b.split("\n")[0].split(":")[1].split(",")
members = b.split(";")[1].split(",")
bundles_members[bundle_name] = members
bundles_duration[bundle_name] = duration
for k, v in bundles_members.items():
duration = bundles_duration[k]
x1 = int(duration.split("-")[0][1:])
x2 = int(duration.split("-")[1][:-2])
minimum = []
maximum = []
for i in list(range(x2-x1)):
minimum.append(10000000000)
maximum.append(-1)
plt.rcParams['figure.figsize'] = [28, 14]
plt.title('Discovered Bundles of ' + symbol, fontsize=35)
plt.ylabel('Close Value', fontsize=35)
plt.xlabel('Working Hour of Week for Year ' + year, fontsize=35)
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
axes = plt.gca()
axes.set_xlim([xlim_1, xlim_2])
for member in bundles_members[k]:
ts = stocks[member]
idx = 0
for t in list(range(x1, x2)):
if ts[t] < minimum[idx]:
minimum[idx] = ts[t]
if ts[t] > maximum[idx]:
maximum[idx] = ts[t]
idx += 1
p = plt.plot(list(range(x1, x2)), maximum, linewidth=5.0)
plt.plot(list(range(x1, x2)), minimum, color=p[0].get_color(), linewidth=5.0)
plt.grid(True)
plt.fill_between(list(range(x1, x2)), minimum, maximum, color=p[0].get_color(), alpha=0.15)
def plotSelectedBundle2(scenario, bundle_to_visualize, symbol, year, stocks):
results_file = open("results.txt","r")
results_lines = results_file.readlines()
xlim_1 = 0
xlim_2 = 44
bundles_members = {}
bundles_duration = {}
for b in results_lines:
bundle_name = b.split(";")[0]
duration = b.split(";")[2]
#members = b.split("\n")[0].split(":")[1].split(",")
members = b.split(";")[1].split(",")
bundles_members[bundle_name] = members
bundles_duration[bundle_name] = duration
duration = bundles_duration[bundle_to_visualize]
x1 = int(duration.split("-")[0][1:])
x2 = int(duration.split("-")[1][:-2])
minimum = []
maximum = []
for i in list(range(x2-x1)):
minimum.append(10000000000)
maximum.append(-1)
plt.rcParams['figure.figsize'] = [28, 14]
plt.title('Discovered Bundles of ' + bundle_to_visualize, fontsize=35)
plt.ylabel('Close Value', fontsize=35)
plt.xlabel('Working Hour of Week for Year ' + year, fontsize=35)
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
axes = plt.gca()
axes.set_xlim([xlim_1, xlim_2])
print('BUNDLE MEMBERS:')
for member in bundles_members[bundle_to_visualize]:
ts = stocks[member]
idx = 0
for t in list(range(x1, x2)):
if ts[t] < minimum[idx]:
minimum[idx] = ts[t]
if ts[t] > maximum[idx]:
maximum[idx] = ts[t]
idx += 1
plt.plot(ts)
print(member)
plt.axvline(x = x1)
plt.axvline(x = x2)
plt.plot(list(range(x1, x2)), maximum, color='#539ecd', linewidth=5.0)
plt.plot(list(range(x1, x2)), minimum, color='#539ecd', linewidth=5.0)
plt.grid(True)
plt.fill_between(list(range(x1, x2)), minimum, maximum, color='#539ecd', alpha=0.25)
def getSimilarBundles(sort_by):
pandas.set_option('display.max_colwidth', None)
p = Popen(['java', '-jar', 'simjoin-0.0.1-SNAPSHOT-jar-with-dependencies.jar', 'ssjoin_config.properties'], stdout=PIPE, stderr=STDOUT)
f = open('ssjoin_out.txt', "r")
lines_ssjoin = f.readlines()
f.close()
f = open('results.txt', "r")
lines_bundles = f.readlines()
f.close()
first_time = True
for line in lines_ssjoin:
res_list = []
bundle1 = lines_bundles[int(line.split(",")[0])]
bundle1_name = bundle1.split(";")[0]
bundle1_members = bundle1.split(";")[1]
bundle1_interval_start = int(bundle1.split(";")[2].split("-")[0][1:])
bundle1_interval_end = int(bundle1.split(";")[2].split("-")[1][:-2])
bundle2 = lines_bundles[int(line.split(",")[1])]
bundle2_name = bundle2.split(";")[0]
bundle2_members = bundle2.split(";")[1]
bundle2_interval_start = int(bundle2.split(";")[2].split("-")[0][1:])
bundle2_interval_end = int(bundle2.split(";")[2].split("-")[1][:-2])
res_list.append(bundle1_name + " " + bundle2_name)
similarity = float(line.split(",")[2].split("\n")[0][:-1])
res_list.append(similarity)
x = range(bundle1_interval_start, bundle1_interval_end+1)
x_len = bundle1_interval_end-bundle1_interval_start
y = range(bundle2_interval_start, bundle2_interval_end+1)
y_len = bundle2_interval_end-bundle2_interval_start
xs = set(x)
intersect_length = len(xs.intersection(y))
interval_similarity = 0
if x_len > y_len:
interval_similarity = intersect_length/x_len
else:
interval_similarity = intersect_length/y_len
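# Worked example: intervals [5, 15] and [10, 20] share the hours 10..15
# (6 points); with x_len = y_len = 10 this yields interval_similarity = 6/10 = 0.6.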
res_list.append(interval_similarity)
bundle1_set = set(''.join(bundle1_members).split(","))
bundle2_set = set(''.join(bundle2_members).split(","))
common = bundle1_set.intersection(bundle2_set)
res_list.append(common)
not_common = bundle1_set.symmetric_difference(bundle2_set)
res_list.append(not_common)
if first_time == True:
newArray = numpy.array(res_list)
first_time = False
else:
newArray =
|
numpy.vstack([newArray, res_list])
|
numpy.vstack
|
# -*- coding: utf-8 -*-
"""ALE plots."""
import math
from collections import defaultdict
from copy import deepcopy
from operator import attrgetter
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
import sklearn.base
from joblib import parallel_backend
from matplotlib.colors import SymLogNorm
from matplotlib.lines import Line2D
from matplotlib.patches import Polygon
from wildfires.cache.proxy_backend import HashProxy
from wildfires.qstat import get_ncpus
from wildfires.utils import shorten_features
from .. import variable
from ..cache import add_cached_shape, cache, get_proxied_estimator, process_proxy
from ..model import assign_n_jobs
from ..plotting import get_float_format, get_sci_format, update_label_with_exp
from ..utils import column_check, tqdm
orig_clone = sklearn.base.clone
@cache
def cached_clone(estimator, safe=True):
"""Adapted from sklearn.base.clone."""
from wildfires.dask_cx1 import DaskRandomForestRegressor
if not isinstance(estimator, DaskRandomForestRegressor):
return orig_clone(estimator, safe=safe)
estimator_params = estimator.get_params(deep=False)
new_estimator = estimator.__class__(**deepcopy(estimator_params))
new_params = new_estimator.get_params(deep=False)
for name in estimator_params:
param1 = estimator_params[name]
param2 = new_params[name]
assert param1 is param2
return new_estimator
def delegate_clone(estimator, safe=True):
from wildfires.dask_cx1 import DaskRandomForestRegressor
if isinstance(estimator, HashProxy) or isinstance(
estimator, DaskRandomForestRegressor
):
cached_est = cached_clone(estimator, safe=safe)
# Update n_jobs since this will be determined by the original call to the
# cached function. Do this lazily so it is only updated when needed.
return process_proxy((cached_est,), (assign_n_jobs,))[0]
return orig_clone(estimator, safe=safe)
# Transparently cache estimator cloning.
sklearn.base.clone = delegate_clone
# Import after the line above in order for the `clone` caching to take effect within
# the module.
import alepython # isort:skip
import alepython.ale # isort:skip
# Transparently cache the ALE computations.
alepython.ale.first_order_ale_quant = cache(alepython.ale.first_order_ale_quant)
alepython.ale.second_order_ale_quant = cache(
alepython.ale.second_order_ale_quant, ignore=["n_jobs"]
)
alepython.ale._mc_replicas = cache(alepython.ale._mc_replicas, ignore=["verbose"])
# Make use of proxied access to `predict` implicitly.
_orig_ale_plot = alepython.ale.ale_plot
def proxied_ale_plot(**kwargs):
kwargs["model"] = get_proxied_estimator(kwargs["model"])
# Cache the 'shape' attribute of `train_set`.
kwargs["train_set"] = add_cached_shape(kwargs["train_set"])
return _orig_ale_plot(**kwargs)
alepython.ale.ale_plot = proxied_ale_plot
# Implicitly handle getting array data from Series.
orig_asarray = np.asarray
def lazy_series_asarray(*args, **kwargs):
if len(args) == 1 and not kwargs and isinstance(args[0], HashProxy):
# Handle the specific case of a single lazy input argument to avoid
# loading it from disk for as long as possible.
return process_proxy((args[0],), (orig_asarray,))[0]
return orig_asarray(*args, **kwargs)
np.asarray = lazy_series_asarray
def save_ale_1d(
model,
X_train,
column,
train_response=None,
monte_carlo=True,
monte_carlo_rep=100,
monte_carlo_ratio=1000,
monte_carlo_hull=True,
verbose=True,
center=False,
figure_saver=None,
sub_dir="ale",
fig=None,
ax=None,
ale_factor_exp=0,
x_factor_exp=0,
x_ndigits=2,
x_rotation=0,
x_skip=4,
):
assert monte_carlo
if fig is None and ax is None:
fig = plt.figure(figsize=(7.5, 4.5))
elif fig is None:
fig = ax.get_figure()
if ax is None:
ax = plt.axes()
out = alepython.ale.ale_plot(
model=model,
train_set=X_train,
features=column,
bins=20,
train_response=train_response,
monte_carlo=monte_carlo,
monte_carlo_rep=monte_carlo_rep,
monte_carlo_ratio=monte_carlo_ratio,
monte_carlo_hull=monte_carlo_hull,
plot_quantiles=False,
quantile_axis=True,
rugplot_lim=0,
scilim=0.6,
return_data=True,
return_mc_data=True,
verbose=verbose,
center=center,
rng=np.random.default_rng(0),
fig=plt.figure(), # Create dummy figure.
ax=None,
)
temp_fig, _, (quantiles, ale), mc_data = out
plt.close(temp_fig)
mc_hull_points = alepython.ale._compute_mc_hull_poly_points(
mc_data,
np.linspace(
np.min([mc_quantiles[0] for mc_quantiles, mc_ale in mc_data]),
np.max([mc_quantiles[-1] for mc_quantiles, mc_ale in mc_data]),
150,
),
)
ax.add_patch(
Polygon(
mc_hull_points,
facecolor="C0",
alpha=0.7,
label=str(column),
zorder=2,
)
)
min_x = np.min(mc_hull_points[:, 0])
max_x = np.max(mc_hull_points[:, 0])
min_y =
|
np.min(mc_hull_points[:, 1])
|
numpy.min
|
"""
Sun discipline for CADRE: Sun Position ECI component.
"""
from __future__ import print_function, division, absolute_import
import numpy as np
from openmdao.api import ExplicitComponent
class SunPositionECIComp(ExplicitComponent):
"""
Compute the position vector from Earth to Sun in Earth-centered inertial frame.
"""
# constants
d2r = np.pi/180.
def initialize(self):
self.options.declare('num_nodes', types=(int, ),
desc="Number of time points.")
def setup(self):
nn = self.options['num_nodes']
# Inputs
self.add_input('LD', 5233.5, units='d',
desc='Launch day.')
self.add_input('t', np.zeros((nn, )), units='s',
desc='Time vector from simulation.')
# Outputs
self.add_output('r_e2s_I', np.zeros((nn, 3)), units='km',
desc='Position vector from Earth to Sun in Earth-centered '
'inertial frame over time.')
self.declare_partials('r_e2s_I', 'LD')
rows =
|
np.arange(nn*3)
|
numpy.arange
|
import numpy as np
import torch
import random
import logging
import os
import torch.multiprocessing as mp
import torch.distributed as dist
import subprocess
import pickle
import shutil
from scipy import stats as s
import numba as nb
#from .evaluate_panoptic import class_inv_lut
def SemKITTI2train_single(label):
return label - 1 # uint8 trick: 0 - 1 = 255
def SemKITTI2train(label):
if isinstance(label, list):
return [SemKITTI2train_single(a) for a in label]
else:
return SemKITTI2train_single(label)
def grp_range_torch(a,dev):
idx = torch.cumsum(a,0)
id_arr = torch.ones(idx[-1],dtype = torch.int64,device=dev)
id_arr[0] = 0
id_arr[idx[:-1]] = -a[:-1]+1
return torch.cumsum(id_arr,0)
# generates an array like [0,1,2,3,4,5,0,1,2,3,4,5,6], where each 0..n run assigns an id to the points inside the same grid cell
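def _grp_range_torch_example():
    # Minimal illustrative sketch (not used elsewhere in this module): for group
    # sizes [3, 2] the helper returns per-point indices that restart at every
    # group boundary.
    ids = grp_range_torch(torch.tensor([3, 2]), 'cpu')
    assert ids.tolist() == [0, 1, 2, 0, 1]
    return ids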
def parallel_FPS(np_cat_fea,K):
return nb_greedy_FPS(np_cat_fea,K)
# @nb.jit('b1[:](f4[:,:],i4)',nopython=True,cache=True)
def nb_greedy_FPS(xyz,K):
start_element = 0
sample_num = xyz.shape[0]
sum_vec = np.zeros((sample_num,1),dtype = np.float32)
xyz_sq = xyz**2
for j in range(sample_num):
sum_vec[j,0] = np.sum(xyz_sq[j,:])
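# The next line computes all pairwise squared distances at once via the
# expansion ||x_i - x_j||^2 = ||x_i||^2 + ||x_j||^2 - 2 * x_i . x_j.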
pairwise_distance = sum_vec + np.transpose(sum_vec) - 2*np.dot(xyz,
|
np.transpose(xyz)
|
numpy.transpose
|
"""
IMPORTS
"""
import tensorflow as tf
import numpy as np
import ProjectionLayer as pj
"""
def euclidean_distance_loss(y_true, y_pred):
return K.sqrt(K.sum(K.square(y_pred - y_true), axis=-1))
"""
def identity_block(X, f, filters, step) :
"""
:param X:
:param f:
:param filters:
:param step:
:return:
"""
# Retrieve Filters
F1, F2 = filters
# Save the input value. We'll need this later to add back to the main path.
X_shortcut = X
# First component of main path
X = tf.keras.layers.Conv2D(filters=F1, kernel_size=(1, 1), strides=(1, 1), padding='valid', name='Conv_'+str(step)+'_1a',
kernel_initializer=tf.keras.initializers.GlorotUniform(seed=0))(X)
X = tf.keras.layers.BatchNormalization(axis=3, name='Batch_Conv'+str(step)+'_1a')(X)
X = tf.keras.layers.Activation('relu', name='Relu_Conv_'+str(step)+'_1a')(X)
# Second component of main path
X = tf.keras.layers.Conv2D(filters=F1, kernel_size=(f, f), strides=(1, 1), padding='same', name='Conv_'+str(step)+'_2a',
kernel_initializer=tf.keras.initializers.GlorotUniform(seed=0))(X)
X = tf.keras.layers.BatchNormalization(axis=3, name='Batch_Conv'+str(step)+'_2a')(X)
X = tf.keras.layers.Activation('relu', name='Relu_Conv_'+str(step)+'_2a')(X)
# Third component of main path
X = tf.keras.layers.Conv2D(filters=F2, kernel_size=(1, 1), strides=(1, 1), padding='valid', name='Conv_'+str(step)+'_3a',
kernel_initializer=tf.keras.initializers.GlorotUniform(seed=0))(X)
X = tf.keras.layers.BatchNormalization(axis=3, name='Batch_Conv'+str(step)+'_3a')(X)
# Final step: Add shortcut value to main path, and pass it through a RELU activation
X = tf.keras.layers.Add(name='Product_Conv_'+str(step)+'_3a')([X, X_shortcut])
X = tf.keras.layers.Activation('relu', name='Relu_Conv_'+str(step)+'_3a')(X)
return X
def convolutional_block(X, f, filters, step, s=2):
"""
:param X:
:param f:
:param filters:
:param step:
:param s:
:return:
"""
# Retrieve Filters
F1, F2 = filters
# Save the input value
X_shortcut = X
##### MAIN PATH #####
# First component of main path
X = tf.keras.layers.Conv2D(filters=F1, kernel_size=(1, 1), strides=(s, s), padding='valid', name='Conv_'+str(step)+'_1a',
kernel_initializer=tf.keras.initializers.GlorotUniform(seed=0))(X)
X = tf.keras.layers.BatchNormalization(axis=3, name='Batch_Conv'+str(step)+'_1a')(X)
X = tf.keras.layers.Activation('relu', name='Relu_Conv_'+str(step)+'_1a')(X)
# Second component of main path
X = tf.keras.layers.Conv2D(filters=F1, kernel_size=(f, f), strides=(1, 1), padding='same', name='Conv_'+str(step)+'_2a',
kernel_initializer=tf.keras.initializers.GlorotUniform(seed=0))(X)
X = tf.keras.layers.BatchNormalization(axis=3, name='Batch_Conv'+str(step)+'_2a')(X)
X = tf.keras.layers.Activation('relu', name='Relu_Conv_'+str(step)+'_2a')(X)
# Third component of main path
X = tf.keras.layers.Conv2D(filters=F2, kernel_size=(1, 1), strides=(1, 1), padding='valid', name='Conv_'+str(step)+'_3a',
kernel_initializer=tf.keras.initializers.GlorotUniform(seed=0))(X)
X = tf.keras.layers.BatchNormalization(axis=3, name='Batch_Conv'+str(step)+'_3a')(X)
##### SHORTCUT PATH ####
X_shortcut = tf.keras.layers.Conv2D(F2, (1, 1), strides=(s, s), padding='valid', name='Conv_'+str(step)+'_1b',
kernel_initializer=tf.keras.initializers.GlorotUniform(seed=0))(X_shortcut)
# Final step: Add shortcut value to main path, and pass it through a RELU activation
X = tf.keras.layers.Add(name='Product_Conv_'+str(step)+'_3a')([X, X_shortcut])
X = tf.keras.layers.Activation('relu', name='Relu_Conv_'+str(step)+'_3a')(X)
return X
class RegNet34() :
def __init__(self,input_shape=(256, 256, 3), heatmap_shape=(32,32)):
"""
:param input_shape:
:param heatmap_shape:
"""
step=1
# Define the input as a tensor with shape input_shape
X_input = tf.keras.layers.Input(input_shape)
# Zero-Padding
X = tf.keras.layers.ZeroPadding2D((3, 3))(X_input)
# Stage 1
X = tf.keras.layers.Conv2D(64, (7, 7), strides=(2, 2), name='Conv_'+str(step)+'_1a', padding='valid', kernel_initializer=tf.keras.initializers.GlorotUniform(seed=0))(X)
X = tf.keras.layers.BatchNormalization(axis=3, name='Batch_Conv'+str(step)+'_1a')(X)
X = tf.keras.layers.Activation('relu', name='Relu_Conv_'+str(step)+'_1a')(X)
X = tf.keras.layers.MaxPooling2D((3, 3), strides=(2, 2), name='MaxPool_'+str(step)+'_1a')(X)
# Stage 2
step+=1
X = convolutional_block(X, f=3,step=step, filters=[64, 256], s=1)
step += 1
X = identity_block(X, 3, [64, 256],step=step)
step += 1
X = identity_block(X, 3, [64, 256],step=step)
# Stage 3
step += 1
X = convolutional_block(X, f=3, step=step,filters=[128, 512], s=2)
step += 1
X = identity_block(X, 3, [128, 512],step=step)
step += 1
X = identity_block(X, 3, [128, 512],step=step)
# Stage 4
step += 1
X = convolutional_block(X, f=3, step=step,filters=[256, 1024], s=2)
step += 1
X = identity_block(X, 3, [256, 1024],step=step)
step += 1
X = identity_block(X, 3, [256, 1024],step=step)
step += 1
X = identity_block(X, 3, [256, 1024],step=step)
# Stage 5
step += 1
X = tf.keras.layers.Conv2D(512, (3, 3), strides=(2, 2), padding='same', name='Conv_'+str(step)+'_1a',
kernel_initializer=tf.keras.initializers.GlorotUniform(seed=0))(X)
X = tf.keras.layers.BatchNormalization(axis=3, name='Batch_Conv'+str(step)+'_1a')(X)
X = tf.keras.layers.Activation('relu', name='Relu_Conv_'+str(step)+'_1a')(X)
# Stage 6
step += 1
X = tf.keras.layers.Conv2D(256, (3, 3), strides=(1, 1), padding='same', name='Conv_'+str(step)+'_1a',
kernel_initializer=tf.keras.initializers.GlorotUniform(seed=0))(X)
X = tf.keras.layers.BatchNormalization(axis=3, name='Batch_Conv'+str(step)+'_1a')(X)
X = tf.keras.layers.Activation('relu', name='Relu_Conv_'+str(step)+'_1a')(X)
# Spliting
X_2D = X
# 2D Heatmaps Generation
step += 1
X_2D = tf.keras.layers.Conv2D(64, (3, 3), strides=(1, 1), padding='same', name='Conv_'+str(step)+'_2D',
kernel_initializer=tf.keras.initializers.GlorotUniform(seed=0))(X_2D)
X_2D = tf.keras.layers.Conv2DTranspose(21, (4, 4), strides=(2, 2), padding='same', name='DEConv_'+str(step)+'_2D_1a',
kernel_initializer=tf.keras.initializers.GlorotUniform(seed=0))(X_2D)
X_2D = tf.keras.layers.Conv2DTranspose(21, (4, 4), strides=(2, 2), padding='same', name='DEConv_'+str(step)+'_2D_2a',
kernel_initializer=tf.keras.initializers.GlorotUniform(seed=0))(X_2D)
# 3D prediction
X_3D = tf.keras.layers.Flatten(name='Flatten_'+str(step)+'_1a')(X)
X_3D = tf.keras.layers.Dense(200,name='Dense_'+str(step)+'_1a')(X_3D)
X_3D = tf.keras.layers.Dense(63,name='Dense_'+str(step)+'_2a')(X_3D)
X_3D = tf.keras.layers.Reshape((21, 1, 3),name='Reshape_'+str(step)+'_1a')(X_3D)
temp = tf.keras.layers.Reshape((21, 3),name='Reshape_'+str(step)+'_2a')(X_3D)
# Proj Layer
projLayer = pj.ProjLayer(heatmap_shape, name='projlayer')(temp)
heatmaps_pred3D = pj.RenderingLayer(heatmap_shape, coeff=1, name='renderinglayer')(projLayer)
heatmaps_pred3D_reshape = pj.ReshapeChannelToLast(heatmap_shape, name='reshapelayer')(heatmaps_pred3D)
# Rendering
step += 1
X_rendered = tf.keras.layers.Conv2D(64, (3, 3), strides=(2, 2), padding='same', name='Conv_'+str(step)+'_rendering',
kernel_initializer=tf.keras.initializers.GlorotUniform(seed=0))(heatmaps_pred3D_reshape)
X_rendered = tf.keras.layers.BatchNormalization(axis=3, name='Batch_Conv'+str(step)+'_1a')(X_rendered)
X_rendered = tf.keras.layers.Activation('relu', name='Relu_Conv_'+str(step)+'_1a')(X_rendered)
step += 1
X_rendered = tf.keras.layers.Conv2D(128, (3, 3), strides=(2, 2), padding='same', name='Conv_'+str(step)+'_rendering',
kernel_initializer=tf.keras.initializers.GlorotUniform(seed=0))(X_rendered)
X_rendered = tf.keras.layers.BatchNormalization(axis=3, name='Batch_Conv'+str(step)+'_1a')(X_rendered)
X_rendered = tf.keras.layers.Activation('relu', name='Relu_Conv_'+str(step)+'_1a')(X_rendered)
step += 1
X_concat = tf.keras.layers.concatenate([X, X_rendered],name='Concat_'+str(step)+'_1a')
X_concat = tf.keras.layers.Conv2D(256, (3, 3), strides=(1, 1), padding='same', name='Conv_'+str(step)+'_rendering',
kernel_initializer=tf.keras.initializers.GlorotUniform(seed=0))(X_concat)
X_concat = tf.keras.layers.BatchNormalization(axis=3, name='Batch_Conv'+str(step)+'_1a')(X_concat)
X_concat = tf.keras.layers.Activation('relu', name='Relu_Conv_'+str(step)+'_1a')(X_concat)
# Final Heatmap
step += 1
X_heatmap = tf.keras.layers.Conv2D(64, (3, 3), strides=(1, 1), padding='same', name='Conv_'+str(step)+'_heatmap',
kernel_initializer=tf.keras.initializers.GlorotUniform(seed=0))(X_concat)
X_heatmap = tf.keras.layers.Conv2DTranspose(21, (4, 4), strides=(2, 2), padding='same', name='DEConv_'+str(step)+'_heatmap_1a',
kernel_initializer=tf.keras.initializers.GlorotUniform(seed=0))(
X_heatmap)
X_heatmap = tf.keras.layers.Conv2DTranspose(21, (4, 4), strides=(2, 2), padding='same', name='DEConv_'+str(step)+'_heatmap_2a',
kernel_initializer=tf.keras.initializers.GlorotUniform(seed=0))(
X_heatmap)
# Final 3D Joints
X_concat = tf.keras.layers.Flatten(name='Flatten_'+str(step)+'_1a')(X_concat)
X_3Dj = tf.keras.layers.Dense(200,name='Dense_'+str(step)+'_1a')(X_concat)
X_3Dj = tf.keras.layers.Dense(63,name='Dense_'+str(step)+'_2a')(X_3Dj)
X_3Dj = tf.keras.layers.Reshape((21, 1, 3),name='Reshape_'+str(step)+'_1a')(X_3Dj)
# Create model
self.model = tf.keras.Model(inputs=X_input, outputs=[X_3D,X_3Dj,X_heatmap], name='RegNet34')
def train_on_batch(self, epoch, generator) :
"""
:param epoch:
:param generator:
:return:
"""
for i in range(0, epoch):
image, crop_param, joint_3d, joint_3d_rate, joint_2d = generator.generate_batch()
joint_3d_rate = np.reshape(joint_3d_rate, (-1, 21, 1, 3))
result = self.model.train_on_batch(x=[image], y=[joint_3d_rate, joint_3d_rate, joint_2d])
self.test_on_batch(generator, i + 1)
def test_on_batch(self, generator, epoch):
"""
:param generator:
:param epoch:
:return:
"""
min_loss = [10000.0, 10000., 100000., 100000., 100000., 100000., 100000.]
sum_result = [0.0, 0., 0., 0., 0., 0., 0.]
image, crop_param, joint_3d, joint_3d_rate, joint_2d = generator.generate_batch()
joint_3d_rate = np.reshape(joint_3d_rate, (-1, 21, 1, 3))
result = self.model.test_on_batch(x=[image], y=[joint_3d_rate, joint_3d_rate, joint_2d])
result =
|
np.asarray(result)
|
numpy.asarray
|
#!/usr/bin/env python
# Filename: watershed_segment
"""
introduction: segment a grey image
authors: <NAME>
email:<EMAIL>
add time: 1 March, 2021
"""
import os,sys
from optparse import OptionParser
deeplabforRS = os.path.expanduser('~/codes/PycharmProjects/DeeplabforRS')
sys.path.insert(0, deeplabforRS)
import vector_gpd
import raster_io
import basic_src.io_function as io_function
import basic_src.map_projection as map_projection
import basic_src.basic as basic
import basic_src.timeTools as timeTools
import vector_features
import raster_statistic
import cv2
import numpy as np
import pandas as pd
import geopandas as gpd
import re
import dem_common
code_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
sys.path.insert(0,code_dir)
sys.path.insert(0,os.path.join(code_dir,'tools')) # for some modules in this folder
from tools.grey_image_segment import segment_a_grey_image
from tools.seg_polygonize_cal_attributes import polygonize_label_images
def post_processing_subsidence(in_shp):
polygons = vector_gpd.read_polygons_gpd(in_shp)
# get shapeinfo
# poly_shapeinfo_list = []
save_polyons = []
for poly in polygons:
# get INarea, INperimete, WIDTH, HEIGHT, ratio_w_h, hole_count
# shapeinfo = vector_gpd.calculate_polygon_shape_info(poly) # error: 'MultiPolygon' object has no attribute 'interiors'
# poly_shapeinfo_list.append(shapeinfo)
# if shapeinfo['INarea'] < 40: # remove the one with area smaller than 40 m^2
if poly.area < 90: # remove polygons with an area smaller than 90 m^2
continue
save_polyons.append(poly)
save_pd = pd.DataFrame({'Polygon': save_polyons})
wkt = map_projection.get_raster_or_vector_srs_info_wkt(in_shp)
save_shp = io_function.get_name_by_adding_tail(in_shp,'post')
vector_gpd.save_polygons_to_files(save_pd,'Polygon',wkt,save_shp)
def segment_subsidence_on_dem_diff(dem_diff_tif, save_dir):
out_pre = os.path.splitext(os.path.basename(dem_diff_tif))[0]
# read images
one_band_img, nodata = raster_io.read_raster_one_band_np(dem_diff_tif)
# segmentation by threshold (may have too many noise)
# mean = np.nanmean(one_band_img)
# print("mean value is: %.4f"%mean)
# one_band_img = one_band_img - mean # cannot use the mean, which may be skewed by outliers
out_labels = np.zeros_like(one_band_img,dtype=np.uint8)
out_labels[ one_band_img < -2 ] = 1 # a threshold of -1 produced too many noisy polygons, so use -2
# apply median filter
out_labels = cv2.medianBlur(out_labels, 3) # with kernel=3
# save the label
if os.path.isdir(save_dir) is False:
io_function.mkdir(save_dir)
label_path = os.path.join(save_dir, out_pre + '_label.tif')
raster_io.save_numpy_array_to_rasterfile(out_labels, label_path, dem_diff_tif, nodata=0)
# convert the label to shapefile
out_shp = os.path.join(save_dir, out_pre + '.shp')
command_string = 'gdal_polygonize.py -8 %s -b 1 -f "ESRI Shapefile" %s' % (label_path, out_shp)
res = os.system(command_string)
if res != 0:
sys.exit(1)
# post-processing
post_processing_subsidence(out_shp)
def get_mean_from_array(in_array, nodata,range=None):
data_1d = in_array.flatten()
data_1d = data_1d[ data_1d != nodata]
data_1d = data_1d[~np.isnan(data_1d)] # remove nan value
value = np.mean(data_1d)
return value
def get_dem_diff_8bit(dem_diff_path):
# find 8bit one
tif_8bit = io_function.get_name_by_adding_tail(dem_diff_path, '8bit')
demD_8bit= os.path.join(dem_common.grid_dem_diffs_8bit_dir, os.path.basename(tif_8bit))
if os.path.isfile(demD_8bit) is False:
basic.outputlogMessage('error, 8bit DEM diff not exists: %s '%demD_8bit)
return None
return demD_8bit
def get_save_dir(dem_diff_path):
grid_id = int(re.findall(r'grid\d+',os.path.basename(dem_diff_path))[0][4:])
save_dir = os.path.join(dem_common.grid_dem_diffs_segment_dir, 'segment_result_grid%d'%grid_id)
return save_dir
def merge_polygons_patchBYpatch(patch_DN_range_txt, in_polygons, polygon_DNs, process_num=1):
patch_DN_range_list = [ int(item) for item in io_function.read_list_from_txt(patch_DN_range_txt)]
# print(patch_DN_range_list)
# divide polygons
patch_polygons = {}
range_count = len(patch_DN_range_list) - 1
for idx in range(range_count):
patch_polygons[idx] = []
for poly, dn in zip(in_polygons, polygon_DNs):
for idx in range(range_count):
if dn > patch_DN_range_list[idx] and dn <= patch_DN_range_list[idx+1]:
patch_polygons[idx].append(poly)
# merge polygon patch by patch
polygons_patch_merge = []
for idx in range(range_count):
print(timeTools.get_now_time_str(), 'will merge %d polygons for %d th patch'%(len(patch_polygons[idx]),idx))
if len(patch_polygons[idx]) < 2:
polygons_patch_merge.extend(patch_polygons[idx])
continue
adjacent_matrix = vector_gpd.build_adjacent_map_of_polygons(patch_polygons[idx], process_num=process_num)
if adjacent_matrix is False:
polygons_patch_merge.extend(patch_polygons[idx])
continue
merged_polygons = vector_features.merge_touched_polygons(patch_polygons[idx], adjacent_matrix)
polygons_patch_merge.extend(merged_polygons)
# merge polygon all
print(timeTools.get_now_time_str(), 'will merge %d polygons for the entire raster' % (len(polygons_patch_merge)))
adjacent_matrix = vector_gpd.build_adjacent_map_of_polygons(polygons_patch_merge, process_num=process_num)
if adjacent_matrix is False:
return polygons_patch_merge
last_merged_polygons = vector_features.merge_touched_polygons(polygons_patch_merge, adjacent_matrix)
return last_merged_polygons
def merge_polygon_rasterize(ref_raster, in_polygons):
# rasterize to raster
save_raster = os.path.basename(io_function.get_name_by_adding_tail(ref_raster,'merge'))
raster_io.burn_polygons_to_a_raster(ref_raster,in_polygons,1,save_raster)
# set nodata
if raster_io.set_nodata_to_raster_metadata(save_raster,0) is False:
raise IOError('Set nodata failed for %s'%save_raster)
# polygonize
out_shp = vector_gpd.raster2shapefile(save_raster, connect8=True)
if out_shp is None:
raise IOError('polygonzied failed for %s' % save_raster)
# read polygon
merge_polygons = vector_gpd.read_polygons_gpd(out_shp,b_fix_invalid_polygon=False)
print(timeTools.get_now_time_str(),'Get %d merged polygons'%len(merge_polygons))
return merge_polygons
def filter_merge_polygons(in_shp,merged_shp,wkt, min_area,max_area,dem_diff_tif,dem_diff_thread_m,process_num):
if os.path.isfile(merged_shp):
basic.outputlogMessage('%s exists, skip'%merged_shp)
return merged_shp
# read polygons and label from segment algorithm, note: some polygons may have the same label
# polygons, demD_mean_list = vector_gpd.read_polygons_attributes_list(in_shp,'demD_mean')
polygons, attributes = vector_gpd.read_polygons_attributes_list(in_shp,['demD_mean','DN'])
demD_mean_list = attributes[0]
DN_list = attributes[1]
print('Read %d polygons'%len(polygons))
if demD_mean_list is None:
raise ValueError('demD_mean not in %s, need to remove it and then re-create'%in_shp)
# replace nan values as 0
demD_mean_list =
|
np.nan_to_num(demD_mean_list)
|
numpy.nan_to_num
|
#!/usr/bin/env python
# coding: utf-8
from sys import argv
import pandas as pd
import datetime
import numba
import numpy as np
import copy
from random import *
from sympy.utilities.iterables import multiset_permutations
from collections import defaultdict
ORIGIN_COLS_TO_RENAME = {
"Мерчендайзер (ФИО)": "merch",
"Сеть": "chain",
"Адрес ТТ": "address",
"Время в ТТ": "time",
"Рын. формат": "format",
"Понедельник": "mon",
"Вторник": "tue",
"Среда": "wed",
"Четверг": "thu",
"Пятница": "fri",
"Суббота": "sat",
"Воскресенье": "sun",
"Кол-во визитов в неделю": "amount",
}
PLAN_COLS_TO_RENAME = {
"Код ТТ": "TT_code",
"Активность": "activity",
"Наименование клиента": "chain",
"Адрес клиента": "address",
"Количество посещений": "amount",
"Закрепленный за клиентом ТП": "merch",
"Продолжительность посещения": "duration",
}
# MATRIX_FILE = "input_matrix.csv"
# ORIGIN_FILE = "input_origin.csv"
# PLAN_FILE = "input_plan.csv"
# report_1 = "aa.csv"
# data_out = "bb.csv"
MATRIX_FILE = argv[1]
ORIGIN_FILE = argv[2]
PLAN_FILE = argv[3]
report_1 = argv[4]
data_out = argv[5]
# ENCODE = "UTF-8"
ENCODE = "Windows-1251"
def time_to_minutes(string):
"""
:param string: string in format hh:mm:ss.
:return: equivalent number of minutes as a float, or -1 if the string cannot be parsed.
"""
try:
h, m, s = string.split(":")
return int(h) * 60 + int(m) + int(s) / 60
except ValueError:
# print(string)
return -1
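# Examples: time_to_minutes("01:30:30") == 90.5; time_to_minutes("bad") == -1.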
@numba.jit()
def return_times(way):
"""
Takes the current way and returns several time metrics for it.
:param way: the route for which the time metrics are calculated.
:returns:
:total_time: float - total time (store time plus travel time);
:store_time: float - time spent in stores;
:way_time: float - time spent travelling between stores;
:total_check: boolean - True if the total time stays within 9 hours 30 minutes, else False;
:store_check: boolean - True if the store time exceeds 5 hours, else False.
"""
store_time = np.sum(durations[way])
way_time = 0
for start, end in zip(way[:-1], way[1:]):
way_time += 1.0 * matrix[start, end] / 1000
total_time = store_time + way_time
# print(total_time)
total_check = total_time < 571
store_check = store_time > 299
return total_time, store_time, way_time, total_check, store_check
def annealing(cur_order, iters=30, delta_t=0.001, t_max=100):
"""
Run the simulated annealing optimisation method.
:param cur_order: np.array - current order (the one we want to optimise);
:param iters: int - maximum number of iterations;
:param delta_t: float - temperature decrement per step;
:param t_max: float - maximum (starting) temperature.
:return:
:best_order: np.array - optimised order.
:best_cost: float - cost of this optimised order.
"""
t_max = max(t_max, 2) # ensure the temperature schedule starts above 1
n = len(cur_order)
cur_order = copy.deepcopy(cur_order)
cur_cost, _, _, _, _ = return_times(cur_order)
best_order = copy.deepcopy(cur_order)
best_cost = cur_cost
for _ in range(iters):
for T in np.arange(t_max, 1, -1 * delta_t):
i1 = randint(0, n - 1)
i2 = randint(i1 + 1, n)
cur_order[i1:i2] = np.flip(cur_order[i1:i2])
new_cost, _, _, _, _ = return_times(cur_order)
dE = cur_cost - new_cost
if dE > 0 and
|
np.exp(-dE / T)
|
numpy.exp
|
import enum
import logging
from collections import namedtuple
from typing import Optional, Tuple, Sequence
import numpy as np
import scipy.stats
from ...algorithms.correlmatrix import correlmatrix_cython
from ...algorithms.schilling import cormap_pval, longest_run
SchillingResult = namedtuple('SchillingResult', ('statistic', 'pvalue'))
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class OutlierMethod(enum.Enum):
ZScore = 'Z-score'
ZScoreMod = 'Modified Z-score'
IQR = 'Interquartile Range'
class OutlierTest:
score: np.ndarray
method: OutlierMethod
threshold: float
outlierverdict: np.ndarray
correlmatrix: np.ndarray
fsns: Optional[np.ndarray] = None
def __init__(self, method: OutlierMethod, threshold: float, curves: Optional[np.ndarray] = None,
correlmatrix: Optional[np.ndarray] = None, fsns: Optional[Sequence[int]] = None):
if curves is not None:
self.correlmatrix = correlmatrix_cython(curves[:, 1, :], curves[:, 2, :])
elif correlmatrix is not None:
self.correlmatrix = correlmatrix
else:
raise ValueError('Either `curves` or `correlmatrix` argument is needed.')
self.score = np.diagonal(self.correlmatrix)
self.method = method
self.threshold = threshold
self.outlierverdict = np.zeros(self.score.shape, dtype=bool)
if fsns is not None:
self.fsns = np.array(fsns)
self.markOutliers()
def acceptanceInterval(self) -> Tuple[float, float]:
if self.method in [OutlierMethod.ZScore, OutlierMethod.ZScoreMod]:
return -self.threshold, self.threshold
elif self.method == OutlierMethod.IQR:
q1, q3 = np.percentile(self.score, [25, 75])
iqr = q3 - q1
return q1 - iqr * self.threshold, q3 + iqr * self.threshold
else:
assert False
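# Worked example for the IQR rule: scores [1, 2, 3, 4, 100] give q1 = 2, q3 = 4,
# iqr = 2, so with threshold = 1.5 the acceptance interval is (-1.0, 7.0) and
# the value 100 falls outside it.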
def markOutliers(self) -> np.ndarray:
if self.correlmatrix.shape[0] < 3:
logger.warning('Cannot do outlier detection for less than 3 measurements.')
return np.zeros(self.score.shape, dtype=bool)
if self.method == OutlierMethod.ZScore:
self.outlierverdict = (np.abs(self.score -
|
np.nanmean(self.score)
|
numpy.nanmean
|
# old from constants---------------------
import numpy as np
import scipy.sparse as ss
import scipy.stats as stat
def invert_to_dense(sparse_mat):
dense = sparse_mat.todense()
u, s, vh = np.linalg.svd(dense)
# raw_inv = np.linalg.inv(dense)
pinv_dense = clean_pinv(dense)
# back = clean_pinv(pinv_dense)
# dense[dense < 1e-3] = 0
# pinv_norm = np.linalg.pinv(dense, rcond=1e-3, hermitian=True)
return pinv_dense
def clean_pinv(mat):
error = 1e-6
copy_mat = mat.copy()
u, s, vh = np.linalg.svd(copy_mat)
# copy_mat[copy_mat < error] = 0.0
pinv_mat = np.linalg.pinv(copy_mat, rcond=error, hermitian=True)
# pinv_mat[pinv_mat < error] = 0.0
return pinv_mat
def to_sparse(dense_mat,
error=1e-6):
dense_mat[abs(dense_mat) < error] = 0.0
out = ss.csc_matrix(dense_mat)
return out
def gaussian_kernel_original(x_val, sd):
diff = np.linalg.norm(x_val)
inter = (-((diff) ** 2)
/ (2 * sd ** 2))
out = np.exp(inter)
# Should be a float
return out
def gaussian_kernel_given_diffs(diffs, sd2):
"""
:param diffs: a 2d array whose rows are difference vectors
:param sd2: the squared standard deviation (sigma**2), not sigma itself
:return: a 1d array of evaluated Gaussian values
"""
norm2_squared = np.sum((diffs ** 2), axis=1)
inter = (-((norm2_squared))
/ (2 * sd2))
out = np.exp(inter)
# Should be a float
return out
def gaussian_kernel_naive(pixel_row, pixel_col, sd2):
"""
Gaussian kernel used when precomputing Gaussian values for a given (row, col) offset.
:param pixel_row: row component of the offset between pixel and centre
:param pixel_col: column component of the offset between pixel and centre
:param sd2: the squared standard deviation (sigma**2)
:return: evaluated Gaussian value
"""
diff = pixel_row ** 2 + pixel_col ** 2
inter = (-(diff)
/ (2 * sd2))
out = np.exp(inter)
return out
def gaussian_kernel_input2_sd2(input2, sd2):
"""
Takes an array of squared norms of difference vectors.
:param input2: a 1D array of squared norms of diffs
:param sd2: the squared standard deviation (sigma**2)
:return: a 1D array of evaluated Gaussian values
"""
inter = (-(input2)
/ (2 * sd2))
out = np.e ** inter
return out
#----------------NEW STUFF--------------------------
# import constants_maker as constants
import datetime
import os
import matplotlib.pyplot as plt
def flatten_image(image):
return image.flatten()
def unflatten_image(image, ncols):
return image.reshape((-1, ncols))
def faster_norm_squared(arr):
return np.sum(arr * arr)
def get_pixel_by_centers_matrix(all_pixels, all_centers, sd2):
"""
:param all_pixels: an array of all pixel coordinates, e.g. [[0,0],[0,1], ...]
:param all_centers: an array of all center coordinates, e.g. [[0,0],[0,1], ...]
:param sd2: squared standard deviation used by the Gaussian kernel
:return: (n_pixels, n_centers) array with evaluated Gaussian values relative to each center
"""
n_pixels = np.shape(all_pixels)[0]
n_centers = np.shape(all_centers)[0]
pixels_pixels = np.repeat(all_pixels,n_centers,axis=0)
centers_centers = np.tile(all_centers,(n_pixels,1))
diff_vector = pixels_pixels - centers_centers
gaussian_out = gaussian_kernel_given_diffs(diff_vector, sd2)
reshaped_gauss = gaussian_out.reshape((n_pixels,n_centers))
return reshaped_gauss
def get_sparse_pixel_by_centers(all_pixels, all_centers, sd2,
sparse_type=ss.csc_matrix,
error=1e-6):
out = get_pixel_by_centers_matrix(all_pixels=all_pixels,
all_centers=all_centers,
sd2=sd2)
out[np.abs(out) < error] = 0.0
s_out = sparse_type(out)
return s_out
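# The variant below builds the same pixel-by-centers Gaussian matrix using only
# matrix products, via ||p - c||^2 = ||p||^2 + ||c||^2 - 2 * p . c, which avoids
# materialising the (n_pixels * n_centers, 2) difference array used above.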
def get_pixel_by_centers_matrix_mul_only(all_pixels, all_centers, sd2):
one_col2 = np.ones((2, 1))
n_pixels = np.shape(all_pixels)[0]
n_centers = np.shape(all_centers)[0]
p_norm_squared = (all_pixels ** 2) @ one_col2
c_norm_squared = (all_centers ** 2) @ one_col2
p_norm_squared_repeated = p_norm_squared @ np.ones((1, n_centers))
c_norm_squared_repeated = (c_norm_squared @
|
np.ones((1, n_pixels))
|
numpy.ones
|
import cv2, sys
from matplotlib import pyplot as plt
import numpy as np
# Blur the image and detect its contours
# Reference: https://youbidan.tistory.com/19
# image = cv2.imread('Open/t02/2.jpg')
#
# image_gray = cv2.imread('Open/t02/2.jpg', cv2.IMREAD_GRAYSCALE)
# blur = cv2.GaussianBlur(image_gray, ksize=(31,31), sigmaX=0)
# ret, thresh1 = cv2.threshold(blur, 127, 255, cv2.THRESH_BINARY)
# edged = cv2.Canny(blur, 10,80)
# kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5,5))
# closed = cv2.morphologyEx(edged, cv2.MORPH_CLOSE, kernel)
# contours, _ = cv2.findContours(closed.copy(),cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# total = 0
# contours_image = cv2.drawContours(image, contours, -1, (0,255,0), 3)
#
# plt.imshow(blur)
#
# plt.show()
def fit_rotated_ellipse(data):
xs = data[:,0].reshape(-1,1)
ys = data[:,1].reshape(-1,1)
J = np.mat( np.hstack((xs*ys,ys**2,xs, ys, np.ones_like(xs,dtype=float))) )
Y = np.mat(-1*xs**2)
P= (J.T * J).I * J.T * Y
a = 1.0; b= P[0,0]; c= P[1,0]; d = P[2,0]; e= P[3,0]; f=P[4,0];
# TODO: implementation
# From a, b, c, d, e, f we can extract theta, the centre (cx, cy), the major axis and the minor axis
theta = 0.5* np.arctan(b/(a-c))
cx = (2*c*d - b*e)/(b**2-4*a*c)
cy = (2*a*e - b*d)/(b**2-4*a*c)
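# The centre (cx, cy) solves the gradient equations of the conic
# a*x**2 + b*x*y + c*y**2 + d*x + e*y + f = 0, i.e.
# 2*a*x + b*y + d = 0 and b*x + 2*c*y + e = 0.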
cu = a*cx**2 + b*cx*cy + c*cy**2 -f
w= np.sqrt(cu/(a*np.cos(theta)**2 + b* np.cos(theta)*np.sin(theta) + c*np.sin(theta)**2))
h= np.sqrt(cu/(a*
|
np.sin(theta)
|
numpy.sin
|
"""Numba-compiled functions.
Provides an arsenal of Numba-compiled functions that are used by accessors
and in many other parts of the backtesting pipeline, such as technical indicators.
These only accept NumPy arrays and other Numba-compatible types.
The module can be accessed directly via `vbt.nb`.
```python-repl
>>> import numpy as np
>>> import vectorbt as vbt
>>> # vectorbt.generic.nb.rolling_mean_1d_nb
>>> vbt.nb.rolling_mean_1d_nb(np.array([1, 2, 3, 4]), 2)
array([nan, 1.5, 2.5, 3.5])
```
!!! note
vectorbt treats matrices as first-class citizens and expects input arrays to be
2-dim, unless function has suffix `_1d` or is meant to be input to another function.
Data is processed along index (axis 0).
Rolling functions with `minp=None` have `min_periods` set to the window size.
All functions passed as argument should be Numba-compiled."""
import numpy as np
from numba import njit, generated_jit
from numba.np.numpy_support import as_dtype
from numba.typed import Dict
from numba.core.types import Omitted
from vectorbt import _typing as tp
from vectorbt.generic.enums import DrawdownStatus, drawdown_dt
@njit(cache=True)
def shuffle_1d_nb(a: tp.Array1d, seed: tp.Optional[int] = None) -> tp.Array1d:
"""Shuffle each column in `a`.
Specify `seed` to make output deterministic."""
if seed is not None:
np.random.seed(seed)
return np.random.permutation(a)
@njit(cache=True)
def shuffle_nb(a: tp.Array2d, seed: tp.Optional[int] = None) -> tp.Array2d:
"""2-dim version of `shuffle_1d_nb`."""
if seed is not None:
np.random.seed(seed)
out = np.empty_like(a, dtype=a.dtype)
for col in range(a.shape[1]):
out[:, col] = np.random.permutation(a[:, col])
return out
@generated_jit(nopython=True, cache=True)
def set_by_mask_1d_nb(a: tp.Array1d, mask: tp.Array1d, value: tp.Scalar) -> tp.Array1d:
"""Set each element to a value by boolean mask."""
nb_enabled = not isinstance(a, np.ndarray)
if nb_enabled:
a_dtype = as_dtype(a.dtype)
value_dtype = as_dtype(value)
else:
a_dtype = a.dtype
value_dtype = np.array(value).dtype
dtype = np.promote_types(a_dtype, value_dtype)
def _set_by_mask_1d_nb(a, mask, value):
out = a.astype(dtype)
out[mask] = value
return out
if not nb_enabled:
return _set_by_mask_1d_nb(a, mask, value)
return _set_by_mask_1d_nb
@generated_jit(nopython=True, cache=True)
def set_by_mask_nb(a: tp.Array2d, mask: tp.Array2d, value: tp.Scalar) -> tp.Array2d:
"""2-dim version of `set_by_mask_1d_nb`."""
nb_enabled = not isinstance(a, np.ndarray)
if nb_enabled:
a_dtype = as_dtype(a.dtype)
value_dtype = as_dtype(value)
else:
a_dtype = a.dtype
value_dtype = np.array(value).dtype
dtype = np.promote_types(a_dtype, value_dtype)
def _set_by_mask_nb(a, mask, value):
out = a.astype(dtype)
for col in range(a.shape[1]):
out[mask[:, col], col] = value
return out
if not nb_enabled:
return _set_by_mask_nb(a, mask, value)
return _set_by_mask_nb
@generated_jit(nopython=True, cache=True)
def set_by_mask_mult_1d_nb(a: tp.Array1d, mask: tp.Array1d, values: tp.Array1d) -> tp.Array1d:
"""Set each element in one array to the corresponding element in another by boolean mask.
`values` should be of the same shape as in `a`."""
nb_enabled = not isinstance(a, np.ndarray)
if nb_enabled:
a_dtype = as_dtype(a.dtype)
value_dtype = as_dtype(values.dtype)
else:
a_dtype = a.dtype
value_dtype = values.dtype
dtype = np.promote_types(a_dtype, value_dtype)
def _set_by_mask_mult_1d_nb(a, mask, values):
out = a.astype(dtype)
out[mask] = values[mask]
return out
if not nb_enabled:
return _set_by_mask_mult_1d_nb(a, mask, values)
return _set_by_mask_mult_1d_nb
@generated_jit(nopython=True, cache=True)
def set_by_mask_mult_nb(a: tp.Array2d, mask: tp.Array2d, values: tp.Array2d) -> tp.Array2d:
"""2-dim version of `set_by_mask_mult_1d_nb`."""
nb_enabled = not isinstance(a, np.ndarray)
if nb_enabled:
a_dtype = as_dtype(a.dtype)
value_dtype = as_dtype(values.dtype)
else:
a_dtype = a.dtype
value_dtype = values.dtype
dtype = np.promote_types(a_dtype, value_dtype)
def _set_by_mask_mult_nb(a, mask, values):
out = a.astype(dtype)
for col in range(a.shape[1]):
out[mask[:, col], col] = values[mask[:, col], col]
return out
if not nb_enabled:
return _set_by_mask_mult_nb(a, mask, values)
return _set_by_mask_mult_nb
@njit(cache=True)
def fillna_1d_nb(a: tp.Array1d, value: tp.Scalar) -> tp.Array1d:
"""Replace NaNs with value.
Numba equivalent to `pd.Series(a).fillna(value)`."""
return set_by_mask_1d_nb(a, np.isnan(a), value)
@njit(cache=True)
def fillna_nb(a: tp.Array2d, value: tp.Scalar) -> tp.Array2d:
"""2-dim version of `fillna_1d_nb`."""
return set_by_mask_nb(a, np.isnan(a), value)
@generated_jit(nopython=True, cache=True)
def bshift_1d_nb(a: tp.Array1d, n: int = 1, fill_value: tp.Scalar = np.nan) -> tp.Array1d:
"""Shift backward by `n` positions.
Numba equivalent to `pd.Series(a).shift(n)`.
!!! warning
Shift backward means looking ahead."""
nb_enabled = not isinstance(a, np.ndarray)
if nb_enabled:
a_dtype = as_dtype(a.dtype)
if isinstance(fill_value, Omitted):
fill_value_dtype = np.asarray(fill_value.value).dtype
else:
fill_value_dtype = as_dtype(fill_value)
else:
a_dtype = a.dtype
fill_value_dtype = np.array(fill_value).dtype
dtype = np.promote_types(a_dtype, fill_value_dtype)
def _bshift_1d_nb(a, n, fill_value):
out = np.empty_like(a, dtype=dtype)
out[-n:] = fill_value
out[:-n] = a[n:]
return out
if not nb_enabled:
return _bshift_1d_nb(a, n, fill_value)
return _bshift_1d_nb
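# Example: bshift_1d_nb(np.array([1., 2., 3.]), 1) -> array([2., 3., nan]);
# position i receives the value from position i + n (look-ahead).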
@generated_jit(nopython=True, cache=True)
def bshift_nb(a: tp.Array2d, n: int = 1, fill_value: tp.Scalar = np.nan) -> tp.Array2d:
"""2-dim version of `bshift_1d_nb`."""
nb_enabled = not isinstance(a, np.ndarray)
if nb_enabled:
a_dtype = as_dtype(a.dtype)
if isinstance(fill_value, Omitted):
fill_value_dtype = np.asarray(fill_value.value).dtype
else:
fill_value_dtype = as_dtype(fill_value)
else:
a_dtype = a.dtype
fill_value_dtype = np.array(fill_value).dtype
dtype = np.promote_types(a_dtype, fill_value_dtype)
def _bshift_nb(a, n, fill_value):
out = np.empty_like(a, dtype=dtype)
for col in range(a.shape[1]):
out[:, col] = bshift_1d_nb(a[:, col], n=n, fill_value=fill_value)
return out
if not nb_enabled:
return _bshift_nb(a, n, fill_value)
return _bshift_nb
@generated_jit(nopython=True, cache=True)
def fshift_1d_nb(a: tp.Array1d, n: int = 1, fill_value: tp.Scalar = np.nan) -> tp.Array1d:
"""Shift forward by `n` positions.
Numba equivalent to `pd.Series(a).shift(n)`."""
nb_enabled = not isinstance(a, np.ndarray)
if nb_enabled:
a_dtype = as_dtype(a.dtype)
if isinstance(fill_value, Omitted):
fill_value_dtype = np.asarray(fill_value.value).dtype
else:
fill_value_dtype = as_dtype(fill_value)
else:
a_dtype = a.dtype
fill_value_dtype = np.array(fill_value).dtype
dtype = np.promote_types(a_dtype, fill_value_dtype)
def _fshift_1d_nb(a, n, fill_value):
out = np.empty_like(a, dtype=dtype)
out[:n] = fill_value
out[n:] = a[:-n]
return out
if not nb_enabled:
return _fshift_1d_nb(a, n, fill_value)
return _fshift_1d_nb
@generated_jit(nopython=True, cache=True)
def fshift_nb(a: tp.Array2d, n: int = 1, fill_value: tp.Scalar = np.nan) -> tp.Array2d:
"""2-dim version of `fshift_1d_nb`."""
nb_enabled = not isinstance(a, np.ndarray)
if nb_enabled:
a_dtype = as_dtype(a.dtype)
if isinstance(fill_value, Omitted):
fill_value_dtype = np.asarray(fill_value.value).dtype
else:
fill_value_dtype = as_dtype(fill_value)
else:
a_dtype = a.dtype
fill_value_dtype = np.array(fill_value).dtype
dtype = np.promote_types(a_dtype, fill_value_dtype)
def _fshift_nb(a, n, fill_value):
out = np.empty_like(a, dtype=dtype)
for col in range(a.shape[1]):
out[:, col] = fshift_1d_nb(a[:, col], n=n, fill_value=fill_value)
return out
if not nb_enabled:
return _fshift_nb(a, n, fill_value)
return _fshift_nb
@njit(cache=True)
def diff_1d_nb(a: tp.Array1d, n: int = 1) -> tp.Array1d:
"""Return the 1-th discrete difference.
Numba equivalent to `pd.Series(a).diff()`."""
out = np.empty_like(a, dtype=np.float_)
out[:n] = np.nan
out[n:] = a[n:] - a[:-n]
return out
@njit(cache=True)
def diff_nb(a: tp.Array2d, n: int = 1) -> tp.Array2d:
"""2-dim version of `diff_1d_nb`."""
out = np.empty_like(a, dtype=np.float_)
for col in range(a.shape[1]):
out[:, col] = diff_1d_nb(a[:, col], n=n)
return out
@njit(cache=True)
def pct_change_1d_nb(a: tp.Array1d, n: int = 1) -> tp.Array1d:
"""Return the percentage change.
Numba equivalent to `pd.Series(a).pct_change(n)`."""
out = np.empty_like(a, dtype=np.float_)
out[:n] = np.nan
out[n:] = a[n:] / a[:-n] - 1
return out
@njit(cache=True)
def pct_change_nb(a: tp.Array2d, n: int = 1) -> tp.Array2d:
"""2-dim version of `pct_change_1d_nb`."""
out = np.empty_like(a, dtype=np.float_)
for col in range(a.shape[1]):
out[:, col] = pct_change_1d_nb(a[:, col], n=n)
return out
@njit(cache=True)
def ffill_1d_nb(a: tp.Array1d) -> tp.Array1d:
"""Fill NaNs by propagating last valid observation forward.
Numba equivalent to `pd.Series(a).fillna(method='ffill')`."""
out = np.empty_like(a, dtype=a.dtype)
lastval = a[0]
for i in range(a.shape[0]):
if np.isnan(a[i]):
out[i] = lastval
else:
lastval = out[i] = a[i]
return out
@njit(cache=True)
def ffill_nb(a: tp.Array2d) -> tp.Array2d:
"""2-dim version of `ffill_1d_nb`."""
out = np.empty_like(a, dtype=a.dtype)
for col in range(a.shape[1]):
out[:, col] = ffill_1d_nb(a[:, col])
return out
@generated_jit(nopython=True, cache=True)
def nanprod_nb(a):
"""Numba-equivalent of `np.nanprod` along axis 0."""
nb_enabled = not isinstance(a, np.ndarray)
if nb_enabled:
a_dtype = as_dtype(a.dtype)
else:
a_dtype = a.dtype
dtype = np.promote_types(a_dtype, int)
def _nanprod_nb(a):
out = np.empty(a.shape[1], dtype=dtype)
for col in range(a.shape[1]):
out[col] = np.nanprod(a[:, col])
return out
if not nb_enabled:
return _nanprod_nb(a)
return _nanprod_nb
@generated_jit(nopython=True, cache=True)
def nancumsum_nb(a):
"""Numba-equivalent of `np.nancumsum` along axis 0."""
nb_enabled = not isinstance(a, np.ndarray)
if nb_enabled:
a_dtype = as_dtype(a.dtype)
else:
a_dtype = a.dtype
dtype = np.promote_types(a_dtype, int)
def _nancumsum_nb(a):
out = np.empty(a.shape, dtype=dtype)
for col in range(a.shape[1]):
out[:, col] = np.nancumsum(a[:, col])
return out
if not nb_enabled:
return _nancumsum_nb(a)
return _nancumsum_nb
@generated_jit(nopython=True, cache=True)
def nancumprod_nb(a):
"""Numba-equivalent of `np.nancumprod` along axis 0."""
nb_enabled = not isinstance(a, np.ndarray)
if nb_enabled:
a_dtype = as_dtype(a.dtype)
else:
a_dtype = a.dtype
dtype = np.promote_types(a_dtype, int)
def _nancumprod_nb(a):
out = np.empty(a.shape, dtype=dtype)
for col in range(a.shape[1]):
out[:, col] = np.nancumprod(a[:, col])
return out
if not nb_enabled:
return _nancumprod_nb(a)
return _nancumprod_nb
@njit(cache=True)
def nancnt_nb(a: tp.Array2d) -> tp.Array1d:
"""Compute count while ignoring NaNs."""
out = np.empty(a.shape[1], dtype=np.int_)
for col in range(a.shape[1]):
out[col] = np.sum(~np.isnan(a[:, col]))
return out
@generated_jit(nopython=True, cache=True)
def nansum_nb(a):
"""Numba-equivalent of `np.nansum` along axis 0."""
nb_enabled = not isinstance(a, np.ndarray)
if nb_enabled:
a_dtype = as_dtype(a.dtype)
else:
a_dtype = a.dtype
dtype = np.promote_types(a_dtype, int)
def _nansum_nb(a):
out = np.empty(a.shape[1], dtype=dtype)
for col in range(a.shape[1]):
out[col] = np.nansum(a[:, col])
return out
if not nb_enabled:
return _nansum_nb(a)
return _nansum_nb
@njit(cache=True)
def nanmin_nb(a: tp.Array2d) -> tp.Array1d:
"""Numba-equivalent of `np.nanmin` along axis 0."""
out = np.empty(a.shape[1], dtype=a.dtype)
for col in range(a.shape[1]):
out[col] = np.nanmin(a[:, col])
return out
@njit(cache=True)
def nanmax_nb(a: tp.Array2d) -> tp.Array1d:
"""Numba-equivalent of `np.nanmax` along axis 0."""
out = np.empty(a.shape[1], dtype=a.dtype)
for col in range(a.shape[1]):
out[col] = np.nanmax(a[:, col])
return out
@njit(cache=True)
def nanmean_nb(a: tp.Array2d) -> tp.Array1d:
"""Numba-equivalent of `np.nanmean` along axis 0."""
out = np.empty(a.shape[1], dtype=np.float_)
for col in range(a.shape[1]):
out[col] = np.nanmean(a[:, col])
return out
@njit(cache=True)
def nanmedian_nb(a: tp.Array2d) -> tp.Array1d:
"""Numba-equivalent of `np.nanmedian` along axis 0."""
out = np.empty(a.shape[1], dtype=np.float_)
for col in range(a.shape[1]):
out[col] = np.nanmedian(a[:, col])
return out
@njit(cache=True)
def nanstd_1d_nb(a: tp.Array1d, ddof: int = 0) -> float:
"""Numba-equivalent of `np.nanstd`."""
cnt = a.shape[0] - np.count_nonzero(np.isnan(a))
rcount = max(cnt - ddof, 0)
if rcount == 0:
return np.nan
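# np.nanvar uses ddof=0, so rescale by cnt / (cnt - ddof) to honour the requested ddof.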
return np.sqrt(np.nanvar(a) * cnt / rcount)
@njit(cache=True)
def nanstd_nb(a: tp.Array2d, ddof: int = 0) -> tp.Array1d:
"""2-dim version of `nanstd_1d_nb`."""
out = np.empty(a.shape[1], dtype=np.float_)
for col in range(a.shape[1]):
out[col] = nanstd_1d_nb(a[:, col], ddof=ddof)
return out
# ############# Rolling functions ############# #
@njit(cache=True)
def rolling_min_1d_nb(a: tp.Array1d, window: int, minp: tp.Optional[int] = None) -> tp.Array1d:
"""Return rolling min.
Numba equivalent to `pd.Series(a).rolling(window, min_periods=minp).min()`."""
if minp is None:
minp = window
if minp > window:
raise ValueError("minp must be <= window")
out = np.empty_like(a, dtype=np.float_)
for i in range(a.shape[0]):
minv = a[i]
cnt = 0
for j in range(max(i - window + 1, 0), i + 1):
if np.isnan(a[j]):
continue
if np.isnan(minv) or a[j] < minv:
minv = a[j]
cnt += 1
if cnt < minp:
out[i] = np.nan
else:
out[i] = minv
return out
@njit(cache=True)
def rolling_min_nb(a: tp.Array2d, window: int, minp: tp.Optional[int] = None) -> tp.Array2d:
"""2-dim version of `rolling_min_1d_nb`."""
out = np.empty_like(a, dtype=np.float_)
for col in range(a.shape[1]):
out[:, col] = rolling_min_1d_nb(a[:, col], window, minp=minp)
return out
@njit(cache=True)
def rolling_max_1d_nb(a: tp.Array1d, window: int, minp: tp.Optional[int] = None) -> tp.Array1d:
"""Return rolling max.
Numba equivalent to `pd.Series(a).rolling(window, min_periods=minp).max()`."""
if minp is None:
minp = window
if minp > window:
raise ValueError("minp must be <= window")
out = np.empty_like(a, dtype=np.float_)
for i in range(a.shape[0]):
maxv = a[i]
cnt = 0
for j in range(max(i - window + 1, 0), i + 1):
if np.isnan(a[j]):
continue
if np.isnan(maxv) or a[j] > maxv:
maxv = a[j]
cnt += 1
if cnt < minp:
out[i] = np.nan
else:
out[i] = maxv
return out
@njit(cache=True)
def rolling_max_nb(a: tp.Array2d, window: int, minp: tp.Optional[int] = None) -> tp.Array2d:
"""2-dim version of `rolling_max_1d_nb`."""
out = np.empty_like(a, dtype=np.float_)
for col in range(a.shape[1]):
out[:, col] = rolling_max_1d_nb(a[:, col], window, minp=minp)
return out
@njit(cache=True)
def rolling_mean_1d_nb(a: tp.Array1d, window: int, minp: tp.Optional[int] = None) -> tp.Array1d:
"""Return rolling mean.
Numba equivalent to `pd.Series(a).rolling(window, min_periods=minp).mean()`."""
if minp is None:
minp = window
if minp > window:
raise ValueError("minp must be <= window")
out = np.empty_like(a, dtype=np.float_)
cumsum_arr = np.zeros_like(a)
cumsum = 0
nancnt_arr = np.zeros_like(a)
nancnt = 0
for i in range(a.shape[0]):
if np.isnan(a[i]):
nancnt = nancnt + 1
else:
cumsum = cumsum + a[i]
nancnt_arr[i] = nancnt
cumsum_arr[i] = cumsum
if i < window:
window_len = i + 1 - nancnt
window_cumsum = cumsum
else:
window_len = window - (nancnt - nancnt_arr[i - window])
window_cumsum = cumsum - cumsum_arr[i - window]
if window_len < minp:
out[i] = np.nan
else:
out[i] = window_cumsum / window_len
return out
@njit(cache=True)
def rolling_mean_nb(a: tp.Array2d, window: int, minp: tp.Optional[int] = None) -> tp.Array2d:
"""2-dim version of `rolling_mean_1d_nb`."""
out = np.empty_like(a, dtype=np.float_)
for col in range(a.shape[1]):
out[:, col] = rolling_mean_1d_nb(a[:, col], window, minp=minp)
return out
@njit(cache=True)
def rolling_std_1d_nb(a: tp.Array1d, window: int, minp: tp.Optional[int] = None, ddof: int = 0) -> tp.Array1d:
"""Return rolling standard deviation.
Numba equivalent to `pd.Series(a).rolling(window, min_periods=minp).std(ddof=ddof)`."""
if minp is None:
minp = window
if minp > window:
raise ValueError("minp must be <= window")
out = np.empty_like(a, dtype=np.float_)
cumsum_arr = np.zeros_like(a)
cumsum = 0
cumsum_sq_arr = np.zeros_like(a)
cumsum_sq = 0
nancnt_arr = np.zeros_like(a)
nancnt = 0
for i in range(a.shape[0]):
if np.isnan(a[i]):
nancnt = nancnt + 1
else:
cumsum = cumsum + a[i]
cumsum_sq = cumsum_sq + a[i] ** 2
nancnt_arr[i] = nancnt
cumsum_arr[i] = cumsum
cumsum_sq_arr[i] = cumsum_sq
if i < window:
window_len = i + 1 - nancnt
window_cumsum = cumsum
window_cumsum_sq = cumsum_sq
else:
window_len = window - (nancnt - nancnt_arr[i - window])
window_cumsum = cumsum - cumsum_arr[i - window]
window_cumsum_sq = cumsum_sq - cumsum_sq_arr[i - window]
if window_len < minp or window_len == ddof:
out[i] = np.nan
else:
mean = window_cumsum / window_len
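# sum((x - mean)**2) expanded as sum(x**2) - 2*mean*sum(x) + n*mean**2;
# abs() guards against small negative values from floating-point round-off.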
out[i] = np.sqrt(np.abs(window_cumsum_sq - 2 * window_cumsum *
mean + window_len * mean ** 2) / (window_len - ddof))
return out
@njit(cache=True)
def rolling_std_nb(a: tp.Array2d, window: int, minp: tp.Optional[int] = None, ddof: int = 0) -> tp.Array2d:
"""2-dim version of `rolling_std_1d_nb`."""
out = np.empty_like(a, dtype=np.float_)
for col in range(a.shape[1]):
out[:, col] = rolling_std_1d_nb(a[:, col], window, minp=minp, ddof=ddof)
return out
@njit(cache=True)
def ewm_mean_1d_nb(a: tp.Array1d, span: int, minp: int = 0, adjust: bool = False) -> tp.Array1d:
"""Return exponential weighted average.
Numba equivalent to `pd.Series(a).ewm(span=span, min_periods=minp, adjust=adjust).mean()`.
Adaptation of `pd._libs.window.aggregations.window_aggregations.ewma` with default arguments."""
if minp is None:
minp = span
if minp > span:
raise ValueError("minp must be <= span")
N = len(a)
out = np.empty(N, dtype=np.float_)
if N == 0:
return out
com = (span - 1) / 2.0
alpha = 1. / (1. + com)
old_wt_factor = 1. - alpha
new_wt = 1. if adjust else alpha
weighted_avg = a[0]
is_observation = (weighted_avg == weighted_avg)
nobs = int(is_observation)
out[0] = weighted_avg if (nobs >= minp) else np.nan
old_wt = 1.
for i in range(1, N):
cur = a[i]
is_observation = (cur == cur)
nobs += is_observation
if weighted_avg == weighted_avg:
old_wt *= old_wt_factor
if is_observation:
# avoid numerical errors on constant series
if weighted_avg != cur:
weighted_avg = ((old_wt * weighted_avg) + (new_wt * cur)) / (old_wt + new_wt)
if adjust:
old_wt += new_wt
else:
old_wt = 1.
elif is_observation:
weighted_avg = cur
out[i] = weighted_avg if (nobs >= minp) else np.nan
return out
@njit(cache=True)
def ewm_mean_nb(a: tp.Array2d, span: int, minp: int = 0, adjust: bool = False) -> tp.Array2d:
"""2-dim version of `ewm_mean_1d_nb`."""
out = np.empty_like(a, dtype=np.float_)
for col in range(a.shape[1]):
out[:, col] = ewm_mean_1d_nb(a[:, col], span, minp=minp, adjust=adjust)
return out
@njit(cache=True)
def ewm_std_1d_nb(a: tp.Array1d, span: int, minp: int = 0, adjust: bool = False, ddof: int = 0) -> tp.Array1d:
"""Return exponential weighted standard deviation.
Numba equivalent to `pd.Series(a).ewm(span=span, min_periods=minp).std(ddof=ddof)`.
Adaptation of `pd._libs.window.aggregations.window_aggregations.ewmcov` with default arguments."""
if minp is None:
minp = span
if minp > span:
raise ValueError("minp must be <= span")
N = len(a)
out = np.empty(N, dtype=np.float_)
if N == 0:
return out
com = (span - 1) / 2.0
alpha = 1. / (1. + com)
old_wt_factor = 1. - alpha
new_wt = 1. if adjust else alpha
mean_x = a[0]
mean_y = a[0]
is_observation = ((mean_x == mean_x) and (mean_y == mean_y))
nobs = int(is_observation)
if not is_observation:
mean_x = np.nan
mean_y = np.nan
out[0] = np.nan
cov = 0.
sum_wt = 1.
sum_wt2 = 1.
old_wt = 1.
for i in range(1, N):
cur_x = a[i]
cur_y = a[i]
is_observation = ((cur_x == cur_x) and (cur_y == cur_y))
nobs += is_observation
if mean_x == mean_x:
sum_wt *= old_wt_factor
sum_wt2 *= (old_wt_factor * old_wt_factor)
old_wt *= old_wt_factor
if is_observation:
old_mean_x = mean_x
old_mean_y = mean_y
# avoid numerical errors on constant series
if mean_x != cur_x:
mean_x = ((old_wt * old_mean_x) +
(new_wt * cur_x)) / (old_wt + new_wt)
# avoid numerical errors on constant series
if mean_y != cur_y:
mean_y = ((old_wt * old_mean_y) +
(new_wt * cur_y)) / (old_wt + new_wt)
cov = ((old_wt * (cov + ((old_mean_x - mean_x) *
(old_mean_y - mean_y)))) +
(new_wt * ((cur_x - mean_x) *
(cur_y - mean_y)))) / (old_wt + new_wt)
sum_wt += new_wt
sum_wt2 += (new_wt * new_wt)
old_wt += new_wt
if not adjust:
sum_wt /= old_wt
sum_wt2 /= (old_wt * old_wt)
old_wt = 1.
elif is_observation:
mean_x = cur_x
mean_y = cur_y
if nobs >= minp:
numerator = sum_wt * sum_wt
denominator = numerator - sum_wt2
if denominator > 0.:
out[i] = ((numerator / denominator) * cov)
else:
out[i] = np.nan
else:
out[i] = np.nan
return np.sqrt(out)
@njit(cache=True)
def ewm_std_nb(a: tp.Array2d, span: int, minp: int = 0, adjust: bool = False, ddof: int = 0) -> tp.Array2d:
"""2-dim version of `ewm_std_1d_nb`."""
out = np.empty_like(a, dtype=np.float_)
for col in range(a.shape[1]):
out[:, col] = ewm_std_1d_nb(a[:, col], span, minp=minp, adjust=adjust, ddof=ddof)
return out
# ############# Expanding functions ############# #
@njit(cache=True)
def expanding_min_1d_nb(a: tp.Array1d, minp: int = 1) -> tp.Array1d:
"""Return expanding min.
Numba equivalent to `pd.Series(a).expanding(min_periods=minp).min()`."""
out = np.empty_like(a, dtype=np.float_)
minv = a[0]
cnt = 0
for i in range(a.shape[0]):
if np.isnan(minv) or a[i] < minv:
minv = a[i]
if not np.isnan(a[i]):
cnt += 1
if cnt < minp:
out[i] = np.nan
else:
out[i] = minv
return out
@njit(cache=True)
def expanding_min_nb(a: tp.Array2d, minp: int = 1) -> tp.Array2d:
"""2-dim version of `expanding_min_1d_nb`."""
out = np.empty_like(a, dtype=np.float_)
for col in range(a.shape[1]):
out[:, col] = expanding_min_1d_nb(a[:, col], minp=minp)
return out
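# Worked example for `expanding_min_1d_nb` above (illustrative): NaNs never update
# the running minimum, so with minp=1,
#   expanding_min_1d_nb(np.array([3., np.nan, 1., 2.]))
# evaluates to array([3., 3., 1., 1.]), matching
# pd.Series([3, np.nan, 1, 2]).expanding(min_periods=1).min().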
@njit(cache=True)
def expanding_max_1d_nb(a: tp.Array1d, minp: int = 1) -> tp.Array1d:
"""Return expanding max.
Numba equivalent to `pd.Series(a).expanding(min_periods=minp).max()`."""
out = np.empty_like(a, dtype=np.float_)
maxv = a[0]
cnt = 0
for i in range(a.shape[0]):
if np.isnan(maxv) or a[i] > maxv:
maxv = a[i]
if not np.isnan(a[i]):
cnt += 1
if cnt < minp:
out[i] = np.nan
else:
out[i] = maxv
return out
@njit(cache=True)
def expanding_max_nb(a: tp.Array2d, minp: int = 1) -> tp.Array2d:
"""2-dim version of `expanding_max_1d_nb`."""
    out = np.empty_like(a, dtype=np.float_)
    for col in range(a.shape[1]):
        out[:, col] = expanding_max_1d_nb(a[:, col], minp=minp)
    return out
import cv2
import numpy as np
from googleapiclient.discovery import build
from google.oauth2.credentials import Credentials
from google.oauth2 import service_account
# Google sheets link : https://docs.google.com/spreadsheets/d/1kLxCUNyn39Q1-B3KkvbxDz2Vc4NFZVxhb0AwKGzeZbI/edit#gid=0
# Retrieves colour from spreadsheet (edit SAMPLE_SPREADSHEET_ID based on spreadsheet URL)
def getSpreadsheetColour(SERVICE_ACCOUNT_FILE = 'key.json', SCOPES = ['https://www.googleapis.com/auth/spreadsheets'], SAMPLE_SPREADSHEET_ID = '1kLxCUNyn39Q1-B3KkvbxDz2Vc4NFZVxhb0AwKGzeZbI') :
creds = service_account.Credentials.from_service_account_file(SERVICE_ACCOUNT_FILE, scopes=SCOPES)
service = build('sheets', 'v4', credentials=creds)
sheet = service.spreadsheets()
result = sheet.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID, range="sheet1!A1:J10").execute()
values = result.get('values', [])
return values[0][0] # Colour retrieved from cell A1
# Processes colour from string to numpy.ndarray format containing RGB limits (to be fed to cv2.inRange)
def getColorCode(col1) :
if col1 == 'red' :
lower = np.uint8([[[0,115,145]]])
upper = np.uint8([[[9,255,255]]])
elif col1 == 'green' :
lower = np.uint8([[[35,75,115]]])
        upper = np.uint8([[[55,255,255]]])
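# Usage sketch (illustrative): how the two helpers above might be combined into a
# binary mask. `frame` is a hypothetical BGR image; the HSV conversion and the
# assumption that getColorCode returns a (lower, upper) pair are guesses based on
# the bounds above, not part of the original code.
def mask_spreadsheet_colour(frame):
    colour = getSpreadsheetColour()               # e.g. 'red' or 'green' from cell A1
    lower, upper = getColorCode(colour)           # assumed return value
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)  # the bounds look like HSV ranges
    return cv2.inRange(hsv, lower.reshape(3), upper.reshape(3))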
try:
    import cPickle
except ImportError:  # Python 3: cPickle was merged into the pickle module
    import pickle as cPickle
import os
import sys
import scipy as sc
import operator
import numpy as np
import pandas as pd
from scipy import sparse
import xgboost as xgb
from sklearn import model_selection, preprocessing, ensemble
from sklearn.metrics import log_loss
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from bs4 import BeautifulSoup
from sklearn.metrics import mean_squared_error
#reload(sys)
#sys.setdefaultencoding('utf8')
#r = re.compile(r"\s")
from sklearn.preprocessing import LabelEncoder
import re
from sklearn.decomposition import TruncatedSVD
import nltk
from scipy.stats import boxcox
import datetime as dt
from nltk.stem.porter import *
import gc
import math
from collections import Counter
nfold = 5
with open("../pickle03.pkl", "rb") as f:
(train_df,test_df,train_y,features_to_use,features_to_use_ln,ntrain,test_df_listing_id) = cPickle.load( f)
train_test = pd.concat((train_df, test_df), axis=0).reset_index(drop=True)
###############Model Build and Predict
param = {}
param['objective'] = 'multi:softprob'
param['eta'] = 0.01
param['max_depth'] = 6
param['silent'] = 1
param['num_class'] = 3
param['eval_metric'] = "mlogloss"
param['min_child_weight'] = 1
param['subsample'] = .75
param['colsample_bytree'] = .8
param['seed'] = 12345
### Ftrs+Desc Ftrs+Ftr Count Vec
features_to_use_ln=[
'listing_id',
##LOG Price variant
'lg_price','per_bed_price','per_bath_price','per_bed_price_dev','per_bath_price_dev',
u'building_id', u'display_address', u'manager_id', u'street_address','bed_bath','street', 'avenue', 'east', 'west', 'north','south', 'other_address', 'bathrooms_cat', 'bedroom_cat','lat_cat','lon_cat', #'lat_cat_rnd','lon_cat_rnd'#,
'per_bed_bath_price','bedPerBath','bedBathDiff','bedBathSum','bedsPerc','per_bed_price_rat','per_bath_price_rat','manager_id_interest_level_high0','building_id_interest_level_high0','manager_id_interest_level_medium0','building_id_interest_level_medium0'
]
cv_scores = []
bow = CountVectorizer(stop_words='english', max_features=200, ngram_range=(1,1),min_df=2, max_df=.85)
bow.fit(train_test["features_2"])
oob_valpred = np.zeros((train_df.shape[0],3))
oob_tstpred = np.zeros((test_df.shape[0],3))
i=0
kf = model_selection.KFold(n_splits=nfold, shuffle=True, random_state=12345)
for dev_index, val_index in kf.split(range(train_y.shape[0])):
dev_X, val_X = train_df.iloc[dev_index,:], train_df.iloc[val_index,:]
dev_y, val_y = train_y[dev_index], train_y[val_index]
#tr_sparse_2 = bow.transform(dev_X["features_2"])
#val_sparse_2 = bow.transform(val_X["features_2"])
#te_sparse_2 = bow.transform(test_df["features_2"])
#train_X2 = sparse.hstack([dev_X[features_to_use_ln],tr_sparse_2]).tocsr()#,tr_sparse_d
#val_X2 = sparse.hstack([val_X[features_to_use_ln],val_sparse_2]).tocsr()#,val_sparse_d
#test_X2 = sparse.hstack([test_df[features_to_use_ln], te_sparse_2]).tocsr()
train_X2 = dev_X[features_to_use_ln].values
val_X2 = val_X[features_to_use_ln].values
test_X2 = test_df[features_to_use_ln].values
print(train_X2.shape)
num_rounds =10000
plst = list(param.items())
xgtrain = xgb.DMatrix(train_X2, label=dev_y)
xgval = xgb.DMatrix(val_X2, label=val_y)
xgtest = xgb.DMatrix(test_X2)
watchlist = [ (xgtrain,'train'), (xgval, 'val') ]
model = xgb.train(plst, xgtrain, num_rounds, watchlist, early_stopping_rounds=50)
best_iteration = model.best_iteration+1
model = xgb.train(plst, xgtrain, best_iteration, watchlist, early_stopping_rounds=50)
preds = model.predict(xgval)
oob_valpred[val_index,...] = preds
cv_scores.append(log_loss(val_y, preds))
print(cv_scores)
print(np.mean(cv_scores))
print(np.std(cv_scores))
predtst = model.predict(xgtest)
oob_tstpred += predtst
oob_tstpred /= nfold
out_df = pd.DataFrame(oob_tstpred)
out_df.columns = ["high", "medium", "low"]
out_df["listing_id"] = test_df_listing_id
out_df.to_csv("../xgb_lblenc_lgprice_fewFTR.csv", index=False)
with open("../xgb_lblenc_lgprice_fewFTR.pkl", "wb") as f:
cPickle.dump((oob_tstpred,oob_valpred), f, -1)
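# Sketch (illustrative): the pickled out-of-fold predictions above can later be
# reloaded as meta-features for stacking/blending; the names below are made up.
# with open("../xgb_lblenc_lgprice_fewFTR.pkl", "rb") as f:
#     oob_tstpred_l1, oob_valpred_l1 = cPickle.load(f)
# meta_train = np.hstack([train_df[features_to_use_ln].values, oob_valpred_l1])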
### Ftrs+Desc Ftrs+Ftr Count Vec
cv_scores = []
bow = CountVectorizer(stop_words='english', ngram_range=(1,1),min_df=2, max_df=.85)
bow.fit(train_test["features_2"])
bow2 = TfidfVectorizer(stop_words='english', ngram_range=(1,3),min_df=3, max_df=.75,analyzer='word',token_pattern=r'\w{1,}',use_idf=1,smooth_idf=1,sublinear_tf=1)#max_features=100,
bow2.fit(train_test["description"])
oob_valpred = np.zeros((train_df.shape[0],3))
oob_tstpred = np.zeros((test_df.shape[0],3))
i=0
kf = model_selection.KFold(n_splits=nfold, shuffle=True, random_state=12345)
for dev_index, val_index in kf.split(range(train_y.shape[0])):
dev_X, val_X = train_df.iloc[dev_index,:], train_df.iloc[val_index,:]
dev_y, val_y = train_y[dev_index], train_y[val_index]
tr_sparse_2 = bow.transform(dev_X["features_2"])
val_sparse_2 = bow.transform(val_X["features_2"])
te_sparse_2 = bow.transform(test_df["features_2"])
tr_sparse_3 = bow2.transform(dev_X["description"])
val_sparse_3 = bow2.transform(val_X["description"])
te_sparse_3 = bow2.transform(test_df["description"])
train_X2 = sparse.hstack([tr_sparse_2, tr_sparse_3]).tocsr()#,tr_sparse_d
val_X2 = sparse.hstack([val_sparse_2, val_sparse_3]).tocsr()#,val_sparse_d
test_X2 = sparse.hstack([te_sparse_2,te_sparse_3]).tocsr()
# train_X2 = dev_X[features_to_use_ln]
# val_X2 = val_X[features_to_use_ln]
print(train_X2.shape)
num_rounds =10000
plst = list(param.items())
xgtrain = xgb.DMatrix(train_X2, label=dev_y)
xgval = xgb.DMatrix(val_X2, label=val_y)
xgtest = xgb.DMatrix(test_X2)
watchlist = [ (xgtrain,'train'), (xgval, 'val') ]
model = xgb.train(plst, xgtrain, num_rounds, watchlist, early_stopping_rounds=50)
best_iteration = model.best_iteration+1
model = xgb.train(plst, xgtrain, best_iteration, watchlist, early_stopping_rounds=50)
preds = model.predict(xgval)
oob_valpred[val_index,...] = preds
cv_scores.append(log_loss(val_y, preds))
print(cv_scores)
print(np.mean(cv_scores))
    print(np.std(cv_scores))
"""trt_yolo.py
This script demonstrates how to do real-time object detection with
TensorRT optimized YOLO engine.
"""
import os
import time
import argparse
import cv2
import numpy as np
import pycuda.autoinit # This is needed for initializing CUDA driver
#import local classes and their functions
from utils.yolo_classes import get_cls_dict
from utils.camera import add_camera_args, Camera
from utils.display import open_window, set_display, show_fps
from utils.visualization import BBoxVisualization
from utils.yolo_with_plugins import TrtYOLO
import matplotlib.pyplot as plt
from deep_sort.application_util import preprocessing
from deep_sort.deep_sort import nn_matching
from deep_sort.deep_sort.detection import Detection
from deep_sort.deep_sort.tracker import Tracker
from deep_sort.tools import generate_detections as gdet
WINDOW_NAME = 'TrtYOLODemo'
class_names = [c.strip() for c in open('./deep_sort/labels/coco.names').readlines()]
np.random.seed(100)
COLORS = np.random.randint(0, 255, size=(200, 3),
dtype="uint8")
def parse_args():
"""Parse input arguments."""
desc = ('Capture and display live camera video, while doing '
'real-time object detection with TensorRT optimized '
'YOLO model on Jetson')
parser = argparse.ArgumentParser(description=desc)
parser = add_camera_args(parser)
parser.add_argument(
'-c', '--category_num', type=int, default=80,
help='number of object categories [80]')
parser.add_argument(
'-m', '--model', type=str, required=True,
help=('[yolov3-tiny|yolov3|yolov3-spp|yolov4-tiny|yolov4|'
'yolov4-csp|yolov4x-mish]-[{dimension}], where '
'{dimension} could be either a single number (e.g. '
'288, 416, 608) or 2 numbers, WxH (e.g. 416x256)'))
parser.add_argument(
'-l', '--letter_box', action='store_true',
help='inference with letterboxed image [False]')
args = parser.parse_args()
return args
def convert_tlbr_tlwh(bboxes):
list_of_boxes = []
test_array = np.zeros(4)
for box in bboxes:
box_list = []
x, y, x2, y2 = box[0], box[1], box[2], box[3]
w = x2 - x
h = y2 - y
box_list.append(x)
box_list.append(y)
box_list.append(w)
box_list.append(h)
if not np.array_equal(box, [0,0,0,0]):
list_of_boxes.append(box_list)
return list_of_boxes
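# Worked example (illustrative): a tlbr box [10, 20, 30, 60] maps to the tlwh box
# [10, 20, 20, 40], and all-zero placeholder boxes are dropped:
#   convert_tlbr_tlwh(np.array([[10, 20, 30, 60], [0, 0, 0, 0]]))  # -> [[10, 20, 20, 40]]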
def loop_and_detect(cam, encoder, tracker, trt_yolo, conf_th, vis):
"""Continuously capture images from camera and do object detection.
# Arguments
cam: the camera instance (video source).
trt_yolo: the TRT YOLO object detector instance.
conf_th: confidence/score threshold for object detection.
vis: for visualization.
"""
counter = []
#full_screen is set to false by default
full_scrn = False
#fps is set at 0 by default
fps = 0.0
#create time variable for measuring the frames per second in real time
tic = time.time()
#while loop to perform inference
while True:
t1 = time.time()
#determine if window is closed or not ????
#break the loop if window is closed
if cv2.getWindowProperty(WINDOW_NAME, 0) < 0:
break
#create img object from a reading of the camera frame
img = cam.read()
#break loop if the camera frame is none
if img is None:
break
#create bounding box coordinate, detection confidence, and class id from the detect function of the trt_yolo object.
boxes, confs, clss = trt_yolo.detect(img, conf_th)
classes = clss
names = []
##for i in range(len(classes)):
# names.append(class_names[int(classes[i])])
xywh_boxes = convert_tlbr_tlwh(boxes)
features = encoder(img, xywh_boxes)
detections = [Detection(bbox, confs, d_clss, feature) for bbox, confs, d_clss, feature in zip(xywh_boxes, confs, clss, features)]
# Run non-maxima suppression.
boxs = np.array([d.tlwh for d in detections])
        scores = np.array([d.confidence for d in detections])
# usr/bin/env python3
# Run as PYTHONPATH=. python3 examples/atlas_hzz.py
# In case you use CUDA, you may have to find the libnvvm.so on your system manually
import os, time, glob, argparse, multiprocessing
import numba
import sys
import numpy as np
import uproot
import hepaccelerate
import hepaccelerate.kernels as kernels
from hepaccelerate.utils import Results, Dataset, Histogram, choose_backend
import hepaccelerate.backend_cpu as backend_cpu
import matplotlib
import infofile
import json
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import copy
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from scipy.stats import wasserstein_distance
from plot_utils import plot_hist_ratio
ha = None
lumi =10.0
# define our analysis function
def analyze_data_function(data, parameters):
ret = Results()
ha = parameters["ha"]
num_events = data["num_events"]
lep = data["Lep"]
lep.hepaccelerate_backend = ha
lep.attrs_data["pt"] = lep.lep_pt
lep.attrs_data["eta"] = lep.lep_eta
lep.attrs_data["phi"] = lep.lep_phi
lep.attrs_data["charge"] = lep.lep_charge
lep.attrs_data["type"] = lep.lep_type
lep_mass = np.zeros_like(lep["pt"],dtype=nplib.float32)
lep_mass = np.where(lep["type"]==11,0.511,lep_mass)
lep_mass = np.where(lep["type"]==13,105.65837,lep_mass)
lep.attrs_data["mass"] = lep_mass
mask_events = nplib.ones(lep.numevents(), dtype=nplib.bool)
lep_ele = lep["type"] == 11
lep_muon = lep["type"] == 13
ele_Iso = np.logical_and(lep_ele,np.logical_and( lep.lep_ptcone30/lep.pt < 0.15 , lep.lep_etcone20/lep.pt < 0.20))
muon_Iso = np.logical_and(lep_muon ,np.logical_and( lep.lep_ptcone30/lep.pt < 0.15 ,lep.lep_etcone20/lep.pt < 0.30))
pass_iso = np.logical_or(ele_Iso, muon_Iso)
lep.attrs_data["pass_iso"] = pass_iso
num_lep_event = kernels.sum_in_offsets(
backend,
lep.offsets,
lep.masks["all"],
mask_events,
lep.masks["all"],
nplib.int8,
)
mask_events_4lep = num_lep_event == 4
lep_attrs = [ "pt", "eta", "phi", "charge","type","mass", "pass_iso"]#, "ptcone30", "etcone20"]
lep0 = lep.select_nth(0, mask_events_4lep, lep.masks["all"], attributes=lep_attrs)
lep1 = lep.select_nth(1, mask_events_4lep, lep.masks["all"], attributes=lep_attrs)
lep2 = lep.select_nth(2, mask_events_4lep, lep.masks["all"], attributes=lep_attrs)
lep3 = lep.select_nth(3, mask_events_4lep, lep.masks["all"], attributes=lep_attrs)
mask_event_sumchg_zero = (lep0["charge"]+lep1["charge"]+lep2["charge"]+lep3["charge"] == 0)
sum_lep_type = lep0["type"]+lep1["type"]+lep2["type"]+lep3["type"]
all_pass_iso = (lep0["pass_iso"] & lep1["pass_iso"] & lep2["pass_iso"] & lep3["pass_iso"])
mask_event_sum_lep_type = np.logical_or((sum_lep_type == 44),np.logical_or((sum_lep_type == 48),(sum_lep_type == 52) ) )
mask_events = mask_events & mask_event_sumchg_zero & mask_events_4lep & mask_event_sum_lep_type & all_pass_iso
mask_lep1_passing_pt = lep1["pt"] > parameters["leading_lep_ptcut"]
mask_lep2_passing_pt = lep2["pt"] > parameters["lep_ptcut"]
mask_events = mask_events & mask_lep1_passing_pt & mask_lep2_passing_pt
l0 = to_cartesian(lep0)
l1 = to_cartesian(lep1)
l2 = to_cartesian(lep2)
l3 = to_cartesian(lep3)
llll = {k: l0[k] + l1[k] + l2[k] + l3[k] for k in ["px", "py", "pz", "e"]}
llll_sph = to_spherical(llll)
llll_sph["mass"] = llll_sph["mass"]/1000. # Convert to GeV
#import pdb;pdb.set_trace();
# compute a weighted histogram
weights = nplib.ones(num_events, dtype=nplib.float32)
## Add xsec weights based on sample name
if parameters["is_mc"]:
weights = data['eventvars']['mcWeight']*data['eventvars']['scaleFactor_PILEUP']*data['eventvars']['scaleFactor_ELE']*data['eventvars']['scaleFactor_MUON']*data['eventvars']['scaleFactor_LepTRIGGER']
info = infofile.infos[parameters["sample"]]
weights *= (lumi*1000*info["xsec"])/(info["sumw"]*info["red_eff"])
bins = nplib.linspace(110, 150, 11, dtype=nplib.float32)
hist_m4lep= Histogram(
*kernels.histogram_from_vector(
backend,
llll_sph["mass"][mask_events],
weights[mask_events],
bins,
)
)
# save it to the output
ret["hist_m4lep"] = hist_m4lep
return ret
def to_cartesian(arrs):
pt = arrs["pt"]
eta = arrs["eta"]
phi = arrs["phi"]
mass = arrs["mass"]
px, py, pz, e = backend.spherical_to_cartesian(pt, eta, phi, mass)
return {"px": px, "py": py, "pz": pz, "e": e}
def rapidity(e, pz):
    return 0.5*np.log((e + pz) / (e - pz))
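# Worked example (illustrative): for e = 5.0 and pz = 3.0,
# rapidity(5.0, 3.0) = 0.5 * ln((5 + 3) / (5 - 3)) = 0.5 * ln(4) ≈ 0.693.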
"""
A collection of tests for svdunfold
"""
import pytest
import numpy as np
import svdunfold
import helpers
def test_exception_for_rows_in_response_matrix():
"""Test if exception is thrown when number of bins in b is not the same
as the rows in the response matrix
"""
x_ini = np.histogram(np.zeros(10), bins=5)
b = np.histogram(np.zeros(10), bins=5)
A = np.zeros((4, 5))
cov = np.zeros((5, 5))
with pytest.raises(AssertionError, match=r".*Wrong dimensions.*"):
svdunfold.SVDunfold(x_ini, b, A, cov)
def test_exception_for_columns_in_response_matrix():
"""Test if exception is thrown when number of bins in x_ini is not the same
as the columns in the response matrix
"""
x_ini = np.histogram(np.zeros(10), bins=5)
b = np.histogram(np.zeros(10), bins=5)
A = np.zeros((5, 4))
cov = np.zeros((5, 5))
with pytest.raises(AssertionError, match=r".*Wrong dimensions.*"):
svdunfold.SVDunfold(x_ini, b, A, cov)
def test_exception_for_symmetric_covariance():
"""Test if exception is thrown when the covariance matrix is not symmetric"""
x_ini = np.histogram(np.zeros(10), bins=5)
b = np.histogram(np.zeros(10), bins=5)
A = np.zeros((5, 5))
cov = np.zeros((5, 5))
cov[0, 1] = 1
with pytest.raises(AssertionError, match=r".*is not symmetric.*"):
svdunfold.SVDunfold(x_ini, b, A, cov)
def test_contruct_c_matrix_3d():
"""Test if 3d second derivative matrix is constructed correctly"""
c_matrix = np.array([[-1, 1, 0], [1, -2, 1], [0, 1, -1]])
assert np.array_equal(c_matrix, helpers.calc_second_deriv_matrix(3, 0))
def test_contruct_c_matrix_7d():
"""Test if 7d second derivative matrix is constructed correctly"""
c_matrix = np.array([[-1, 1, 0, 0, 0, 0, 0],
[1, -2, 1, 0, 0, 0, 0],
[0, 1, -2, 1, 0, 0, 0],
[0, 0, 1, -2, 1, 0, 0],
[0, 0, 0, 1, -2, 1, 0],
[0, 0, 0, 0, 1, -2, 1],
[0, 0, 0, 0, 0, 1, -1]])
assert np.array_equal(
c_matrix, helpers.calc_second_deriv_matrix(7, 0))
def test_contruct_c_matrix_7d_with_xi():
"""Test if 7d second derivative matrix with x=0.1 is constructed correctly"""
c_matrix = np.array([[-0.9, 1, 0, 0, 0, 0, 0],
[1, -1.9, 1, 0, 0, 0, 0],
[0, 1, -1.9, 1, 0, 0, 0],
[0, 0, 1, -1.9, 1, 0, 0],
[0, 0, 0, 1, -1.9, 1, 0],
[0, 0, 0, 0, 1, -1.9, 1],
[0, 0, 0, 0, 0, 1, -0.9]])
assert np.array_equal(
c_matrix, helpers.calc_second_deriv_matrix(7, 0.1))
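# Sketch (an assumption, not the project's actual helpers module): a second-derivative
# matrix consistent with the expectations above could be built as
#   def calc_second_deriv_matrix(n, xi):
#       c = np.diag(np.full(n, -2.0 + xi)) + np.diag(np.ones(n - 1), 1) + np.diag(np.ones(n - 1), -1)
#       c[0, 0] = c[-1, -1] = -1.0 + xi
#       return c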
def test_svd_on_covariance_matrix():
"""Test svd on covariance matrix"""
x_ini = np.histogram(np.zeros(10), bins=5)
b = np.histogram(np.zeros(10), bins=3)
A = np.zeros((3, 5))
cov = np.array([[4, 0, 0], [0, 25, 0], [0, 0, 16]])
unfold = svdunfold.SVDunfold(x_ini, b, A, cov)
Q_test = np.array([[0., 0., 1.], [1., 0., 0.], [0., 1., 0.]])
r_test = np.array([5., 4., 2.])
QT_test = np.array([[0., 1., 0.], [0., 0., 1.], [1., 0., 0.]])
Q, r, QT = unfold._perform_svd_on_covariance()
assert np.array_equal(Q, Q_test)
assert np.array_equal(r, r_test)
assert np.array_equal(QT, QT_test)
def test_transformed_b_measured_dimension():
"""Test if the dimensions of the transformed measured array are correct"""
x_ini = np.histogram(np.zeros(10), bins=5)
b = np.histogram(np.zeros(10), bins=4)
A = np.zeros((4, 5))
cov = np.zeros((4, 4))
Q = np.zeros((4, 4))
r = np.ones(4)
unfold = svdunfold.SVDunfold(x_ini, b, A, cov)
b_tilde = unfold._transform_b_measured(Q, r)
assert np.size(b_tilde) == 4
def test_transformed_measured_distribution():
"""Test if the transformed measured distribution is correct"""
x_ini = np.histogram(np.zeros(10), bins=3)
b = np.histogram([6, 7, 8, 9, 10], bins=3)
A = np.zeros((3, 3))
    cov = np.array([[4, 0, 0], [0, 25, 0], [0, 0, 16]])
Q = np.array([[0., 0., 1.], [1., 0., 0.], [0., 1., 0.]])
r = np.array([5., 4., 2.])
unfold = svdunfold.SVDunfold(x_ini, b, A, cov)
b_tilde = unfold._transform_b_measured(Q, r)
assert np.array_equal(np.array([2. / 5., 2. / 4., 1. / 2.]), b_tilde)
def test_transformed_response_correct_dimensions():
"""Test if the dimensions of the transposed response matrix are correct"""
x_ini = np.histogram(np.zeros(10), bins=3)
b = np.histogram(np.zeros(10), bins=7)
A = np.zeros((7, 3))
cov = np.eye(7)
Q = np.zeros((7, 7))
r = np.ones(7)
unfold = svdunfold.SVDunfold(x_ini, b, A, cov)
A_tilde = unfold._transform_response_matrix(Q, r)
assert A_tilde.shape == (7, 3)
def test_transformed_response_correct_values():
"""Test if the transformed response matrix is correct"""
x_ini = np.histogram(np.zeros(10), bins=3)
b = np.histogram(np.zeros(10), bins=3)
A = np.histogram2d([1, 2, 3, 4, 5], [6, 7, 8, 9, 10], bins=3)[0]
cov = np.array([[4, 0, 0], [0, 25, 0], [0, 0, 16]])
Q = np.array([[0., 0., 1.], [1., 0., 0.], [0., 1., 0.]])
    r = np.array([5., 4., 2.])
# Program by <NAME>
# www.github.com/agupta231
# Feb 2017
#
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import math
import numpy
import svgwrite
from config import Config
# Compatibility shim: this module was written for Python 2 (it uses xrange);
# under Python 3, fall back to the built-in range.
try:
    xrange
except NameError:
    xrange = range
class Cut:
def __init__(self, iteration, cut_type):
self.iteration = iteration
self.length = Config.initial_cube_size * Config.iteration_multiplier ** (iteration - 1)
self.type = cut_type
self.id = numpy.random.randint(0, 999999999)
self.__generate_tabs()
def generate_bounding_box(self, drawing, starting_pos, shape_id):
dwg = drawing.g(id=shape_id, style="font-size: 0.5")
dwg.add(drawing.rect(
insert=tuple(starting_pos),
size=(str(self.length), str(self.length)),
stroke_width=Config.stroke_thickness,
stroke=Config.cube_color,
fill="none"
))
dwg.add(drawing.text(
str(shape_id),
insert=tuple(starting_pos),
))
return dwg
def generate_cut(self, drawing, starting_pos):
self.drawing = drawing
if self.type == "a":
return self.__gen_cut_a(starting_pos)
elif self.type == "b":
return self.__gen_cut_b(starting_pos)
elif self.type == "c":
return self.__gen_cut_c(starting_pos)
elif self.type == "a90":
return self.__gen_cut_a90(starting_pos)
elif self.type == "b90":
return self.__gen_cut_b90(starting_pos)
elif self.type == "c90":
return self.__gen_cut_c90(starting_pos)
else:
return None
def __generate_tabs(self):
if math.floor(self.length) >= 3:
self.tab_count = math.floor(self.length)
if self.tab_count % 2 != 1:
self.tab_count -= 1
else:
self.tab_count = 3
self.tab_count = int(self.tab_count)
self.tab_width = self.length / self.tab_count
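    # Worked example for __generate_tabs above (illustrative): for length 10.0,
    # floor(10.0) = 10 is even, so tab_count becomes 9 and tab_width = 10.0 / 9 ≈ 1.11;
    # any length below 3 falls back to tab_count = 3.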
def __gen_cut_a(self, starting_pos):
shape = self.drawing.g(id=str(self.id))
# Top Edge
last_pos = starting_pos + numpy.array([self.tab_width, Config.material_thickness])
for i in xrange(self.tab_count - 2):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([0, -Config.material_thickness])))
last_pos += numpy.array([0, -Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, Config.material_thickness])))
last_pos += numpy.array([0, Config.material_thickness])
else:
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
# Left Edge
last_pos = starting_pos + numpy.array([Config.material_thickness, self.tab_width])
for i in xrange(self.tab_count - 2):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([-Config.material_thickness, 0])))
last_pos += numpy.array([-Config.material_thickness, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([Config.material_thickness, 0])))
last_pos += numpy.array([Config.material_thickness, 0])
else:
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
# Bottom Edge
last_pos = starting_pos + numpy.array([self.tab_width, self.length - Config.material_thickness])
for i in xrange(self.tab_count - 2):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([0, Config.material_thickness])))
last_pos += numpy.array([0, Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, -Config.material_thickness])))
last_pos += numpy.array([0, -Config.material_thickness])
else:
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
# Right Edge
last_pos = starting_pos + numpy.array([self.length - Config.material_thickness, self.tab_width])
for i in xrange(self.tab_count - 2):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([Config.material_thickness, 0])))
last_pos += numpy.array([Config.material_thickness, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([-Config.material_thickness, 0])))
last_pos += numpy.array([-Config.material_thickness, 0])
else:
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
# Top left corner
last_pos = starting_pos + numpy.array([Config.material_thickness, self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([0, -(self.tab_width - Config.material_thickness)])))
last_pos += numpy.array([0, -(self.tab_width - Config.material_thickness)])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width - Config.material_thickness, 0])))
# Top right corner
last_pos = starting_pos + numpy.array([self.length - self.tab_width, Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width - Config.material_thickness, 0])))
last_pos += numpy.array([self.tab_width - Config.material_thickness, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width - Config.material_thickness])))
# Bottom left corner
last_pos = starting_pos + numpy.array([self.tab_width, self.length - Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([-(self.tab_width - Config.material_thickness), 0])))
last_pos += numpy.array([-(self.tab_width - Config.material_thickness), 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, -(self.tab_width - Config.material_thickness)])))
# Bottom right corner
last_pos = starting_pos + numpy.array([self.length - Config.material_thickness, self.length - self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width - Config.material_thickness])))
last_pos += numpy.array([0, self.tab_width - Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([-(self.tab_width - Config.material_thickness), 0])))
return shape
def __gen_cut_b(self, starting_pos):
shape = self.drawing.g(id=str(self.id))
# Top Edge
last_pos = list(starting_pos)
for i in xrange(self.tab_count):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
else:
shape.add(self.__gen_line(last_pos, numpy.array([0, Config.material_thickness])))
last_pos += numpy.array([0, Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, -Config.material_thickness])))
last_pos += numpy.array([0, -Config.material_thickness])
# Left Edge
last_pos = list(starting_pos)
for i in xrange(self.tab_count):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
else:
shape.add(self.__gen_line(last_pos, numpy.array([Config.material_thickness, 0])))
last_pos += numpy.array([Config.material_thickness, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([-Config.material_thickness, 0])))
last_pos += numpy.array([-Config.material_thickness, 0])
# Bottom Edge
last_pos = list(starting_pos) + numpy.array([0, self.length])
for i in xrange(self.tab_count):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
else:
shape.add(self.__gen_line(last_pos, numpy.array([0, -Config.material_thickness])))
last_pos += numpy.array([0, -Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, Config.material_thickness])))
last_pos += numpy.array([0, Config.material_thickness])
# Right Edge
last_pos = list(starting_pos) + numpy.array([self.length, 0])
for i in xrange(self.tab_count):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
else:
shape.add(self.__gen_line(last_pos, numpy.array([-Config.material_thickness, 0])))
last_pos += numpy.array([-Config.material_thickness, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([Config.material_thickness, 0])))
last_pos += numpy.array([Config.material_thickness, 0])
return shape
def __gen_cut_c(self, starting_pos):
shape = self.drawing.g(id=str(self.id))
# Top Edge
last_pos = list(starting_pos) + numpy.array([self.tab_width, 0])
for i in xrange(self.tab_count - 2):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([0, Config.material_thickness])))
last_pos += numpy.array([0, Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, -Config.material_thickness])))
last_pos += numpy.array([0, -Config.material_thickness])
else:
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
# Bottom Edge
last_pos = list(starting_pos) + numpy.array([self.tab_width, self.length])
for i in xrange(self.tab_count - 2):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([0, -Config.material_thickness])))
last_pos += numpy.array([0, -Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, Config.material_thickness])))
last_pos += numpy.array([0, Config.material_thickness])
else:
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
# Left Edge
last_pos = list(starting_pos) + numpy.array([Config.material_thickness, self.tab_width])
for i in xrange(self.tab_count - 2):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([-Config.material_thickness, 0])))
last_pos += numpy.array([-Config.material_thickness, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([Config.material_thickness, 0])))
last_pos += numpy.array([Config.material_thickness, 0])
else:
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
# Right Edge
last_pos = list(starting_pos) + numpy.array([self.length - Config.material_thickness, self.tab_width])
for i in xrange(self.tab_count - 2):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([Config.material_thickness, 0])))
last_pos += numpy.array([Config.material_thickness, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([-Config.material_thickness, 0])))
last_pos += numpy.array([-Config.material_thickness, 0])
else:
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
# Top left corner
last_pos = list(starting_pos) + numpy.array([Config.material_thickness, self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([0, -self.tab_width])))
last_pos += numpy.array([0, -self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width - Config.material_thickness, 0])))
# Top right corner
last_pos = list(starting_pos) + numpy.array([self.length - self.tab_width, 0])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width - Config.material_thickness, 0])))
last_pos += numpy.array([self.tab_width - Config.material_thickness, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
# Bottom left corner
last_pos = list(starting_pos) + numpy.array([Config.material_thickness, self.length - self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width - Config.material_thickness, 0])))
# Bottom right corner
last_pos = list(starting_pos) + numpy.array([self.length - Config.material_thickness, self.length - self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([-(self.tab_width - Config.material_thickness), 0])))
return shape
def __gen_cut_a90(self, starting_pos):
shape = self.drawing.g(id=str(self.id))
# Top Edge
last_pos = starting_pos + numpy.array([self.tab_width, Config.material_thickness])
for i in xrange(self.tab_count - 2):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([0, -Config.material_thickness])))
last_pos += numpy.array([0, -Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, Config.material_thickness])))
last_pos += numpy.array([0, Config.material_thickness])
else:
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
# Left Edge
last_pos = starting_pos + numpy.array([Config.material_thickness, self.tab_width])
for i in xrange(self.tab_count - 2):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([-Config.material_thickness, 0])))
last_pos += numpy.array([-Config.material_thickness, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([Config.material_thickness, 0])))
last_pos += numpy.array([Config.material_thickness, 0])
else:
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
# Bottom Edge
last_pos = starting_pos + numpy.array([self.tab_width, self.length - Config.material_thickness])
for i in xrange(int(math.floor((self.tab_count - 2) / 2))):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([0, Config.material_thickness])))
last_pos += numpy.array([0, Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, -Config.material_thickness])))
last_pos += numpy.array([0, -Config.material_thickness])
else:
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
# Right Edge
last_pos = starting_pos + numpy.array([self.length - Config.material_thickness, self.tab_width])
for i in xrange(int(math.floor((self.tab_count - 2) / 2))):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([Config.material_thickness, 0])))
last_pos += numpy.array([Config.material_thickness, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([-Config.material_thickness, 0])))
last_pos += numpy.array([-Config.material_thickness, 0])
else:
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
# Top left corner
last_pos = starting_pos + numpy.array([Config.material_thickness, self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([0, -(self.tab_width - Config.material_thickness)])))
last_pos += numpy.array([0, -(self.tab_width - Config.material_thickness)])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width - Config.material_thickness, 0])))
# Top right corner
last_pos = starting_pos + numpy.array([self.length - self.tab_width, Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width - Config.material_thickness, 0])))
last_pos += numpy.array([self.tab_width - Config.material_thickness, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width - Config.material_thickness])))
# Bottom left corner
last_pos = starting_pos + numpy.array([self.tab_width, self.length - Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([-(self.tab_width - Config.material_thickness), 0])))
last_pos += numpy.array([-(self.tab_width - Config.material_thickness), 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, -(self.tab_width - Config.material_thickness)])))
# Bottom right cutout
last_pos = starting_pos + numpy.array([self.length - Config.material_thickness, (self.length - self.tab_width) / 2])
shape.add(self.__gen_line(last_pos, numpy.array([Config.material_thickness, 0])))
last_pos += numpy.array([Config.material_thickness, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width / 2])))
last_pos += numpy.array([0, self.tab_width / 2])
shape.add(self.__gen_line(last_pos, numpy.array([-self.length / 2, 0])))
last_pos += numpy.array([-self.length / 2, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.length / 2])))
last_pos += numpy.array([0, self.length / 2])
shape.add(self.__gen_line(last_pos, numpy.array([-self.tab_width / 2, 0])))
last_pos += numpy.array([-self.tab_width / 2, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, -Config.material_thickness])))
return shape
def __gen_cut_b90(self, starting_pos):
shape = self.drawing.g(id=str(self.id))
# Top Edge
last_pos = list(starting_pos)
for i in xrange(self.tab_count):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
else:
shape.add(self.__gen_line(last_pos, numpy.array([0, Config.material_thickness])))
last_pos += numpy.array([0, Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, -Config.material_thickness])))
last_pos += numpy.array([0, -Config.material_thickness])
# Left Edge
last_pos = list(starting_pos)
for i in xrange(int(math.floor(self.tab_count / 2))):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
else:
shape.add(self.__gen_line(last_pos, numpy.array([Config.material_thickness, 0])))
last_pos += numpy.array([Config.material_thickness, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([-Config.material_thickness, 0])))
last_pos += numpy.array([-Config.material_thickness, 0])
# Bottom Edge
last_pos = list(starting_pos) + numpy.array([self.length, self.length])
for i in xrange(int(math.floor(self.tab_count / 2))):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([-self.tab_width, 0])))
last_pos += numpy.array([-self.tab_width, 0])
else:
shape.add(self.__gen_line(last_pos, numpy.array([0, -Config.material_thickness])))
last_pos += numpy.array([0, -Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([-self.tab_width, 0])))
last_pos += numpy.array([-self.tab_width, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, Config.material_thickness])))
last_pos += numpy.array([0, Config.material_thickness])
# Right Edge
last_pos = list(starting_pos) + numpy.array([self.length, 0])
for i in xrange(self.tab_count):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
else:
shape.add(self.__gen_line(last_pos, numpy.array([-Config.material_thickness, 0])))
last_pos += numpy.array([-Config.material_thickness, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([Config.material_thickness, 0])))
last_pos += numpy.array([Config.material_thickness, 0])
# Bottom Left cutout
last_pos = list(starting_pos) + numpy.array([0, (self.length - self.tab_width) / 2])
shape.add(self.__gen_line(last_pos, numpy.array([Config.material_thickness, 0])))
last_pos += numpy.array([Config.material_thickness, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width / 2])))
last_pos += numpy.array([0, self.tab_width / 2])
shape.add(self.__gen_line(last_pos, numpy.array([self.length / 2 - Config.material_thickness, 0])))
last_pos += numpy.array([self.length / 2 - Config.material_thickness, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.length / 2 - Config.material_thickness])))
last_pos += numpy.array([0, self.length / 2 - Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width / 2, 0])))
last_pos += numpy.array([self.tab_width / 2, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, Config.material_thickness])))
return shape
def __gen_cut_c90(self, starting_pos):
shape = self.drawing.g(id=str(self.id))
# Top Edge
last_pos = list(starting_pos) + numpy.array([self.tab_width, 0])
for i in xrange(int(math.floor((self.tab_count - 2) / 2))):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([0, Config.material_thickness])))
last_pos += numpy.array([0, Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, -Config.material_thickness])))
last_pos += numpy.array([0, -Config.material_thickness])
else:
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
# Bottom Edge
last_pos = list(starting_pos) + numpy.array([self.tab_width, self.length])
for i in xrange(self.tab_count - 2):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([0, -Config.material_thickness])))
last_pos += numpy.array([0, -Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, Config.material_thickness])))
last_pos += numpy.array([0, Config.material_thickness])
else:
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
# Left Edge
last_pos = list(starting_pos) + numpy.array([Config.material_thickness, self.tab_width])
for i in xrange(self.tab_count - 2):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([-Config.material_thickness, 0])))
last_pos += numpy.array([-Config.material_thickness, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([Config.material_thickness, 0])))
last_pos += numpy.array([Config.material_thickness, 0])
else:
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
# Right Edge
last_pos = list(starting_pos) + numpy.array([self.length - Config.material_thickness, self.length - self.tab_width])
for i in xrange(int(math.floor((self.tab_count - 2) / 2))):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([Config.material_thickness, 0])))
last_pos += numpy.array([Config.material_thickness, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, -self.tab_width])))
last_pos += numpy.array([0, -self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([-Config.material_thickness, 0])))
last_pos += numpy.array([-Config.material_thickness, 0])
else:
shape.add(self.__gen_line(last_pos, numpy.array([0, -self.tab_width])))
last_pos += numpy.array([0, -self.tab_width])
# Top left corner
        last_pos = list(starting_pos) + numpy.array([Config.material_thickness, self.tab_width])
"""This module contains functions that coordinate
the calling of functions in the module ModelFunctions.py
that calculate the variation of concentration
with time according to tracer kinetic models.
The function ModelSelector coordinates the execution of the
appropriate function according to the model selected on the GUI.
The function, CurveFit calls the Model function imported from
the lmfit Python package to fit any of the models in ModelFunctions.py
to actual concentration/time data.
Initially curve fitting was done using scipy.optimize.curve_fit but
lmfit was found to be more suitable. The code pertaining to the scipy
implementation has been commented out.
"""
#from scipy.optimize import curve_fit
from lmfit import Parameters, Model
import numpy as np
import logging
import importlib
#Although a dynamic import of ModelFunctions is done in the 2 functions in this module
#an import has to be done here, so that Model Functions is included when a compiled
#version of this program is created using Pyinstaller.
import ModelFunctions
logger = logging.getLogger(__name__)
def ModelSelector(functionName: str,
moduleName: str,
inletType:str,
times,
AIFConcentration,
parameterArray,
constantsString,
VIFConcentration=[]):
"""Function called in the GUI of the model fitting
application to select & run the function corresponding
to each model and return a list of
calculated concentrations.
Input Parameters
----------------
functionName - Name of the function corresponding to the model.
inletType - String variable indicating if the model is single or
dual compartment. The value 'single' indicates single compartment.
The value 'dual' indicates dual compartment.
time - NumPy Array of time values stored as floats. Created from a
Python list.
AIFConcentration - NumPy Array of concentration values stored as
floats. Created from a Python list.
These concentrations are the Arterial Input Function input
to the model.
parameterArray - list of model input parameter values.
constantsString - String representation of a dictionary of constant
name:value pairs used to convert concentrations predicted by the
models to MR signal values.
VIFConcentration - Optional NumPy Array of concentration values stored as floats.
Created from a Python list. These concentrations are the Venous
Input Function input to the model.
Returns
------
Returns a list of MR signals calculated using the selected model at the times in the array time.
"""
logger.info("In ModelFunctionsHelper.ModelSelector. Called with model {} and parameters {}".format(functionName, parameterArray))
try:
if inletType == 'single':
            timeInputConcs2DArray = np.column_stack((times, AIFConcentration))
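            # Illustrative note: np.column_stack pairs each time point with its AIF sample, e.g.
            # np.column_stack(([0.0, 1.0, 2.0], [5.0, 6.0, 7.0])) gives
            # array([[0., 5.], [1., 6.], [2., 7.]]).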
import os
import sys
import pickle
import numpy as np
import PIL as pil
import face_recognition
# Internal data structure: key-value pairs =>
# {
# "id_1": np.ndarray,
# "id_2": np.ndarray,
# "id_3": np.ndarray,
# ...
# "id_N": np.ndarray
# }
class FaceData():
    # Constructor: initialization
def __init__(self, path = './face_core/face.data', flag = None):
        # Save indicator (checked on save to decide whether the data needs to be re-written)
        # 0 => the operation did not change the data (query)
        # 1 => the operation changed the data (add/delete)
self.counter = 0
        # Path to the data file
self.face_data_path = os.path.join(sys.path[0], path)
        # Initialization (start with an empty database)
if(flag == "init"):
self.face_data = { }
self.counter = 1
        # Initialization (normal startup)
else:
try:
                # Face database (stored as binary)
f = open(self.face_data_path, 'rb')
self.face_data = pickle.load(f)
f.close()
except:
raise "Error: open face.data failed or unknown format"
    # API function: use the save indicator to decide whether the data needs to be written back to disk (i.e. whether it changed)
    # Note: do not use __del__ for this
def dataSaver(self):
if(self.counter == 1):
f = open(self.face_data_path, 'wb')
pickle.dump(self.face_data, f)
f.close()
else:
print("in faces database: Nothing to do")
    # API function: face recognition
    # Parameters: an image path or binary image data
    # Returns: the matching UID in the face database and the face bounding box (for front-end display)
def recognizeFace(self, unknown_img_path = None, unknown_img_obj = None):
unknown_image = None
        # Different parameters => different handling
if(unknown_img_path != None and unknown_img_obj == None):
            # Build the full path
p = os.path.join(sys.path[0],
'face_core/face_images/' + unknown_img_path)
            # Load the local image
unknown_image = face_recognition.load_image_file(p)
elif(unknown_img_path == None and unknown_img_obj != None):
unknown_image = pil.Image.open(unknown_img_obj)
# 'RGB' (8-bit RGB, 3 channels) or 'L' (black and white)
unknown_image = unknown_image.convert('RGB')
unknown_image = np.array(unknown_image)
else:
raise "Error: face_class -> recognizeFace"
        # Returned data format => list
unknown_encoding = face_recognition.face_encodings(unknown_image)
        # Constraint: recognition is limited to a single face
if(len(unknown_encoding) == 1):
            # Check whether the target face is in the database (registered user or stranger)
face_uname = self.__checkFaceEncoding(unknown_encoding[0])
if(face_uname == None):
                # Return the 'unknown' UID
return {'uid': 'unknown'}
else:
face_locations = face_recognition.face_locations(unknown_image)
for (top, right, bottom, left) in face_locations:
                    # Return the UID and the face coordinates
return {
'uid': face_uname,
'locations': {
'tops': top,
'right': right,
'bottom': bottom,
'left': left
}
}
else:
            # Return 'noface'
return {'uid': 'noface'}
    # API function: look up registration info in the face database
    # Parameter: UID
    # Returns:
def showFace(self, uid = None):
if(uid == None):
            print(self.face_data)
else:
            print(self.face_data[uid])
    # API function: add face data
def addFace(self, uid, img_path = None, img_obj = None):
target_image = None
        # Different parameters => different handling
if(img_path != None and img_obj == None):
target_image = face_recognition.load_image_file(img_path)
elif(img_path == None and img_obj != None):
target_image = pil.Image.open(img_obj)
# 'RGB' (8-bit RGB, 3 channels) or 'L' (black and white)
target_image = target_image.convert('RGB')
            target_image = np.array(target_image)
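# Usage sketch for FaceData (illustrative; the file name and probe image are made up):
#   db = FaceData()                                          # loads ./face_core/face.data
#   print(db.recognizeFace(unknown_img_path='probe.jpg'))    # {'uid': ..., 'locations': ...} or {'uid': 'unknown'/'noface'}
#   db.dataSaver()                                           # writes back only if the data changed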
import numpy as np
import fastwer
import os
f = open("./result/gt_result.txt", 'r')
# f = open("./result/fastspeech_result.txt", 'r')
# f = open("./result/gst_result.txt", 'r')
a_err_list = []
d_err_list = []
f_err_list = []
h_err_list = []
n_err_list = []
sa_err_list = []
su_err_list = []
temp_err = []
cur_emo = None
while True:
line = f.readline()
if not line: break
items = line.split("|")
text = items[1]
# GT
items = items[0].replace(".wav", "").split("_")
emo = items[1]
f_name = "{}_{}_{}".format(items[0], items[1], items[2])
# GST
# items = items[0].replace(".wav", "").replace(".npy", "").split("_")
# emo = items[2]
# f_name = "{}_{}_{}".format(items[1], items[2], items[3])
# FS
# items = items[0].replace(".wav", "").replace(".npy", "").split("_")
# emo = items[3]
# f_name = "{}_{}_{}".format(items[2], items[3], items[4])
# n = np.load("/hd0/hs_oh/emotion_korea/{}/{}.npz".format(emo, f_name), 'r')
f_path = "/hd0/cb_im/emotion_kor/{}_3000_16000Hz/txt/{}.txt".format(emo, f_name)
if not os.path.exists(f_path):
print("NO FILE: {}".format(f_path))
continue
n = open(f_path, 'r')
# gt = str(n["text"])
gt = n.readline()
gt = gt.replace(" ", "")
gt = gt.replace(".", "")
hypo = text.strip()
hypo = hypo.replace(" ", "")
hypo = hypo.replace(".", "")
print(gt)
print(hypo)
cer = fastwer.score([hypo], [gt], char_level=True)
if emo == "ang":
a_err_list.append(cer)
if emo == "dis":
d_err_list.append(cer)
if emo == "fea":
f_err_list.append(cer)
if emo == "neu":
n_err_list.append(cer)
if emo == "hap":
h_err_list.append(cer)
if emo == "sad":
sa_err_list.append(cer)
if emo == "sur":
su_err_list.append(cer)
n.close()
# wer = fastwer.score([hypo], [gt])
# print(gt)
# print(hypo)
# print(cer)
# err_list.append(cer)
# print(wer)
a_err = np.mean(a_err_list)
print(len(a_err_list), "ang", a_err)
d_err = np.mean(d_err_list)
print(len(d_err_list), "dis", d_err)
f_err = np.mean(f_err_list)
print(len(f_err_list), "fea", f_err)
n_err = np.mean(n_err_list)
print(len(n_err_list), "neu", n_err)
h_err = np.mean(h_err_list)
print(len(h_err_list), "hap", h_err)
sa_err = np.mean(sa_err_list)
print(len(sa_err_list), "sad", sa_err)
su_err = np.mean(su_err_list)
print(len(su_err_list), "sur", su_err)
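# Sketch (illustrative): an overall CER could be obtained by pooling the per-emotion lists:
# all_err = a_err_list + d_err_list + f_err_list + h_err_list + n_err_list + sa_err_list + su_err_list
# print(len(all_err), "overall", np.mean(all_err))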
import os, torch
import torchvision
import numpy as np
import scipy.io as sio
from utils import *
from torchvision import transforms
from torch.utils.data import DataLoader, Dataset, WeightedRandomSampler
def smallnorb(args, dataset_paths):
transf = {'train': transforms.Compose([
transforms.ToPILImage(),
transforms.RandomCrop((args.crop_dim, args.crop_dim)),
transforms.ColorJitter(brightness=args.brightness/255., contrast=args.contrast),
transforms.ToTensor(),
Standardize()]),
# transforms.Normalize((0.75239172, 0.75738262), (0.1758033 , 0.17200065))]),
'test': transforms.Compose([
transforms.ToPILImage(),
transforms.CenterCrop((args.crop_dim, args.crop_dim)),
transforms.ToTensor(),
Standardize()])}
# transforms.Normalize((0.75239172, 0.75738262), (0.1758033 , 0.17200065))])}
config = {'train': True, 'test': False}
datasets = {i: smallNORB(dataset_paths[i], transform=transf[i],
shuffle=config[i]) for i in config.keys()}
# return data, labels dicts for new train set and class-balanced valid set
data, labels = random_split(data=datasets['train'].data,
labels=datasets['train'].labels,
n_classes=5,
n_samples_per_class=np.unique(
datasets['train'].labels, return_counts=True)[1] // 5) # % of train set per class
# define transforms for train set (without valid data)
transf['train_'] = transforms.Compose([
transforms.ToPILImage(),
transforms.RandomCrop((args.crop_dim, args.crop_dim)),
transforms.ColorJitter(brightness=args.brightness/255., contrast=args.contrast),
transforms.ToTensor(),
Standardize()])
# transforms.Normalize((0.75239172, 0.75738262), (0.1758033 , 0.17200065))])
# define transforms for class-balanced valid set
transf['valid'] = transforms.Compose([
transforms.ToPILImage(),
transforms.CenterCrop((args.crop_dim, args.crop_dim)),
transforms.ToTensor(),
Standardize()])
# transforms.Normalize((0.75239172, 0.75738262), (0.1758033 , 0.17200065))])
# save original full training set
datasets['train_valid'] = datasets['train']
# make new training set without validation samples
datasets['train'] = CustomDataset(data=data['train'],
labels=labels['train'], transform=transf['train_'])
# make class balanced validation set
datasets['valid'] = CustomDataset(data=data['valid'],
labels=labels['valid'], transform=transf['valid'])
config = {'train': True, 'train_valid': True,
'valid': False, 'test': False}
dataloaders = {i: DataLoader(datasets[i], shuffle=config[i], pin_memory=True,
num_workers=8, batch_size=args.batch_size) for i in config.keys()}
return dataloaders
class smallNORB(Dataset):
''' In:
data_path (string): path to the dataset split folder, i.e. train/valid/test
transform (callable, optional): transform to be applied on a sample.
Out:
sample (dict): sample data and respective label'''
def __init__(self, data_path, shuffle=True, transform=None):
self.data_path = data_path
self.shuffle = shuffle
self.transform = transform
self.data, self.labels = [], []
# get path for each class folder
for class_label_idx, class_name in enumerate(os.listdir(data_path)):
class_path = os.path.join(data_path, class_name)
# get name of each file per class and respective class name/label index
for _, file_name in enumerate(os.listdir(class_path)):
img = np.load(os.path.join(data_path, class_name, file_name))
# Out ← [H, W, C] ← [C, H, W]
if img.shape[0] < img.shape[1]:
img = np.moveaxis(img, 0, -1)
self.data.extend([img])
self.labels.append(class_label_idx)
self.data = np.array(self.data, dtype=np.uint8)
self.labels = np.array(self.labels)
if self.shuffle:
# shuffle the dataset
idx = np.random.permutation(self.data.shape[0])
self.data = self.data[idx]
self.labels = self.labels[idx]
def __len__(self):
return self.data.shape[0]
def __getitem__(self, idx):
image = self.data[idx]
if self.transform:
image = self.transform(image)
return image, self.labels[idx] # (X, Y)
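# Hypothetical usage sketch (the path and transform below are illustrative only):
# ds = smallNORB('/path/to/smallnorb/train', shuffle=True,
# transform=transforms.Compose([transforms.ToPILImage(), transforms.ToTensor()]))
# x, y = ds[0] # image tensor and integer class label, as returned by __getitem__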
def mnist(args, dataset_paths):
''' Loads the MNIST dataset.
Returns: train/valid/test set split dataloaders.
'''
transf = {'train': transforms.Compose([
transforms.RandomCrop((args.crop_dim, args.crop_dim), padding=args.padding),
transforms.ToTensor(),
transforms.Normalize((0.13066047,), (0.30810780,))
]),
'test': transforms.Compose([
transforms.Pad(np.maximum(0, (args.crop_dim-28) // 2)),
transforms.ToTensor(),
transforms.Normalize((0.13066047,), (0.30810780,))
])}
config = {'train': True, 'test': False}
datasets = {i: torchvision.datasets.MNIST(root=dataset_paths[i], transform=transf[i],
train=config[i], download=True) for i in config.keys()}
# split train into train and class-balanced valid set
data, labels = random_split(data=datasets['train'].data,
labels=datasets['train'].targets,
n_classes=10,
n_samples_per_class=np.repeat(500, 10)) # 500 per class
# define transforms for train set (without valid data)
transf['train_'] = transforms.Compose([
transforms.ToPILImage(),
transforms.RandomCrop((args.crop_dim, args.crop_dim), padding=args.padding),
transforms.ToTensor(),
transforms.Normalize((0.13066047,), (0.30810780,))
])
# define transforms for class-balanced valid set
transf['valid'] = transforms.Compose([
transforms.ToPILImage(),
transforms.Pad(np.maximum(0, (args.crop_dim-28) // 2)),
transforms.ToTensor(),
transforms.Normalize((0.13066047,), (0.30810780,))
])
# save original full training set
datasets['train_valid'] = datasets['train']
# make new training set without validation samples
datasets['train'] = CustomDataset(data=data['train'],
labels=labels['train'], transform=transf['train_'])
# make class balanced validation set
datasets['valid'] = CustomDataset(data=data['valid'],
labels=labels['valid'], transform=transf['valid'])
config = {'train': True, 'train_valid': True,
'valid': False, 'test': False}
dataloaders = {i: DataLoader(datasets[i], num_workers=8, pin_memory=True,
batch_size=args.batch_size, shuffle=config[i]) for i in config.keys()}
if args.test_affNIST:
working_dir = os.path.join(os.path.split(os.getcwd())[0], 'data', 'affNIST')
aff_transf = transforms.Compose([
transforms.ToPILImage(),
transforms.ToTensor(),
transforms.Normalize((0.13066047,), (0.30810780,))])
datasets['affNIST_test'] = affNIST(data_path=os.path.join(working_dir,'test'),
transform=aff_transf)
dataloaders['affNIST_test'] = DataLoader(datasets['affNIST_test'], pin_memory=True,
num_workers=8, batch_size=args.batch_size, shuffle=False)
return dataloaders
def svhn(args, dataset_paths):
''' Loads the SVHN dataset.
Returns: train/valid/test set split dataloaders.
'''
transf = {
'train': transforms.Compose([
transforms.RandomCrop((args.crop_dim, args.crop_dim), padding=args.padding),
transforms.ColorJitter(brightness=args.brightness, contrast=args.contrast),
transforms.ToTensor(),
# Standardize()]),
transforms.Normalize((0.4376821, 0.4437697, 0.47280442),
(0.19803012, 0.20101562, 0.19703614))]),
# 'extra': transforms.Compose([
# transforms.RandomCrop((args.crop_dim, args.crop_dim), padding=args.padding),
# transforms.ColorJitter(brightness=args.brightness, contrast=args.contrast),
# transforms.ToTensor(),
# # Standardize()]),
# transforms.Normalize((0.4376821, 0.4437697, 0.47280442),
# (0.19803012, 0.20101562, 0.19703614)),
'test': transforms.Compose([
transforms.ToTensor(),
# Standardize()])}
transforms.Normalize((0.4376821, 0.4437697, 0.47280442),
(0.19803012, 0.20101562, 0.19703614))])
}
# config = {'train': True, 'extra': True, 'test': False}
config = {'train': True, 'test': False}
datasets = {i: torchvision.datasets.SVHN(root=dataset_paths[i], transform=transf[i],
split=i, download=True) for i in config.keys()}
# weighted sampler weights for full(f) training set
f_s_weights = sample_weights(datasets['train'].labels)
# return data, labels dicts for new train set and class-balanced valid set
data, labels = random_split(data=datasets['train'].data,
labels=datasets['train'].labels,
n_classes=10,
n_samples_per_class=np.repeat(1000, 10).reshape(-1))
# define transforms for train set (without valid data)
transf['train_'] = transforms.Compose([
transforms.ToPILImage(),
transforms.RandomCrop((args.crop_dim, args.crop_dim), padding=args.padding),
transforms.ColorJitter(brightness=args.brightness, contrast=args.contrast),
transforms.ToTensor(),
# Standardize()])
transforms.Normalize((0.4376821, 0.4437697, 0.47280442),
(0.19803012, 0.20101562, 0.19703614))])
# define transforms for class-balanced valid set
transf['valid'] = transforms.Compose([
transforms.ToPILImage(),
transforms.ToTensor(),
# Standardize()])
transforms.Normalize((0.4376821, 0.4437697, 0.47280442),
(0.19803012, 0.20101562, 0.19703614))])
# save original full training set
datasets['train_valid'] = datasets['train']
# make channels last and convert to np arrays
data['train'] = np.moveaxis(np.array(data['train']), 1, -1)
data['valid'] = np.moveaxis(np.array(data['valid']), 1, -1)
# make new training set without validation samples
datasets['train'] = CustomDataset(data=data['train'],
labels=labels['train'], transform=transf['train_'])
# make class balanced validation set
datasets['valid'] = CustomDataset(data=data['valid'],
labels=labels['valid'], transform=transf['valid'])
# weighted sampler weights for new training set
s_weights = sample_weights(datasets['train'].labels)
config = {
'train': WeightedRandomSampler(s_weights,
num_samples=len(s_weights), replacement=True),
'train_valid': WeightedRandomSampler(f_s_weights,
num_samples=len(f_s_weights), replacement=True),
'valid': None, 'test': None}
dataloaders = {i: DataLoader(datasets[i], sampler=config[i],
num_workers=8, pin_memory=True, drop_last=True,
batch_size=args.batch_size) for i in config.keys()}
#NOTE: comment these to use the weighted sampler dataloaders above instead
config = {'train': True, 'train_valid': True,
'valid': False, 'test': False}
dataloaders = {i: DataLoader(datasets[i], num_workers=8, pin_memory=True,
batch_size=args.batch_size, shuffle=config[i]) for i in config.keys()}
return dataloaders
class affNIST(Dataset):
''' In:
data_path (string): path to the dataset split folder, i.e. train/valid/test
transform (callable, optional): transform to be applied on a sample.
Out:
image, label: sample data and respective label'''
def __init__(self, data_path, shuffle=True, transform=None):
self.data_path = data_path
self.shuffle = shuffle
self.transform = transform
self.split = self.data_path.split('/')[-1]
if self.split == 'train':
for i, file in enumerate(os.listdir(data_path)):
# load dataset .mat file batch
self.dataset = sio.loadmat(os.path.join(data_path, file))
# concatenate the 32 .mat files to make full dataset
if i == 0:
self.data = np.array(self.dataset['affNISTdata']['image'][0][0])
self.labels = np.array(self.dataset['affNISTdata']['label_int'][0][0])
else:
self.data = np.concatenate((self.data,
np.array(self.dataset['affNISTdata']['image'][0][0])), axis=1)
self.labels = np.concatenate((self.labels,
|
np.array(self.dataset['affNISTdata']['label_int'][0][0])
|
numpy.array
|
'''
Train and test bidirectional language models.
'''
import os
import time
import json
import re
import tensorflow as tf
import numpy as np
import pickle
# from tensorflow.python.ops.init_ops import glorot_uniform_initializer
from .models import MultimodalModel
from .data import Vocabulary, UnicodeCharsVocabulary, InvalidNumberOfCharacters, BidirectionalLMDataset
tf.logging.set_verbosity(tf.logging.INFO)
def print_variable_summary():
import pprint
variables = sorted([[v.name, v.get_shape()] for v in tf.global_variables()])
pprint.pprint(variables)
def average_gradients(tower_grads, batch_size, options):
# calculate average gradient for each shared variable across all GPUs
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
# We need to average the gradients across each GPU.
g0, v0 = grad_and_vars[0]
if g0 is None:
# no gradient for this variable, skip it
average_grads.append((g0, v0))
continue
if isinstance(g0, tf.IndexedSlices):
# If the gradient is type IndexedSlices then this is a sparse
# gradient with attributes indices and values.
# To average, need to concat them individually then create
# a new IndexedSlices object.
indices = []
values = []
for g, v in grad_and_vars:
indices.append(g.indices)
values.append(g.values)
all_indices = tf.concat(indices, 0)
avg_values = tf.concat(values, 0) / len(grad_and_vars)
# deduplicate across indices
av, ai = _deduplicate_indexed_slices(avg_values, all_indices)
grad = tf.IndexedSlices(av, ai, dense_shape=g0.dense_shape)
else:
# a normal tensor can just do a simple average
grads = []
for g, v in grad_and_vars:
# Add 0 dimension to the gradients to represent the tower.
expanded_g = tf.expand_dims(g, 0)
# Append on a 'tower' dimension which we will average over
grads.append(expanded_g)
# Average over the 'tower' dimension.
grad = tf.concat(grads, 0)
grad = tf.reduce_mean(grad, 0)
# the Variables are redundant because they are shared
# across towers. So.. just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
assert len(average_grads) == len(list(zip(*tower_grads)))
return average_grads
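# Illustrative layout (hypothetical tensors) of the tower_grads structure consumed above:
# tower_grads = [
# [(g0_gpu0, v0), (g1_gpu0, v1)], # gradients computed on GPU 0
# [(g0_gpu1, v0), (g1_gpu1, v1)], # gradients computed on GPU 1
# ]
# zip(*tower_grads) pairs each shared variable with its per-GPU gradients,
# which are then averaged into a single (grad, var) entry per variable.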
def summary_gradient_updates(grads, opt, lr):
'''get summary ops for the magnitude of gradient updates'''
# strategy:
# make a dict of variable name -> [variable, grad, adagrad slot]
vars_grads = {}
for v in tf.trainable_variables():
vars_grads[v.name] = [v, None, None]
for g, v in grads:
vars_grads[v.name][1] = g
vars_grads[v.name][2] = opt.get_slot(v, 'accumulator')
# now make summaries
ret = []
for vname, (v, g, a) in vars_grads.items():
if g is None:
continue
if isinstance(g, tf.IndexedSlices):
# a sparse gradient - only take norm of params that are updated
values = tf.gather(v, g.indices)
updates = lr * g.values
if a is not None:
updates /= tf.sqrt(tf.gather(a, g.indices))
else:
values = v
updates = lr * g
if a is not None:
updates /= tf.sqrt(a)
values_norm = tf.sqrt(tf.reduce_sum(v * v)) + 1.0e-7
updates_norm = tf.sqrt(tf.reduce_sum(updates * updates))
ret.append(
tf.summary.scalar('UPDATE/' + vname.replace(":", "_"), updates_norm / values_norm))
return ret
def _deduplicate_indexed_slices(values, indices):
"""Sums `values` associated with any non-unique `indices`.
Args:
values: A `Tensor` with rank >= 1.
indices: A one-dimensional integer `Tensor`, indexing into the first
dimension of `values` (as in an IndexedSlices object).
Returns:
A tuple of (`summed_values`, `unique_indices`) where `unique_indices` is a
de-duplicated version of `indices` and `summed_values` contains the sum of
`values` slices associated with each unique index.
"""
unique_indices, new_index_positions = tf.unique(indices)
summed_values = tf.unsorted_segment_sum(
values, new_index_positions,
tf.shape(unique_indices)[0])
return (summed_values, unique_indices)
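# Worked example of the deduplication above:
# values = [[1.], [2.], [3.]], indices = [0, 0, 2]
# tf.unique -> unique_indices = [0, 2], new_index_positions = [0, 0, 1]
# segment-summing by position -> summed_values = [[3.], [3.]]
# i.e. the two slices that both target index 0 are summed into one row.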
def _get_feed_dict_from_X(X, start, end, model, char_inputs, bidirectional):
feed_dict = {}
if not char_inputs:
token_ids = X['token_ids'][start:end]
feed_dict[model.token_ids] = token_ids
else:
# character inputs
char_ids = X['tokens_characters'][start:end]
feed_dict[model.tokens_characters] = char_ids
if bidirectional:
if not char_inputs:
feed_dict[model.token_ids_reverse] = \
X['token_ids_reverse'][start:end]
else:
feed_dict[model.tokens_characters_reverse] = \
X['tokens_characters_reverse'][start:end]
if 'tokens_acoustic' in X :
feed_dict[model.tokens_acoustic] = X['tokens_acoustic'][start:end]
if bidirectional:
feed_dict[model.tokens_acoustic_reverse] = X['tokens_acoustic_reverse'][start:end]
# else :
# feed_dict[model.tokens_acoustic] = np.zeros((256,1,MAX_ACOUSTIC_SIZE,74))
# if bidirectional:
# feed_dict[model.tokens_acoustic_reverse] = np.zeros((256,1,MAX_ACOUSTIC_SIZE,74))
# now the targets with weights
next_id_placeholders = [[model.next_token_id, '']]
if bidirectional:
next_id_placeholders.append([model.next_token_id_reverse, '_reverse'])
for id_placeholder, suffix in next_id_placeholders:
name = 'next_token_id' + suffix
feed_dict[id_placeholder] = X[name][start:end]
return feed_dict
def train(options, data, n_gpus, tf_save_dir, tf_log_dir,
restart_ckpt_file=None):
if not os.path.exists(tf_save_dir):
os.makedirs(tf_save_dir)
# not restarting so save the options
with open(os.path.join(tf_save_dir, 'options.json'), 'w') as fout:
fout.write(json.dumps(options))
with tf.device('/cpu:0'):
global_step = tf.get_variable(
'global_step', [],
initializer=tf.constant_initializer(0), trainable=False)
# set up the optimizer
lr = options.get('learning_rate', options['learning_rate'])
opt = tf.train.AdagradOptimizer(learning_rate=lr,
initial_accumulator_value=1.0)
# calculate the gradients on each GPU
tower_grads = []
models = []
train_perplexity = tf.get_variable(
'train_perplexity', [],
initializer=tf.constant_initializer(0.0), trainable=False)
norm_summaries = []
for k in range(n_gpus):
with tf.device('/gpu:%d' % k):
with tf.variable_scope('lm', reuse=k > 0):
# calculate the loss for one model replica and get
# lstm states
model = MultimodalModel(options, True, lm_training=isinstance(data, BidirectionalLMDataset))
loss = model.total_loss
models.append(model)
# get gradients
grads = opt.compute_gradients(
loss * options['unroll_steps'],
aggregation_method=tf.AggregationMethod.EXPERIMENTAL_TREE,
)
tower_grads.append(grads)
# keep track of loss across all GPUs
train_perplexity += loss
print_variable_summary()
# calculate the mean of each gradient across all GPUs
grads = average_gradients(tower_grads, options['batch_size'], options)
grads, norm_summary_ops = clip_grads(grads, options, True, global_step)
norm_summaries.extend(norm_summary_ops)
input_summaries = [tf.summary.scalar("acoustic_input/input", tf.reduce_mean(model.tokens_acoustic))]
# input_summaries.append(tf.summary.scalar("acoustic_input/embedding_acoustic", tf.reduce_mean(model.embedding_acoustic)))
# var1 = [var for var in tf.global_variables() if "lm/MME/CNN_ACO/W_cnn_0:0" in var.name][0]
# input_summaries.append(tf.summary.scalar("acoustic_input/aco_cnn_weight", tf.reduce_mean(var1)))
# print(var1.name)
# var1 = [var for var in tf.global_variables() if "lm/CNN/W_cnn_0:0" in var.name][0]
# print(var1.name)
# input_summaries.append(tf.summary.scalar("acoustic_input/lex_cnn_weight", tf.reduce_mean(var1)))
# log the training perplexity
train_perplexity = tf.exp(train_perplexity / n_gpus)
perplexity_summary = tf.summary.scalar(
'train_perplexity', train_perplexity)
# some histogram summaries. all models use the same parameters
# so only need to summarize one
histogram_summaries = [
tf.summary.histogram('token_embedding', models[0].embedding)
]
# tensors of the output from the LSTM layer
lstm_out = tf.get_collection('lstm_output_embeddings')
histogram_summaries.append(
tf.summary.histogram('lstm_embedding_0', lstm_out[0]))
if options.get('bidirectional', False):
# also have the backward embedding
histogram_summaries.append(
tf.summary.histogram('lstm_embedding_1', lstm_out[1]))
# apply the gradients to create the training operation
train_op = opt.apply_gradients(grads, global_step=global_step)
# histograms of variables
for v in tf.global_variables():
histogram_summaries.append(tf.summary.histogram(v.name.replace(":", "_"), v))
# get the gradient updates -- these aren't histograms, but we'll
# only update them when histograms are computed
histogram_summaries.extend(
summary_gradient_updates(grads, opt, lr))
saver = tf.train.Saver(tf.global_variables(), max_to_keep=2)
summary_op = tf.summary.merge(
[perplexity_summary] + norm_summaries + input_summaries
)
hist_summary_op = tf.summary.merge(histogram_summaries)
init = tf.global_variables_initializer()
# do the training loop
bidirectional = options.get('bidirectional', False)
with tf.Session(config=tf.ConfigProto(
allow_soft_placement=True)) as sess:
sess.run(init)
# load the checkpoint data if needed
if restart_ckpt_file is not None:
try :
loader = tf.train.Saver()
loader.restore(sess, restart_ckpt_file)
print("Loaded from restart checkpoint")
except :
variables_to_restore = [v for v in tf.global_variables() if 'MME' not in v.name]
loader = tf.train.Saver(variables_to_restore)
loader.restore(sess, restart_ckpt_file)
print("Loaded from restart checkpoint")
summary_writer = tf.summary.FileWriter(tf_log_dir, sess.graph)
# For each batch:
# Get a batch of data from the generator. The generator will
# yield batches of size batch_size * n_gpus that are sliced
# and fed for each required placeholder.
#
# We also need to be careful with the LSTM states. We will
# collect the final LSTM states after each batch, then feed
# them back in as the initial state for the next batch
batch_size = options['batch_size']
unroll_steps = options['unroll_steps']
n_train_tokens = options.get('n_train_tokens', 768648884)
n_tokens_per_batch = batch_size * unroll_steps * n_gpus
n_batches_per_epoch = int(n_train_tokens / n_tokens_per_batch)
n_batches_total = options['n_epochs'] * n_batches_per_epoch
print("Training for %s epochs and %s batches" % (
options['n_epochs'], n_batches_total))
# get the initial lstm states
init_state_tensors = []
final_state_tensors = []
for model in models:
init_state_tensors.extend(model.init_lstm_state)
final_state_tensors.extend(model.final_lstm_state)
char_inputs = 'char_cnn' in options
if char_inputs:
max_chars = options['char_cnn']['max_characters_per_token']
print("Using char inputs")
if not char_inputs:
feed_dict = {
model.token_ids:
np.zeros([batch_size, unroll_steps], dtype=np.int64)
for model in models
}
else:
feed_dict = {
model.tokens_characters:
np.zeros([batch_size, unroll_steps, max_chars],
dtype=np.int32)
for model in models
}
feed_dict.update({
model.tokens_acoustic: np.zeros([batch_size, unroll_steps, options['acou_cnn']['max_acoustic_size_per_token'], options['acou_cnn']['acoustics']['dim']])
for model in models
})
if bidirectional:
if not char_inputs:
feed_dict.update({
model.token_ids_reverse:
np.zeros([batch_size, unroll_steps], dtype=np.int64)
for model in models
})
else:
feed_dict.update({
model.tokens_characters_reverse:
np.zeros([batch_size, unroll_steps, max_chars],
dtype=np.int32)
for model in models
})
feed_dict.update({
model.tokens_acoustic_reverse: np.zeros([batch_size, unroll_steps, options['acou_cnn']['max_acoustic_size_per_token'], options['acou_cnn']['acoustics']['dim']])
for model in models
})
init_state_values, init_embed_acoustic = sess.run([init_state_tensors, model.embedding_acoustic], feed_dict=feed_dict)
t1 = time.time()
end_training = False
for epoch_no in range(1,options['n_epochs']+1) :
data_gen = data.iter_batches(batch_size * n_gpus, unroll_steps)
for batch_no, batch in enumerate(data_gen, start=1):
# slice the input in the batch for the feed_dict
X = batch
feed_dict = {}
for s1, s2 in zip(init_state_tensors, init_state_values) :
feed_dict.update({t: v for t, v in zip(s1, s2)})
for k in range(n_gpus):
model = models[k]
start = k * batch_size
end = (k + 1) * batch_size
feed_dict.update(
_get_feed_dict_from_X(X, start, end, model,
char_inputs, bidirectional)
)
if 'tokens_acoustic' not in X:
# Add dummy acoustic
feed_dict.update({
model.tokens_acoustic:
np.zeros([batch_size, unroll_steps, options['acou_cnn']['max_acoustic_size_per_token'], 74]),
model.tokens_acoustic_reverse:
np.zeros([batch_size, unroll_steps, options['acou_cnn']['max_acoustic_size_per_token'], 74])
})
# This runs the train_op, summaries and the "final_state_tensors"
# which just returns the tensors, passing in the initial
# state tensors, token ids and next token ids
if batch_no % 500 != 0:
ret = sess.run(
[train_op, summary_op, train_perplexity] +
final_state_tensors,
feed_dict=feed_dict
)
# first three entries of ret are:
# train_op, summary_op, train_perplexity
# last entries are the final states -- set them to
# init_state_values
# for next batch
init_state_values = ret[3:]
else:
# also run the histogram summaries
ret = sess.run(
[train_op, summary_op, train_perplexity, hist_summary_op, model.embedding_acoustic_gated] +
final_state_tensors,
feed_dict=feed_dict
)
init_state_values = ret[5:]
if batch_no % 500 == 0:
summary_writer.add_summary(ret[3], batch_no)
if batch_no % 100 == 0:
# write the summaries to tensorboard and display perplexity
summary_writer.add_summary(ret[1], batch_no)
print("Epoch %s Batch %s, train_perplexity=%s" % (epoch_no, batch_no, ret[2]))
print("Total time: %s" % (time.time() - t1))
if (batch_no % 1250 == 0) or (batch_no == n_batches_total):
# save the model
print("Saving model")
checkpoint_path = os.path.join(tf_save_dir, 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=global_step)
# embed_acoustic = sess.run(model.embedding_acoustic, feed_dict=feed_dict)
if batch_no == n_batches_total:
# done training!
end_training = True
print("Saving model at end of epoch")
checkpoint_path = os.path.join(tf_save_dir, 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=global_step)
if end_training :
print("End of training")
break
def clip_by_global_norm_summary(t_list, clip_norm, norm_name, variables):
# wrapper around tf.clip_by_global_norm that also does summary ops of norms
# compute norms
# use global_norm with one element to handle IndexedSlices vs dense
norms = [tf.global_norm([t]) for t in t_list]
# summary ops before clipping
summary_ops = []
for ns, v in zip(norms, variables):
name = 'norm_pre_clip/' + v.name.replace(":", "_")
summary_ops.append(tf.summary.scalar(name, ns))
# clip
clipped_t_list, tf_norm = tf.clip_by_global_norm(t_list, clip_norm)
# summary ops after clipping
norms_post = [tf.global_norm([t]) for t in clipped_t_list]
for ns, v in zip(norms_post, variables):
name = 'norm_post_clip/' + v.name.replace(":", "_")
summary_ops.append(tf.summary.scalar(name, ns))
summary_ops.append(tf.summary.scalar(norm_name, tf_norm))
return clipped_t_list, tf_norm, summary_ops
def clip_grads(grads, options, do_summaries, global_step):
# grads = [(grad1, var1), (grad2, var2), ...]
def _clip_norms(grad_and_vars, val, name):
# grad_and_vars is a list of (g, v) pairs
grad_tensors = [g for g, v in grad_and_vars]
vv = [v for g, v in grad_and_vars]
scaled_val = val
if do_summaries:
clipped_tensors, g_norm, so = clip_by_global_norm_summary(
grad_tensors, scaled_val, name, vv)
else:
so = []
clipped_tensors, g_norm = tf.clip_by_global_norm(
grad_tensors, scaled_val)
ret = []
for t, (g, v) in zip(clipped_tensors, grad_and_vars):
ret.append((t, v))
return ret, so
all_clip_norm_val = options['all_clip_norm_val']
ret, summary_ops = _clip_norms(grads, all_clip_norm_val, 'norm_grad')
assert len(ret) == len(grads)
return ret, summary_ops
def test(options, ckpt_file, data, batch_size=256):
'''
Get the test set perplexity!
'''
bidirectional = options.get('bidirectional', False)
char_inputs = 'char_cnn' in options
if char_inputs:
max_chars = options['char_cnn']['max_characters_per_token']
max_acou = options['acou_cnn']['max_acoustic_size_per_token']
acou_dim = options['acou_cnn']['acoustics']['dim']
unroll_steps = 1
config = tf.ConfigProto(allow_soft_placement=True)
with tf.Session(config=config) as sess:
with tf.device('/gpu:0'), tf.variable_scope('lm'):
test_options = dict(options)
# NOTE: the number of tokens we skip in the last incomplete
# batch is bounded above by batch_size * unroll_steps
test_options['batch_size'] = batch_size
test_options['unroll_steps'] = unroll_steps
test_options['dropout'] = 0
# model = LanguageModel(test_options, False)
model = MultimodalModel(test_options, False)
# we use the "Saver" class to load the variables
loader = tf.train.Saver()
loader.restore(sess, ckpt_file)
# model.total_loss is the op to compute the loss
# perplexity is exp(loss)
init_state_tensors = model.init_lstm_state
final_state_tensors = model.final_lstm_state
if not char_inputs:
feed_dict = {
model.token_ids:
np.zeros([batch_size, unroll_steps], dtype=np.int64)
}
if bidirectional:
feed_dict.update({
model.token_ids_reverse:
np.zeros([batch_size, unroll_steps], dtype=np.int64)
})
else:
feed_dict = {
model.tokens_acoustic:
np.zeros([batch_size, unroll_steps, max_acou, acou_dim],
dtype=float),
model.tokens_characters:
np.zeros([batch_size, unroll_steps, max_chars],
dtype=np.int32)
}
if bidirectional:
feed_dict.update({
model.tokens_characters_reverse:
np.zeros([batch_size, unroll_steps, max_chars],
dtype=np.int32)
})
feed_dict.update({
model.tokens_acoustic: np.zeros([batch_size, unroll_steps, test_options['acou_cnn']['max_acoustic_size_per_token'], test_options['acou_cnn']['acoustics']['dim']])
})
if bidirectional :
feed_dict.update({
model.tokens_acoustic_reverse: np.zeros([batch_size, unroll_steps, test_options['acou_cnn']['max_acoustic_size_per_token'], test_options['acou_cnn']['acoustics']['dim']])
})
init_state_values = sess.run(
init_state_tensors,
feed_dict=feed_dict)
t1 = time.time()
batch_losses = []
total_loss = 0.0
try :
for batch_no, batch in enumerate(
data.iter_batches(batch_size, unroll_steps), start=1):
# slice the input in the batch for the feed_dict
X = batch
feed_dict = {}
for s1, s2 in zip(init_state_tensors, init_state_values) :
feed_dict.update({t: v for t, v in zip(s1, s2)})
feed_dict.update(
_get_feed_dict_from_X(X, 0, X['token_ids'].shape[0], model,
char_inputs, bidirectional)
)
if 'tokens_acoustic' not in X:
# Add dummy acoustic
feed_dict.update({
model.tokens_acoustic:
np.zeros([batch_size, unroll_steps, options['acou_cnn']['max_acoustic_size_per_token'], 74]),
model.tokens_acoustic_reverse:
np.zeros([batch_size, unroll_steps, options['acou_cnn']['max_acoustic_size_per_token'], 74])
})
ret = sess.run(
[model.total_loss, final_state_tensors],
feed_dict=feed_dict
)
loss, init_state_values = ret
batch_losses.append(loss)
batch_perplexity = np.exp(loss)
total_loss += loss
avg_perplexity = np.exp(total_loss / batch_no)
print("batch=%s, batch_perplexity=%s, avg_perplexity=%s, time=%s" %
(batch_no, batch_perplexity, avg_perplexity, time.time() - t1))
except StopIteration :
pass
avg_loss = np.mean(batch_losses)
print("FINSIHED! AVERAGE PERPLEXITY = %s" % np.exp(avg_loss))
return np.exp(avg_loss)
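# Perplexity is exp(mean negative log-likelihood), so e.g. an average loss of
# 4.6 nats corresponds to a perplexity of roughly exp(4.6) ~= 99.5, which is a
# useful sanity check when reading the avg_perplexity printed above.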
def extract(options, ckpt_file, data, batch_size=256, unroll_steps=20, outfile='extracted_dataset.pkl'):
'''
Extract embeddings from the model
'''
bidirectional = options.get('bidirectional', False)
char_inputs = 'char_cnn' in options
if char_inputs:
max_chars = options['char_cnn']['max_characters_per_token']
max_acou = options['acou_cnn']['max_acoustic_size_per_token']
acou_dim = options['acou_cnn']['acoustics']['dim']
config = tf.ConfigProto(allow_soft_placement=True)
new_dataset = {}
new_dataset['embeddings'] = {}
new_dataset['labels'] = {}
with tf.Session(config=config) as sess:
with tf.device('/gpu:0'), tf.variable_scope('lm'):
test_options = dict(options)
# NOTE: the number of tokens we skip in the last incomplete
# batch is bounded above by batch_size * unroll_steps
test_options['batch_size'] = batch_size
test_options['unroll_steps'] = unroll_steps
test_options['dropout'] = 0
# model = LanguageModel(test_options, False)
model = MultimodalModel(test_options, False)
# we use the "Saver" class to load the variables
loader = tf.train.Saver()
loader.restore(sess, ckpt_file)
# model.total_loss is the op to compute the loss
# perplexity is exp(loss)
init_state_tensors = model.init_lstm_state
final_state_tensors = model.final_lstm_state
if not char_inputs:
feed_dict = {
model.token_ids:
np.zeros([batch_size, unroll_steps], dtype=np.int64)
}
if bidirectional:
feed_dict.update({
model.token_ids_reverse:
np.zeros([batch_size, unroll_steps], dtype=np.int64)
})
else:
feed_dict = {
model.tokens_acoustic:
np.zeros([batch_size, unroll_steps, max_acou, acou_dim],
dtype=float),
model.tokens_characters:
np.zeros([batch_size, unroll_steps, max_chars],
dtype=np.int32)
}
if bidirectional:
feed_dict.update({
model.tokens_characters_reverse:
np.zeros([batch_size, unroll_steps, max_chars],
dtype=np.int32)
})
feed_dict.update({
model.tokens_acoustic: np.zeros([batch_size, unroll_steps, test_options['acou_cnn']['max_acoustic_size_per_token'], test_options['acou_cnn']['acoustics']['dim']])
})
if bidirectional :
feed_dict.update({
model.tokens_acoustic_reverse: np.zeros([batch_size, unroll_steps, test_options['acou_cnn']['max_acoustic_size_per_token'], test_options['acou_cnn']['acoustics']['dim']])
})
init_state_values = sess.run(
init_state_tensors,
feed_dict=feed_dict)
t1 = time.time()
batch_losses = []
total_loss = 0.0
for batch_no, batch in enumerate(
data.iter_sentences(batch_size, unroll_steps), start=1):
# slice the input in the batch for the feed_dict
X = batch
feed_dict = {}
for s1, s2 in zip(init_state_tensors, init_state_values) :
feed_dict.update({t: v for t, v in zip(s1, s2)})
# feed_dict = {t: v for t, v in zip(
# init_state_tensors, init_state_values)}
feed_dict.update(
_get_feed_dict_from_X(X, 0, X['token_ids'].shape[0], model,
char_inputs, bidirectional)
)
ret = sess.run(
[final_state_tensors, model.elmo_outputs],
feed_dict=feed_dict
)
init_state_values, elmo_outputs = ret
for i in range(batch_size):
sentence_length = batch['lengths'][i]
if sentence_length == 0:
print(f"Skipping 0 length sentence for key {batch['keys'][i]}")
continue
key = batch['keys'][i]
new_dataset['embeddings'][key] = [
|
np.average(elmo_outputs[j][i, 0:sentence_length], axis=0)
|
numpy.average
|
import os
import sys
ROOT_DIR = os.path.abspath("../")
sys.path.append(ROOT_DIR)
sys.path.append(os.path.join(ROOT_DIR,"Mask_RCNN"))
import h5py
import numpy as np
from preprocessing.utils import make_colormap,find_start_count, save_instances
from preprocessing.discard import class_and_size_discard
from sort.sort import Sort, KalmanBoxTracker
from scipy.spatial import distance
from shutil import copyfile
from sklearn.utils.linear_assignment_ import linear_assignment
def find_nn(query, array):
nn = 0
min_dist = float('inf')
for i in range(array.shape[0]):
dist = distance.euclidean(query, array[i, :])
if (dist < min_dist):
nn = i
min_dist = dist
return nn
def iou(bb_test,bb_gt):
"""
Computes IoU between two bboxes in the form [x1,y1,x2,y2]
"""
xx1 = np.maximum(bb_test[0], bb_gt[0])
yy1 = np.maximum(bb_test[1], bb_gt[1])
xx2 = np.minimum(bb_test[2], bb_gt[2])
yy2 = np.minimum(bb_test[3], bb_gt[3])
w = np.maximum(0., xx2 - xx1)
h = np.maximum(0., yy2 - yy1)
wh = w * h
o = wh / ((bb_test[2]-bb_test[0])*(bb_test[3]-bb_test[1])
+ (bb_gt[2]-bb_gt[0])*(bb_gt[3]-bb_gt[1]) - wh)
return(o)
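# Quick hand-computed check of the IoU above:
# bb_test = [0, 0, 2, 2], bb_gt = [1, 1, 3, 3]
# intersection: w = h = 1 -> area 1; each box has area 4; union = 4 + 4 - 1 = 7
# iou(bb_test, bb_gt) == 1 / 7 ~= 0.143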
def iou_track(data_file):
f = h5py.File(data_file, "r+")
start_count = find_start_count(list(f.keys()))
first_frame = "frame{}".format(start_count)
track_n = f[first_frame]['masks'].shape[2]
index_array = np.arange(1,track_n+1)
index_array = np.stack((index_array,index_array),axis =1)
f.create_dataset("{}/IDs".format(first_frame), data=index_array)
for i in range(start_count+1, f['frame_number'].value[0]):
frame = "frame{}".format(i)
previous_frame = "frame{}".format(i-1)
previous_mask_n = f[previous_frame]['masks'].shape[2]
current_mask_n = f[frame]['masks'].shape[2]
index_array = np.zeros((current_mask_n,2))
ious = np.zeros((current_mask_n, previous_mask_n))
for mask_id in range(current_mask_n):
current_box = f[frame]['rois'].value[mask_id,:]
ious[mask_id,:] = np.array([iou(f[previous_frame]['rois'].value[previous_id,:], current_box) for previous_id in range(f[previous_frame]['rois'].shape[0])])
assignments = linear_assignment(-ious)
assigned_ids = []
for assignment in assignments:
assigned_ids.append(assignment[0])
if (ious[assignment[0], assignment[1]] > 0):
index_array[assignment[0], :] = f[previous_frame]['IDs'].value[assignment[1], 0]
else:
track_n += 1
index_array[assignment[0], :] = track_n
if (len(assignments) < ious.shape[0]):
missing_ids = [i for i in range(current_mask_n) if i not in assigned_ids]
for missing_id in missing_ids:
track_n += 1
index_array[missing_id, :] = track_n
f.create_dataset("{}/IDs".format(frame), data=index_array)
f["tracks_n"][0] = track_n
f.close()
def track(data_file, reverse= False, verbose = 0):
if(verbose==1):
print("Opening File...")
f = h5py.File(data_file, "r+")
mot_tracker = Sort()
tracks_n = f["tracks_n"].value[0]
start_count = find_start_count(list(f.keys()))
if(not reverse):
frame_indices = range(start_count, f['frame_number'].value[0])
else:
frame_indices = reversed(range(start_count, f['frame_number'].value[0]))
if(verbose==1):
print("Starting loop...")
for i in frame_indices :
frame = "frame{}".format(i)
bbox_handle = f[frame]['rois']
detection = bbox_handle.value
scores = f[frame]['scores'].value
number_of_masks = scores.shape[0]
detection_with_scores = np.hstack((detection, np.reshape(scores, (-1, 1))))
if(verbose== 1):
print("detections with scores:")
print(detection_with_scores)
track_bbs_ids = mot_tracker.update(detection_with_scores)
if(verbose== 1):
print("tracked bbs:")
print(track_bbs_ids)
# Associate the track_BBs with the original bbs
# for each of the track bbs
# find the nearest neighbour in the original detections
# associate the ID with the index of the original detection
index_array = np.zeros(number_of_masks)
if verbose==1 : print("number of masks {}".format(number_of_masks))
for track in track_bbs_ids:
nn_index = find_nn(track[:-1], detection)
index_array[nn_index] = track[-1]
if(verbose==1):
print("The index array is")
print(index_array)
max_idx = np.amax(index_array) if number_of_masks > 0 else 0
if(max_idx> tracks_n):
tracks_n = max_idx
ID_dataset_key = "{}/IDs".format(frame)
if(ID_dataset_key in f):
f[ID_dataset_key][:,1]= index_array
else:
f.create_dataset(ID_dataset_key,(index_array.shape[0],2))
f[ID_dataset_key][:, 0] = index_array
f["tracks_n"][0] = tracks_n
KalmanBoxTracker.count = 0
f.close()
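# Sketch of the nearest-neighbour ID assignment performed above (values hypothetical):
# with two detections and track_bbs_ids = [[*bbox_a, 3.], [*bbox_b, 7.]], each tracked
# box is matched to the closest original detection, so index_array could end up as
# [3., 7.] -- one SORT track ID per detection row, and 0 where no track matched.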
def recursive_update(f,change_from,change_to, col_idx, i, start_count):
max_idx = f['frame_number'].value[0] + start_count -1
for j in range(i, f['frame_number'].value[0]):
if(i > max_idx):
break
frame = "frame{}".format(j)
IDs = f[frame]['IDs'].value
if(np.any(IDs[:,col_idx]==change_from)):
idx = np.where(IDs[:, col_idx] == change_from)
f[frame]['IDs'][idx[0][0],col_idx]=change_to
else:
break
def full_recursive_update(f,change_from,change_to, col_idx, i, start_count):
max_idx = f['frame_number'].value[0] + start_count -1
if(i > max_idx):
return
frame = "frame{}".format(i)
IDs = f[frame]['IDs'].value
if(np.any(IDs[:,col_idx]==change_from)):
idx = np.where(IDs[:, col_idx] == change_from)
f[frame]['IDs'][idx[0][0],col_idx]=change_to
recursive_update(f,change_from,change_to,col_idx,i+1,start_count)
def consolidate_indices(data_file, target_file= None, verbose = 0):
if(target_file==None):
target_file=data_file
else:
if verbose==1 : print("Creating target file...")
#assert not os.path.exists(target_file)
copyfile(data_file, target_file)
f = h5py.File(target_file, "r+")
tracks_n = f["tracks_n"].value
start_count = find_start_count(list(f.keys()))
for i in range(start_count, f['frame_number'].value[0]-1) :
frame1 = "frame{}".format(i)
frame2 = "frame{}".format(i+1)
IDs_1= f[frame1]['IDs'].value
IDs_2 = f[frame2]['IDs'].value
for pair_index,id_pair_1 in enumerate(IDs_1):
if(np.any(id_pair_1 == 0)):
id_pair_1[id_pair_1 == 0] = tracks_n+1
f[frame1]['IDs'][pair_index,:]= id_pair_1
tracks_n+=1
if(np.any(np.all(id_pair_1==IDs_2,axis=1))):
continue
elif(np.any(id_pair_1[0]==IDs_2[:,0])): #and not id_pair_1[0]==0
idx= np.where(IDs_2[:,0]==id_pair_1[0])
change_to = id_pair_1[1]
change_from = IDs_2[idx[0][0],1]
if(change_from==0):
f[frame2]['IDs'][idx[0][0],1]=change_to
else:
recursive_update(f,change_from, change_to,1 ,i+1, start_count)
elif (np.any(id_pair_1[1] == IDs_2[:, 1]) ): #and not id_pair_1[1] == 0
idx=
|
np.where(IDs_2[:, 1] == id_pair_1[1])
|
numpy.where
|
"""
Module containing classes for storing detector data.
``SERPENT`` is capable of producing detectors, or tallies from MCNP,
with a variety of bin structures. These include, but are not limited to,
material regions, reaction types, energy bins, spatial meshes, and
universe bins.
The detectors contained in this module are tasked with storing such
data and proving easy access to the data, as well as the underlying
bins used to define the detector.
Detector Types
--------------
* :class:`~serpentTools.objects.detectors.Detector`
* :class:`~serpentTools.objects.detectors.CartesianDetector`
* :class:`~serpentTools.objects.detectors.HexagonalDetector`
* :class:`~serpentTools.objects.detectors.CylindricalDetector`
* :class:`~serpentTools.objects.detectors.SphericalDetector`
"""
from math import sqrt, pi
from warnings import warn
from numbers import Real
from collections.abc import Mapping
from numpy import (
unique, inf, hstack, arange, log, divide, asfortranarray,
ndarray, asarray,
)
from matplotlib.patches import RegularPolygon
from matplotlib.collections import PatchCollection
from matplotlib.pyplot import gca
from serpentTools.messages import (
warning, SerpentToolsException, error, BAD_OBJ_SUBJ,
debug,
)
from serpentTools.objects.base import NamedObject
from serpentTools.plot import plot, cartMeshPlot
from serpentTools.utils import (
magicPlotDocDecorator, formatPlot, setAx_xlims, setAx_ylims,
addColorbar, normalizerFactory, DETECTOR_PLOT_LABELS,
compareDictOfArrays,
)
from serpentTools.utils.compare import getLogOverlaps
from serpentTools.io.hooks import matlabHook
__all__ = ['Detector', 'CartesianDetector', 'HexagonalDetector',
'CylindricalDetector', 'SphericalDetector']
class Detector(NamedObject):
"""Class for storing detector data with multiple bins
For detectors with spatial meshes, including rectilinear,
hexagonal, cylindrical, or spherical meshes, refer to companion
classes :class:`serpentTools.CartesianDetector`,
:class:`serpentTools.HexagonalDetector`,
:class:`serpentTools.CylindricalDetector`, or
:class:`serpentTools.SphericalDetector`
If simply the tally bins are available, it is recommended
to use the :meth:`fromTallyBins` class method. This will
reshape the data and separate the mean tally [second to last
column] and relative errors [last column].
Parameters
----------
name : str
Name of this detector
bins : numpy.ndarray, optional
Full 2D tally data from detector file, including tallies and
errors in last two columns
tallies : numpy.ndarray, optional
Reshaped tally data such that each dimension corresponds
to a unique bin, such as energy or spatial bin.
errors : numpy.ndarray, optional
Reshaped error data such that each dimension corresponds
to a unique bin, such as energy or spatial bin. Note:
this is a relative error as it would appear in the
output file
indexes : iterable of string, optional
Iterable naming the bins that correspond to reshaped
:attr:`tally` and :attr:`errors`.
grids : dict, optional
Supplemental grids that may be supplied to this detector,
including energy points or spatial coordinates.
Attributes
----------
name : str
Name of this detector
bins : numpy.ndarray or None
Full 2D tally data from detector file, including tallies and
errors in last two columns
tallies : numpy.ndarray or float or None
Reshaped tally data such that each dimension corresponds
to a unique bin, such as energy or spatial bin.
errors : numpy.ndarray or float or None
Reshaped error data such that each dimension corresponds
to a unique bin, such as energy or spatial bin. Note:
this is a relative error as it would appear in the
output file
indexes : tuple or None
Iterable naming the bins that correspond to reshaped
:attr:`tally` and :attr:`errors`. The tuple
``(energy, ymesh, xmesh)`` indicates that :attr:`tallies`
should have three dimensions corresponding to various
energy, y-position, and x-position bins. Must be set
after :attr:`tallies` or :attr:`errors` and agree with
the shape of each
grids : dict
Dictionary containing grid information for binned quantities like
energy or time.
energy : numpy.ndarray or None
Potential underlying energy grid in MeV. Will be ``(n_ene, 3)``, where
``n_ene`` is the number of values in the energy grid. Each
row ``energy[j]`` will be the low point, high point, and mid point
of the energy bin ``j``.
Raises
------
ValueError
If some spatial grid is found in ``indexes`` during creation. This
class is ill-suited for these problems. Refer to the companion
classes mentioned above.
IndexError
If the shapes of ``bins``, ``tallies``, and ``errors`` are inconsistent
"""
DET_COLS = (
'value', 'time', 'energy', 'universe', 'cell', 'material', 'lattice',
'reaction', 'zmesh', 'ymesh', 'xmesh', 'tally', 'error')
_CBAR_LABELS = {
'tallies': 'Tally data',
'errors': 'Relative Uncertainty',
}
def __init__(self, name, bins=None, tallies=None, errors=None,
indexes=None, grids=None):
NamedObject.__init__(self, name)
self._bins = None
self._tallies = None
self._errors = None
self.bins = bins
self.tallies = tallies
self.errors = errors
self._indexes = None
if indexes is not None:
self.indexes = indexes
self.grids = grids
@property
def bins(self):
return self._bins
@bins.setter
def bins(self, value):
if value is None:
self._bins = None
return
# Coerce to numpy array, check shape
# Ensure data is ordered columnwise, and
# the array owns the underlying data
bins = asfortranarray(value).copy()
if len(bins.shape) != 2 or (
len(bins.shape) == 2 and bins.shape[1] not in {12, 13}):
raise ValueError(
"Data does not appear to be Serpent 2 detector data. Shape "
"should be (N, 12) or (N, 13), is {}".format(bins.shape))
# Check that this is not a Serpent 1 detector
if bins[0, -3] != 1:
raise ValueError(
"Data does not appear to be Serpent 2 detector data. Appears "
"to have a scores column, indicated unsupported Serpent 1 "
"data: {}.".format(bins[0]))
self._bins = bins
@property
def tallies(self):
return self._tallies
@tallies.setter
def tallies(self, tallies):
if tallies is None:
self._tallies = tallies
return
if not isinstance(tallies, (Real, ndarray)):
raise TypeError("Tallies must be array or scalar, not {}".format(
type(tallies)))
if self._tallies is None:
self._tallies = tallies
return
# Handle scalar / single values arrays
# TODO Kind of clunky. Maybe a dedicated ScalarDetector?
if isinstance(tallies, Real):
if isinstance(self._tallies, Real):
self._tallies = tallies
else:
raise TypeError("Tallies are current array, not scalar")
else:
if isinstance(self._tallies, Real):
raise TypeError("Tallies are currently scalar, nor array")
if tallies.shape != self._tallies.shape:
raise IndexError(
"Shape of tally data is not consistent. Should be {}, "
"is {}".format(self._tallies.shape, tallies.shape))
self._tallies = tallies
@property
def errors(self):
return self._errors
@errors.setter
def errors(self, errors):
if errors is None:
self._errors = errors
return
if not isinstance(errors, (Real, ndarray)):
raise TypeError(
"Tallies must be array or scalar, not {}".format(type(errors)))
if self._errors is None:
self._errors = errors
return
# Handle scalar / single values arrays
# TODO Kind of clunky. Maybe a dedicated ScalarDetector?
if isinstance(errors, Real):
if isinstance(self._errors, Real):
self._errors = errors
else:
raise TypeError("Tallies are current array, not scalar")
else:
if isinstance(self._errors, Real):
raise TypeError("Tallies are currently scalar, nor array")
if errors.shape != self._errors.shape:
raise IndexError(
"Shape of tally data is not consistent. Should be {}, "
"is {}".format(self._errors.shape, errors.shape))
self._errors = errors
@property
def energy(self):
return self.grids.get("E")
@property
def grids(self):
return self._grids
@grids.setter
def grids(self, grids):
if grids is None:
self._grids = {}
else:
if not isinstance(grids, Mapping):
raise TypeError(
"Grids must be Mapping type, not {}".format(type(grids)))
self._grids = grids
@property
def indexes(self):
return self._indexes
@indexes.setter
def indexes(self, ix):
if self._tallies is None:
if self._errors is None:
raise AttributeError("Tally and error attributes not set")
nItems = len(self._errors.shape)
else:
nItems = len(self._tallies.shape)
if len(ix) != nItems:
raise ValueError(
"Expected {} items for indexes, got {}".format(
nItems, len(ix)))
self._indexes = tuple(ix)
@classmethod
def fromTallyBins(cls, name, bins, grids=None):
"""Create a detector instance from 2D detector data
Parameters
----------
name : str
Name of this detector
bins : numpy.ndarray
2D array taken from Serpent. Expected to have
either 12 or 13 columns, where the latter indicates
a time bin has been added.
grids : dict, optional
Dictionary of underlying energy, space, and/or time data.
Returns
-------
Detector
Raises
------
ValueError
If the tally data does not appear to be Serpent 2 tally data
"""
bins = asfortranarray(bins)
if len(bins.shape) != 2:
raise ValueError(
"Array does not appear to be Serpent tally data. Shape is {}, "
"should be 2D".format(bins.shape))
if bins.shape[1] not in (12, 13):
raise ValueError(
"Array does not appear to be Serpent tally data. Expected 12 "
" or 13 columns, not {}".format(bins.shape[1]))
if grids is None:
grids = {}
elif not isinstance(grids, Mapping):
raise TypeError("Grid data is not dictionary-like")
det = cls(name, bins=bins, grids=grids)
tallies, errors, indexes = det.reshapedBins()
det.tallies = tallies
det.errors = errors
det.indexes = indexes
return det
def reshapedBins(self):
"""Obtain multi-dimensional tally, error, and index data
Returns
-------
tallies : numpy.ndarray
Potentially multi-dimensional array corresponding to
tally data along each bin index
errors : numpy.ndarray
Potentially multi-dimensional array corresponding to
tally relative error along each bin index
indexes : list of str
Ordering of named bin information, e.g. ``"xmesh"``,
``"energy"``, corresponding to axis in ``tallies`` and ``errors``
Examples
--------
A detector is created with a single bin with two bins values.
These could represent tallying two different reaction rates
>>> import numpy
>>> from serpentTools import Detector
>>> bins = numpy.ones((2, 12))
>>> bins[1, 0] = 2
>>> bins[1, 4] = 2
>>> bins[:, -2:] = [
... [5.0, 0.1],
... [10.0, 0.2]]
>>> det = Detector("reshape", bins=bins)
>>> tallies, errors, indexes = det.reshapedBins()
>>> tallies
array([5.0, 10.0])
>>> errors
array([0.1, 0.2])
>>> indexes
["reaction", ]
"""
assert self.bins is not None, "No bin data present on {}".format(self)
if self.bins.shape[0] == 1:
return self.bins[0, -2], self.bins[0, -1], {}
shape = []
indexes = []
# See if the time column has been inserted
nameStart = 2 if self.bins.shape[1] == 12 else 1
for colIx, indexName in enumerate(self.DET_COLS[nameStart:-2],
start=1):
uniqueVals = unique(self.bins[:, colIx])
if len(uniqueVals) > 1:
indexes.append(indexName)
shape.append(len(uniqueVals))
tallies = self.bins[:, -2].reshape(shape)
errors = self.bins[:, -1].reshape(shape)
return tallies, errors, indexes
def slice(self, fixed, data='tallies'):
"""
Return a view of the reshaped array where certain axes are fixed
Parameters
----------
fixed: dict
dictionary to aid in the restriction on the multidimensional
array. Keys correspond to the various grids present in
:attr:`indexes` while the values are used to
data: {'tallies', 'errors'}
Which data set to slice
Returns
-------
:class:`numpy.ndarray`
View into the respective data where certain dimensions
have been removed
Raises
------
AttributeError
If ``data`` is not supported
"""
if data not in {"tallies", "errors"}:
raise AttributeError(
'Data argument {} not in allowed options'
'\ntallies, errors'.format(data))
work = getattr(self, data)
if work is None:
raise AttributeError("{} not setup on {}".format(data, self))
if not fixed:
return work
return work[self._getSlices(fixed)]
def _getSlices(self, fixed):
"""
Return a list of slice operators for each axis in reshaped data
Parameters
----------
fixed: dict or None
Dictionary where keys are strings pointing to dimensions in
"""
if fixed is None:
return (slice(None), ) * len(self.indexes)
keys = set(fixed)
slices = tuple()
for key in self.indexes:
if key in keys:
slices += fixed[key],
keys.remove(key)
else:
slices += slice(None),
if any(keys):
warning(
'Could not find arguments in index that match the following'
' requested slice keys: {}'.format(', '.join(keys)))
return slices
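# Example of the slice tuple built above: with indexes = ('energy', 'ymesh', 'xmesh')
# and fixed = {'energy': 0}, _getSlices returns (0, slice(None), slice(None)),
# i.e. the energy axis is fixed while the spatial axes are left untouched.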
def _getPlotGrid(self, qty):
# Try with the first letter only
grids = self.grids.get(qty[0].upper())
if grids is None:
# The phi-grid has three letters!
grids = self.grids.get(qty.upper())
if grids is not None:
return
|
hstack((grids[:, 0], grids[-1, 1]))
|
numpy.hstack
|
from rendering import SimpleImageViewer
import numpy as np
from numpy.random import RandomState
from skimage.transform import resize
import time
def neighbour_count(grid):
count = np.zeros_like(grid).astype(np.uint8)
count[1:] += grid[:-1]
count[:-1] += grid[1:]
count[:, 1:] += grid[:, :-1]
count[:, :-1] += grid[:, 1:]
# diagonal neighbours
count[1:, 1:] += grid[:-1, :-1]
count[1:, :-1] += grid[:-1, 1:]
count[:-1, 1:] += grid[1:, :-1]
count[:-1, :-1] += grid[1:, 1:]
return count
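# Worked example of the 8-neighbour count above: for a 3x3 grid of all ones,
# the centre cell has 8 live neighbours, each edge cell 5 and each corner 3:
# neighbour_count(np.ones((3, 3), dtype=bool))
# -> [[3, 5, 3],
# [5, 8, 5],
# [3, 5, 3]]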
class CellularAutomata:
def __init__(self, grid_size):
self.grid_size = grid_size
self.grid = np.zeros((grid_size, grid_size)).astype(bool)
self.background = np.ones((grid_size, grid_size, 3), dtype=np.uint8)*255
ints = np.random.randint(0, 244, grid_size**2*3).astype(np.uint8)
repeating = np.random.randint(0, 244, 30)
repeats = int(grid_size/10*grid_size)
repeating = np.tile(repeating, (1, repeats)).reshape(grid_size, grid_size, 3)
self.colors = ints.reshape(grid_size, grid_size, 3)
self.colors = repeating
self.viewer = None
def random_init(self, seed=None):
if seed is None:
seed = np.random.randint(int(1e6))
np_random = RandomState(seed)
self.grid = np_random.choice([0,1], self.grid_size**2).reshape(self.grid_size, self.grid_size).astype(bool)
def random_init_middle(self, side_offset, seed=None):
if seed is None:
seed = np.random.randint(int(1e6))
np_random =
|
RandomState(seed)
|
numpy.random.RandomState
|
import itertools
import numpy as np
import pandas as pd
import random
import sys
import utils
__author__ = "<NAME>"
__version__ = "CS224u, Stanford, Spring 2021"
class GloVe:
def __init__(self,
n=100,
xmax=100,
alpha=0.75,
max_iter=100,
eta=0.05,
tol=1e-5,
display_progress=True):
"""
Basic GloVe. This is mainly here as a reference implementation.
We recommend using `torch_glove.py` instead.
Parameters
----------
df : pd.DataFrame or np.array
This must be a square matrix of co-occurrence counts
(the argument to `fit`, not to the constructor).
n : int (default: 100)
The dimensionality of the output vectors.
xmax : int (default: 100)
Words with frequency greater than this are given weight 1.0.
Words with frequency under this are given weight (c/xmax)**alpha
where c is their count in mat (see the paper, eq. (9)).
alpha : float (default: 0.75)
Exponent in the weighting function (see the paper, eq. (9)).
max_iter : int (default: 100)
Number of training epochs.
eta : float (default: 0.05)
Controls the rate of SGD weight updates.
tol : float (default: 1e-5)
Stopping criterion for the loss.
display_progress : bool (default: True)
Whether to print iteration number and current error to stdout.
"""
self.n = n
self.xmax = xmax
self.alpha = alpha
self.max_iter = max_iter
self.eta = eta
self.tol = tol
self.display_progress = display_progress
def fit(self, df):
"""
Learn the GloVe matrix.
Parameters
----------
df : pd.DataFrame or np.array, shape `(n_vocab, n_vocab)`
This should be a matrix of (possibly scaled) co-occurrence
counts.
Returns
-------
pd.DataFrame or np.array, shape `(n_vocab, self.n)`
The type will be the same as the user's `df`. If it's a
`pd.DataFrame`, the index will be the same as `df.index`.
"""
X = self.convert_input_to_array(df)
m = X.shape[0]
# Parameters:
W = utils.randmatrix(m, self.n) # Word weights.
C = utils.randmatrix(m, self.n) # Context weights.
B = utils.randmatrix(2, m) # Word and context biases.
# Precomputable GloVe values:
X_log = utils.log_of_array_ignoring_zeros(X)
X_weights = (np.minimum(X, self.xmax) / self.xmax)**self.alpha # eq. (9)
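# Worked example of the eq. (9) weighting with the defaults (xmax=100, alpha=0.75):
# a co-occurrence count of 25 gets weight (25/100)**0.75 ~= 0.354, while any
# count >= 100 is capped at weight 1.0.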
# Learning:
indices = list(range(m))
for iteration in range(self.max_iter):
epoch_error = 0.0
random.shuffle(indices)
for i, j in itertools.product(indices, indices):
if X[i, j] > 0.0:
weight = X_weights[i,j]
# Cost is J' based on eq. (8) in the paper:
diff = W[i].dot(C[j]) + B[0, i] + B[1, j] - X_log[i, j]
fdiff = diff * weight
# Gradients:
wgrad = fdiff * C[j]
cgrad = fdiff * W[i]
wbgrad = fdiff
wcgrad = fdiff
# Updates:
W[i] -= self.eta * wgrad
C[j] -= self.eta * cgrad
B[0, i] -= self.eta * wbgrad
B[1, j] -= self.eta * wcgrad
# One-half squared error term:
epoch_error += 0.5 * weight * (diff**2)
epoch_error /= m
if epoch_error <= self.tol:
utils.progress_bar(
"Converged on iteration {} with error {}".format(
iteration, epoch_error), self.display_progress)
break
utils.progress_bar(
"Finished epoch {} of {}; error is {}".format(
iteration, self.max_iter, epoch_error), self.display_progress)
# Return the sum of the word and context matrices, per the advice
# in section 4.2:
G = W + C
self.embedding = self.convert_output(G, df)
return self.embedding
def score(self, X):
"""
The goal of GloVe is to learn vectors whose dot products are
proportional to the log co-occurrence probability. This score
method assesses that directly using the current `self.embedding`.
Parameters
----------
X : pd.DataFrame or np.array, shape `(self.n_words, self.n_vocab)`
The original count matrix.
Returns
-------
float
The Pearson correlation.
"""
X = self.convert_input_to_array(X)
G = self.convert_input_to_array(self.embedding)
mask = X > 0
M = G.dot(G.T)
X_log = utils.log_of_array_ignoring_zeros(X)
row_log_prob = np.log(X.sum(axis=1))
row_log_prob = np.outer(row_log_prob,
|
np.ones(X.shape[1])
|
numpy.ones
|
from __future__ import absolute_import,division
__filetype__ = "base"
#External Modules
import logging, os, shutil, sys, time, uuid
import numpy as np
from astropy import units as u
from astropy import wcs
from astropy.io import fits
from astropy.table import Table, Column
from copy import deepcopy
from photutils import CircularAperture, aperture_photometry
from photutils.psf.models import GriddedPSFModel
from scipy.ndimage.interpolation import zoom, rotate
if sys.version_info[0] >= 3:
from io import StringIO
else:
from cStringIO import StringIO
#Local Modules
from ..utilities import StipsEnvironment
from ..utilities import OffsetPosition
from ..utilities import overlapadd2
from ..utilities import overlapaddparallel
from ..utilities import read_table
from ..utilities import ImageData
from ..utilities import Percenter
from ..utilities import StipsDataTable
from ..utilities import SelectParameter
from ..errors import GetCrProbs, GetCrTemplate, MakeCosmicRay
stips_version = StipsEnvironment.__stips__version__
class AstroImage(object):
"""
The AstroImage class represents a generic astronomical image. The image has the following
data associated with it:
_file : string of file name (including path) containing mem-mapped numpy array.
data : mem-mapped numpy double-precision 2D array of image data, in counts
scale : array of 2 double-precision floating point values, forming X and Y scale in
arcseconds/pixel
wcs : astropy WCS object containing image WCS information.
header : key/value array. Contains FITS header information and metadata
history : array of strings holding the FITS HISTORY section
"""
def __init__(self, **kwargs):
"""
Astronomical image. The __init__ function creates an empty image with all other data values
set to zero.
"""
default = self.INSTRUMENT_DEFAULT
if 'parent' in kwargs:
self.parent = kwargs['parent']
self.logger = self.parent.logger
self.out_path = self.parent.out_path
self.prefix = self.parent.prefix
self.seed = self.parent.seed
self.telescope = self.parent.TELESCOPE.lower()
self.instrument = self.parent.PSF_INSTRUMENT
self.filter = self.parent.filter
self.oversample = self.parent.oversample
self.shape = np.array(self.parent.DETECTOR_SIZE)*self.oversample
self._scale = np.array(self.parent.SCALE)/self.oversample
self.zeropoint = self.parent.zeropoint
self.photflam = self.parent.photflam
self.photplam = self.parent.PHOTPLAM[self.filter]
background = self.parent.background
self.psf_grid_size = self.parent.psf_grid_size
self.psf_commands = self.parent.psf_commands
small_subarray = self.parent.small_subarray
self.cat_type = self.parent.cat_type
self.set_celery = self.parent.set_celery
self.get_celery = self.parent.get_celery
self.convolve_size = self.parent.convolve_size
self.memmap = self.parent.memmap
else:
self.parent = None
if 'logger' in kwargs:
self.logger = kwargs['logger']
else:
self.logger = logging.getLogger('__stips__')
log_level = SelectParameter("log_level")
self.logger.setLevel(getattr(logging, log_level))
if not len(self.logger.handlers):
stream_handler = logging.StreamHandler(sys.stderr)
format = '%(asctime)s %(levelname)s: %(message)s'
stream_handler.setFormatter(logging.Formatter(format))
self.logger.addHandler(stream_handler)
self.out_path = SelectParameter('out_path', kwargs)
self.oversample = SelectParameter('oversample', kwargs)
shape = kwargs.get('shape', default['shape'])
self.shape = np.array(shape) * self.oversample
self._scale = kwargs.get('scale', np.array(default['scale']))
self.prefix = kwargs.get('prefix', '')
self.cat_type = SelectParameter('cat_type', kwargs)
self.set_celery = kwargs.get('set_celery', None)
self.get_celery = kwargs.get('get_celery', None)
self.seed = SelectParameter('seed', kwargs)
small_subarray = kwargs.get('small_subarray', False)
self.zeropoint = kwargs.get('zeropoint', default['zeropoint'])
self.photflam = kwargs.get('photflam', default['photflam'])
self.photplam = kwargs.get('photplam', default['photplam'])
background = SelectParameter('background', kwargs)
self.telescope = kwargs.get('telescope', default['telescope'])
self.instrument = kwargs.get('instrument', default['instrument'])
self.filter = kwargs.get('filter', default['filter'])
self.psf_grid_size = SelectParameter('psf_grid_size', kwargs)
self.psf_commands = kwargs.get('psf_commands', '')
self.convolve_size = SelectParameter('convolve_size', kwargs)
self.memmap = SelectParameter('memmap', kwargs)
if self.get_celery is None:
self.get_celery = lambda: ""
if self.set_celery is None:
self.set_celery = lambda x: None
# documentation says
# - 1 for FITS/Fortran
# - 0 for numpy/C
# although the ultimate output format is FITS, within an AstroImage data is stored
# as a numpy array, so origin=0 is the way to go.
self.wcs_origin = 0
#Set unique ID and figure out where the numpy memmap will be stored
self.name = kwargs.get('detname', default['detector'][self.instrument])
self.detector = self.name
if self.memmap:
fname = self.prefix+"_"+uuid.uuid4().hex+"_"+self.name+".tmp"
self.fname = os.path.join(self.out_path, fname)
if self.psf_commands is None:
self.psf_commands = ''
psf = kwargs.get('psf', True)
if psf:
self.make_psf()
data = kwargs.get('data', None)
if data is not None:
base_shape = np.array(data.shape)
else:
#restrict data size to PSF size
if small_subarray:
if not hasattr(self, 'psf'):
msg = "{}: Unable to set image size to PSF size when image "
msg += "has no valid PSF."
raise ValueError(msg.format(self.name))
base_shape = self.psf_shape
else:
base_shape = self.shape
self._init_dat(base_shape, self.psf_shape, data)
#Get WCS values if present, or set up a default
self.wcs = self._getWcs(**kwargs)
self._prepRaDec()
#Header
self.header = kwargs.get('header', kwargs.get('imh', {}))
self._prepHeader()
if 'exptime' in self.header:
self.exptime = self.header['exptime']
else:
self.exptime = kwargs.get('exptime', 1.)
self.updateHeader('exptime', self.exptime)
#History
self.history = kwargs.get('history', [])
#Special values for Sersic profile generation
self.profile_multiplier = kwargs.get('profile_multiplier', 100.)
self.noise_floor = max(background, kwargs.get('noise_floor', 1.))
def __del__(self):
if hasattr(self, 'fname') and os.path.exists(self.fname):
os.remove(self.fname)
def copy(self):
other = AstroImage(out_path=self.out_path, detname=self.name, wcs=self.wcs, header=self.header, history=self.history,
xsize=self.xsize, ysize=self.ysize, zeropoint=self.zeropoint, photflam=self.photflam,
logger=self.logger)
try:
if os.path.exists(other.fname):
os.remove(other.fname)
shutil.copy(self.fname, other.fname)
except Exception as e:
if os.path.exists(other.fname):
                os.remove(other.fname)
raise e
return other
@classmethod
def initFromFits(cls, file, **kwargs):
"""
Takes the entire image from the FITS file (i.e. the FITS header overrides everything else).
Looks for optional ext keyword to indicate which FITS extension to use.
"""
img = cls(**kwargs)
if file != '':
try:
with fits.open(file) as inf:
ext = kwargs.get('ext', 0)
dat = inf[ext].data
img._init_dat(base_shape=np.array(dat.shape), data=dat)
my_wcs = wcs.WCS(inf[ext].header)
for k,v in inf[ext].header.items():
if k != '' and k not in my_wcs.wcs.to_header():
img.header[k] = v
img.wcs = img._normalizeWCS(my_wcs)
img._prepRaDec()
img._prepHeader()
img.updateHeader("ASTROIMAGEVALID", True)
img.addHistory("Created from FITS file {}".format(os.path.split(file)[1]))
img._log("info","Created AstroImage {} from FITS file {}".format(img.name,os.path.split(file)[1]))
except IOError as e:
img.updateHeader("ASTROIMAGEVALID", False)
img.addHistory("Attempted to create from invalid FITS file {}".format(os.path.split(file)[1]))
img._log("warning","Attempted to create AstroImage {} from invalid FITS file {}".format(img.name,os.path.split(file)[1]))
img._log("warning","Error is {}".format(e))
return img
@classmethod
def initDataFromFits(cls, file, **kwargs):
"""Takes *only* data from the FITS, and everything else from kwargs (or sets to default)"""
img = cls(**kwargs)
if file != '':
try:
with fits.open(file) as inf:
ext = kwargs.get('ext', 0)
dat = inf[ext].data
img._init_dat(base_shape=np.array(dat.shape), data=dat)
img.wcs = img._getWcs(**kwargs)
img._prepRaDec()
img._prepHeader()
img.updateHeader("ASTROIMAGEVALID", True)
img.addHistory("Data imported from FITS file {}".format(os.path.split(file)[1]))
img._log("info","Created AstroImage {} and imported data from FITS file {}".format(img.name,os.path.split(file)[1]))
except IOError as e:
img.updateHeader("ASTROIMAGEVALID", False)
img.addHistory("Attempted to create from invalid FITS file {}".format(os.path.split(file)[1]))
img._log("warning","Attempted to create AstroImage {} from invalid FITS file {}".format(img.name,os.path.split(file)[1]))
return img
@classmethod
def initFromArray(cls, array, **kwargs):
"""Convenience to initialize an image from a numpy array."""
img = cls(data=array, **kwargs)
img._log("info", "Creating AstroImage {} from array".format(img.name))
return img
@classmethod
def initFromPoints(cls, xs, ys, rates, **kwargs):
"""Convenience to initialize a blank image and add a set of points to it"""
img = cls(**kwargs)
img._log("info", "Creating AstroImage {} from points".format(img.name))
if img.xsize < np.ceil(np.max(xs)) + 1:
img.xsize = np.ceil(np.max(xs)) + 1
if img.ysize < np.ceil(np.max(ys)) + 1:
img.ysize = np.ceil(np.max(ys)) + 1
img.addPoints(xs, ys, rates)
return img
@classmethod
def initFromProfile(cls,posX,posY,flux,n,re,phi,axialRatio,**kwargs):
"""Convenience to initialize a blank image and add a sersic profile to it"""
img = cls(**kwargs)
img._log("info","Creating AstroImage {} from Sersic Profile".format(img.name))
img.addSersicProfile(posX, posY, flux, n, re, phi, axialRatio)
return img
@property
def xsize(self):
return self.shape[1]
@xsize.setter
def xsize(self, size, offset=0):
"""Change the horizontal size. The offset will be applied to the new image before adding"""
self.crop(size, self.ysize, offset, 0)
self.shape = np.array((self.ysize, size))
@property
def ysize(self):
return self.shape[0]
@ysize.setter
def ysize(self, size, offset=0):
"""Change the vertical size. The offset will be applied to the new image before adding"""
self.crop(self.xsize, size, 0, offset)
self.shape = np.array((size, self.xsize))
@property
def xscale(self):
return abs(self.scale[0])*3600.
@property
def yscale(self):
return abs(self.scale[1])*3600.
@property
def scale(self):
return self._scale
@property
def rascale(self):
return abs(self.scale[self.ranum])*3600.
@property
def decscale(self):
return abs(self.scale[self.decnum])
@property
def distorted(self):
return self.wcs.sip is not None
@property
def ra(self):
if self.wcs.wcs.lngtyp == 'RA':
return self.wcs.wcs.crval[self.wcs.wcs.lng]
elif self.wcs.wcs.lattyp == 'RA':
return self.wcs.wcs.crval[self.wcs.wcs.lat]
else:
raise ValueError("WCS has longitude {} and latitude {}. Can't get RA".format(self.wcs.wcs.lngtyp,self.wcs.wcs.lattyp))
@ra.setter
def ra(self,ra):
if self.wcs.wcs.lngtyp == 'RA':
self.wcs.wcs.crval[self.wcs.wcs.lng] = ra%360.
self.addHistory("Set RA to {}".format(ra))
elif self.wcs.wcs.lattyp == 'RA':
self.wcs.wcs.crval[self.wcs.wcs.lat] = ra%360.
self.addHistory("Set RA to {}".format(ra))
else:
raise ValueError("WCS has longitude {} and latitude {}. Can't set RA".format(self.wcs.wcs.lngtyp,self.wcs.wcs.lattyp))
@property
def dec(self):
if self.wcs.wcs.lngtyp == 'DEC':
return self.wcs.wcs.crval[self.wcs.wcs.lng]
elif self.wcs.wcs.lattyp == 'DEC':
return self.wcs.wcs.crval[self.wcs.wcs.lat]
else:
raise ValueError("WCS has longitude {} and latitude {}. Can't get DEC".format(self.wcs.wcs.lngtyp,self.wcs.wcs.lattyp))
@dec.setter
def dec(self,dec):
if self.wcs.wcs.lngtyp == 'DEC':
self.wcs.wcs.crval[self.wcs.wcs.lng] = dec
self.addHistory("Set DEC to {}".format(dec))
elif self.wcs.wcs.lattyp == 'DEC':
self.wcs.wcs.crval[self.wcs.wcs.lat] = dec
self.addHistory("Set DEC to {}".format(dec))
else:
raise ValueError("WCS has longitude {} and latitude {}. Can't set DEC".format(self.wcs.wcs.lngtyp,self.wcs.wcs.lattyp))
@property
def pa(self):
"""WCS is normalized, so PA from angle will work"""
return self._getPA(self.wcs,self.scale,self.decnum)
@pa.setter
def pa(self,pa):
cpa = np.cos(np.radians(pa%360.))
spa = np.sin(np.radians(pa%360.))
self.wcs.wcs.pc = np.array([[cpa, -spa], [spa, cpa]])
self.addHistory("Set PA to {}".format(pa))
@property
def celery_state(self):
if self.get_celery is not None:
return self.get_celery()
return ""
@celery_state.setter
def celery_state(self, state):
if self.set_celery is not None:
self.set_celery(state)
@property
def hdu(self):
"""Output AstroImage as a FITS Primary HDU"""
with ImageData(self.fname, self.shape, mode='r+', memmap=self.memmap) as dat:
hdu = fits.PrimaryHDU(dat, header=self.wcs.to_header(relax=True))
if 'CDELT1' not in hdu.header:
hdu.header['CDELT1'] = self.scale[0]/3600.
hdu.header['CDELT2'] = self.scale[0]/3600.
# Apparently astropy refuses to add the identity matrix to a header
if ('PC1_1' not in hdu.header) and ('CD1_1' not in hdu.header):
hdu.header['PC1_1'] = 1.
hdu.header['PC1_2'] = 0.
hdu.header['PC2_1'] = 0.
hdu.header['PC2_2'] = 1.
if sys.version_info[0] >= 3:
for k,v in self.header.items():
if k != "ASTROIMAGEVALID":
hdu.header[k] = v
else:
for k,v in self.header.iteritems():
if k != "ASTROIMAGEVALID":
hdu.header[k] = v
for item in self.history:
hdu.header.add_history(item)
self._log("info","Created Primary HDU from AstroImage {}".format(self.name))
return hdu
@property
def imageHdu(self):
"""Output AstroImage as a FITS Extension HDU"""
self._log("info","Creating Extension HDU from AstroImage {}".format(self.name))
with ImageData(self.fname, self.shape, mode='r+', memmap=self.memmap) as dat:
hdu = fits.ImageHDU(dat, header=self.wcs.to_header(relax=True), name=self.name)
if 'CDELT1' not in hdu.header:
hdu.header['CDELT1'] = self.scale[0]/3600.
hdu.header['CDELT2'] = self.scale[0]/3600.
# Apparently astropy refuses to add the identity matrix to a header
            if ('PC1_1' not in hdu.header) and ('CD1_1' not in hdu.header):
                hdu.header['PC1_1'] = 1.
                hdu.header['PC1_2'] = 0.
                hdu.header['PC2_1'] = 0.
                hdu.header['PC2_2'] = 1.
if sys.version_info[0] >= 3:
for k,v in self.header.items():
hdu.header[k] = v
else:
for k,v in self.header.iteritems():
hdu.header[k] = v
for item in self.history:
hdu.header.add_history(item)
self._log("info","Created Extension HDU from AstroImage {}".format(self.name))
return hdu
@property
def psf_constructor(self):
import webbpsf
#**WFIRST_REMNANT**
if not hasattr(webbpsf, self.telescope.lower()) and self.telescope.lower() == 'roman':
return getattr(getattr(webbpsf, 'wfirst'), self.instrument)()
if hasattr(webbpsf, self.instrument):
return getattr(webbpsf, self.instrument)()
return getattr(getattr(webbpsf, self.telescope), self.instrument)()
@property
def psf_shape(self):
if hasattr(self, 'psf'):
sampled_shape = self.psf.data.shape
return np.array([sampled_shape[1], sampled_shape[2]])
return (0, 0)
def toFits(self, outFile):
"""Create a FITS file from the current state of the AstroImage data."""
self._log("info","Writing AstroImage {} to FITS".format(self.name))
hdulist = fits.HDUList([self.hdu])
hdulist.writeto(outFile, overwrite=True)
def updateHeader(self,k,v):
"""
Updates a single keyword in the header dictionary, replacing the current value if there is
one, otherwise adding a new value
"""
self.header[k] = v
def addHistory(self,v):
"""Adds an entry to the header history list."""
self.history.append(v)
def addTable(self, t, dist=False, *args, **kwargs):
"""
Add a catalogue table to the Image. The Table must have the following columns:
RA: RA of source
DEC: DEC of source
FLUX: flux of source
TYPE: type of source (point, sersic)
N: sersic index
Re: radius containing half of the light of the sersic profile
Phi: angle of the major axis of the sersic profile
Ratio: axial ratio of the Sersic profile
ID: id of source in catalogue
            Notes: any notes of importance
The following will then be done:
- the table will be shifted from RA,DEC to X,Y (and items outside the FOV will be omitted)
- the table will be split into point sources and sersic profiles
- the point sources will be added via addPoints
- the Sersic Profiles will be iteratively added via addSersicProfile
The converted table (with X,Y instead of ra,dec and non-visible points removed) will be returned.
"""
ras = t['ra']
decs = t['dec']
self._log("info", "Determining pixel co-ordinates")
if dist and self.distorted:
xs, ys = self.wcs.all_world2pix(t['ra'], t['dec'], self.wcs_origin,
quiet=True, adaptive=True,
detect_divergence=True)
else:
xs, ys = self.wcs.wcs_world2pix(t['ra'], t['dec'], self.wcs_origin)
to_keep = np.where((xs >= 0) & (xs <= self.xsize) & (ys >= 0) & (ys <= self.ysize))
self._log("info", "Keeping {} items".format(len(xs[to_keep])))
ot = None
base_state = self.getState()
min_scale = min(self.xscale, self.yscale)
if len(xs[to_keep]) > 0:
xs = xs[to_keep]
ys = ys[to_keep]
xfs, yfs = self._remap(xs, ys)
fluxes = t['flux'][to_keep]
fluxes_observed = np.empty_like(fluxes)
types = t['type'][to_keep]
ns = t['n'][to_keep]
res = t['re'][to_keep]
phis = t['phi'][to_keep]
ratios = t['ratio'][to_keep]
ids = t['id'][to_keep]
old_notes = t['notes'][to_keep]
notes = np.empty_like(xs, dtype="S150")
notes[:] = old_notes[:]
vegamags = -2.512 * np.log10(fluxes) - self.zeropoint
stmags = -2.5 * np.log10(fluxes * self.photflam) - 21.10
stars_idx = np.where(types == 'point')
if len(xs[stars_idx]) > 0:
self.updateState(base_state + "<br /><span class='indented'>Adding {} stars</span>".format(len(xs[stars_idx])))
self._log("info", "Writing {} stars".format(len(xs[stars_idx])))
self.addPoints(xs[stars_idx], ys[stars_idx], fluxes[stars_idx], *args, **kwargs)
fluxes_observed[stars_idx] = fluxes[stars_idx]
gals_idx = np.where(types == 'sersic')
if len(xs[gals_idx]) > 0:
self._log("info","Writing {} galaxies".format(len(xs[gals_idx])))
gxs = xs[gals_idx]
gys = ys[gals_idx]
gfluxes = fluxes[gals_idx]
gtypes = types[gals_idx]
gns = ns[gals_idx]
gres = res[gals_idx]
gphis = phis[gals_idx]
gratios = ratios[gals_idx]
gids = ids[gals_idx]
counter = 1
total = len(gxs)
gfluxes_observed = np.empty_like(gxs, dtype='float32')
gnotes = np.empty_like(gxs, dtype='S150')
self._log('info', 'Starting Sersic Profiles at {}'.format(time.ctime()))
for (x, y, flux, n, re, phi, ratio, id) in zip(gxs, gys, gfluxes, gns, gres, gphis, gratios, gids):
item_index = np.where(ids==id)[0][0]
self._log("info", "Index is {}".format(item_index))
self.updateState(base_state + "<br /><span class='indented'>Adding galaxy {} of {}</span>".format(counter, len(xs[gals_idx])))
central_flux = self.addSersicProfile(x, y, flux, n, re, phi, ratio, *args, **kwargs)
fluxes_observed[item_index] = central_flux
notes[item_index] = "{}: surface brightness {:.3f} yielded flux {:.3f}".format(notes[item_index], flux, central_flux)
self._log("info", "Finished Galaxy {} of {}".format(counter, total))
counter += 1
self._log('info', 'Finishing Sersic Profiles at {}'.format(time.ctime()))
ot = Table()
ot['x'] = Column(data=xfs, unit='pixel')
ot['y'] = Column(data=yfs, unit='pixel')
ot['type'] = Column(data=types)
ot['vegamag'] = Column(data=vegamags)
ot['stmag'] = Column(data=stmags)
ot['countrate'] = Column(data=fluxes_observed, unit=(u.photon/u.second))
ot['id'] = Column(data=ids)
ot['notes'] = Column(data=notes)
return ot
def addCatalogue(self, cat, dist=False, *args, **kwargs):
"""
Add a catalogue to the Image. The Catalogue must have the following columns:
RA: RA of source
DEC: DEC of source
FLUX: flux of source
TYPE: type of source (point, sersic)
N: sersic index
Re: radius containing half of the light of the sersic profile
Phi: angle of the major axis of the sersic profile
Ratio: axial ratio of the Sersic profile
ID: id of source in catalogue
            Notes: any notes of importance
The following will then be done:
- the catalogue will be shifted from RA,DEC to X,Y (and items outside the FOV will be
omitted)
- the catalogue will be split into point sources and sersic profiles
- the point sources will be added via addPoints
- the Sersic Profiles will be iteratively added via addSersicProfile
"""
(path, catname) = os.path.split(cat)
(catbase, catext) = os.path.splitext(catname)
self._log("info","Adding catalogue {} to AstroImage {}".format(catname, self.name))
obs_file_name = "{}_observed_{}.{}".format(catbase, self.name, self.cat_type)
obsname = os.path.join(self.out_path, obs_file_name)
self.addHistory("Adding items from catalogue {}".format(catname))
data = None
in_data = StipsDataTable.dataTableFromFile(cat)
out_data = StipsDataTable.dataTableFromFile(obsname)
out_data.meta = {'name': 'Observed Catalogue', 'type': 'observed', 'detector': self.name, 'input_catalogue': catname}
base_state = self.getState()
counter = 0
current_chunk = in_data.read_chunk()
while current_chunk is not None:
table_length = len(current_chunk['id'])
self.updateState(base_state + "<br /><span class='indented'>Adding sources {} to {}</span>".format(counter, counter+table_length))
out_chunk = self.addTable(current_chunk, dist, *args, **kwargs)
if out_chunk is not None:
out_data.write_chunk(out_chunk)
counter += table_length
current_chunk = in_data.read_chunk()
self.updateState(base_state)
self._log("info","Added catalogue {} to AstroImage {}".format(catname, self.name))
return obsname
def addPoints(self, xs, ys, rates, *args, **kwargs):
"""Adds a set of point sources to the image given their co-ordinates and count rates."""
self.addHistory("Adding {} point sources".format(len(xs)))
self._log("info","Adding {} point sources to AstroImage {}".format(len(xs),self.name))
# This acts essentially the same as rounding, but is substantially faster in
# most cases.
xs = np.floor(np.array(xs)+0.5).astype(int)
ys = np.floor(np.array(ys)+0.5).astype(int)
with ImageData(self.fname, self.shape, memmap=self.memmap) as dat:
dat[ys, xs] += rates
def addSersicProfile(self, posX, posY, flux, n, re, phi, axialRatio, *args, **kwargs):
"""
Adds a single sersic profile to the image given its co-ordinates, count rate, and source
type.
(posX,posY) are the co-ordinates of the centre of the profile (pixels).
flux is the total number of counts to add to the AstroImage.
n is the Sersic profile index.
re is the radius enclosing half of the total light (pixels).
phi is the angle of the major axis (degrees east of north).
        axialRatio is the ratio of the minor axis to the major axis (1 means circular).
"""
if flux == 0.:
return 0.
self.addHistory("Adding Sersic profile at ({},{}) with flux {}, index {}, Re {}, Phi {}, and axial ratio {}".format(posX,posY,flux,n,re,phi,axialRatio))
self._log("info","Adding Sersic: re={}, n={}, flux={}, phi={:.1f}, ratio={}".format(re, n, flux, phi, axialRatio))
# Determine necessary parameters for the Sersic model -- the input radius, surface
# brightness and noise floor are all in *detector* pixels.
pixel_radius = re * self.oversample
pixel_brightness = flux / (self.oversample*self.oversample)
noise_floor = self.noise_floor / (self.oversample*self.oversample)
# Determine the pixel offset of the profile from the centre of the AstroImage
# Centre of profile is (xc, yc)
# Centre of image is (self.xsize//2, self.ysize//2)
# Centre of profile on image is (posX, posY)
# Let's say we have a 20X20 profile, centre is 10,10
# Let's say our image is 1024X1024, centre is 512,512
# Let's say posX=39, posY=27
# So we want to go from 512,512 to 39,27, so offset is -473,-485
# Offset is posX-image_centre, posY-image_centre
offset_x, offset_y = np.floor(posX) - self.xsize//2, np.floor(posY) - self.ysize//2
fractional_x, fractional_y = posX - np.floor(posX), posY - np.floor(posY)
from astropy.modeling.models import Sersic2D
# Figure out an appropriate radius. Start at 5X pixel radius, and continue until the highest value on the outer edge is below the noise floor.
max_outer_value = 2*noise_floor
filled = False
radius_multiplier = 2.5
full_frame = False
model_size = int(np.ceil(pixel_radius*radius_multiplier))
while max_outer_value > noise_floor:
radius_multiplier *= 2
model_size = int(np.ceil(pixel_radius*radius_multiplier))
if not self._filled(offset_x, offset_y, model_size, model_size):
# self._log("info", "Creating a {}x{} array for the Sersic model at ({},{})".format(model_size, model_size, posX, posY))
x, y, = np.meshgrid(np.arange(model_size), np.arange(model_size))
# In order to get fractional flux per pixel correct, carry the non-integer portion of the model centre through.
xc, yc = model_size//2 + fractional_x, model_size//2 + fractional_y
mod = Sersic2D(amplitude=pixel_brightness, r_eff=pixel_radius, n=n, x_0=xc, y_0=yc, ellip=(1.-axialRatio), theta=(np.radians(phi) + 0.5*np.pi))
img = mod(x, y)
max_outer_value = max(np.max(img[0,:]), np.max(img[-1,:]), np.max(img[:,0]), np.max(img[:,-1]))
# self._log('info', "Max outer value is {}, noise floor is {}".format(max_outer_value, noise_floor))
else:
full_frame = True
# self._log("info", "Creating full-frame Sersic model at ({},{})".format(posX, posY))
x, y = np.meshgrid(np.arange(self.ysize), np.arange(self.xsize))
xc, yc = posX, posY
mod = Sersic2D(amplitude=pixel_brightness, r_eff=pixel_radius, n=n, x_0=xc, y_0=yc, ellip=(1.-axialRatio), theta=(np.radians(phi) + 0.5*np.pi))
img = mod(x, y)
max_outer_value = 0.
img = np.where(img >= noise_floor, img, 0.)
aperture = CircularAperture((xc, yc), pixel_radius)
flux_table = aperture_photometry(img, aperture)
central_flux = flux_table['aperture_sum'][0]
self._log("info", "Sersic profile has final size {}x{}, maximum value {}, sum {}".format(model_size, model_size,
|
np.max(img)
|
numpy.max
|
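Illustrative sketch only (toy array, not the STIPS code): the snippet above keeps enlarging the Sersic model box until the brightest pixel on the array's outer edge drops below the noise floor, a check that np.max over the four border slices makes cheap:

import numpy as np

img = np.random.rand(64, 64)          # stand-in for a rendered Sersic model
noise_floor = 0.05
# Largest value on any of the four edges of the array
max_outer_value = max(np.max(img[0, :]), np.max(img[-1, :]),
                      np.max(img[:, 0]), np.max(img[:, -1]))
needs_bigger_box = max_outer_value > noise_floor
print(max_outer_value, needs_bigger_box)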
from cmmab import *
import argparse
import pandas as pd
import numpy as np
import scipy.special
import scipy.stats as sp
import plotly.graph_objs as go
def make_rgb_transparent(rgb, bg_rgb, alpha):
'''Returns an RGB vector of values with given transparency level and background.
    This function is used for generating colors that appear transparent against the background.
    It provides functionality similar to the alpha option in other plotting libraries; the
    difference is that it returns the RGB values of the blended (transparent) color.
Args:
rgb: The list rgb values for the original color(s).
bg_rgb: The list of rgb values for the background(s).
alpha: A number between 0 and 1 indicating the transparency level.
Returns:
rgb values for the transparent (mixed) colors.
'''
return [alpha * c1 + (1 - alpha) * c2 for (c1, c2) in zip(rgb, bg_rgb)]
def draw_uniform(num_arms, dimension, norm=2):
"""Draws samples from unit ball specified by norm.
This function draws arm parameters for any given dimension and the choice of norm being
l-2 or l-infinity.
Args:
num_arms: Number of arms needed.
dimension: Dimension of arms.
norm: An l-2 or l-infinity norm to be used.
Return:
means: The vector of arm parameters drawn randomly from unit l-2 or l-infinity ball.
"""
if norm == 'inf':
means = 2 * np.random.rand(dimension, num_arms) - 1
else:
means = np.zeros((dimension, num_arms))
ct = 0
while ct < num_arms:
h = 2 * np.random.rand(dimension)-1
if np.linalg.norm(h, norm) <= 1:
means[:, ct] = h
ct += 1
return means
def run_montecarlo_contextual(nsim, T_vals, k_vals, d_vals, covariates, labels,
true_labels, sigma, run_subs, normalize_mean_var,
save_results):
'''Implements monte carlo simulations for comparing regret of algorithms.
This function generates monte carlo instances that are used for comparing the regret
of the algorithms discussed in the paper and returns regret and number of pulls of
arms. The function has the capability of simulating for several values of time horizon
and number of arms. Please see the note on the shape of returns.
Args:
nsim: Number of simulations, i.e., monte carlo instances.
T_vals: The list of values for time horizon.
k_vals: The list of values for number of arms.
d_vals: The list of values for dimensions.
covariates: The matrix of covariates.
labels: The vector of labels.
true_labels: Whether to use true labels or use the contexts to generate a linear model
for labels (outcomes).
sigma: Standard deviation of noise (only when true_labels is 0).
run_subs: Whether to run algorithms that include subsampling step.
normalize_mean_var: Whether to adjust covariates by centralizing using mean and
normalizing using standard deviation.
save_results: A boolean indicating whether to save the regret and number of pulls
of various algorithms as .npy files.
Returns:
        all_regret: A list with the final (total) regret of each algorithm. Each entry of the
            list is a numpy array of shape (nsim, number of settings), where the number of
            settings is given by the length of T_vals and k_vals.
'''
configs = len(T_vals)
all_regret_greedy =
|
np.zeros((nsim, configs))
|
numpy.zeros
|
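A hedged aside on the completion above: np.zeros is used to preallocate one regret value per Monte Carlo run and per experimental setting, which each simulation then fills in. Toy dimensions below; the variable names are invented for illustration:

import numpy as np

nsim, configs = 100, 3
# One row per Monte Carlo run, one column per (T, k, d) setting
all_regret_greedy = np.zeros((nsim, configs))
all_regret_ucb = np.zeros((nsim, configs))
all_regret_greedy[0, 0] = 1.7      # filled in as each simulation finishes
print(all_regret_greedy.shape)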
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import copy
def one_hot_encode_along_channel_axis(sequence, onehot_axis=1):
to_return = np.zeros((len(sequence),4), dtype=np.int8)
seq_to_one_hot_fill_in_array(zeros_array=to_return,
sequence=sequence, one_hot_axis=onehot_axis)
return to_return
def seq_to_one_hot_fill_in_array(zeros_array, sequence, one_hot_axis):
assert one_hot_axis==0 or one_hot_axis==1
if (one_hot_axis==0):
assert zeros_array.shape[1] == len(sequence)
elif (one_hot_axis==1):
assert zeros_array.shape[0] == len(sequence)
#will mutate zeros_array
for (i,char) in enumerate(sequence):
if (char=="A" or char=="a"):
char_idx = 0
elif (char=="C" or char=="c"):
char_idx = 1
elif (char=="G" or char=="g"):
char_idx = 2
elif (char=="T" or char=="t"):
char_idx = 3
elif (char=="N" or char=="n"):
continue #leave that pos as all 0's
else:
raise RuntimeError("Unsupported character: "+str(char))
if (one_hot_axis==0):
zeros_array[char_idx,i] = 1
elif (one_hot_axis==1):
zeros_array[i,char_idx] = 1
def enum(**enums):
class Enum(object):
pass
to_return = Enum
for key, val in enums.items():
if hasattr(val, '__call__'):
setattr(to_return, key, staticmethod(val))
else:
setattr(to_return, key, val)
to_return.vals = [x for x in enums.values()]
to_return.the_dict = enums
return to_return
def seq_from_onehot(onehot_data):
sequences = []
if len(onehot_data.shape) != 3:
onehot_data = onehot_data[np.newaxis, :]
for i in range(onehot_data.shape[0]):
onehot_seq = onehot_data[i, :, :]
sequence = ''
if onehot_seq.shape[0] < onehot_seq.shape[1]:
onehot_seq = np.swapaxes(onehot_seq, 0, 1)
for j in range(onehot_seq.shape[0]):
if(onehot_seq[j, 0]==1):
sequence = sequence + "A"
elif (onehot_seq[j, 1]==1):
sequence = sequence + "C"
elif (onehot_seq[j, 2]==1):
sequence = sequence + "G"
elif (onehot_seq[j, 3]==1):
sequence = sequence + "T"
sequences.append(sequence)
return sequences
def reverse_complement_seq(seq):
table = str.maketrans("ACTG", "TGAC")
return seq.translate(table)[::-1]
def reverse_complement_onehot(onehot, window_size):
dim = onehot.shape
axis_nt = dim.index(4)
axis_base = dim.index(window_size)
onehot_rc = np.flip(onehot, axis=axis_nt)
onehot_rc = np.flip(onehot_rc, axis=axis_base)
return onehot_rc
def shift_onehot(onehot_data, shift_amount, pad_value=0.0):
"""Shift a sequence left or right by shift_amount.
Args:
seq: a [batch_size, sequence_length, sequence_depth] sequence to shift
shift_amount: the signed amount to shift (tf.int32 or int)
pad_value: value to fill the padding (primitive or scalar tf.Tensor)
"""
flag_swap = False
if len(onehot_data.shape) != 3:
onehot_data = onehot_data[np.newaxis, :]
if onehot_data.shape[2] > onehot_data.shape[1]:
onehot_data = np.swapaxes(onehot_data,1,2)
flag_swap = True
input_shape = onehot_data.shape
pad = pad_value * np.ones(onehot_data[:, 0:np.abs(shift_amount), :].shape)
def _shift_right(_onehot_data):
sliced_onehot_data = _onehot_data[:, :-shift_amount:, :]
return np.concatenate((pad, sliced_onehot_data), axis=1)
def _shift_left(_onehot_data):
sliced_onehot_data = _onehot_data[:, -shift_amount:, :]
return np.concatenate((sliced_onehot_data, pad), axis=1)
if shift_amount > 0:
output = _shift_right(onehot_data)
else:
output = _shift_left(onehot_data)
output = np.reshape(output, input_shape)
if flag_swap:
output = np.swapaxes(output,1,2)
return output
def metric_pearson(obs, pred):
correlations = []
for i in range(len(pred)):
correlations.append(np.corrcoef(pred[i, :], obs[i, :])[0, 1])
return correlations
def metric_mse(obs, pred):
from sklearn.metrics import mean_squared_error
mses = []
for i in range(len(pred)):
mses.append(mean_squared_error(obs[i, :], pred[i, :]))
return mses
def metric_r2_score(obs, pred):
r2 = []
for i in range(len(pred)):
ssres = np.sum(np.square(obs[i, :] - pred[i, :]))
sstot = np.sum(np.square(obs[i, :] - np.mean(obs[i, :])))
r2.append(1 - ssres / sstot)
return r2
def compute_loss(obs, pred, combine_weighting):
correlations = metric_pearson(obs, pred)
mses = metric_mse(obs, pred)
metric_loss = 1 - np.stack(correlations) + combine_weighting*np.stack(mses)
return metric_loss
def minmax_norm(var_in):
max_val = np.amax(var_in)
min_val = np.amin(var_in)
subtracted = var_in - min_val
var_out = subtracted / (max_val - min_val)
return var_out
def minmax_scale(pred, labels):
subtracted = pred - np.min(pred, axis=-1)
max_pred = np.max(subtracted, axis=-1)
min_pred = np.min(subtracted, axis=-1)
max_true = np.max(labels, axis=-1)
min_true = np.min(labels, axis=-1)
scaled = subtracted / (max_pred - min_pred) * (max_true - min_true) + min_true
return scaled
def rounding_generator(data, y, name, batch_size):
import copy
l = len(data)
num_sample = batch_size - l % batch_size
data_out = copy.deepcopy(data)
data_out = np.concatenate((data_out, data_out[0:num_sample,:,:]), axis=0)
y_out = copy.deepcopy(y)
y_out = np.concatenate((y_out, y_out[0:num_sample,:]), axis=0)
name_out = copy.deepcopy(name)
name_out = np.concatenate((name_out, name_out[0:num_sample]), axis=0)
return data_out, y_out, name_out
# Matching the two datasets by concatenating the first samples from the same dataset
def upsample_generator(data1, data2):
l1 = len(data1)
l2 = len(data2)
#extending data 1 to the same size of data 2
    sampleRelation = l2 // l1  # l2 must be the larger set; data1 is upsampled to match data2
if l2 % l1 > 0:
sampleRelation += 1
index_in = list(range(l1))
index_out = np.concatenate([index_in] * sampleRelation, axis=0)
index_out = index_out[:l2]
return index_out
# Use genome relationship, assuming the order is the corresponding
def interpolating_generator(data1, data2):
from scipy.interpolate import interp1d
index_in = np.linspace(0, len(data1), num=len(data1), endpoint=True)
f = interp1d(index_in, index_in)
index_new = np.linspace(0, len(data1), num=len(data2), endpoint=True)
index_out = f(index_new)
index_out = np.rint(index_out).astype(int)
index_out[index_out>=len(data1)] = len(data1) - 1
return index_out
def list_seq_to_fasta(index_seq, seq, motif_name, flag_unique, output_directory, single_filter_txt):
if flag_unique:
output_filename = motif_name + '_seq_unique' + single_filter_txt + '.fasta'
index_seq = list(set(index_seq))
else:
output_filename = motif_name + '_seq' + single_filter_txt + '.fasta'
list_seq = np.asarray(seq)[index_seq]
print(len(index_seq))
with open(output_directory + output_filename, 'w') as f:
for i in range(len(index_seq)):
f.write('>' + str(i) + '\n')
f.write(list_seq[i] + '\n')
def list_seqlet_to_fasta(index_seq, index_start, seq, motif_name, output_directory, single_filter_txt):
output_filename = motif_name + '_seqlet' + single_filter_txt + '.fasta'
list_seq = np.asarray(seq)[index_seq]
print(len(index_seq))
with open(output_directory + output_filename, 'w') as f:
for i in range(len(index_seq)):
            seqlet = list_seq[i][max(0, index_start[i]):min(250, index_start[i]+19)]
f.write('>' + str(i) + '\n')
f.write(seqlet + '\n')
def mut_seq(mut_dict, onehot_data, loc_txt):
output_onehot_data = copy.deepcopy(onehot_data)
for k in mut_dict.keys():
seq = mut_dict[k]['seq']
if loc_txt == 'mut':
mut_start = mut_dict[k][loc_txt+'_start']
mut_end = mut_dict[k][loc_txt+'_end']
else:
mut_start = mut_dict[k][loc_txt+'_start'][0]
mut_end = mut_dict[k][loc_txt+'_end'][0]
if output_onehot_data.shape[-1] > output_onehot_data.shape[-2]:
output_onehot_data[seq, :, mut_start:mut_end+1] = 0 # Not activation
else:
output_onehot_data[seq, mut_start:mut_end+1, :] = 0
return output_onehot_data
def mut_seq_perbase_pernucleotide(mut_dict, onehot_data, loc_txt):
len_mutation = mut_dict[0]['mut_end'] - mut_dict[0]['mut_start'] #19
output_onehot_data = np.zeros((251, 4, len_mutation, 4))
for k in mut_dict.keys():
seq = mut_dict[k]['seq']
if loc_txt == 'mut':
mut_start = mut_dict[k][loc_txt+'_start']
mut_end = mut_dict[k][loc_txt+'_end']
else:
mut_start = mut_dict[k][loc_txt+'_start'][0]
mut_end = mut_dict[k][loc_txt+'_end'][0]
for i in range(mut_start, mut_end):
for j in range(4):
tmp = copy.deepcopy(onehot_data[seq, :, :]) # 19 by 4
if tmp.shape[-1] == 4:
tmp[i, :] = 0
tmp[i, j] = 1
else: # what happens when both=4?
tmp[:, i] = 0
tmp[j, i] = 1
output_onehot_data[:,:,i-mut_start,j] = tmp
return output_onehot_data
def mut_seq_perbase_opposite(mut_dict, onehot_data, loc_txt, flag_order):
for k in mut_dict.keys():
seq = mut_dict[k]['seq']
if loc_txt == 'ocr':
mut_start = mut_dict[k]['start']
mut_end = mut_dict[k]['end']
elif loc_txt == 'mut':
mut_start = mut_dict[k][loc_txt+'_start']
mut_end = mut_dict[k][loc_txt+'_end']
elif loc_txt == 'resp':
mut_start = mut_dict[k][loc_txt+'_start'][0]
mut_end = mut_dict[k][loc_txt+'_end'][0]
output_onehot_data = np.zeros((251, 4, mut_end - mut_start))
for i in range(mut_start, mut_end):
tmp = copy.deepcopy(onehot_data[seq, :, :])
if tmp.shape[-1] == 4:
if flag_order == 'ATGC':
tmp = tmp[:, [0, 3, 2, 1]]
tmp_original = np.copy(tmp[i, :])
if tmp_original[0] or tmp_original[3]: # AT->C;
tmp[i, :] = 0
tmp[i, 1] = 1
elif tmp_original[1] or tmp_original[2]: # CG->A
tmp[i, :] = 0
tmp[i, 0] = 1
else: # what happens when both=4?
if flag_order == 'ATGC':
tmp = tmp[[0, 3, 2, 1], :]
tmp_original = np.copy(tmp[:, i])
if tmp_original[0] or tmp_original[3]: # AT->C;
tmp[:, i] = 0
tmp[1, i] = 1
elif tmp_original[1] or tmp_original[2]: # CG->A
tmp[:, i] = 0
tmp[0, i] = 1
output_onehot_data[:, :, i] = tmp
return output_onehot_data
def mut_seq_perbase_opposite_hyp(mut_dict, onehot_data, hyp_score, loc_txt, flag_order):
for k in mut_dict.keys():
hyp_score_k = np.stack(hyp_score[str(k)])
seq = mut_dict[k]['seq']
if loc_txt == 'ocr':
mut_start = mut_dict[k]['start']
mut_end = mut_dict[k]['end']
elif loc_txt == 'mut':
mut_start = mut_dict[k][loc_txt+'_start']
mut_end = mut_dict[k][loc_txt+'_end']
elif loc_txt == 'resp':
mut_start = mut_dict[k][loc_txt+'_start'][0]
mut_end = mut_dict[k][loc_txt+'_end'][0]
output_onehot_data = np.zeros((251, 4, mut_end - mut_start))
for i in range(mut_start, mut_end):
tmp = copy.deepcopy(onehot_data[seq, :, :])
tmp_hyp = copy.deepcopy(hyp_score_k[seq, :, :])
if tmp.shape[-1] == 4:
if flag_order == 'ATGC':
tmp = tmp[:, [0, 3, 2, 1]]
tmp[i, :] = 0
tmp[i, np.argmin(tmp_hyp[i, :])] = 1
            else:  # TODO: handle the edge case where both dimensions equal 4
if flag_order == 'ATGC':
tmp = tmp[[0, 3, 2, 1], :]
tmp[:, i] = 0
tmp[np.argmin(tmp_hyp[i, :]), i] = 1
output_onehot_data[:, :, i] = tmp
return output_onehot_data
# Parse meme file into array
def read_meme(file_path=None):
with open(file_path) as fp:
line = fp.readline()
motifs=[]
motif_names=[]
while line:
#determine length of next motif
if line.split(" ")[0]=='MOTIF':
#add motif number to separate array
motif_names.append(line.split(" ")[1])
#get length of motif
line2=fp.readline().split(" ")
motif_length = int(float(line2[5]))
#read in motif
current_motif=np.zeros((19, 4)) # Edited pad shorter ones with 0
for i in range(motif_length):
current_motif[i,:] = fp.readline().split("\t")
motifs.append(current_motif)
line = fp.readline()
motifs = np.stack(motifs)
motif_names = np.stack(motif_names)
return motifs, motif_names
def compute_ic(motifs, motif_names, bckgrnd=None, epsilon=None):
import pandas as pd
#set background frequencies of nucleotides
if not bckgrnd:
bckgrnd = [0.25, 0.25, 0.25, 0.25]
if not epsilon:
epsilon = 1e-11
#compute information content of each motif
info_content = []
position_ic = []
for i in range(motifs.shape[0]):
position_wise_ic = np.subtract(np.sum(np.multiply(motifs[i,:,:],np.log2(motifs[i,:,:] + epsilon)), axis=1),np.sum(np.multiply(bckgrnd,np.log2(bckgrnd))))
position_ic.append(position_wise_ic)
ic = np.sum(position_wise_ic, axis=0)
info_content.append(ic)
info_content = np.stack(info_content)
position_ic = np.stack(position_ic)
#length of motif with high info content
n_info =
|
np.sum(position_ic>0.2, axis=1)
|
numpy.sum
|
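Not part of the original file, just a small sketch of the completed call: comparing an array against a threshold yields a boolean mask, and np.sum along axis=1 counts the high-information positions per motif:

import numpy as np

position_ic = np.random.rand(5, 19)          # toy per-position information content
# Boolean comparison gives a 0/1 mask; summing along axis=1 counts the
# positions per motif whose IC exceeds the 0.2 threshold
n_info = np.sum(position_ic > 0.2, axis=1)
print(n_info)                                # one count per motif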
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mi
# import scipy.interpolate as si
import matplotlib.tri as tri
import copy
from os import path
from kivy.uix.floatlayout import FloatLayout
from kivy.graphics import Ellipse, Color, Rectangle, Line, Point
from kivy.metrics import dp, sp
from kivy.uix.label import Label
from kivy.clock import Clock
from kivy.core.window import Window
from mpl_texture import InteractiveWorldMapOverlayWidget, InteractivePlotWidget, InteractiveWorldMapWidget
# from mpl_texture import InteractivePlotWidget, InteractiveWorldMapWidget
import hashlib
#from PIL import Image
# Data dictionary: datadict has form {'u':u, 'v':v, 'V':V}
# Station dictionary: statdict has form {<station code>:{'on':<True/False>,'name':<name>,'loc':(x,y,z)}}
__mydebug__ = True
#########
# To read in data to get the
def read_data(v_file_name) :
# Read in Themis-style data, which is simple and compact
data = np.loadtxt(v_file_name,usecols=[5,6,7,8,9,10,3])
baselines =
|
np.loadtxt(v_file_name,usecols=[4],dtype=str)
|
numpy.loadtxt
|
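Illustrative only, with an in-memory stand-in for the visibility file: np.loadtxt reads selected numeric columns, and a separate call with dtype=str pulls out the baseline codes, mirroring the completion above:

import io
import numpy as np

# Two whitespace-separated rows mimicking a small visibility table
txt = "2017 100 AA 1.0 2.0\n2017 101 BB 3.0 4.0\n"
# Numeric columns come back as a float array...
data = np.loadtxt(io.StringIO(txt), usecols=[3, 4])
# ...while a string column needs dtype=str (or a structured dtype)
codes = np.loadtxt(io.StringIO(txt), usecols=[2], dtype=str)
print(data.shape, codes)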
# -*- coding: utf-8 -*-
import numpy as np
import scipy.special
from bokeh.layouts import gridplot
from bokeh.plotting import figure, show, output_file
p1 = figure(title="Normal Distribution (μ=0, σ=0.5)",tools="save",
background_fill_color="#E8DDCB")
mu, sigma = 0, 0.5
measured = np.random.normal(mu, sigma, 1000)
hist, edges = np.histogram(measured, density=True, bins=50)
x = np.linspace(-2, 2, 1000)
pdf = 1/(sigma * np.sqrt(2*np.pi)) * np.exp(-(x-mu)**2 / (2*sigma**2))
cdf = (1+scipy.special.erf((x-mu)/np.sqrt(2*sigma**2)))/2
p1.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:],
fill_color="#036564", line_color="#033649")
p1.line(x, pdf, line_color="#D95B43", line_width=8, alpha=0.7, legend="PDF")
p1.line(x, cdf, line_color="white", line_width=2, alpha=0.7, legend="CDF")
p1.legend.location = "center_right"
p1.legend.background_fill_color = "darkgrey"
p1.xaxis.axis_label = 'x'
p1.yaxis.axis_label = 'Pr(x)'
p2 = figure(title="Log Normal Distribution (μ=0, σ=0.5)", tools="save",
background_fill_color="#E8DDCB")
mu, sigma = 0, 0.5
measured = np.random.lognormal(mu, sigma, 1000)
hist, edges = np.histogram(measured, density=True, bins=50)
x = np.linspace(0.0001, 8.0, 1000)
pdf = 1/(x* sigma * np.sqrt(2*np.pi)) * np.exp(-(np.log(x)-mu)**2 / (2*sigma**2))
cdf = (1+scipy.special.erf((
|
np.log(x)
|
numpy.log
|
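A self-contained sketch of the completed expression (toy grid, same formula): the log-normal PDF is the Gaussian density evaluated at np.log(x), with an extra 1/x factor from the change of variables:

import numpy as np

mu, sigma = 0.0, 0.5
x = np.linspace(0.0001, 8.0, 5)
# Log-normal PDF: Gaussian in log(x), divided by x
pdf = 1 / (x * sigma * np.sqrt(2 * np.pi)) * np.exp(-(np.log(x) - mu) ** 2 / (2 * sigma ** 2))
print(pdf)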
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import cholesky, svdvals
from numba import jit
def main():
width = 32
height = 32
length_scales = np.array([4, 16, 32])
z_in = np.random.normal(size=width * height)
x =
|
np.arange(width)
|
numpy.arange
|
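Aside, not from the original script: np.arange builds the integer pixel coordinates that scripts like the one above typically expand into a full grid; a minimal sketch with assumed toy sizes:

import numpy as np

width, height = 32, 32
x = np.arange(width)                 # 0, 1, ..., width-1 pixel coordinates
y = np.arange(height)
# Full coordinate grid, one (x, y) pair per pixel
xx, yy = np.meshgrid(x, y)
coords = np.column_stack([xx.ravel(), yy.ravel()])
print(coords.shape)                  # (width*height, 2)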
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 9 20:38:51 2017
@author: <NAME>
"""
import time
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import rasterio
import scipy.ndimage as ndi
from skimage.util import apply_parallel
#%%
import neilpy
#%%
with rasterio.open('../sample_data/sample_dem.tif') as src:
Z = src.read(1)
Zt = src.affine
#%%
with rasterio.open('../neilpy_data/poland_30m.tif') as src:
Z = src.read(1)
Zt = src.transform
#%% 42 mins
then = time.time()
G = get_geomorphon_from_openness(Z,Zt[0],10,1)
now = time.time()
print(now-then)
#%% at 1000, 26 minutes
# Calculate the geomorphons (a numeric code, 1-10)
then = time.time()
cellsize = Zt[0]
lookup_pixels = 50
threshold_angle = 1
def gm_wrap(I):
this_G = get_geomorphon_from_openness(I,cellsize,lookup_pixels,threshold_angle)
return this_G
G = apply_parallel(gm_wrap,Z.copy(),1000,lookup_pixels)
now = time.time()
print(now-then)
#%%
im = Image.fromarray(G,mode='L')
im.putpalette(geomorphon_cmap())
plt.imshow(im)
plt.show()
im.save('../neilpy_data/poland_30m_geomorphons.png')
#%%
write_worldfile(Zt,'../neilpy_data/poland_30m_geomorphons.pgw')
#%% SMRF TESTING
fns = glob.glob(r'C:\Temp\Reference\*.txt')
total_error = np.zeros(len(fns))
for i,fn in enumerate(fns):
df = pd.read_csv(fn,header=None,names=['x','y','z','g'],delimiter='\t')
x,y,z = df.x.values,df.y.values,df.z.values
windows = np.arange(18) + 1
cellsize= 1
slope_threshold = .15
elevation_threshold = .5
elevation_scaler = 1.25
result = neilpy.smrf(x,y,z,windows,cellsize,slope_threshold,elevation_threshold,elevation_scaler)
total_error[i] = 1 - np.sum(result[3] == df.g) / len(df)
print(fn,':',total_error[i])
print('Mean total error',np.mean(total_error))
print('Median total error',np.median(total_error))
#%%
plt.imshow(Zpro)
#%%
values = [f(p[0],p[1])[0][0] for p in zip(row,col)]
#%% progressive_filter
header, df = read_las('DK22_partial.las')
Z,t = create_dem(df.x,df.y,df.z,resolution=5,bin_type='min');
Z = apply_parallel(inpaint_nans_by_springs,Z.copy(),100,10)
#%%
slope_threshold = .15
windows = np.arange(1,10,2)
cellsize = 5
OC = progressive_filter(Z,np.arange(1,10),cellsize=5,slope_threshold=.2)
plt.imshow(OC)  # OC: object-cell mask returned by progressive_filter above
#%%
a = np.arange(3) # cols, x
b = np.arange(3) + 1 # rows, y
c = np.arange(9).reshape((3,3))
print(c)
g = interpolate.RegularGridInterpolator((a,b),c)
print(g((2,3))) #col/x "2" and row/y "3"
#%%
c,r = ~t * (df.x.values,df.y.values)
f = interpolate.RectBivariateSpline(row_centers,col_centers,Zpro)
values = [f(p[0],p[1])[0][0] for p in zip(r,c)]
#%%
#Z = np.arange(9).reshape((3,3))
def vipmask(Z,cellsize=1):
heights = np.zeros(np.size(Z))
dlist = np.array([np.sqrt(2),1])
for direction in range(4):
dist = dlist[direction % 2]
h0 = ashift(Z,direction) - Z
h1 = ashift(Z,direction+4) - Z
heights += triangle_height(h0.ravel(),h1.ravel(),dist*cellsize)
return heights.reshape(np.shape(Z))
#print(heights)
with rasterio.open('../sample_data/sample_dem.tif') as src:
Z = src.read(1)
Zt = src.affine
V = vipmask(Z)
#%% third go
#h0 = np.array([-1,0,0])
#h1 = np.array([1,1,1])
#x_dist = 1
'''
h0 is the height of the neighbor pixel in one direction, relative to the center
h1 is the height of the pixel on the other side of the center pixel (same direction)
x_dist is the real distance between them (as some neighbors are diagonal)
'''
def triangle_height(h0,h1,x_dist=1):
n = np.shape(h0)
# The area of the triangle is half of the cross product
h0 = np.column_stack((-x_dist*np.ones(n),h0))
h1 = np.column_stack(( x_dist*np.ones(n),h1))
cp = np.abs(np.cross(h0,h1))
# Find the base from the original coords
base = np.sqrt( (2*x_dist)**2 + (h1[:,1]-h0[:,1])**2 )
# Triangle height is the cross product divided by the base
return cp/base
#%% second go
y = np.array([[0,1,2],[2,2,3],[4,4,5]])
z_diff = np.diff(y)
z_diff[:,0] = -z_diff[:,0]
n = np.shape(z_diff)[0]
xdist = 1
a = np.ones(np.shape(z_diff))
b = np.ones(np.shape(z_diff))
a[:,0] = -xdist
b[:,0] = xdist
a[:,1] = z_diff[:,0]
b[:,1] = z_diff[:,1]
cp = np.sqrt(np.abs(np.cross(a,b)))
# Calculate base
base = np.sqrt((2**xdist*np.ones(n))**2 + (z_diff[:,1])**2)
# Calculate height
h = cp/base
print(h)
#%% First go
y = np.array([[0,1,2],[2,2,3],[4,4,5]])
# y = np.random.rand(100,3)
xdist = 1
n = np.shape(y)[0]
# Calculate cross-product
a = np.hstack((-xdist*np.ones((n,1)),np.reshape(y[:,0]-y[:,1],(n,1)),np.zeros((n,1))))
b = np.hstack((xdist*np.ones((n,1)),np.reshape(y[:,2]-y[:,1],(n,1)),np.zeros((n,1))))
cp = np.abs(np.cross(a,b))
print(cp)
#del a,b
cp = np.sqrt(np.sum(cp**2,axis=1))
# Calculate base
base = np.sqrt((2**xdist*np.ones(n))**2 + (y[:,2] - y[:,1])**2)
# Calculate height
h = cp/base
print(h)
#%%
with rasterio.open('../sample_data/sample_dem.tif') as src:
Z = src.read(1)
Zt = src.affine
plt.imshow(Z,cmap='terrain',vmin=-500,vmax=2000)
plt.show()
#%%
G = get_geomorphon_from_openness(Z,cellsize=Zt[0],lookup_pixels=25,threshold_angle=1,enhance=False)
#%%
G2 = get_geomorphon_from_openness(Z,cellsize=Zt[0],lookup_pixels=6,threshold_angle=1,enhance=True)
# repair peaks
G[G==2] = G2[G==2]
# repair ridges
G[G==3] = G2[G==3]
#%%
# Apply a "standard" colormap and display the image
im = Image.fromarray(G,mode='L')
im.putpalette(geomorphon_cmap())
plt.imshow(im)
plt.show()
#%%
with rasterio.open('../sample_data/sample_dem_geomorphons.tif') as src:
G3 = src.read(1)
np.sum(G==G3) / np.size(G3)
#%% Develop for Swiss Shading
# Uh, awesome one!
with rasterio.open('../sample_data/sample_dem.tif') as src:
Z = src.read(1)
Zt = src.affine
cellsize = Zt[0]
"""
color_table = np.zeros((2,2,3),dtype=np.uint8)
color_table[0,0,:] = [110,120,117] # Top Left
color_table[0,1,:] = [242,245,173] # Top Right
color_table[1,0,:] = [128,148,138] # Bottom Left
color_table[1,1,:] = [196,201,168] # Bottom Right
# Top Left, Top Right, Bottom Left, Bottom Right
R = ndi.zoom(np.array([[110,242],[128,196]]).astype(np.uint8),8)
G = ndi.zoom(np.array([[120,245],[148,138]]).astype(np.uint8),8)
B = ndi.zoom(np.array([[117,173],[138,168]]).astype(np.uint8),8)
"""
lut = plt.imread('swiss_shading_lookup_flipped.png')[:,:,:3]
lut = (255*lut).astype(np.uint8)
# Subtract Z_norm from 255 here to invert the colormap
Z_norm = np.round(255 * (Z - np.min(Z)) / (np.max(Z) - np.min(Z))).astype(np.uint8)
H = hillshade(Z,cellsize,return_uint8=True)
RGB = np.zeros((np.shape(Z)[0],np.shape(Z)[1],3))
RGB[:,:,0] = lut[:,:,0][Z_norm.ravel(),H.ravel()].reshape(np.shape(Z))
RGB[:,:,1] = lut[:,:,1][Z_norm.ravel(),H.ravel()].reshape(np.shape(Z))
RGB[:,:,2] = lut[:,:,2][Z_norm.ravel(),H.ravel()].reshape(np.shape(Z))
plt.imshow(RGB)
#%% Develop for Swiss Shading; got it!
# Uh, awesome one!
with rasterio.open('../sample_data/sample_dem.tif') as src:
Z = src.read(1)
Zt = src.affine
cellsize = Zt[0]
"""
color_table = np.zeros((2,2,3),dtype=np.uint8)
color_table[0,0,:] = [110,120,117] # Top Left
color_table[0,1,:] = [242,245,173] # Top Right
color_table[1,0,:] = [128,148,138] # Bottom Left
color_table[1,1,:] = [196,201,168] # Bottom Right
# Top Left, Top Right, Bottom Left, Bottom Right
R = ndi.zoom(np.array([[110,242],[128,196]]).astype(np.uint8),8)
G = ndi.zoom(np.array([[120,245],[148,138]]).astype(np.uint8),8)
B = ndi.zoom(np.array([[117,173],[138,168]]).astype(np.uint8),8)
"""
lut = plt.imread('swiss_shading_lookup.png')[:,:,:3]
lut = (255*lut).astype(np.uint8)
# Subtract 255 here to invert the colormap!
#
Z_norm = np.round(255 * (Z - np.min(Z)) / (np.max(Z) - np.min(Z))).astype(np.uint8)
H = hillshade(Z,cellsize,return_uint8=True)
RGB = np.zeros((np.shape(Z)[0],
|
np.shape(Z)
|
numpy.shape
|
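Illustrative sketch with a random stand-in DEM (not the neilpy data): np.shape gives the grid dimensions used to allocate the RGB output, and the same min/max stretch maps elevations onto a 0-255 lookup index:

import numpy as np

Z = np.random.rand(40, 50) * 1500.0          # toy elevation grid
# np.shape works on any array-like and returns the usual (rows, cols) tuple
rows, cols = np.shape(Z)
# Stretch elevations to 0..255 so they can index a 256x256 colour lookup table
Z_norm = np.round(255 * (Z - np.min(Z)) / (np.max(Z) - np.min(Z))).astype(np.uint8)
RGB = np.zeros((rows, cols, 3))
print(RGB.shape, Z_norm.dtype)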
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
## Config
dataset = "training_setA"
path = "/github/pnet2019/" + dataset + "/"
numpy_load = True
nanfill = False
mm = False
std = False
nan_to_neg = False
# Script name struct for report
script_name = 'get_healthytemp'
## Imports
import numpy as np
import os
import ESNtools  # local helper used below to read the challenge files (assumed importable)
from sklearn.preprocessing import MinMaxScaler  # needed when mm-scaling is enabled below
# Fix boundary nans (replicate head/tail vals)
def nan_bounds(feats):
nanidx = np.where(np.isnan(feats))[0]
pointer_left = 0
pointer_right = len(feats) - 1
fix_left = pointer_left in nanidx
fix_right = pointer_right in nanidx
while fix_left:
if pointer_left in nanidx:
pointer_left += 1
# print("pointer_left:", pointer_left)
else:
val_left = feats[pointer_left]
feats[:pointer_left] = val_left * np.ones((1, pointer_left), dtype=np.float)
fix_left = False
while fix_right:
if pointer_right in nanidx:
pointer_right -= 1
# print("pointer_right:", pointer_right)
else:
val_right = feats[pointer_right]
feats[pointer_right + 1:] = val_right * np.ones((1, len(feats) - pointer_right - 1), dtype=np.float)
fix_right = False
# nan interpolation
def nan_interpolate(feats):
nanidx = np.where(np.isnan(feats))[0]
nan_remain = len(nanidx)
nanid = 0
while nan_remain > 0:
nanpos = nanidx[nanid]
nanval = feats[nanpos - 1]
nan_remain -= 1
nandim = 1
initpos = nanpos
# Check whether it extends
while nanpos + 1 in nanidx:
nanpos += 1
nanid += 1
nan_remain -= 1
nandim += 1
# Average sides
if np.isfinite(feats[nanpos + 1]):
nanval = 0.5 * (nanval + feats[nanpos + 1])
# Single value average
if nandim == 1:
nanval = 0.5 * (nanval + feats[nanpos + 1])
feats[initpos:initpos + nandim] = nanval * np.ones((1, nandim), dtype=np.double)
nanpos += 1
nanid += 1
## Get sepsis patients
def get_sepsis_patients(sepsis_label, patient):
    patient_sep = np.zeros(len(sepsis_label), dtype=np.int)
    n = len(np.unique(patient))  # number of patients; avoid relying on the global n
    for i in range(n):
i_pat = np.where(patient == i)[0]
patient_sep[i_pat] = int(np.sum(sepsis_label[i_pat]) > 0) * np.ones(len(i_pat), dtype=np.int)
patient_sep_idx = np.where(patient_sep != 0)[0]
patient_healthy_idx = np.where(patient_sep == 0)[0]
return patient_sep, patient_sep_idx, patient_healthy_idx
## Random seed
np.random.seed(seed=0)
## Create the feature matrix
features = []
patient = []
sepsis_label = []
dataloaded = False
## Read data
if not numpy_load:
## Folder and files
fnames = os.listdir(path)
fnames.sort()
if 'README.md' in fnames:
fnames.remove('README.md')
print('last file: ', fnames[-1])
n = len(fnames)
print(n, ' files present')
## read data
for i in range(n):
input_file = os.path.join(path, fnames[i])
if i == 0:
data, sep_lab, columns = ESNtools.read_challenge_data_label(input_file, return_header=True)
else:
data, sep_lab = ESNtools.read_challenge_data_label(input_file)
features.append(data)
sepsis_label.append(sep_lab)
pat = i * np.ones((sep_lab.shape), dtype=np.int)
patient.append(pat)
feature_matrix = np.concatenate(features)
del (features)
sepsis_label = np.concatenate(sepsis_label)
patient = np.concatenate(patient)
dataloaded = True
else:
npyfilename = "/github/pnet2019/npy/" + dataset + "_patient.npy"
patient = np.load(npyfilename)
npyfilename = "/github/pnet2019/npy/" + dataset + "_Y.npy"
sepsis_label = np.load(npyfilename)
# ADD nanfill tag
if nanfill:
dataset = dataset + "_nanfill"
if mm:
npyfilename = "/github/pnet2019/npy/" + dataset + "_mm.npy"
mm = False
print(npyfilename, '(mm) to be loaded')
else:
npyfilename = "/github/pnet2019/npy/" + dataset + ".npy"
print(npyfilename, '(not mm) to be loaded')
n = len(np.unique(patient))
print(n, ' files present')
dataloaded = True
feature_matrix = np.load(npyfilename)
##Flatten patient
patient = patient.flatten()
## Separate pointers
feature_phys = feature_matrix[:, :-6] ## Physiology
feature_demog = feature_matrix[:, -6:] ## Demographics
## Normalize mm(all) or std (sepsis, phys) vals, feature-based
if mm:
scaler = MinMaxScaler()
for i in range(n):
i_pat =
|
np.where(patient == i)
|
numpy.where
|
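A small, self-contained sketch of the completed call (toy arrays, invented values): np.where(patient == i)[0] returns the row indices belonging to patient i, which the script above uses for per-patient processing:

import numpy as np

patient = np.array([0, 0, 1, 1, 1, 2])        # patient id per time step
features = np.random.rand(len(patient), 4)
for i in np.unique(patient):
    i_pat = np.where(patient == i)[0]         # row indices for patient i
    block = features[i_pat]                   # e.g. normalise this block per patient
    print(i, block.shape)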
#!/usr/bin/env python3
import unittest
from unittest import TestCase
import numpy as np
from dynn.data import batching, dictionary
class TestNumpyBatches(TestCase):
def setUp(self):
self.input_dim = 3
self.output_dim = 2
self.num_labels = 50
self.batch_size = 5
self.data_size = 101
def _dummy_classification_iterator(self, shuffle=True):
# Create dummy data
data = np.random.uniform(size=(self.data_size, self.input_dim))
# Create targets
labels = np.random.randint(self.num_labels, size=self.data_size)
# Iterator
return batching.NumpyBatches(
data, labels, batch_size=self.batch_size, shuffle=shuffle
)
def _dummy_regression_iterator(self):
# Create dummy data
data = np.random.uniform(size=(self.data_size, self.input_dim))
# Create targets
labels = np.random.uniform(size=(self.data_size, self.output_dim))
# Iterator
return batching.NumpyBatches(
data, labels, batch_size=self.batch_size
)
def test_classification(self):
batched_dataset = self._dummy_classification_iterator()
# Try iterating
for x, y in batched_dataset:
self.assertEqual(x.shape[0], (self.input_dim))
self.assertIn(
x.shape[1],
{self.batch_size, self.data_size % self.batch_size}
)
self.assertEqual(len(y.shape), 1)
self.assertEqual(x.shape[1], y.shape[0])
def test_regression(self):
batched_dataset = self._dummy_regression_iterator()
# Try iterating
for x, y in batched_dataset:
self.assertEqual(x.shape[0], self.input_dim)
self.assertIn(
x.shape[1],
{self.batch_size, self.data_size % self.batch_size}
)
self.assertEqual(y.shape[0], self.output_dim)
self.assertEqual(x.shape[1], y.shape[1])
def test_shuffle(self):
batched_dataset = self._dummy_classification_iterator(shuffle=True)
# Record the labels for the first 2 epochs
first_epoch_labels =
|
np.concatenate([y for _, y in batched_dataset])
|
numpy.concatenate
|
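Editorial sketch only: the test above collects the labels seen in one epoch, and np.concatenate flattens a list of per-batch label arrays into a single vector in iteration order:

import numpy as np

# Three "batches" of labels with a ragged final batch, as an iterator would yield
batches = [np.array([3, 1, 4]), np.array([1, 5, 9]), np.array([2, 6])]
first_epoch_labels = np.concatenate(batches)
print(first_epoch_labels)                     # all labels in iteration order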
# -*- coding: utf-8 -*-
from __future__ import division, absolute_import, unicode_literals
import datetime as dt
import warnings
import numpy as np
import pytest
from numpy.testing import assert_allclose
from apexpy import fortranapex as fa
from apexpy import Apex, ApexHeightError, helpers
##############################################################################
# NOTE: whenever function outputs are tested against hard-coded numbers, #
# the test results (numbers) were obtained by running the code that is #
# tested. Therefore these tests below only check that nothing changes when #
# refactoring etc., and not if the results are actually correct #
##############################################################################
###============================================================================
### Test initiating the Apex class
###============================================================================
def test_init_defaults():
Apex()
def test_init_date_int():
apex_out = Apex(date=2015)
assert apex_out.year == 2015
def test_init_date_float():
apex_out = Apex(date=2015.5)
assert apex_out.year == 2015.5
def test_init_date():
date = dt.date(2015, 1, 1)
apex_out = Apex(date=date)
assert apex_out.year == helpers.toYearFraction(date)
def test_init_datetime():
datetime = dt.datetime(2015, 6, 1, 18, 23, 45)
apex_out = Apex(date=datetime)
assert apex_out.year == helpers.toYearFraction(datetime)
def test_init_datafile_IOError():
with pytest.raises(IOError):
Apex(date=2015, datafile='foo/path/to/datafile.blah')
###============================================================================
### Test the low-level interfaces to the fortran wrappers
###============================================================================
def test__geo2qd_scalar():
apex_out = Apex(date=2000, refh=300)
for lat in [0, 30, 60, 89]:
for lon in [-179, -90, 0, 90, 180]:
assert_allclose(apex_out._geo2qd(lat, lon, 100),
fa.apxg2q(lat, lon, 100, 0)[:2])
def test__geo2qd_array():
apex_out = Apex(date=2000, refh=300)
lats, lons = apex_out._geo2qd([[0, 30], [60, 90]], 15,
[[100, 200], [300, 400]])
lat1, lon1 = fa.apxg2q(0, 15, 100, 0)[:2]
lat2, lon2 = fa.apxg2q(30, 15, 200, 0)[:2]
lat3, lon3 = fa.apxg2q(60, 15, 300, 0)[:2]
lat4, lon4 = fa.apxg2q(90, 15, 400, 0)[:2]
assert_allclose(lats.astype(float), np.array([[lat1, lat2], [lat3, lat4]],
dtype=float))
assert_allclose(lons.astype(float), np.array([[lon1, lon2], [lon3, lon4]],
dtype=float))
def test__geo2qd_longitude():
apex_out = Apex(date=2000, refh=300)
assert_allclose(apex_out._geo2qd(60, 180, 100),
fa.apxg2q(60, 180, 100, 0)[:2])
assert_allclose(apex_out._geo2qd(60, -180, 100),
fa.apxg2q(60, -180, 100, 0)[:2])
assert_allclose(apex_out._geo2qd(60, -180, 100),
apex_out._geo2qd(60, 180, 100))
for i in range(-5, 5):
for lat in [0, 30, 60, 90]:
assert_allclose(apex_out._geo2qd(lat, 15+i*360, 100),
fa.apxg2q(lat, 15, 100, 0)[:2])
def test__geo2apex_scalar():
apex_out = Apex(date=2000, refh=300)
for lat in [0, 30, 60, 89]:
for lon in [-179, -90, 0, 90, 180]:
assert_allclose(apex_out._geo2apex(lat, lon, 100),
fa.apxg2all(lat, lon, 100, 300, 0)[2:4])
def test__geo2apex_array():
apex_out = Apex(date=2000, refh=300)
lats, lons = apex_out._geo2apex([[0, 30], [60, 90]], 15,
[[100, 200], [300, 400]])
lat1, lon1 = fa.apxg2all(0, 15, 100, 300, 0)[2:4]
lat2, lon2 = fa.apxg2all(30, 15, 200, 300, 0)[2:4]
lat3, lon3 = fa.apxg2all(60, 15, 300, 300, 0)[2:4]
lat4, lon4 = fa.apxg2all(90, 15, 400, 300, 0)[2:4]
assert_allclose(lats.astype(float), np.array([[lat1, lat2], [lat3, lat4]],
dtype=float))
assert_allclose(lons.astype(float), np.array([[lon1, lon2], [lon3, lon4]],
dtype=float))
def test__geo2apex_longitude():
apex_out = Apex(date=2000, refh=300)
assert_allclose(apex_out._geo2apex(60, 180, 100),
fa.apxg2all(60, 180, 100, 300, 0)[2:4])
assert_allclose(apex_out._geo2apex(60, -180, 100),
fa.apxg2all(60, -180, 100, 300, 0)[2:4])
assert_allclose(apex_out._geo2apex(60, -180, 100),
apex_out._geo2apex(60, 180, 100))
for i in range(-5, 5):
for lat in [0, 30, 60, 90]:
assert_allclose(apex_out._geo2apex(lat, 15+i*360, 100),
fa.apxg2all(lat, 15, 100, 300, 0)[2:4])
def test__geo2apexall_scalar():
apex_out = Apex(date=2000, refh=300)
for lat in [0, 30, 60, 89]:
for lon in [-179, -90, 0, 90, 180]:
ret1 = apex_out._geo2apexall(lat, lon, 100)
ret2 = fa.apxg2all(lat, lon, 100, 300, 1)
for r1, r2 in zip(ret1, ret2):
assert_allclose(r1, r2)
def test__geo2apexall_array():
apex_out = Apex(date=2000, refh=300)
ret = apex_out._geo2apexall([[0, 30], [60, 90]], 15,
[[100, 200], [300, 400]])
ret1 = fa.apxg2all(0, 15, 100, 300, 1)
ret2 = fa.apxg2all(30, 15, 200, 300, 1)
ret3 = fa.apxg2all(60, 15, 300, 300, 1)
ret4 = fa.apxg2all(90, 15, 400, 300, 1)
for i in range(len(ret)):
try:
# ret[i] is array of floats
assert_allclose(ret[i].astype(float),
np.array([[ret1[i], ret2[i]], [ret3[i], ret4[i]]],
dtype=float))
        except Exception:
# ret[i] is array of arrays
assert_allclose(ret[i][0, 0], ret1[i])
assert_allclose(ret[i][0, 1], ret2[i])
assert_allclose(ret[i][1, 0], ret3[i])
assert_allclose(ret[i][1, 1], ret4[i])
def test__qd2geo_scalar():
apex_out = Apex(date=2000, refh=300)
for lat in [0, 30, 60, 89]:
for lon in [-179, -90, 0, 90, 180]:
for prec in [-1, 1e-2, 1e-10]:
assert_allclose(apex_out._qd2geo(lat, lon, 100, prec),
fa.apxq2g(lat, lon, 100, prec))
def test__qd2geo_array():
apex_out = Apex(date=2000, refh=300)
lats, lons, errs = apex_out._qd2geo([[0, 30], [60, 90]], 15,
[[100, 200], [300, 400]], 1e-2)
lat1, lon1, err1 = fa.apxq2g(0, 15, 100, 1e-2)
lat2, lon2, err2 = fa.apxq2g(30, 15, 200, 1e-2)
lat3, lon3, err3 = fa.apxq2g(60, 15, 300, 1e-2)
lat4, lon4, err4 = fa.apxq2g(90, 15, 400, 1e-2)
assert_allclose(lats.astype(float), np.array([[lat1, lat2], [lat3, lat4]],
dtype=float))
assert_allclose(lons.astype(float), np.array([[lon1, lon2], [lon3, lon4]],
dtype=float))
assert_allclose(errs.astype(float), np.array([[err1, err2], [err3, err4]],
dtype=float))
def test__qd2geo_longitude():
apex_out = Apex(date=2000, refh=300)
assert_allclose(apex_out._qd2geo(60, 180, 100, 1e-2),
fa.apxq2g(60, 180, 100, 1e-2))
assert_allclose(apex_out._qd2geo(60, -180, 100, 1e-2),
fa.apxq2g(60, -180, 100, 1e-2))
assert_allclose(apex_out._qd2geo(60, -180, 100, 1e-2),
apex_out._qd2geo(60, 180, 100, 1e-2))
for i in range(-5, 5):
for lat in [0, 30, 60, 90]:
assert_allclose(apex_out._qd2geo(lat, 15+i*360, 100, 1e-2),
fa.apxq2g(lat, 15, 100, 1e-2))
def test__basevec_scalar():
apex_out = Apex(date=2000, refh=300)
for lat in [0, 30, 60, 89]:
for lon in [-179, -90, 0, 90, 180]:
assert_allclose(apex_out._basevec(lat, lon, 100),
fa.apxg2q(lat, lon, 100, 1)[2:4])
def test__basevec_array():
apex_out = Apex(date=2000, refh=300)
f1s, f2s = apex_out._basevec([[0, 30], [60, 90]], 15,
[[100, 200], [300, 400]])
f11, f21 = fa.apxg2q(0, 15, 100, 1)[2:4]
f12, f22 = fa.apxg2q(30, 15, 200, 1)[2:4]
f13, f23 = fa.apxg2q(60, 15, 300, 1)[2:4]
f14, f24 = fa.apxg2q(90, 15, 400, 1)[2:4]
assert_allclose(f1s[0, 0], f11)
assert_allclose(f1s[0, 1], f12)
assert_allclose(f1s[1, 0], f13)
assert_allclose(f1s[1, 1], f14)
assert_allclose(f2s[0, 0], f21)
assert_allclose(f2s[0, 1], f22)
assert_allclose(f2s[1, 0], f23)
assert_allclose(f2s[1, 1], f24)
def test__basevec_longitude():
apex_out = Apex(date=2000, refh=300)
assert_allclose(apex_out._basevec(60, 180, 100),
fa.apxg2q(60, 180, 100, 1)[2:4])
assert_allclose(apex_out._basevec(60, -180, 100),
fa.apxg2q(60, -180, 100, 1)[2:4])
assert_allclose(apex_out._basevec(60, -180, 100),
apex_out._basevec(60, 180, 100))
for i in range(-5, 5):
for lat in [0, 30, 60, 90]:
assert_allclose(apex_out._basevec(lat, 15+i*360, 100),
fa.apxg2q(lat, 15, 100, 1)[2:4])
###============================================================================
### Test the convert() method
###============================================================================
def test_convert_geo2apex():
apex_out = Apex(date=2000, refh=300)
assert_allclose(apex_out.convert(60, 15, 'geo', 'apex', height=100),
apex_out.geo2apex(60, 15, 100))
def test_convert_geo2qd():
apex_out = Apex(date=2000, refh=300)
assert_allclose(apex_out.convert(60, 15, 'geo', 'qd', height=100),
apex_out.geo2qd(60, 15, 100))
def test_convert_geo2mlt_nodate():
apex_out = Apex(date=2000, refh=300)
with pytest.raises(ValueError):
apex_out.convert(60, 15, 'geo', 'mlt')
def test_convert_geo2mlt():
datetime = dt.datetime(2000, 3, 9, 14, 25, 58)
apex_out = Apex(date=2000, refh=300)
assert_allclose(apex_out.convert(60, 15, 'geo', 'mlt', height=100,
ssheight=2e5, datetime=datetime)[1],
apex_out.mlon2mlt(apex_out.geo2apex(60, 15, 100)[1],
datetime, ssheight=2e5))
def test_convert_apex2geo():
apex_out = Apex(date=2000, refh=300)
assert_allclose(apex_out.convert(60, 15, 'apex', 'geo', height=100,
precision=1e-2),
apex_out.apex2geo(60, 15, 100, precision=1e-2)[:-1])
def test_convert_apex2qd():
apex_out = Apex(date=2000, refh=300)
assert_allclose(apex_out.convert(60, 15, 'apex', 'qd', height=100),
apex_out.apex2qd(60, 15, height=100))
def test_convert_apex2mlt():
datetime = dt.datetime(2000, 3, 9, 14, 25, 58)
apex_out = Apex(date=2000, refh=300)
assert_allclose(apex_out.convert(60, 15, 'apex', 'mlt', height=100,
datetime=datetime, ssheight=2e5)[1],
apex_out.mlon2mlt(15, datetime, ssheight=2e5))
def test_convert_qd2geo():
apex_out = Apex(date=2000, refh=300)
assert_allclose(apex_out.convert(60, 15, 'qd', 'geo', height=100,
precision=1e-2),
apex_out.qd2geo(60, 15, 100, precision=1e-2)[:-1])
def test_convert_qd2apex():
apex_out = Apex(date=2000, refh=300)
assert_allclose(apex_out.convert(60, 15, 'qd', 'apex', height=100),
apex_out.qd2apex(60, 15, height=100))
def test_convert_qd2mlt():
datetime = dt.datetime(2000, 3, 9, 14, 25, 58)
apex_out = Apex(date=2000, refh=300)
assert_allclose(apex_out.convert(60, 15, 'qd', 'mlt', height=100,
datetime=datetime, ssheight=2e5)[1],
apex_out.mlon2mlt(15, datetime, ssheight=2e5))
def test_convert_mlt2geo():
datetime = dt.datetime(2000, 3, 9, 14, 25, 58)
apex_out = Apex(date=2000, refh=300)
assert_allclose(apex_out.convert(60, 15, 'mlt', 'geo', height=100,
datetime=datetime, precision=1e-2,
ssheight=2e5),
apex_out.apex2geo(60, apex_out.mlt2mlon(15, datetime,
ssheight=2e5), 100,
precision=1e-2)[:-1])
def test_convert_mlt2apex():
datetime = dt.datetime(2000, 3, 9, 14, 25, 58)
apex_out = Apex(date=2000, refh=300)
assert_allclose(apex_out.convert(60, 15, 'mlt', 'apex', height=100,
datetime=datetime, ssheight=2e5),
(60, apex_out.mlt2mlon(15, datetime, ssheight=2e5)))
def test_convert_mlt2qd():
datetime = dt.datetime(2000, 3, 9, 14, 25, 58)
apex_out = Apex(date=2000, refh=300)
assert_allclose(apex_out.convert(60, 15, 'mlt', 'qd', height=100,
datetime=datetime, ssheight=2e5),
apex_out.apex2qd(60, apex_out.mlt2mlon(15, datetime,
ssheight=2e5),
height=100))
def test_convert_invalid_lat():
apex_out = Apex(date=2000, refh=300)
with pytest.raises(ValueError):
apex_out.convert(91, 0, 'geo', 'geo')
with pytest.raises(ValueError):
apex_out.convert(-91, 0, 'geo', 'geo')
apex_out.convert(90, 0, 'geo', 'geo')
apex_out.convert(-90, 0, 'geo', 'geo')
assert_allclose(apex_out.convert(90+1e-5, 0, 'geo', 'apex'),
apex_out.convert(90, 0, 'geo', 'apex'), rtol=0, atol=1e-8)
def test_convert_invalid_transformation():
apex_out = Apex(date=2000, refh=300)
with pytest.raises(NotImplementedError):
apex_out.convert(0, 0, 'foobar', 'geo')
with pytest.raises(NotImplementedError):
apex_out.convert(0, 0, 'geo', 'foobar')
###============================================================================
### Test the geo2apex() method
###============================================================================
def test_geo2apex():
apex_out = Apex(date=2000, refh=300)
lat, lon = apex_out.geo2apex(60, 15, 100)
assert_allclose((lat, lon), apex_out._geo2apex(60, 15, 100))
assert type(lat) != np.ndarray
assert type(lon) != np.ndarray
def test_geo2apex_vectorization():
apex_out = Apex(date=2000, refh=300)
assert apex_out.geo2apex([60, 60], 15, 100)[0].shape == (2,)
assert apex_out.geo2apex(60, [15, 15], 100)[0].shape == (2,)
assert apex_out.geo2apex(60, 15, [100, 100])[0].shape == (2,)
def test_geo2apex_invalid_lat():
apex_out = Apex(date=2000, refh=300)
with pytest.raises(ValueError):
apex_out.geo2apex(91, 0, 0)
with pytest.raises(ValueError):
apex_out.geo2apex(-91, 0, 0)
apex_out.geo2apex(90, 0, 0)
apex_out.geo2apex(-90, 0, 0)
assert_allclose(apex_out.geo2apex(90+1e-5, 0, 0),
apex_out.geo2apex(90, 0, 0), rtol=0, atol=1e-8)
def test_geo2apex_undefined_warning():
apex_out = Apex(date=2000, refh=10000)
with warnings.catch_warnings(record=True) as w:
ret = apex_out.geo2apex(0, 0, 0)
assert ret[0] == -9999
assert issubclass(w[-1].category, UserWarning)
assert 'set to -9999 where' in str(w[-1].message)
###============================================================================
### Test the apex2geo() method
###============================================================================
def test_apex2geo():
apex_out = Apex(date=2000, refh=300)
lat, lon, error = apex_out.apex2geo(60, 15, 100, precision=1e-2)
assert_allclose((lat, lon, error),
apex_out.qd2geo(*apex_out.apex2qd(60, 15, 100), height=100,
precision=1e-2))
assert type(lat) != np.ndarray
assert type(lon) != np.ndarray
assert type(error) != np.ndarray
def test_apex2geo_vectorization():
apex_out = Apex(date=2000, refh=300)
assert apex_out.apex2geo([60, 60], 15, 100)[0].shape == (2,)
assert apex_out.apex2geo(60, [15, 15], 100)[0].shape == (2,)
assert apex_out.apex2geo(60, 15, [100, 100])[0].shape == (2,)
def test_apex2geo_invalid_lat():
apex_out = Apex(date=2000, refh=300)
with pytest.raises(ValueError):
apex_out.apex2geo(91, 0, 0, 1e-2)
with pytest.raises(ValueError):
apex_out.apex2geo(-91, 0, 0, 1e-2)
apex_out.apex2geo(90, 0, 0, 1e-2)
apex_out.apex2geo(-90, 0, 0, 1e-2)
assert_allclose(apex_out.apex2geo(90+1e-5, 0, 0, 1e-2),
apex_out.apex2geo(90, 0, 0, 1e-2), rtol=0, atol=1e-8)
###============================================================================
### Test the geo2qd() method
###============================================================================
def test_geo2qd():
apex_out = Apex(date=2000, refh=300)
lat, lon = apex_out.geo2qd(60, 15, 100)
assert_allclose((lat, lon), apex_out._geo2qd(60, 15, 100))
assert type(lat) != np.ndarray
assert type(lon) != np.ndarray
def test_geo2qd_vectorization():
apex_out = Apex(date=2000, refh=300)
assert apex_out.geo2qd([60, 60], 15, 100)[0].shape == (2,)
assert apex_out.geo2qd(60, [15, 15], 100)[0].shape == (2,)
assert apex_out.geo2qd(60, 15, [100, 100])[0].shape == (2,)
def test_geo2qd_invalid_lat():
apex_out = Apex(date=2000, refh=300)
with pytest.raises(ValueError):
apex_out.geo2qd(91, 0, 0)
with pytest.raises(ValueError):
apex_out.geo2qd(-91, 0, 0)
apex_out.geo2qd(90, 0, 0)
apex_out.geo2qd(-90, 0, 0)
assert_allclose(apex_out.geo2qd(90+1e-5, 0, 0), apex_out.geo2qd(90, 0, 0),
rtol=0, atol=1e-8)
###============================================================================
### Test the qd2geo() method
###============================================================================
def test_qd2geo():
apex_out = Apex(date=2000, refh=300)
lat, lon, error = apex_out.qd2geo(60, 15, 100, precision=1e-2)
assert_allclose((lat, lon, error), apex_out._qd2geo(60, 15, 100, 1e-2))
assert type(lat) != np.ndarray
assert type(lon) != np.ndarray
assert type(error) != np.ndarray
def test_qd2geo_vectorization():
apex_out = Apex(date=2000, refh=300)
assert apex_out.qd2geo([60, 60], 15, 100)[0].shape == (2,)
assert apex_out.qd2geo(60, [15, 15], 100)[0].shape == (2,)
assert apex_out.qd2geo(60, 15, [100, 100])[0].shape == (2,)
def test_qd2geo_invalid_lat():
apex_out = Apex(date=2000, refh=300)
with pytest.raises(ValueError):
apex_out.qd2geo(91, 0, 0, precision=1e-2)
with pytest.raises(ValueError):
apex_out.qd2geo(-91, 0, 0, precision=1e-2)
apex_out.qd2geo(90, 0, 0, precision=1e-2)
apex_out.qd2geo(-90, 0, 0, precision=1e-2)
assert_allclose(apex_out.qd2geo(90+1e-5, 0, 0, 1e-2),
apex_out.qd2geo(90, 0, 0, 1e-2), rtol=0, atol=1e-8)
###============================================================================
### Test the apex2qd() method
###============================================================================
def test_apex2qd():
apex_out = Apex(date=2000, refh=300)
lat, lon = apex_out.apex2qd(60, 15, 100)
assert_allclose((lat, lon),
[60.498401, 15])
assert type(lat) != np.ndarray
assert type(lon) != np.ndarray
def test_apex2qd_vectorization():
apex_out = Apex(date=2000, refh=300)
assert apex_out.apex2qd([60, 60], 15, 100)[0].shape == (2,)
assert apex_out.apex2qd(60, [15, 15], 100)[0].shape == (2,)
assert apex_out.apex2qd(60, 15, [100, 100])[0].shape == (2,)
def test_apex2qd_invalid_lat():
apex_out = Apex(date=2000, refh=300)
with pytest.raises(ValueError):
apex_out.apex2qd(91, 0, 0)
with pytest.raises(ValueError):
apex_out.apex2qd(-91, 0, 0)
apex_out.apex2qd(90, 0, 0)
apex_out.apex2qd(-90, 0, 0)
assert_allclose(apex_out.apex2qd(90+1e-5, 0, 0), apex_out.apex2qd(90, 0, 0),
rtol=0, atol=1e-8)
def test_apex2qd_apexheight_close():
apex_out = Apex(date=2000, refh=300)
apex_out.apex2qd(0, 15, 300+1e-6)
def test_apex2qd_apexheight_over():
apex_out = Apex(date=2000, refh=300)
with pytest.raises(ApexHeightError):
apex_out.apex2qd(0, 15, 301)
###============================================================================
### Test the qd2apex() method
###============================================================================
def test_qd2apex():
apex_out = Apex(date=2000, refh=300)
lat, lon = apex_out.qd2apex(60, 15, 100)
assert_allclose((lat, lon),
[59.491381, 15])
assert type(lat) != np.ndarray
assert type(lon) != np.ndarray
def test_qd2apex_vectorization():
apex_out = Apex(date=2000, refh=300)
assert apex_out.qd2apex([60, 60], 15, 100)[0].shape == (2,)
assert apex_out.qd2apex(60, [15, 15], 100)[0].shape == (2,)
assert apex_out.qd2apex(60, 15, [100, 100])[0].shape == (2,)
def test_qd2apex_invalid_lat():
apex_out = Apex(date=2000, refh=300)
with pytest.raises(ValueError):
apex_out.qd2apex(91, 0, 0)
with pytest.raises(ValueError):
apex_out.qd2apex(-91, 0, 0)
apex_out.qd2apex(90, 0, 0)
apex_out.qd2apex(-90, 0, 0)
assert_allclose(apex_out.qd2apex(90+1e-5, 0, 0), apex_out.qd2apex(90, 0, 0),
rtol=0, atol=1e-8)
def test_qd2apex_apexheight_close():
apex_out = Apex(date=2000, refh=300)
assert_allclose(apex_out.qd2apex(0, 15, 300-1e-5),
apex_out.qd2apex(0, 15, 300))
def test_qd2apex_apexheight_over():
apex_out = Apex(date=2000, refh=300)
with pytest.raises(ApexHeightError):
apex_out.qd2apex(0, 15, 299)
###============================================================================
### Test mlon2mlt()
###============================================================================
def test_mlon2mlt_scalar():
apex_out = Apex(date=2000, refh=300)
    mlt = apex_out.mlon2mlt(0, dt.datetime(2000, 2, 3, 4, 5, 6))
    assert_allclose(mlt, 23.019629923502603)
    assert type(mlt) != np.ndarray
def test_mlon2mlt_ssheight():
apex_out = Apex(date=2000, refh=300)
mlt = apex_out.mlon2mlt(0, dt.datetime(2000, 2, 3, 4, 5, 6),
ssheight=50*2000)
assert_allclose(mlt, 23.026712036132814)
def test_mlon2mlt_1Darray():
apex_out = Apex(date=2000, refh=300)
assert_allclose(apex_out.mlon2mlt([0, 180],
dt.datetime(2000, 2, 3, 4, 5, 6)),
[23.019261, 11.019261], rtol=1e-4)
def test_mlon2mlt_2Darray():
apex_out = Apex(date=2000, refh=300)
assert_allclose(apex_out.mlon2mlt([[0, 180], [0, 180]],
dt.datetime(2000, 2, 3, 4, 5, 6)),
[[23.019261, 11.019261], [23.019261, 11.019261]], rtol=1e-4)
def test_mlon2mlt_diffdates():
apex_out = Apex(date=2000, refh=300)
dtime1 = dt.datetime(2000, 2, 3, 4, 5, 6)
dtime2 = dt.datetime(2000, 2, 3, 5, 5, 6)
assert apex_out.mlon2mlt(0, dtime1) != apex_out.mlon2mlt(0, dtime2)
def test_mlon2mlt_offset():
apex_out = Apex(date=2000, refh=300)
date = dt.datetime(2000, 2, 3, 4, 5, 6)
assert_allclose(apex_out.mlon2mlt(0, date),
apex_out.mlon2mlt(-15, date) + 1)
assert_allclose(apex_out.mlon2mlt(0, date),
apex_out.mlon2mlt(-10*15, date) + 10)
def test_mlon2mlt_range():
apex_out = Apex(date=2000, refh=300)
assert_allclose(apex_out.mlon2mlt(range(0, 361, 30),
dt.datetime(2000, 2, 3, 4, 5, 6)),
[23.01963, 1.01963, 3.01963, 5.01963, 7.01963,
9.01963, 11.01963, 13.01963, 15.01963, 17.01963,
19.01963, 21.01963, 23.01963],
rtol=1e-4)
###============================================================================
### Test mlt2mlon()
###============================================================================
def test_mlt2mlon_scalar():
apex_out = Apex(date=2000, refh=300)
    mlon = apex_out.mlt2mlon(0, dt.datetime(2000, 2, 3, 4, 5, 6))
    assert_allclose(mlon, 14.705551147460938)
    assert type(mlon) != np.ndarray
def test_mlt2mlon_ssheight():
apex_out = Apex(date=2000, refh=300)
    mlon = apex_out.mlt2mlon(0, dt.datetime(2000, 2, 3, 4, 5, 6),
                             ssheight=50*2000)
    assert_allclose(mlon, 14.599319458007812)
def test_mlt2mlon_1Darray():
apex_out = Apex(date=2000, refh=300)
assert_allclose(apex_out.mlt2mlon([0, 12],
dt.datetime(2000, 2, 3, 4, 5, 6)),
[14.705551, 194.705551], rtol=1e-4)
def test_mlt2mlon_2Darray():
apex_out = Apex(date=2000, refh=300)
assert_allclose(apex_out.mlt2mlon([[0, 12], [0, 12]],
dt.datetime(2000, 2, 3, 4, 5, 6)),
[[14.705551, 194.705551], [14.705551, 194.705551]],
rtol=1e-4)
def test_mlt2mlon_diffdates():
apex_out = Apex(date=2000, refh=300)
dtime1 = dt.datetime(2000, 2, 3, 4, 5, 6)
dtime2 = dt.datetime(2000, 2, 3, 5, 5, 6)
assert apex_out.mlt2mlon(0, dtime1) != apex_out.mlt2mlon(0, dtime2)
def test_mlt2mlon_offset():
apex_out = Apex(date=2000, refh=300)
date = dt.datetime(2000, 2, 3, 4, 5, 6)
assert_allclose(apex_out.mlt2mlon(0, date), apex_out.mlt2mlon(1, date) - 15)
assert_allclose(apex_out.mlt2mlon(0, date),
apex_out.mlt2mlon(10, date) - 150)
def test_mlt2mlon_range():
apex_out = Apex(date=2000, refh=300)
assert_allclose(apex_out.mlt2mlon(range(0, 25, 2),
dt.datetime(2000, 2, 3, 4, 5, 6)),
[14.705551, 44.705551, 74.705551, 104.705551, 134.705551,
164.705551, 194.705551, 224.705551, 254.705551, 284.705551,
314.705551, 344.705551, 14.705551],
rtol=1e-4)
###============================================================================
### Test mlt/mlon back and forth
###============================================================================
def test_mlon2mlt2mlon():
apex_out = Apex(date=2000, refh=300)
date = dt.datetime(2000, 2, 3, 4, 5, 6)
assert_allclose(apex_out.mlon2mlt(apex_out.mlt2mlon(0, date), date), 0)
assert_allclose(apex_out.mlon2mlt(apex_out.mlt2mlon(6, date), date), 6)
assert_allclose(apex_out.mlon2mlt(apex_out.mlt2mlon(12, date), date), 12)
assert_allclose(apex_out.mlon2mlt(apex_out.mlt2mlon(18, date), date), 18)
assert_allclose(apex_out.mlon2mlt(apex_out.mlt2mlon(24, date), date), 0)
def test_mlt2mlon2mlt():
apex_out = Apex(date=2000, refh=300)
date = dt.datetime(2000, 2, 3, 4, 5, 6)
assert_allclose(apex_out.mlt2mlon(apex_out.mlon2mlt(0, date), date), 0)
assert_allclose(apex_out.mlt2mlon(apex_out.mlon2mlt(90, date), date), 90)
assert_allclose(apex_out.mlt2mlon(apex_out.mlon2mlt(180, date), date), 180)
assert_allclose(apex_out.mlt2mlon(apex_out.mlon2mlt(270, date), date), 270)
assert_allclose(apex_out.mlt2mlon(apex_out.mlon2mlt(360, date), date), 0)
###============================================================================
### Test the map_to_height() method
###============================================================================
def test_map_to_height():
apex_out = Apex(date=2000, refh=300)
assert_allclose(apex_out.map_to_height(60, 15, 100, 10000, conjugate=False,
precision=1e-10),
(31.841459274291992, 17.916629791259766, 0))
assert_allclose(apex_out.map_to_height(30, 170, 100, 500, conjugate=False,
precision=1e-2),
(25.727252960205078, 169.60546875, 0.00017655163537710905))
def test_map_to_height_same_height():
apex_out = Apex(date=2000, refh=300)
assert_allclose(apex_out.map_to_height(60, 15, 100, 100, conjugate=False,
precision=1e-10),
(60, 15, 3.4150946248701075e-6), rtol=1e-5)
def test_map_to_height_conjugate():
apex_out = Apex(date=2000, refh=300)
assert_allclose(apex_out.map_to_height(60, 15, 100, 10000, conjugate=True,
precision=1e-10),
(-25.424892425537109, 27.310417175292969,
1.2074182222931995e-6))
assert_allclose(apex_out.map_to_height(30, 170, 100, 500, conjugate=True,
precision=1e-2),
(-13.76642894744873, 164.24259948730469,
0.00056820799363777041))
def test_map_to_height_vectorization():
apex_out = Apex(date=2000, refh=300)
assert_allclose(apex_out.map_to_height([60, 60], 15, 100, 100),
([60]*2, [15]*2, [3.4150946248701075e-6]*2), rtol=1e-5)
assert_allclose(apex_out.map_to_height(60, [15, 15], 100, 100),
([60]*2, [15]*2, [3.4150946248701075e-6]*2), rtol=1e-5)
assert_allclose(apex_out.map_to_height(60, 15, [100, 100], 100),
([60]*2, [15]*2, [3.4150946248701075e-6]*2), rtol=1e-5)
assert_allclose(apex_out.map_to_height(60, 15, 100, [100, 100]),
([60]*2, [15]*2, [3.4150946248701075e-6]*2), rtol=1e-5)
def test_map_to_height_ApexHeightError():
apex_out = Apex(date=2000, refh=300)
with pytest.raises(ApexHeightError):
apex_out.map_to_height(0, 15, 100, 10000)
###============================================================================
### Test the map_E_to_height() method
###============================================================================
def test_map_E_to_height():
apex_out = Apex(date=2000, refh=300)
out_60_15_100_500 = [0.7115211, 2.3562392, 0.57259707]
out_60_15_100_500_234 = [1.560284, 3.439154, 0.782339]
out_60_15_100_1000 = [0.677964, 2.089811, 0.558601]
out_60_15_200_500 = [0.723773, 2.427366, 0.590826]
out_60_30_100_500 = [0.686265, 2.375296, 0.600594]
out_70_15_100_500 = [0.727605, 2.180817, 0.291414]
# scalar
assert_allclose(apex_out.map_E_to_height(60, 15, 100, 500, [1, 2, 3]),
out_60_15_100_500, rtol=1e-5)
assert_allclose(apex_out.map_E_to_height(60, 15, 100, 500, [2, 3, 4]),
out_60_15_100_500_234, rtol=1e-5)
assert_allclose(apex_out.map_E_to_height(60, 15, 100, 1000, [1, 2, 3]),
out_60_15_100_1000, rtol=1e-5)
assert_allclose(apex_out.map_E_to_height(60, 15, 200, 500, [1, 2, 3]),
out_60_15_200_500, rtol=1e-5)
assert_allclose(apex_out.map_E_to_height(60, 30, 100, 500, [1, 2, 3]),
out_60_30_100_500, rtol=1e-5)
assert_allclose(apex_out.map_E_to_height(70, 15, 100, 500, [1, 2, 3]),
out_70_15_100_500, rtol=1e-5)
# vectorize lat
assert_allclose(apex_out.map_E_to_height([60, 70], 15, 100, 500,
np.array([[1, 2, 3]]*2).T),
np.array([out_60_15_100_500, out_70_15_100_500]).T,
rtol=1e-5)
# vectorize lon
assert_allclose(apex_out.map_E_to_height(60, [15, 30], 100, 500,
np.array([[1, 2, 3]]*2).T),
np.array([out_60_15_100_500, out_60_30_100_500]).T,
rtol=1e-5)
# vectorize height
assert_allclose(apex_out.map_E_to_height(60, 15, [100, 200], 500,
np.array([[1, 2, 3]]*2).T),
np.array([out_60_15_100_500, out_60_15_200_500]).T,
rtol=1e-5)
# vectorize newheight
assert_allclose(apex_out.map_E_to_height(60, 15, 100, [500, 1000],
np.array([[1, 2, 3]]*2).T),
np.array([out_60_15_100_500, out_60_15_100_1000]).T,
rtol=1e-5)
# vectorize E
assert_allclose(apex_out.map_E_to_height(60, 15, 100, 500,
np.array([[1, 2, 3], [2, 3, 4]]).T),
np.array([out_60_15_100_500, out_60_15_100_500_234]).T,
rtol=1e-5)
###============================================================================
### Test the map_V_to_height() method
###============================================================================
def test_map_V_to_height():
apex_out = Apex(date=2000, refh=300)
out_60_15_100_500 = [0.819719, 2.845114, 0.695437]
out_60_15_100_500_234 = [1.830277, 4.14345, 0.947624]
out_60_15_100_1000 = [0.924577, 3.149964, 0.851343]
out_60_15_200_500 = [0.803882, 2.793206, 0.682839]
out_60_30_100_500 = [0.761412, 2.878837, 0.736549]
out_70_15_100_500 = [0.846819, 2.592572, 0.347919]
# scalar
assert_allclose(apex_out.map_V_to_height(60, 15, 100, 500, [1, 2, 3]),
out_60_15_100_500, rtol=1e-5)
assert_allclose(apex_out.map_V_to_height(60, 15, 100, 500, [2, 3, 4]),
out_60_15_100_500_234, rtol=1e-5)
assert_allclose(apex_out.map_V_to_height(60, 15, 100, 1000, [1, 2, 3]),
out_60_15_100_1000, rtol=1e-5)
assert_allclose(apex_out.map_V_to_height(60, 15, 200, 500, [1, 2, 3]),
out_60_15_200_500, rtol=1e-5)
assert_allclose(apex_out.map_V_to_height(60, 30, 100, 500, [1, 2, 3]),
out_60_30_100_500, rtol=1e-5)
assert_allclose(apex_out.map_V_to_height(70, 15, 100, 500, [1, 2, 3]),
out_70_15_100_500, rtol=1e-5)
# vectorize lat
assert_allclose(apex_out.map_V_to_height([60, 70], 15, 100, 500,
np.array([[1, 2, 3]]*2).T),
np.array([out_60_15_100_500, out_70_15_100_500]).T,
rtol=1e-5)
# vectorize lon
assert_allclose(apex_out.map_V_to_height(60, [15, 30], 100, 500,
np.array([[1, 2, 3]]*2).T),
np.array([out_60_15_100_500, out_60_30_100_500]).T,
rtol=1e-5)
# vectorize height
assert_allclose(apex_out.map_V_to_height(60, 15, [100, 200], 500,
np.array([[1, 2, 3]]*2).T),
np.array([out_60_15_100_500, out_60_15_200_500]).T,
rtol=1e-5)
# vectorize newheight
assert_allclose(apex_out.map_V_to_height(60, 15, 100, [500, 1000],
np.array([[1, 2, 3]]*2).T),
np.array([out_60_15_100_500, out_60_15_100_1000]).T,
rtol=1e-5)
# vectorize E
assert_allclose(apex_out.map_V_to_height(60, 15, 100, 500,
np.array([[1, 2, 3],
[2, 3, 4]]).T),
np.array([out_60_15_100_500, out_60_15_100_500_234]).T,
rtol=1e-5)
###============================================================================
### Test basevectors_qd()
###============================================================================
# test coords
def test_basevectors_qd_scalar_geo():
apex_out = Apex(date=2000, refh=300)
assert_allclose(apex_out.basevectors_qd(60, 15, 100, coords='geo'),
apex_out._basevec(60, 15, 100))
def test_basevectors_qd_scalar_apex():
apex_out = Apex(date=2000, refh=300)
glat, glon, _ = apex_out.apex2geo(60, 15, 100, precision=1e-2)
assert_allclose(apex_out.basevectors_qd(60, 15, 100, coords='apex',
precision=1e-2),
apex_out._basevec(glat, glon, 100))
def test_basevectors_qd_scalar_qd():
apex_out = Apex(date=2000, refh=300)
glat, glon, _ = apex_out.qd2geo(60, 15, 100, precision=1e-2)
assert_allclose(apex_out.basevectors_qd(60, 15, 100, coords='qd',
precision=1e-2),
apex_out._basevec(glat, glon, 100))
# test shapes and vectorization of arguments
def test_basevectors_qd_scalar_shape():
apex_out = Apex(date=2000, refh=300)
ret = apex_out.basevectors_qd(60, 15, 100)
for r in ret:
assert r.shape == (2,)
def test_basevectors_qd_vectorization():
apex_out = Apex(date=2000, refh=300)
ret = apex_out.basevectors_qd([60, 60, 60, 60], 15, 100, coords='geo')
for r in ret:
assert r.shape == (2, 4)
ret = apex_out.basevectors_qd(60, [15, 15, 15, 15], 100, coords='geo')
for r in ret:
assert r.shape == (2, 4)
ret = apex_out.basevectors_qd(60, 15, [100, 100, 100, 100], coords='geo')
for r in ret:
assert r.shape == (2, 4)
# test array return values
def test_basevectors_qd_array():
apex_out = Apex(date=2000, refh=300)
f1, f2 = apex_out.basevectors_qd([0, 30], 15, 100, coords='geo')
f1_lat0, f2_lat0 = apex_out._basevec(0, 15, 100)
f1_lat30, f2_lat30 = apex_out._basevec(30, 15, 100)
assert_allclose(f1[:, 0], f1_lat0)
assert_allclose(f2[:, 0], f2_lat0)
assert_allclose(f1[:, 1], f1_lat30)
assert_allclose(f2[:, 1], f2_lat30)
###============================================================================
### Test basevectors_apex()
###============================================================================
# test against return from _geo2apexall for different coords
def test_basevectors_apex_scalar_geo():
apex_out = Apex(date=2000, refh=300)
(f1, f2, f3, g1, g2, g3, d1, d2, d3, e1, e2,
e3) = apex_out.basevectors_apex(60, 15, 100, coords='geo')
(_, _, _, _, f1_, f2_, _, d1_, d2_, d3_, _, e1_, e2_,
e3_) = apex_out._geo2apexall(60, 15, 100)
assert_allclose(f1, f1_)
assert_allclose(f2, f2_)
assert_allclose(d1, d1_)
assert_allclose(d2, d2_)
assert_allclose(d3, d3_)
assert_allclose(e1, e1_)
assert_allclose(e2, e2_)
assert_allclose(e3, e3_)
def test_basevectors_apex_scalar_apex():
apex_out = Apex(date=2000, refh=300)
(f1, f2, f3, g1, g2, g3, d1, d2, d3, e1, e2,
e3) = apex_out.basevectors_apex(60, 15, 100, coords='apex', precision=1e-2)
glat, glon, _ = apex_out.apex2geo(60, 15, 100, precision=1e-2)
(_, _, _, _, f1_, f2_, _, d1_, d2_, d3_, _, e1_, e2_,
e3_) = apex_out._geo2apexall(glat, glon, 100)
assert_allclose(f1, f1_)
assert_allclose(f2, f2_)
assert_allclose(d1, d1_)
assert_allclose(d2, d2_)
assert_allclose(d3, d3_)
assert_allclose(e1, e1_)
assert_allclose(e2, e2_)
assert_allclose(e3, e3_)
def test_basevectors_apex_scalar_qd():
apex_out = Apex(date=2000, refh=300)
(f1, f2, f3, g1, g2, g3, d1, d2, d3, e1, e2,
e3) = apex_out.basevectors_apex(60, 15, 100, coords='qd', precision=1e-2)
glat, glon, _ = apex_out.qd2geo(60, 15, 100, precision=1e-2)
(_, _, _, _, f1_, f2_, _, d1_, d2_, d3_, _, e1_, e2_,
e3_) = apex_out._geo2apexall(glat, glon, 100)
assert_allclose(f1, f1_)
assert_allclose(f2, f2_)
assert_allclose(d1, d1_)
assert_allclose(d2, d2_)
assert_allclose(d3, d3_)
assert_allclose(e1, e1_)
assert_allclose(e2, e2_)
assert_allclose(e3, e3_)
# test shapes and vectorization of arguments
def test_basevectors_apex_scalar_shape():
apex_out = Apex(date=2000, refh=300)
ret = apex_out.basevectors_apex(60, 15, 100, precision=1e-2)
for r in ret[:2]:
assert r.shape == (2,)
for r in ret[2:]:
assert r.shape == (3,)
def test_basevectors_apex_vectorization():
apex_out = Apex(date=2000, refh=300)
ret = apex_out.basevectors_apex([60, 60, 60, 60], 15, 100)
for r in ret[:2]:
assert r.shape == (2, 4)
for r in ret[2:]:
assert r.shape == (3, 4)
ret = apex_out.basevectors_apex(60, [15, 15, 15, 15], 100)
for r in ret[:2]:
assert r.shape == (2, 4)
for r in ret[2:]:
assert r.shape == (3, 4)
ret = apex_out.basevectors_apex(60, 15, [100, 100, 100, 100])
for r in ret[:2]:
assert r.shape == (2, 4)
for r in ret[2:]:
assert r.shape == (3, 4)
# test correct vectorization of height
def test_basevectors_apex_vectorization_height():
apex_out = Apex(date=2000, refh=0)
(f1, f2, f3, g1, g2, g3, d1, d2, d3, e1, e2,
e3) = apex_out.basevectors_apex(60, 15, [200, 400], coords='geo')
(_, _, _, _, f1_1, f2_1, _, d1_1, d2_1, d3_1, _, e1_1, e2_1,
e3_1) = apex_out._geo2apexall(60, 15, 200)
(_, _, _, _, f1_2, f2_2, _, d1_2, d2_2, d3_2, _, e1_2, e2_2,
e3_2) = apex_out._geo2apexall(60, 15, 400)
assert_allclose(f1[:, 0], f1_1)
assert_allclose(f2[:, 0], f2_1)
assert_allclose(d1[:, 0], d1_1)
assert_allclose(d2[:, 0], d2_1)
assert_allclose(d3[:, 0], d3_1)
assert_allclose(e1[:, 0], e1_1)
assert_allclose(e2[:, 0], e2_1)
assert_allclose(e3[:, 0], e3_1)
assert_allclose(f3[:, 0], np.array([-0.088671, -0.018272, 0.993576]),
rtol=1e-4)
assert_allclose(g1[:, 0], np.array([0.903098, 0.245273, 0.085107]),
rtol=1e-4)
assert_allclose(g2[:, 0], np.array([-0.103495, 1.072078, 0.01048]),
rtol=1e-4)
assert_allclose(g3[:, 0], np.array([0, 0, 1.006465]), rtol=1e-4)
assert_allclose(f1[:, 1], f1_2)
assert_allclose(f2[:, 1], f2_2)
|
assert_allclose(d1[:, 1], d1_2)
|
numpy.testing.assert_allclose
|
import numpy as np
import cv2
import math
import os
import random
from PIL import Image, ImageDraw
import csv
os.system("mkdir data1")
length = [7,15]
width = [1,3]
color = [(255,0,0),(0,0,255)]
itr = 0
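# Generate 28x28 images of a single line, sweeping length (label1), width
# (label2), angle index k (15-degree steps) and colour (label: 0 = red,
# 1 = blue); each combination is drawn 1000 times with a random shift and the
# labels are encoded in the file name.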
for i in length:
if(i==7):
label1 = 0
else:
label1 = 1
for j in width:
if j == 1:
label2 = 0
else:
label2 = 1
for k in range(0,12):
theta = k*15*(math.pi)/180
        for c in color:
            if(c==(255,0,0)):
                label = 0
            else:
                label = 1
for r in range(1000):
                    im1 = np.zeros((28,28,3),dtype=np.uint8)
im = Image.fromarray(im1,'RGB')
line1 = ImageDraw.Draw(im)
y2 = i*(math.sin(theta))
x2 = i*(math.cos(theta))
xshift=random.randint(int((x2/2)-14),int(14-(x2/2)))
yshift = random.randint(int((y2/2)-14),int(14-(y2/2)))
#point1,point2 = (int(14- (x2/2) +xshift),int(14 + (y2/2) + yshift)),(int(14 + (x2/2) + xshift),int(14 - (y2/2) + yshift))
#point1, point2 = (x1, y1), (x2, y2)
line1.line((int(14- (x2/2) +xshift),int(14 + (y2/2) + yshift),int(14 + (x2/2) + xshift),int(14 - (y2/2) + yshift)), fill = c, width=j)
iname = str(itr)+".jpg"
# itr+=1
iname = str(label1)+"_"+str(label2)+"_"+str(k)+"_"+str(label)+str(r+1)+".jpg"
#writer.writerow([str(itr),str(iname)])
im.save("data1/"+iname)
def makeVideo(outputVideoname):
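    # Tile the generated 28x28 images into 3x3 (84x84) mosaic frames and write
    # them to a 2 fps video.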
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
outputVideo = cv2.VideoWriter(outputVideoname,fourcc,2,(84,84))
for i in length:
if(i==7):
label1 = 0
else:
label1 = 1
for j in width:
if j == 1:
label2 = 0
else:
label2 = 1
for k in range(0,12):
for c in color:
if(c==(255,0,0)):
label = 0
else:
label = 1
for r in range(0,10):
frame = Image.new('RGB',(84,84))
                        for idx in range(9):
                            iname = str(label1)+"_"+str(label2)+"_"+str(k)+"_"+str(label)+""+str(r*9+idx+1)+".jpg"
                            # paste row-major into a 3x3 grid of 28x28 tiles
                            x0, y0 = (idx % 3) * 28, (idx // 3) * 28
                            frame.paste(Image.open("data1/"+iname), (x0, y0, x0 + 28, y0 + 28))
frame = cv2.cvtColor(
|
np.asarray(frame,dtype=np.uint8)
|
numpy.asarray
|
import numpy as np
from pathlib import Path
from sklearn.model_selection import train_test_split
import pickle
from sklearn.preprocessing import MinMaxScaler, StandardScaler
class ExercisePerformanceDataCreator:
def __init__(self, config):
self._config = config
def get_feature_datasets(self):
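        # Split the recording names into train/validation/test sets, then load
        # the per-sample features and labels for each recording.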
data_dir = self._config.dataset.path
is_kinect = True
        # keep the scores path separate from the per-recording labels loaded below
        scores_path = self._config.dataset.scores_path
train_names, test_names = self._get_split_datasets(data_dir)
# Get validation set names
validation_split = 0.1
slice_indx = int(len(train_names) - len(train_names) * validation_split)
validation_names = train_names[slice_indx:]
train_names = train_names[:slice_indx]
# Get data
train_features = []
train_labels = []
test_features = []
test_labels = []
val_features = []
val_labels = []
for name in train_names:
            data, labels = self._get_data_instance(name, scores_path, data_dir, True, is_kinect)
for i in range(data.shape[0]):
train_features.append(data[i])
for i in range(labels.shape[0]):
train_labels.append(labels[i])
train_features = np.array(train_features)
train_labels = np.array(train_labels)
for name in test_names:
            data, labels = self._get_data_instance(name, scores_path, data_dir, False, is_kinect)
for i in range(data.shape[0]):
test_features.append(data[i])
test_labels.append(labels)
test_features = np.array(test_features)
test_labels = np.array(test_labels)
for name in validation_names:
            data, labels = self._get_data_instance(name, scores_path, data_dir, True, is_kinect)
for i in range(data.shape[0]):
val_features.append(data[i])
for i in range(labels.shape[0]):
val_labels.append(labels[i])
val_features = np.array(val_features)
val_labels =
|
np.array(val_labels)
|
numpy.array
|
import numpy as np
from autode.log import logger
from scipy.spatial import distance_matrix
from autode.geom import get_rot_mat_kabsch, get_rot_mat_euler_from_terms
class AtomType:
@property
def n_empty_sites(self):
"""Number of empty sites on this template"""
return len(self._site_coords)
def empty_site(self):
"""Iterator for the coordinate of the next free site"""
return self._site_coords.pop(0)
def empty_site_mr(self, point, other_coords):
"""Return the site on this atom that is furthest from all other
coordinates using a simple 1/r potential where r is the distance from
the site to the other coordinates
Arguments:
point (np.ndarray): Coordinate of this atom, shape = (3,)
other_coords (np.ndarray): Other coordinates, shape = (N, 3)
Returns:
(np.ndarray): Coordinate of the site centered at the origin
"""
dists = np.array([np.linalg.norm(other_coords - (site + point), axis=1)
for site in self._site_coords])
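        # dists has shape (n_sites, N): the distance from each candidate site
        # (translated to this atom's position) to every other coordinate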
repulsion = np.sum(np.power(dists, -1), axis=1)
return self._site_coords.pop(np.argmin(repulsion))
def reset_onto(self, points, coord):
"""
Reset the site coordinates given a set of points. Ignore any points
located exactly at the origin and, once fitted, remove the sites
that are coincident with the points
Arguments:
points (iterable(np.ndarray)): List (or iterable) of points that
that the sites need to be reset onto
coord (np.ndarray): Coordinate of this atom
"""
origin = np.zeros(3)
points = np.array([(point - coord) / np.linalg.norm(point - coord)
for point in points
if not np.allclose(point, origin)])
# Take a copy of the template coordinates to rotate and delete
site_coords = np.copy(self.template_site_coords)
if len(site_coords) == len(points) or len(points) == 0:
logger.info('No reset needed - sites were all occupied')
return
logger.info(f'Rotating {len(site_coords)} sites onto'
f' {len(points)} points')
# Rotate all the sites such that n sites are optimally orientated onto
# the (fixed) points
rot_mat = get_rot_mat_kabsch(p_matrix=site_coords[:len(points)],
q_matrix=points)
site_coords = np.dot(rot_mat, site_coords.T).T
# For each point (row) calculate the minimum distance to a site on
# this atom
min_dists = np.min(distance_matrix(site_coords, points), axis=1)
# Re-populate the empty sites, which are the sites that are not the
# closest to the points
self._site_coords = [coord for i, coord in enumerate(site_coords)
if i not in np.argsort(min_dists)[:len(points)]]
return None
def rotate_empty_onto(self, point, coord):
"""Rotate the site coordinates such that an empty site is coincident
with the vector from a coordinate to a point, and remove the site
from the list of available sites"""
return self.rotate_onto(point, coord, site=self.empty_site())
def rotate_randomly(self):
"""Rotate the sites randomly to prevent zero cross products"""
point = np.copy(self._site_coords[0])
point += np.random.uniform(0.01, 0.02, size=3)
self.rotate_onto(point=point, coord=np.zeros(3),
site=self._site_coords[0])
return
def rotate_onto(self, point, coord, site):
"""
Rotate this atom type so a site is coincident with a point if this
atom is at a coord i.e.::
site
/
/ -->
point--------coord point--site--coord
-----------------------------------------------------------------------
Arguments:
point (np.ndarray): shape = (3,)
coord (np.ndarray): shape = (3,)
            site (np.ndarray): shape = (3,)
"""
vector = point - coord
normal = np.cross(site, vector)
normal /= np.linalg.norm(normal)
        # Sites are unit vectors, so there is no need to divide by |site|
arg = np.dot(site, vector) / np.linalg.norm(vector)
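        # With |site| = 1, arg is cos(theta), where theta is the angle between
        # the site vector and the coord -> point vector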
# cos(-θ/2) = √(arg + 1) / √2
a = np.sqrt(1.0 + arg) / np.sqrt(2)
# sin(-θ/2) = √(1-arg) / √2
b, c, d = -normal * (np.sqrt(1.0 - arg) / np.sqrt(2))
# 3D rotation matrix from the Euler–Rodrigues formula
rot_matrix = get_rot_mat_euler_from_terms(a=a, b=b, c=c, d=d)
# Rotate all the sites (no need to translate as they're already
# positioned around the origin)
self._site_coords = [
|
np.matmul(rot_matrix, site)
|
numpy.matmul
|
import numpy as np
import cv2
import transformations as trans
from record import VideoRecorder, PictureRecorder
def main():
"""Texture mapping application.
Mouse controls:
- Use the left mouse button and drag to move mesh vertices.
- Click an empty area to create a new vertex/point.
- Right-click a vertex to remove it.
Keyboard controls:
- r: Reset the deformation.
- t: Use the deformed mesh as the new base mesh.
- o: Toggle drawing the mesh itself.
- p: Save screenshot in the outputs folder.
- s: Start/stop recording. Result is saved in the outputs folder.
- q: Quit the application.
"""
image = cv2.imread('./inputs/po.jpg')
gui = MeshGUI(2, 2, image)
gui.loop()
def dist(a, b):
return np.sqrt((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2)
def triangle_properties(triangle):
pos = np.min(triangle, axis=0)
upper_right = np.max(triangle, axis=0)
size = upper_right - pos
return np.int32(pos), np.int32(size)
class TriangleMesh:
"""2D triangle mesh.
"""
def __init__(self, width, height, cols, rows, offset=30):
"""Create a new grid-based triangle mesh.
:param width: Mesh width.
:param height: Mesh height.
:param cols: Number of columns for the mesh grid.
:param rows: Number of rows for the mesh grid.
        :param offset: Offset of the mesh from the image edges.
        """
"""
self.width = width
self.height = height
self.base_grid = self._create_grid(cols, rows, offset)
self.grid = self.base_grid.copy()
self.triangles = None
self.update_indexes = None
self.update()
self.observers = []
def add_observer(self, obj):
"""Observer will be notified on mesh updates.
:param obj: Observable object (has an _update(indices) method).
"""
self.observers.append(obj)
def _dirty(self, indices=None):
# Notify all observers
for ob in self.observers:
ob.update(indices)
def reset(self):
"""Reset mesh deformation.
"""
self.grid = self.base_grid.copy()
self.update()
self._dirty()
def switch_base(self):
"""Use deformed mesh as the new base mesh.
"""
self.base_grid = self.grid.copy()
self.update()
self._dirty()
def triangles_rounded(self):
return self.triangles.astype(np.int32)
def add_point(self, x, y):
"""Add the specified point to mesh and _update triangle definitions. Notify observers.
:param x:
:param y:
"""
self.base_grid.append((x, y))
self.grid.append((x, y))
self.update()
self._dirty()
def delete_point(self, i):
"""Remove point at index i. Notify observers.
:param i:
"""
del self.grid[i]
del self.base_grid[i]
self.update()
self._dirty()
def get_point_idx(self, point):
"""Get index of point in mesh.
:param point:
:return: Index.
"""
return self.grid.index(tuple(point))
def update(self):
"""Update triangles and point indices.
"""
self._update_triangles()
self._update_indices()
def _update_triangles(self):
subdiv = cv2.Subdiv2D((0, 0, self.width, self.height))
subdiv.insert(self.grid)
tri = subdiv.getTriangleList()
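        # getTriangleList() returns a flat (N, 6) array: (x1, y1, x2, y2, x3, y3) per triangle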
# 3 points per triangle, 2 coordinates per point
self.triangles =
|
np.reshape(tri, (tri.shape[0], 3, 2))
|
numpy.reshape
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 27 16:46:45 2020
@author: pfm
"""
from scipy.spatial.distance import cdist
import numpy as np
# Filename: kdtw_cdist.py
# Python source code for the "Kernelized" Dynamic Time Warping similarity (as defined in the reference below).
# Author: <NAME>
# Version: V1.0 du 13/09/2014,
# Licence: GPL
# ******************************************************************
# This software and description are delivered free, "AS IS", with no
# guarantee that they work at all. You are free to test and modify them as
# you like, but no help should be expected from me due to a lack of time
# at the moment. I will answer short, relevant questions and help as
# my time allows. I have tested it, played with it, and found no
# stability problems or malfunctions so far.
# Have fun.
# *****************************************************************
# Please cite as:
# @article{marteau:hal-00486916,
# AUTHOR = {Marteau, Pierre-Francois and <NAME>},
# TITLE = {{On Recursive Edit Distance Kernels with Application to Time Series Classification}},
# JOURNAL = {{IEEE Transactions on Neural Networks and Learning Systems}},
# PAGES = {1-14},
# YEAR = {2014},
# MONTH = Jun,
# KEYWORDS = {Elastic distance, Time warp kernel, Time warp inner product, Definiteness, Time series classification, SVM},
# DOI = {10.1109/TNNLS.2014.2333876},
# URL = {http://hal.inria.fr/hal-00486916}
# }
#
'''
# input A: first multivariate time series: array of shape (n x d), where n is the number of samples and d is the dimension of each sample
# input B: second multivariate time series: array of shape (n x d), where n is the number of samples and d is the dimension of each sample
# input local_kernel: matrix of local kernel evaluations
'''
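# Minimal usage sketch (illustrative only; the Gaussian local kernel and the
# bandwidth sigma are assumptions, not part of this module):
#
#     A = np.random.rand(50, 3)
#     B = np.random.rand(60, 3)
#     sigma = 1.0
#     local_kernel = np.exp(-cdist(A, B, 'sqeuclidean') / (2 * sigma ** 2))
#     similarity = kdtw_lk(A, B, local_kernel)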
def kdtw_lk(A, B, local_kernel):
d=np.shape(A)[1]
Z=[np.zeros(d)]
A = np.concatenate((Z,A), axis=0)
B = np.concatenate((Z,B), axis=0)
[la,d] =
|
np.shape(A)
|
numpy.shape
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'appMainWindow.ui'
#
# Created by: PyQt5 UI code generator 5.13.2
#
# WARNING! All changes made in this file will be lost!
import sys
import time
from datetime import datetime
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import pyqtSlot
from UI_appMainWindow import Ui_MainWindow
import ShapeRecognitionV2 as sr
import cv2
import numpy as np
import os.path
from imageio import imwrite
import matplotlib.pyplot as plt
from scipy.ndimage import rotate
import scipy.ndimage
import scipy.interpolate as interp
from PIL import Image as PILImage
from math import sqrt
import ntpath
from scipy.ndimage import binary_opening
import skimage.exposure as exposure
#import libtiff
import worker
from PyQt5.QtCore import QThread
import ctypes as ct
from ALP4 import *
class appMainWindow(QtWidgets.QMainWindow):
def __init__(self):
#libtiff.libtiff_ctypes.suppress_warnings()
#libtiff.TIFFSetWarningHandler(_null_warning_handler)
super(appMainWindow, self).__init__()
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.sceneCamera = QtWidgets.QGraphicsScene()
self.sceneMask = QtWidgets.QGraphicsScene()
self.show()
self.fileNameCameraImage = ""
self.ui.btn_CamImageImport.clicked.connect(self.onClick_CamImageImport)
self.ui.btn_Calibrate.clicked.connect(self.onClick_Calibrate)
self.ui.btn_SaveCalibration.clicked.connect(self.onClick_SaveCalibration)
self.ui.cbox_LockCalibration.clicked.connect(self.onClick_LockCalibration)
self.ui.btn_DMDMaskGen.clicked.connect(self.onClick_DMDMaskGen)
self.ui.btn_DMDMaskSave.clicked.connect(self.onClick_DMDMaskSave)
self.ui.btn_getThresholdValues.clicked.connect(self.onClick_GetThresholdValues)
self.ui.btn_MaskToAdd.clicked.connect(self.onClick_MaskToAddImport)
self.ui.btn_SendCalParameters.clicked.connect(self.onClick_SendCalParameters)
self.ui.slider_thresholdValue.valueChanged.connect(self.valueChange_ThresholdValue)
self.showImageInView("./TestImages/Vialux_DMD.png", self.ui.view_CameraImage)
self.showImageInView("./TestImages/UoL_logo.jpeg", self.ui.view_DMDMaskImage)
self.CalibrationValuesFile = './Calibration/CalibrationValues.txt'
self.CalibrationImageFile = './Calibration/CalibrationImageFile.txt'
self.maskCountCalibration = 1
self.maskCountThreshold = 1
self.maskCountSlit = 1
self.maskCountPinhole = 1
if os.path.isfile(self.CalibrationValuesFile) and os.path.isfile(self.CalibrationImageFile):
self.calibrationValuesStorage = np.loadtxt(self.CalibrationValuesFile, dtype=float)
self.ui.txt_DMDSizeX.setPlainText(str(self.calibrationValuesStorage[0]))
self.ui.txt_DMDSizeY.setPlainText(str(self.calibrationValuesStorage[1]))
if self.calibrationValuesStorage[2] == 1:
self.ui.radioButton_CamDMD2ImageMask.setChecked(True)
else:
self.ui.radioButton_CamDMD1ImageMask.setChecked(True)
self.ui.txt_CalPositionX.setPlainText(str(self.calibrationValuesStorage[3]))
self.ui.txt_CalPositionY.setPlainText(str(self.calibrationValuesStorage[4]))
self.ui.txt_CalRotation.setPlainText(str(self.calibrationValuesStorage[5]))
self.ui.txt_CalWidth.setPlainText(str(self.calibrationValuesStorage[6]))
self.ui.txt_CalHeight.setPlainText(str(self.calibrationValuesStorage[7]))
self.ui.txt_CalibrationThreshold.setPlainText(str(self.calibrationValuesStorage[8]))
imageFile = open(self.CalibrationImageFile,'r')
self.fileNameCameraImage = imageFile.read()
self.calibrationImageStorage = self.fileNameCameraImage
imageFile.close()
#if self.calibrationImageStorage
try:
self.onClick_Calibrate()
except:
error_dialog = QtWidgets.QErrorMessage()
error_dialog.showMessage('Calibration Image Not Found.')
error_dialog.exec_()
self.MaskGeneratedFlag = False
self.DMDConnectFlag = False
self.DMDConnectionLostFlag = False
self.DMDDisplayFlag = False
self.DMDReconnectCount = 0
self.DMDReconnectAttemptLimit = 10
self.ui.btn_ConnectDMD.clicked.connect(self.onClick_ConnectDMD)
self.ui.btn_DisconnectDMD.clicked.connect(self.onClick_DisconnectDMD)
self.ui.btn_HaltDMD.clicked.connect(self.onClick_HaltDMD)
self.ui.btn_CamDMD1DMD.clicked.connect(self.onClick_CamDMD1DMD)
self.ui.btn_CamDMD2DMD.clicked.connect(self.onClick_CamDMD2DMD)
self.ui.btn_DisplayCurrentDMD.clicked.connect(self.onClick_DMDDisplayCurrentMask)
self.ui.btn_DisplayFileDMD.clicked.connect(self.onClick_DMDDisplayFileMask)
###### Threading
# Create worker and thread inside form - no parents!
self.workerObj = worker.Worker()
self.thread = QThread()
# Connect Workers Signals to Form method slots to post data.
self.workerObj.statusCheckDMDTimer.connect(self.statusCheckDMD)
# Move the Worker object to the Thread object
self.workerObj.moveToThread(self.thread)
# Connect Worker Signals to the Thread slots
self.workerObj.finished.connect(self.thread.quit)
# Connect Thread started signal to Worker operational slot method
self.thread.started.connect(self.workerObj.procCounter)
# * - Thread finished signal will close the app if you want!
#self.thread.finished.connect(app.exit)
# Start the thread
self.thread.start()
#############
self.onClick_ConnectDMD()
if self.DMDConnectFlag:
self.onClick_CamDMD1DMD()
return
def statusCheckDMD(self):
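        # Periodic watchdog driven by the worker thread: polls the DMD USB
        # connection and, if it is lost, attempts a limited number of
        # reconnects before reporting the failure.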
now = datetime.now()
currentTime = now.strftime("%H:%M:%S")
if self.DMDConnectionLostFlag:
print('Trying to reconnect at: ', currentTime)
print('Connection attempt: ', self.DMDReconnectCount)
if self.DMDConnectFlag and not(self.DMDConnectionLostFlag):
try:
testVal = int(self.DMD.DevInquire(inquireType = ALP_USB_CONNECTION).value)
if testVal != 0:
self.ui.view_DMDConnectionStatus.setPixmap(QtGui.QPixmap("./TestImages/orange.png"))
self.DMDConnectionLostFlag = True
self.DMDReconnectCount = 0
print('Disconnected at: ', currentTime)
else:
print('Connected at: ', currentTime)
except:
self.ui.view_DMDConnectionStatus.setPixmap(QtGui.QPixmap("./TestImages/orange.png"))
self.DMDConnectionLostFlag = True
self.DMDReconnectCount = 0
print('Disconnected at: ', currentTime)
if self.DMDConnectFlag and self.DMDConnectionLostFlag and self.DMDReconnectCount < self.DMDReconnectAttemptLimit:
try:
# self.DMD.Halt()
# if self.DMDDisplayFlag == True:
# self.DMD.FreeSeq()
self.DMDConnectFlag = False
self.DMD.Free()
except:
self.DMDReconnectCount = self.DMDReconnectCount + 1
if not(self.DMDConnectFlag) and self.DMDConnectionLostFlag and self.DMDReconnectCount < self.DMDReconnectAttemptLimit:
try:
self.DMD = ALP4(version = '4.3', libDir = 'C:/Program Files/ALP-4.3/ALP-4.3 API')
self.DMD.Initialize()
self.DMDConnectFlag = True
self.DMDConnectionLostFlag = False
self.DMDReconnectCount = 0
print('Reconnected at: ', currentTime)
self.ui.view_DMDConnectionStatus.setPixmap(QtGui.QPixmap("./TestImages/green.png"))
return
except:
self.DMDReconnectCount = self.DMDReconnectCount + 1
if self.DMDReconnectCount >= self.DMDReconnectAttemptLimit:
self.DMDConnectFlag = False
self.DMDDisplayFlag = False
self.DMDConnectionLostFlag = False
self.DMDReconnectCount = 0
print('Reconnect failed at: ', currentTime)
self.ui.view_DMDConnectionStatus.setPixmap(QtGui.QPixmap("./TestImages/red.png"))
error_dialog = QtWidgets.QErrorMessage()
error_dialog.showMessage('DMD connection lost!')
error_dialog.exec_()
return
def showImageInView(self, image, view):
if isinstance(image, str):# or type(image) is np.ndarray:
imagePixMap = QtGui.QPixmap(image)#.scaled(300,300, aspectRatioMode=QtCore.Qt.KeepAspectRatio, transformMode=QtCore.Qt.SmoothTransformation)
else:
            height, width, channel = image.shape
            bytesPerLine = 3 * width
            imagePixMap = QtGui.QImage(image, width, height, bytesPerLine, QtGui.QImage.Format_RGB888)#.scaled(300,300, aspectRatioMode=QtCore.Qt.KeepAspectRatio, transformMode=QtCore.Qt.SmoothTransformation)
view.setPixmap(QtGui.QPixmap(imagePixMap))
view.repaint()
return
def maskGenerationCalibration(self, CamDMD2Bool):
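        # Build a calibration mask: a rectangle of the requested size with a
        # filled circle at its centre, rotated, padded out to the full DMD
        # resolution and shifted by the requested offset. CamDMD2Bool selects
        # the inverted polarity.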
xSize = int(float(self.ui.txt_DMDSizeX.toPlainText()))
ySize = int(float(self.ui.txt_DMDSizeY.toPlainText()))
maskCentreX = int(float(self.ui.txt_DMDCalibrationMaskPositionX.toPlainText()))
maskCentreY = int(float(self.ui.txt_DMDCalibrationMaskPositionY.toPlainText()))
maskHeight = int(float(self.ui.txt_DMDCalibrationMaskHeight.toPlainText()))
maskWidth = int(float(self.ui.txt_DMDCalibrationMaskWidth.toPlainText()))
maskRotation = float(self.ui.txt_DMDCalibrationMaskRotation.toPlainText())
centreCircleRadius = float(self.ui.txt_CentreCircleSize.toPlainText())
if CamDMD2Bool:
localMask = np.zeros((maskHeight, maskWidth), dtype=np.uint8)
else:
localMask = np.ones((maskHeight, maskWidth), dtype=np.uint8) * 255
for x in range(maskWidth):
for y in range(maskHeight):
if sqrt(((x - (maskWidth/2)) ** 2) + ((y - (maskHeight/2)) ** 2)) < centreCircleRadius:
if CamDMD2Bool:
localMask[y, x] = 255
else:
localMask[y, x] = 0
shiftX = -maskCentreX
shiftY = -maskCentreY
if CamDMD2Bool:
localMask = rotate(localMask, angle = maskRotation, mode='constant', cval=255)
rotHeight, rotWidth = localMask.shape
padY = round((ySize - rotHeight) / 2)
padX = round((xSize - rotWidth) / 2)
localMask = np.pad(localMask, ((padY, padY), (padX, padX)), 'constant', constant_values = (255, 255))
if localMask.shape[0] < 1080 or localMask.shape[1] < 1920:
localMask = np.pad(localMask, ((0, 1), (0, 1)), 'constant', constant_values = (255, 255))
localMask = localMask[0:int(float(self.ui.txt_DMDSizeY.toPlainText())),0:int(float(self.ui.txt_DMDSizeX.toPlainText()))]
localMask = scipy.ndimage.shift(localMask, np.array([shiftY, shiftX]), cval=255)
else:
localMask = rotate(localMask, angle = maskRotation, mode='constant', cval=0)
rotHeight, rotWidth = localMask.shape
padY = int((ySize - rotHeight) / 2)
padX = int((xSize - rotWidth) / 2)
localMask = np.pad(localMask, ((padY, padY), (padX, padX)), 'constant', constant_values = (0, 0))
if localMask.shape[0] < 1080 or localMask.shape[1] < 1920:
                localMask = np.pad(localMask, ((0, 1), (0, 1)), 'constant', constant_values = (0, 0))
localMask = localMask[0:int(float(self.ui.txt_DMDSizeY.toPlainText())),0:int(float(self.ui.txt_DMDSizeX.toPlainText()))]
localMask = scipy.ndimage.shift(localMask, np.array([shiftY, shiftX]), cval=0)
self.MaskGeneratedFlag = True
return localMask
def maskGenerationThreshold(self, CamDMD2Bool):
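        # Warp the quadrilateral selected in the camera image onto the DMD
        # frame with a perspective transform, threshold it to a binary mask,
        # then optionally flip it, add it to an existing mask and clean it up
        # with a binary opening.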
if not(self.ui.cbox_LockCalibration.isChecked()):
error_dialog = QtWidgets.QErrorMessage()
error_dialog.showMessage('Lock above calibration before generating threshold masks.')
error_dialog.exec_()
return
originalImage = plt.imread(self.fileNameCameraImage)
pts = self.cntrPoints.reshape(4,2)
rect = np.zeros((4,2), dtype='float32')
s = pts.sum(axis=1)
rect[0] = pts[np.argmin(s)] # tl
rect[2] = pts[np.argmax(s)] # br
diff = np.diff(pts, axis=1)
rect[3] = pts[np.argmin(diff)] # tr
rect[1] = pts[np.argmax(diff)] # bl
(tl, tr, br, bl) = rect
# width of new image
widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
# height of new image
heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
        # The max of A and B gives the final dimensions
maxWidth = max(int(widthA), int(widthB))
maxHeight = max(int(heightA), int(heightB))
# Destination points which image will be mapped to
dst = np.array([
[0, 0],
[0, maxWidth - 1],
[maxHeight - 1, maxWidth - 1],
[maxHeight - 1, 0]], dtype = "float32")
# Calculate perspective transform matrix
M = cv2.getPerspectiveTransform(rect, dst)
# Warp perspective based on calculated transform
warpImage = cv2.warpPerspective(originalImage, M, (maxHeight, maxWidth))
# Scale warped section to actual size on DMD
warpImageScaled = cv2.resize(warpImage, dsize=(self.calibration.width, self.calibration.height), interpolation=cv2.INTER_LANCZOS4)
# Pad settings based on calibration mask parameters
DMDHalfHeight = self.calibration.DMDSizeY / 2
DMDHalfWidth = self.calibration.DMDSizeX / 2
offsetX = self.calibration.positionX
offsetY = self.calibration.positionY
# Rotate and pad scaled section of DMD mask to full DMD mask
if not(CamDMD2Bool):
rotatedImage = rotate(warpImageScaled, angle=self.calibration.rotation, cval=0.0)
maskHalfWidth = rotatedImage.shape[1] / 2
maskHalfHeight = rotatedImage.shape[0] / 2
padYTop = int(DMDHalfHeight - maskHalfHeight + offsetY)
padYBottom = int(DMDHalfHeight - maskHalfHeight - offsetY)
padXLeft = int(DMDHalfWidth - maskHalfWidth + offsetX)
padXRight = int(DMDHalfWidth - maskHalfWidth - offsetX)
print(maskHalfWidth)
print(maskHalfHeight)
print(padYTop)
print(padYBottom)
print(padXLeft)
print(padXRight)
localMask = np.pad(rotatedImage, ((padYTop, padYBottom), (padXLeft, padXRight)), 'constant', constant_values=0.0)
localMask = cv2.threshold(localMask, int(float(self.ui.txt_currentThreshold.toPlainText())), 255, cv2.THRESH_BINARY)
else:
rotatedImage = rotate(warpImageScaled, angle=self.calibration.rotation, cval=255.0)
maskHalfWidth = rotatedImage.shape[1] / 2
maskHalfHeight = rotatedImage.shape[0] / 2
padYTop = int(DMDHalfHeight - maskHalfHeight + offsetY)
padYBottom = int(DMDHalfHeight - maskHalfHeight - offsetY)
padXLeft = int(DMDHalfWidth - maskHalfWidth + offsetX)
padXRight = int(DMDHalfWidth - maskHalfWidth - offsetX)
print(maskHalfWidth)
print(maskHalfHeight)
print(padYTop)
print(padYBottom)
print(padXLeft)
print(padXRight)
localMask = np.pad(rotatedImage, ((padYTop, padYBottom), (padXLeft, padXRight)), 'constant', constant_values=255.0)
localMask = cv2.threshold(localMask, int(float(self.ui.txt_currentThreshold.toPlainText())), 255, cv2.THRESH_BINARY_INV)
localMask = localMask[1]
# Flip mask as required
if self.ui.cBox_FlipLR.isChecked():
localMask = np.fliplr(localMask)
if self.ui.cBox_FlipUD.isChecked():
localMask = np.flipud(localMask)
# Add to existing mask as required
if not(self.ui.txt_MaskToAdd.toPlainText() == ''):
try:
localMaskToAdd = plt.imread(self.ui.txt_MaskToAdd.toPlainText())
localMaskToAdd = localMaskToAdd.astype(dtype = np.uint8)
if localMask.shape == localMaskToAdd.shape:
localMaskAdded = localMask + localMaskToAdd
if not(CamDMD2Bool):
localMaskTuple = cv2.threshold(localMaskAdded, 200, 255, cv2.THRESH_BINARY)
else:
localMaskTuple = cv2.threshold(localMaskAdded, 500, 255, cv2.THRESH_BINARY)
localMask = localMaskTuple[1]
else:
error_dialog = QtWidgets.QErrorMessage()
error_dialog.showMessage('Mask to add is different size to new Threshold Mask.')
error_dialog.exec_()
return
except:
error_dialog = QtWidgets.QErrorMessage()
error_dialog.showMessage('File to add to new Threshold Mask not found.')
error_dialog.exec_()
return
thresholdFilterSize = int(float(self.ui.txt_ThresholdFilterSize.toPlainText()))
if thresholdFilterSize > 0:
localMask = binary_opening(localMask, structure=np.ones((thresholdFilterSize, thresholdFilterSize))).astype(np.uint8) * 255
localMask = cv2.bitwise_not(binary_opening(cv2.bitwise_not(localMask), structure=np.ones((thresholdFilterSize, thresholdFilterSize))).astype(np.uint8) * 255)
self.MaskGeneratedFlag = True
return localMask
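    # Illustrative sketch (standalone, hypothetical values) of the rectification step used in
    # maskGenerationThreshold above: the four detected corners are ordered by coordinate
    # sum/difference and then warped onto an axis-aligned rectangle:
    #   pts = np.array([[120, 80], [620, 90], [630, 400], [110, 390]], dtype='float32')
    #   s, d = pts.sum(axis=1), np.diff(pts, axis=1)
    #   rect = np.array([pts[np.argmin(s)], pts[np.argmax(d)], pts[np.argmax(s)], pts[np.argmin(d)]], dtype='float32')
    #   M = cv2.getPerspectiveTransform(rect, dst)      # dst spans the output rectangle
    #   warped = cv2.warpPerspective(image, M, dsize)   # image/dst/dsize are placeholders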
def maskGenerationSlit(self, CamDMD2Bool):
xSize = int(float(self.ui.txt_DMDSizeX.toPlainText()))
ySize = int(float(self.ui.txt_DMDSizeY.toPlainText()))
localMask = np.zeros((ySize, xSize), dtype=np.uint8)
self.MaskGeneratedFlag = True
error_dialog = QtWidgets.QErrorMessage()
error_dialog.showMessage('Feature under development. Blank mask returned.')
error_dialog.exec_()
return localMask
def maskGenerationPinhole(self, CamDMD2Bool):
xSize = int(float(self.ui.txt_DMDSizeX.toPlainText()))
ySize = int(float(self.ui.txt_DMDSizeY.toPlainText()))
# pinholeNum = int(float(self.ui.spinBox_NumberOfPinholes.Value()))
# pinholeRadius = int(float(self.ui.txt_PinholeRadius.toPlainText()))
# pinholePitch = int(float(self.ui.txt_PinholePitch.toPlainText()))
# pinholeRotation = int(float(self.ui.txt_PinholeRotation.toPlainText()))
# pinholeX = int(float(self.ui.txt_PinholeX.toPlainText()))
# pinholeY = int(float(self.ui.txt_PinholeY.toPlainText()))
# if CamDMD2Bool:
# localMask = np.zeros((ySize, xSize), dtype=np.uint8)
# else:
# localMask = np.ones((ySize, xSize), dtype=np.uint8) * 255
# for x in range(xSize):
# for y in range(ySize):
# if sqrt(((x - (maskWidth/2)) ** 2) + ((y - (maskHeight/2)) ** 2)) < centreCircleRadius:
# if CamDMD2Bool:
# localMask[y, x] = 255
# else:
# localMask[y, x] = 0
# shiftX = -maskCentreX
# shiftY = -maskCentreY
# if CamDMD2Bool:
# localMask = rotate(localMask, angle = maskRotation, mode='constant', cval=255)
# rotHeight, rotWidth = localMask.shape
# padY = round((ySize - rotHeight) / 2)
# padX = round((xSize - rotWidth) / 2)
# localMask = np.pad(localMask, ((padY, padY), (padX, padX)), 'constant', constant_values = (255, 255))
# if localMask.shape[0] < 1080 or localMask.shape[1] < 1920:
# localMask = np.pad(localMask, ((0, 1), (0, 1)), 'constant', constant_values = (255, 255))
# localMask = localMask[0:int(float(self.ui.txt_DMDSizeY.toPlainText())),0:int(float(self.ui.txt_DMDSizeX.toPlainText()))]
# localMask = scipy.ndimage.shift(localMask, np.array([shiftY, shiftX]), cval=255)
# else:
# localMask = rotate(localMask, angle = maskRotation, mode='constant', cval=0)
# rotHeight, rotWidth = localMask.shape
# padY = int((ySize - rotHeight) / 2)
# padX = int((xSize - rotWidth) / 2)
# localMask = np.pad(localMask, ((padY, padY), (padX, padX)), 'constant', constant_values = (0, 0))
# if localMask.shape[0] < 1080 or localMask.shape[1] < 1920:
# localMask = np.pad(localMask, ((0, 1), (0, 1)), 'constant', constant_values = (255, 255))
# localMask = localMask[0:int(float(self.ui.txt_DMDSizeY.toPlainText())),0:int(float(self.ui.txt_DMDSizeX.toPlainText()))]
# localMask = scipy.ndimage.shift(localMask, np.array([shiftY, shiftX]), cval=0)
localMask = np.zeros((ySize, xSize), dtype=np.uint8)
self.MaskGeneratedFlag = True
error_dialog = QtWidgets.QErrorMessage()
error_dialog.showMessage('Feature under development. Blank mask returned.')
error_dialog.exec_()
return localMask
@pyqtSlot()
def valueChange_ThresholdValue(self):
self.ui.txt_currentThreshold.setPlainText(str(self.ui.slider_thresholdValue.value()))
self.ui.txt_currentThreshold.repaint()
return
@pyqtSlot()
def onClick_GetThresholdValues(self):
localArray = plt.imread(self.fileNameCameraImage)
minValue = np.amin(localArray)
maxValue = np.amax(localArray)
self.ui.label_highValueThreshold.setText(str(maxValue))
self.ui.label_lowValueThreshold.setText(str(minValue))
self.ui.slider_thresholdValue.setMinimum(minValue)
self.ui.slider_thresholdValue.setMaximum(maxValue)
self.ui.slider_thresholdValue.setValue(maxValue/2)
self.ui.slider_thresholdValue.setSliderPosition(maxValue/2)
self.ui.txt_currentThreshold.setPlainText(str(maxValue/2))
self.repaint()
return
@pyqtSlot()
def onClick_SendCalParameters(self):
if self.ui.cbox_LockCalibration.isChecked():
error_dialog = QtWidgets.QErrorMessage()
error_dialog.showMessage('Unlock above calibration before transferring values.')
error_dialog.exec_()
return
self.ui.txt_CalPositionX.setPlainText(self.ui.txt_DMDCalibrationMaskPositionX.toPlainText())
self.ui.txt_CalPositionY.setPlainText(self.ui.txt_DMDCalibrationMaskPositionY.toPlainText())
self.ui.txt_CalRotation.setPlainText(self.ui.txt_DMDCalibrationMaskRotation.toPlainText())
self.ui.txt_CalWidth.setPlainText(self.ui.txt_DMDCalibrationMaskWidth.toPlainText())
self.ui.txt_CalHeight.setPlainText(self.ui.txt_DMDCalibrationMaskHeight.toPlainText())
self.repaint()
return
@pyqtSlot()
def onClick_CamImageImport(self):
options = QtWidgets.QFileDialog.Options()
options |= QtWidgets.QFileDialog.DontUseNativeDialog
self.fileNameCameraImage, _ = QtWidgets.QFileDialog.getOpenFileName(self,"QFileDialog.getOpenFileName()", "","All Files (*);;tiff (*.tiff);;tif (*.tif);;png (*.png)", options=options)
if self.fileNameCameraImage:
self.showImageInView(self.fileNameCameraImage, self.ui.view_CameraImage)
return
@pyqtSlot()
def onClick_MaskToAddImport(self):
options = QtWidgets.QFileDialog.Options()
options |= QtWidgets.QFileDialog.DontUseNativeDialog
self.fileNameMaskToAdd, _ = QtWidgets.QFileDialog.getOpenFileName(self,"QFileDialog.getOpenFileName()", "","All Files (*);;tiff (*.tiff);;tif (*.tif);;png (*.png)", options=options)
if self.fileNameMaskToAdd:
self.showImageInView(self.fileNameMaskToAdd, self.ui.view_DMDMaskImage)
self.ui.txt_MaskToAdd.setPlainText(self.fileNameMaskToAdd)
self.ui.txt_MaskToAdd.repaint()
return
@pyqtSlot()
def onClick_Calibrate(self):
if not self.fileNameCameraImage:
error_dialog = QtWidgets.QErrorMessage()
error_dialog.showMessage('Calibration image must be loaded before calibration can be performed.')
error_dialog.exec_()
return
if not(self.ui.txt_DMDSizeX.toPlainText().replace(".", "", 1).isdigit()) or not(self.ui.txt_DMDSizeY.toPlainText().replace(".", "", 1).isdigit()):
error_dialog = QtWidgets.QErrorMessage()
error_dialog.showMessage('DMD calibration ratio and DMD X & Y size must be numeric.')
error_dialog.exec_()
return
if self.ui.radioButton_CamDMD2ImageMask.isChecked():
checkState = 1
else:
checkState = 0
calValues = np.array([
float(self.ui.txt_DMDSizeX.toPlainText()),
float(self.ui.txt_DMDSizeY.toPlainText()),
float(checkState),
float(self.ui.txt_CalPositionX.toPlainText()),
float(self.ui.txt_CalPositionY.toPlainText()),
float(self.ui.txt_CalRotation.toPlainText()),
float(self.ui.txt_CalWidth.toPlainText()),
float(self.ui.txt_CalHeight.toPlainText()),
float(self.ui.txt_CalibrationThreshold.toPlainText())
])
try:
self.calibration = sr.ShapeDetector(self.fileNameCameraImage, calValues)
except ValueError:
error_dialog = QtWidgets.QErrorMessage()
error_dialog.showMessage('Problem with Calibration Image File.')
error_dialog.exec_()
return
try:
self.cntrPoints = self.calibration.detectCalibration()
height1D, width1D = self.calibration.sourceImage.shape
rgbImage = np.zeros([height1D, width1D, 3] , dtype=np.uint8)
rgbImage[:,:,0] = self.calibration.sourceImage
rgbImage[:,:,1] = self.calibration.sourceImage
rgbImage[:,:,2] = self.calibration.sourceImage
cv2.drawContours(rgbImage, [self.cntrPoints], 0, (255, 0, 0), 5)
self.showImageInView(rgbImage, self.ui.view_CameraImage)
self.calibrationValuesStorage = np.array([
float(self.calibration.DMDSizeX),
float(self.calibration.DMDSizeY),
float(self.calibration.shapeColour),
float(self.calibration.positionX),
float(self.calibration.positionY),
float(self.calibration.rotation),
float(self.calibration.width),
float(self.calibration.height),
float(self.calibration.thresholdCalibrationValue)
], dtype=float)
self.calibrationImageStorage = self.fileNameCameraImage
except:
error_dialog = QtWidgets.QErrorMessage()
error_dialog.showMessage('Problem with Calibration Threshold level set.')
error_dialog.exec_()
return
return
@pyqtSlot()
def onClick_SaveCalibration(self):
np.savetxt(self.CalibrationValuesFile, self.calibrationValuesStorage, fmt='%1.2f')
imageFile = open(self.CalibrationImageFile, 'w+')
imageFile.write(self.calibrationImageStorage)
imageFile.close()
return
@pyqtSlot()
def onClick_LockCalibration(self):
if self.ui.cbox_LockCalibration.isChecked():
self.ui.txt_DMDSizeX.setEnabled(False)
self.ui.txt_DMDSizeY.setEnabled(False)
self.ui.txt_CalPositionX.setEnabled(False)
self.ui.txt_CalPositionY.setEnabled(False)
self.ui.radioButton_CamDMD2ImageMask.setEnabled(False)
self.ui.radioButton_CamDMD1ImageMask.setEnabled(False)
self.ui.txt_CalRotation.setEnabled(False)
self.ui.txt_CalWidth.setEnabled(False)
self.ui.txt_CalHeight.setEnabled(False)
self.ui.btn_SaveCalibration.setEnabled(False)
self.ui.btn_Calibrate.setEnabled(False)
self.ui.txt_CalibrationThreshold.setEnabled(False)
elif not(self.ui.cbox_LockCalibration.isChecked()):
self.ui.txt_DMDSizeX.setEnabled(True)
self.ui.txt_DMDSizeY.setEnabled(True)
self.ui.txt_CalPositionX.setEnabled(True)
self.ui.txt_CalPositionY.setEnabled(True)
self.ui.radioButton_CamDMD2ImageMask.setEnabled(True)
self.ui.radioButton_CamDMD1ImageMask.setEnabled(True)
self.ui.txt_CalRotation.setEnabled(True)
self.ui.txt_CalWidth.setEnabled(True)
self.ui.txt_CalHeight.setEnabled(True)
self.ui.btn_SaveCalibration.setEnabled(True)
self.ui.btn_Calibrate.setEnabled(True)
self.ui.txt_CalibrationThreshold.setEnabled(True)
else:
self.ui.txt_DMDSizeX.setEnabled(True)
self.ui.txt_DMDSizeY.setEnabled(True)
self.ui.txt_CalPositionX.setEnabled(True)
self.ui.txt_CalPositionY.setEnabled(True)
self.ui.radioButton_CamDMD2ImageMask.setEnabled(True)
self.ui.radioButton_CamDMD1ImageMask.setEnabled(True)
self.ui.txt_CalRotation.setEnabled(True)
self.ui.txt_CalWidth.setEnabled(True)
self.ui.txt_CalHeight.setEnabled(True)
self.ui.btn_SaveCalibration.setEnabled(True)
self.ui.btn_Calibrate.setEnabled(True)
self.ui.txt_CalibrationThreshold.setEnabled(True)
self.repaint()
return
@pyqtSlot()
def onClick_DMDMaskGen(self):
self.Mask = None
self.MaskGeneratedFlag = False
if self.ui.tab_MaskFunctionality.currentIndex() == 0:
self.Mask = self.maskGenerationCalibration(self.ui.radioButton_CamDMD2DMDMask.isChecked())
elif self.ui.tab_MaskFunctionality.currentIndex() == 1:
self.Mask = self.maskGenerationThreshold(self.ui.radioButton_CamDMD2DMDMask.isChecked())
elif self.ui.tab_MaskFunctionality.currentIndex() == 2:
self.Mask = self.maskGenerationSlit(self.ui.radioButton_CamDMD2DMDMask.isChecked())
elif self.ui.tab_MaskFunctionality.currentIndex() == 3:
self.Mask = self.maskGenerationPinhole(self.ui.radioButton_CamDMD2DMDMask.isChecked())
if not(self.MaskGeneratedFlag):
return
else:
self.MaskChoice = self.ui.tab_MaskFunctionality.currentIndex()
height1D, width1D = self.Mask.shape
rgbImage = np.zeros([height1D, width1D, 3] , dtype=np.uint8)
rgbImage[:,:,0] = self.Mask
rgbImage[:,:,1] = self.Mask
rgbImage[:,:,2] = self.Mask
self.showImageInView(rgbImage, self.ui.view_DMDMaskImage)
return
@pyqtSlot()
def onClick_DMDMaskSave(self):
if not(self.MaskGeneratedFlag):
error_dialog = QtWidgets.QErrorMessage()
error_dialog.showMessage('Mask must be generated before it can be saved.')
error_dialog.exec_()
return
if self.MaskChoice == 0:
DefaultSaveName = './Masks/Calibration/CalibrationMask-' + str(self.maskCountCalibration) + '_CCSize-' + self.ui.txt_CentreCircleSize.toPlainText() + '.bmp'
self.maskCountCalibration = self.maskCountCalibration + 1
elif self.MaskChoice == 1:
DefaultSaveName = './Masks/Threshold/ThresholdMask-' + str(self.maskCountThreshold) + '.bmp'
self.maskCountThreshold = self.maskCountThreshold + 1
elif self.MaskChoice == 2:
DefaultSaveName = './Masks/Slit/SlitMask-' + str(self.maskCountSlit) + '_NumSlits-' + str(self.ui.spinBox_NumberOfSlits.value()) + '_Width-' + self.ui.txt_SlitWidth.toPlainText() + '_Separation-' + self.ui.txt_SlitSeparation.toPlainText() + '_Rotation' + self.ui.txt_SlitRotation.toPlainText() + '.bmp'
self.maskCountSlit = self.maskCountSlit + 1
elif self.MaskChoice == 3:
DefaultSaveName = './Masks/Pinhole/PinholeMask-' + str(self.maskCountPinhole) + '_NumPinholes-' + str(self.ui.spinBox_NumberOfPinholes.value()) + '_Radius-' + self.ui.txt_PinholeRadius.toPlainText() + 'Pitch-' + self.ui.txt_PinholePitch.toPlainText() + '_Rotation' + self.ui.txt_PinholeRotation.toPlainText() + '.bmp'
self.maskCountPinhole = self.maskCountPinhole + 1
        else:
            DefaultSaveName = './Masks/Mask.bmp'  # fallback name so the save dialog below still works
            imwrite(DefaultSaveName, self.Mask)
saveMask = self.Mask.astype(np.uint8)
options = QtWidgets.QFileDialog.Options()
options |= QtWidgets.QFileDialog.DontUseNativeDialog
fileName, _ = QtWidgets.QFileDialog.getSaveFileName(self,"QFileDialog.getSaveFileName()",DefaultSaveName,"All Files (*);;Bitmap (*.bmp)", options=options)
        if fileName:
            imwrite(fileName, saveMask)
return
@pyqtSlot()
def onClick_ConnectDMD(self):
try:
if self.DMDConnectFlag == False:
self.DMD = ALP4(version = '4.3', libDir = 'C:/Program Files/ALP-4.3/ALP-4.3 API')
self.DMD.Initialize()
self.DMDConnectFlag = True
except:
error_dialog = QtWidgets.QErrorMessage()
error_dialog.showMessage('DMD not connected.')
error_dialog.exec_()
return
if self.DMD.nSizeX != int(float(self.ui.txt_DMDSizeX.toPlainText())) or self.DMD.nSizeY != int(float(self.ui.txt_DMDSizeY.toPlainText())):
error_dialog = QtWidgets.QErrorMessage()
            error_dialog.showMessage('Size of DMD connected (' + str(self.DMD.nSizeX) + 'x' + str(self.DMD.nSizeY) + ') is not the same as the calibration settings. Calibration required.')
error_dialog.exec_()
self.onClick_LockCalibration()
self.ui.cbox_LockCalibration.setChecked(False)
self.ui.view_DMDConnectionStatus.setPixmap(QtGui.QPixmap("./TestImages/green.png"))
return
@pyqtSlot()
def onClick_DisconnectDMD(self):
try:
self.DMD.Halt()
if self.DMDDisplayFlag == True:
self.DMD.FreeSeq()
self.DMD.Free()
self.ui.view_DMDConnectionStatus.setPixmap(QtGui.QPixmap("./TestImages/red.png"))
self.DMDConnectFlag = False
self.DMDDisplayFlag = False
return
except:
error_dialog = QtWidgets.QErrorMessage()
error_dialog.showMessage("No DMD Connected.")
error_dialog.exec_()
return
@pyqtSlot()
def onClick_CamDMD1DMD(self):
try:
mask =
|
np.ones([self.DMD.nSizeY, self.DMD.nSizeX])
|
numpy.ones
|
#import matplotlib.pyplot as plt
import numpy as np
#import random
import copy
def plan(targets,obstacles):
def Lroute(tar, obs, tend):
s = []
t = 0
s.append(tar[0])
weight = 99
while (t < (len(tar) - 1)):
l = len(s) - 1
Location = copy.copy(s[l])
'''judge the weight'''
mov = np.array([0.0] * 4)
Destination = tar[t + 1]
RelativeP = Destination - Location
'''tend weight'''
if tend == 1:
mov[0] += 1
mov[2] += -1
elif tend == 2:
mov[0] += -1
mov[2] += 1
elif tend == 0:
mov[0] += (Location[0] - 3)
mov[2] -= (Location[0] - 3)
'''target weight'''
if RelativeP[0] > 0:
mov[2] += weight
mov[0] -= weight
elif RelativeP[0] < 0:
mov[0] += weight
mov[2] -= weight
if RelativeP[1] > 0:
mov[1] += weight * 1.1
mov[3] -= weight * 1.1
elif RelativeP[1] < 0:
mov[3] += weight * 1.1
mov[1] -= weight * 1.1
'''border weight'''
if Location[0] == 1:
mov[0] -= 10000
if Location[0] == 5:
mov[2] -= 10000
if Location[1] == 1:
mov[3] -= 10000
if Location[1] == 5:
mov[1] -= 10000
'''obstacle weight'''
if (obs == (Location + [1, 0])).all(1).any():
mov[2] -= 10000
if (obs == (Location + [-1, 0])).all(1).any():
mov[0] -= 10000
if (obs == (Location + [0, 1])).all(1).any():
mov[1] -= 10000
if (obs == (Location + [0, -1])).all(1).any():
mov[3] -= 10000
'''dont loop'''
for i in range(l):
if (s[l - 1 - i] == (Location + [1, 0])).all():
mov[2] -= 50
if (s[l - 1 - i] == (Location + [-1, 0])).all():
mov[0] -= 50
if (s[l - 1 - i] == (Location + [0, 1])).all():
mov[1] -= 50
if (s[l - 1 - i] == (Location + [0, -1])).all():
mov[3] -= 50
'''choose movement'''
if max(mov) < -800:
s.append(Location)
movement = np.argmax(mov)
if movement == 0:
s.append(Location + [-1, 0])
if movement == 1:
s.append(Location + [0, 1])
if movement == 2:
s.append(Location + [1, 0])
if movement == 3:
s.append(Location + [0, -1])
'''arrive at a target?'''
if (Destination == s[l + 1]).all():
t += 1
return (s)
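    # Illustrative note: Lroute greedily scores the four moves (mov[0] = -x, mov[1] = +y,
    # mov[2] = +x, mov[3] = -y) each step; e.g. with Location = [3, 3] and Destination = [5, 3],
    # RelativeP = [2, 0], so mov[2] gains +99 and mov[0] loses 99, and argmax(mov) steps towards
    # larger x unless a border, an obstacle or a recently visited cell applies its -10000 / -50 penalty.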
def ana(targets, obstacles):
'''targets allocation'''
core = [np.mean(targets.T[0]), np.mean(targets.T[1])]
targetsR1 = np.array([[-1]] * 4)
targetsR2 = np.array([[-1]] * 4)
targetsR3 = np.array([[-1]] * 4)
targetsM1 = np.array([[0, 0]] * 4)
targetsM2 = np.array([[0, 0]] * 4)
targetsM3 = np.array([[0, 0]] * 4)
step = np.array([[500] * 12] * 12)
TMov = np.array([[0, 0]] * 2)
for i in range(12):
for j in range(12):
if i != j:
TMov[0] = targets[i]
TMov[1] = targets[j]
l = Lroute(TMov, obstacles, 0)
step[i][j] = len(l) - 1
stepO = copy.copy(step)
k1 = []
for i in range(12):
k1.append((targets[i][0] - core[0]) - (targets[i][1] - core[1]))
targetsR1[0] = k1.index(min(k1))
for i in range(12):
step[k1.index(min(k1))][i] = 500
def thk(ss, targetsR, ss1, Tar):
k = 1
while k < 4:
stepd = np.array([[0]] * 12)
stepl = np.array([[600]] * 12)
stepw =
|
np.array([[0.0]] * 12)
|
numpy.array
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: <NAME> <<EMAIL>>
#
'''
Density fitting with Gaussian basis
Ref:
J. Chem. Phys. 147, 164119 (2017)
'''
import time
import copy
from functools import reduce
import numpy
from pyscf import lib
from pyscf.lib import logger
from pyscf.pbc import tools
from pyscf.pbc import gto
from pyscf.pbc.df import ft_ao
from pyscf.pbc.lib.kpts_helper import is_zero, gamma_point, member
def density_fit(mf, auxbasis=None, mesh=None, with_df=None):
    '''Generate density-fitting SCF object
Args:
auxbasis : str or basis dict
Same format to the input attribute mol.basis. If auxbasis is
None, auxiliary basis based on AO basis (if possible) or
even-tempered Gaussian basis will be used.
mesh : tuple
number of grids in each direction
with_df : DF object
'''
from pyscf.pbc.df import df
if with_df is None:
if hasattr(mf, 'kpts'):
kpts = mf.kpts
else:
kpts = numpy.reshape(mf.kpt, (1,3))
with_df = df.DF(mf.cell, kpts)
with_df.max_memory = mf.max_memory
with_df.stdout = mf.stdout
with_df.verbose = mf.verbose
with_df.auxbasis = auxbasis
if mesh is not None:
with_df.mesh = mesh
mf = copy.copy(mf)
mf.with_df = with_df
mf._eri = None
return mf
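# Minimal usage sketch (illustrative; the small test cell below is a hypothetical example, not
# part of this module):
#   from pyscf.pbc import gto as pgto, scf as pscf
#   cell = pgto.M(atom='He 0 0 1; He 1 0 1', a=numpy.eye(3) * 4.0,
#                 basis='gth-szv', pseudo='gth-pade')
#   mf = density_fit(pscf.RHF(cell), auxbasis='weigend')
#   mf.kernel()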
def get_j_kpts(mydf, dm_kpts, hermi=1, kpts=numpy.zeros((1,3)), kpts_band=None):
log = logger.Logger(mydf.stdout, mydf.verbose)
t1 = (time.clock(), time.time())
if mydf._cderi is None or not mydf.has_kpts(kpts_band):
if mydf._cderi is not None:
log.warn('DF integrals for band k-points were not found %s. '
'DF integrals will be rebuilt to include band k-points.',
mydf._cderi)
mydf.build(kpts_band=kpts_band)
t1 = log.timer_debug1('Init get_j_kpts', *t1)
dm_kpts = lib.asarray(dm_kpts, order='C')
dms = _format_dms(dm_kpts, kpts)
nset, nkpts, nao = dms.shape[:3]
naux = mydf.get_naoaux()
nao_pair = nao * (nao+1) // 2
kpts_band, input_band = _format_kpts_band(kpts_band, kpts), kpts_band
nband = len(kpts_band)
j_real = gamma_point(kpts_band) and not numpy.iscomplexobj(dms)
dmsR = dms.real.transpose(0,1,3,2).reshape(nset,nkpts,nao**2)
dmsI = dms.imag.transpose(0,1,3,2).reshape(nset,nkpts,nao**2)
rhoR = numpy.zeros((nset,naux))
rhoI = numpy.zeros((nset,naux))
max_memory = max(2000, (mydf.max_memory - lib.current_memory()[0]))
for k, kpt in enumerate(kpts):
kptii = numpy.asarray((kpt,kpt))
p1 = 0
for LpqR, LpqI in mydf.sr_loop(kptii, max_memory, False):
p0, p1 = p1, p1+LpqR.shape[0]
#:Lpq = (LpqR + LpqI*1j).reshape(-1,nao,nao)
#:rhoR[:,p0:p1] += numpy.einsum('Lpq,xqp->xL', Lpq, dms[:,k]).real
#:rhoI[:,p0:p1] += numpy.einsum('Lpq,xqp->xL', Lpq, dms[:,k]).imag
rhoR[:,p0:p1] += numpy.einsum('Lp,xp->xL', LpqR, dmsR[:,k])
rhoI[:,p0:p1] += numpy.einsum('Lp,xp->xL', LpqR, dmsI[:,k])
if LpqI is not None:
rhoR[:,p0:p1] -= numpy.einsum('Lp,xp->xL', LpqI, dmsI[:,k])
rhoI[:,p0:p1] += numpy.einsum('Lp,xp->xL', LpqI, dmsR[:,k])
LpqR = LpqI = None
t1 = log.timer_debug1('get_j pass 1', *t1)
weight = 1./nkpts
rhoR *= weight
rhoI *= weight
vjR = numpy.zeros((nset,nband,nao_pair))
vjI = numpy.zeros((nset,nband,nao_pair))
for k, kpt in enumerate(kpts_band):
kptii = numpy.asarray((kpt,kpt))
p1 = 0
for LpqR, LpqI in mydf.sr_loop(kptii, max_memory, True):
p0, p1 = p1, p1+LpqR.shape[0]
#:Lpq = (LpqR + LpqI*1j)#.reshape(-1,nao,nao)
#:vjR[:,k] += numpy.dot(rho[:,p0:p1], Lpq).real
#:vjI[:,k] += numpy.dot(rho[:,p0:p1], Lpq).imag
vjR[:,k] += numpy.dot(rhoR[:,p0:p1], LpqR)
if not j_real:
vjI[:,k] += numpy.dot(rhoI[:,p0:p1], LpqR)
if LpqI is not None:
vjR[:,k] -= numpy.dot(rhoI[:,p0:p1], LpqI)
vjI[:,k] += numpy.dot(rhoR[:,p0:p1], LpqI)
LpqR = LpqI = None
t1 = log.timer_debug1('get_j pass 2', *t1)
if j_real:
vj_kpts = vjR
else:
vj_kpts = vjR + vjI*1j
vj_kpts = lib.unpack_tril(vj_kpts.reshape(-1,nao_pair))
vj_kpts = vj_kpts.reshape(nset,nband,nao,nao)
return _format_jks(vj_kpts, dm_kpts, input_band, kpts)
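# Note (illustrative): get_j_kpts assembles J in two passes over the 3-index DF integrals L_pq,
#   pass 1: rho_L = (1/Nk) * sum_k sum_pq L_pq(k) * D_qp(k)
#   pass 2: J_pq(k_band) = sum_L rho_L * L_pq(k_band)
# with real and imaginary parts handled separately, as in the commented einsum reference above.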
def get_k_kpts(mydf, dm_kpts, hermi=1, kpts=numpy.zeros((1,3)), kpts_band=None,
exxdiv=None):
cell = mydf.cell
log = logger.Logger(mydf.stdout, mydf.verbose)
t1 = (time.clock(), time.time())
if mydf._cderi is None or not mydf.has_kpts(kpts_band):
if mydf._cderi is not None:
log.warn('DF integrals for band k-points were not found %s. '
'DF integrals will be rebuilt to include band k-points.',
mydf._cderi)
mydf.build(kpts_band=kpts_band)
t1 = log.timer_debug1('Init get_k_kpts', *t1)
dm_kpts = lib.asarray(dm_kpts, order='C')
dms = _format_dms(dm_kpts, kpts)
nset, nkpts, nao = dms.shape[:3]
kpts_band, input_band = _format_kpts_band(kpts_band, kpts), kpts_band
nband = len(kpts_band)
vkR = numpy.zeros((nset,nband,nao,nao))
vkI = numpy.zeros((nset,nband,nao,nao))
dmsR = numpy.asarray(dms.real, order='C')
dmsI = numpy.asarray(dms.imag, order='C')
# K_pq = ( p{k1} i{k2} | i{k2} q{k1} )
bufR = numpy.empty((mydf.blockdim*nao**2))
bufI = numpy.empty((mydf.blockdim*nao**2))
max_memory = max(2000, mydf.max_memory-lib.current_memory()[0])
def make_kpt(ki, kj, swap_2e):
kpti = kpts[ki]
kptj = kpts_band[kj]
for LpqR, LpqI in mydf.sr_loop((kpti,kptj), max_memory, False):
nrow = LpqR.shape[0]
pLqR = numpy.ndarray((nao,nrow,nao), buffer=bufR)
pLqI = numpy.ndarray((nao,nrow,nao), buffer=bufI)
tmpR = numpy.ndarray((nao,nrow*nao), buffer=LpqR)
tmpI = numpy.ndarray((nao,nrow*nao), buffer=LpqI)
pLqR[:] = LpqR.reshape(-1,nao,nao).transpose(1,0,2)
pLqI[:] = LpqI.reshape(-1,nao,nao).transpose(1,0,2)
for i in range(nset):
zdotNN(dmsR[i,ki], dmsI[i,ki], pLqR.reshape(nao,-1),
pLqI.reshape(nao,-1), 1, tmpR, tmpI)
zdotCN(pLqR.reshape(-1,nao).T, pLqI.reshape(-1,nao).T,
tmpR.reshape(-1,nao), tmpI.reshape(-1,nao),
1, vkR[i,kj], vkI[i,kj], 1)
if swap_2e:
tmpR = tmpR.reshape(nao*nrow,nao)
tmpI = tmpI.reshape(nao*nrow,nao)
for i in range(nset):
zdotNN(pLqR.reshape(-1,nao), pLqI.reshape(-1,nao),
dmsR[i,kj], dmsI[i,kj], 1, tmpR, tmpI)
zdotNC(tmpR.reshape(nao,-1), tmpI.reshape(nao,-1),
pLqR.reshape(nao,-1).T, pLqI.reshape(nao,-1).T,
1, vkR[i,ki], vkI[i,ki], 1)
if kpts_band is kpts: # normal k-points HF/DFT
for ki in range(nkpts):
for kj in range(ki):
make_kpt(ki, kj, True)
make_kpt(ki, ki, False)
t1 = log.timer_debug1('get_k_kpts: make_kpt ki>=kj (%d,*)'%ki, *t1)
else:
for ki in range(nkpts):
for kj in range(nband):
make_kpt(ki, kj, False)
t1 = log.timer_debug1('get_k_kpts: make_kpt (%d,*)'%ki, *t1)
if (gamma_point(kpts) and gamma_point(kpts_band) and
not numpy.iscomplexobj(dm_kpts)):
vk_kpts = vkR
else:
vk_kpts = vkR + vkI * 1j
vk_kpts *= 1./nkpts
if exxdiv:
assert(exxdiv.lower() == 'ewald')
_ewald_exxdiv_for_G0(cell, kpts, dms, vk_kpts, kpts_band)
return _format_jks(vk_kpts, dm_kpts, input_band, kpts)
##################################################
#
# Single k-point
#
##################################################
def get_jk(mydf, dm, hermi=1, kpt=numpy.zeros(3),
kpts_band=None, with_j=True, with_k=True, exxdiv=None):
'''JK for given k-point'''
vj = vk = None
if kpts_band is not None and abs(kpt-kpts_band).sum() > 1e-9:
kpt = numpy.reshape(kpt, (1,3))
if with_k:
vk = get_k_kpts(mydf, dm, hermi, kpt, kpts_band, exxdiv)
if with_j:
vj = get_j_kpts(mydf, dm, hermi, kpt, kpts_band)
return vj, vk
cell = mydf.cell
log = logger.Logger(mydf.stdout, mydf.verbose)
t1 = (time.clock(), time.time())
if mydf._cderi is None or not mydf.has_kpts(kpts_band):
if mydf._cderi is not None:
log.warn('DF integrals for band k-points were not found %s. '
'DF integrals will be rebuilt to include band k-points.',
mydf._cderi)
mydf.build(kpts_band=kpts_band)
t1 = log.timer_debug1('Init get_jk', *t1)
dm = numpy.asarray(dm, order='C')
dms = _format_dms(dm, [kpt])
nset, _, nao = dms.shape[:3]
dms = dms.reshape(nset,nao,nao)
j_real = gamma_point(kpt)
k_real = gamma_point(kpt) and not numpy.iscomplexobj(dms)
kptii = numpy.asarray((kpt,kpt))
dmsR = dms.real.reshape(nset,nao,nao)
dmsI = dms.imag.reshape(nset,nao,nao)
mem_now = lib.current_memory()[0]
max_memory = max(2000, (mydf.max_memory - mem_now))
if with_j:
vjR = numpy.zeros((nset,nao,nao))
vjI = numpy.zeros((nset,nao,nao))
if with_k:
vkR = numpy.zeros((nset,nao,nao))
vkI = numpy.zeros((nset,nao,nao))
buf1R = numpy.empty((mydf.blockdim*nao**2))
buf2R = numpy.empty((mydf.blockdim*nao**2))
buf1I = numpy.zeros((mydf.blockdim*nao**2))
buf2I = numpy.empty((mydf.blockdim*nao**2))
max_memory *= .5
log.debug1('max_memory = %d MB (%d in use)', max_memory, mem_now)
def contract_k(pLqR, pLqI):
# K ~ 'iLj,lLk*,li->kj' + 'lLk*,iLj,li->kj'
#:pLq = (LpqR + LpqI.reshape(-1,nao,nao)*1j).transpose(1,0,2)
#:tmp = numpy.dot(dm, pLq.reshape(nao,-1))
#:vk += numpy.dot(pLq.reshape(-1,nao).conj().T, tmp.reshape(-1,nao))
nrow = pLqR.shape[1]
tmpR = numpy.ndarray((nao,nrow*nao), buffer=buf2R)
if k_real:
for i in range(nset):
lib.ddot(dmsR[i], pLqR.reshape(nao,-1), 1, tmpR)
lib.ddot(pLqR.reshape(-1,nao).T, tmpR.reshape(-1,nao), 1, vkR[i], 1)
else:
tmpI = numpy.ndarray((nao,nrow*nao), buffer=buf2I)
for i in range(nset):
zdotNN(dmsR[i], dmsI[i], pLqR.reshape(nao,-1),
pLqI.reshape(nao,-1), 1, tmpR, tmpI, 0)
zdotCN(pLqR.reshape(-1,nao).T, pLqI.reshape(-1,nao).T,
tmpR.reshape(-1,nao), tmpI.reshape(-1,nao),
1, vkR[i], vkI[i], 1)
pLqI = None
thread_k = None
for LpqR, LpqI in mydf.sr_loop(kptii, max_memory, False):
LpqR = LpqR.reshape(-1,nao,nao)
t1 = log.timer_debug1(' load', *t1)
if thread_k is not None:
thread_k.join()
if with_j:
#:rho_coeff = numpy.einsum('Lpq,xqp->xL', Lpq, dms)
#:vj += numpy.dot(rho_coeff, Lpq.reshape(-1,nao**2))
rhoR = numpy.einsum('Lpq,xpq->xL', LpqR, dmsR)
if not j_real:
LpqI = LpqI.reshape(-1,nao,nao)
rhoR -= numpy.einsum('Lpq,xpq->xL', LpqI, dmsI)
rhoI = numpy.einsum('Lpq,xpq->xL', LpqR, dmsI)
rhoI += numpy.einsum('Lpq,xpq->xL', LpqI, dmsR)
vjR += numpy.einsum('xL,Lpq->xpq', rhoR, LpqR)
if not j_real:
vjR -= numpy.einsum('xL,Lpq->xpq', rhoI, LpqI)
vjI += numpy.einsum('xL,Lpq->xpq', rhoR, LpqI)
vjI += numpy.einsum('xL,Lpq->xpq', rhoI, LpqR)
t1 = log.timer_debug1(' with_j', *t1)
if with_k:
nrow = LpqR.shape[0]
pLqR = numpy.ndarray((nao,nrow,nao), buffer=buf1R)
pLqR[:] = LpqR.transpose(1,0,2)
if not k_real:
pLqI = numpy.ndarray((nao,nrow,nao), buffer=buf1I)
if LpqI is not None:
pLqI[:] = LpqI.reshape(-1,nao,nao).transpose(1,0,2)
thread_k = lib.background_thread(contract_k, pLqR, pLqI)
t1 = log.timer_debug1(' with_k', *t1)
LpqR = LpqI = pLqR = pLqI = None
if thread_k is not None:
thread_k.join()
thread_k = None
if with_j:
if j_real:
vj = vjR
else:
vj = vjR + vjI * 1j
vj = vj.reshape(dm.shape)
if with_k:
if k_real:
vk = vkR
else:
vk = vkR + vkI * 1j
if exxdiv:
assert(exxdiv.lower() == 'ewald')
_ewald_exxdiv_for_G0(cell, kpt, dms, vk)
vk = vk.reshape(dm.shape)
t1 = log.timer('sr jk', *t1)
return vj, vk
def _format_dms(dm_kpts, kpts):
nkpts = len(kpts)
nao = dm_kpts.shape[-1]
dms = dm_kpts.reshape(-1,nkpts,nao,nao)
return dms
def _format_kpts_band(kpts_band, kpts):
if kpts_band is None:
kpts_band = kpts
else:
kpts_band = numpy.reshape(kpts_band, (-1,3))
return kpts_band
def _format_jks(v_kpts, dm_kpts, kpts_band, kpts):
if kpts_band is kpts or kpts_band is None:
return v_kpts.reshape(dm_kpts.shape)
else:
if hasattr(kpts_band, 'ndim') and kpts_band.ndim == 1:
v_kpts = v_kpts[:,0]
if dm_kpts.ndim <= 3: # nset=1
return v_kpts[0]
else:
return v_kpts
def zdotNN(aR, aI, bR, bI, alpha=1, cR=None, cI=None, beta=0):
'''c = a*b'''
cR = lib.ddot(aR, bR, alpha, cR, beta)
cR = lib.ddot(aI, bI,-alpha, cR, 1 )
cI = lib.ddot(aR, bI, alpha, cI, beta)
cI = lib.ddot(aI, bR, alpha, cI, 1 )
return cR, cI
def zdotCN(aR, aI, bR, bI, alpha=1, cR=None, cI=None, beta=0):
'''c = a.conj()*b'''
cR = lib.ddot(aR, bR, alpha, cR, beta)
cR = lib.ddot(aI, bI, alpha, cR, 1 )
cI = lib.ddot(aR, bI, alpha, cI, beta)
cI = lib.ddot(aI, bR,-alpha, cI, 1 )
return cR, cI
def zdotNC(aR, aI, bR, bI, alpha=1, cR=None, cI=None, beta=0):
'''c = a*b.conj()'''
cR = lib.ddot(aR, bR, alpha, cR, beta)
cR = lib.ddot(aI, bI, alpha, cR, 1 )
cI = lib.ddot(aR, bI,-alpha, cI, beta)
cI = lib.ddot(aI, bR, alpha, cI, 1 )
return cR, cI
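# Sanity-check sketch (illustrative): the zdot helpers reproduce the complex matrix product from
# its real/imaginary parts, e.g. (aR + i*aI) @ (bR + i*bI) = (aR@bR - aI@bI) + i*(aR@bI + aI@bR):
#   a = numpy.random.rand(4, 4) + 1j * numpy.random.rand(4, 4)
#   b = numpy.random.rand(4, 4) + 1j * numpy.random.rand(4, 4)
#   cR, cI = zdotNN(a.real.copy(), a.imag.copy(), b.real.copy(), b.imag.copy())
#   assert numpy.allclose(cR + 1j * cI, a @ b)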
def _ewald_exxdiv_for_G0(cell, kpts, dms, vk, kpts_band=None):
if (cell.dimension == 1 or
(cell.dimension == 2 and cell.low_dim_ft_type is None)):
return _ewald_exxdiv_1d2d(cell, kpts, dms, vk, kpts_band)
else:
return _ewald_exxdiv_3d(cell, kpts, dms, vk, kpts_band)
def _ewald_exxdiv_3d(cell, kpts, dms, vk, kpts_band=None):
s = cell.pbc_intor('int1e_ovlp', hermi=1, kpts=kpts)
madelung = tools.pbc.madelung(cell, kpts)
if kpts is None:
for i,dm in enumerate(dms):
vk[i] += madelung * reduce(numpy.dot, (s, dm, s))
elif numpy.shape(kpts) == (3,):
if kpts_band is None or is_zero(kpts_band-kpts):
for i,dm in enumerate(dms):
vk[i] += madelung * reduce(numpy.dot, (s, dm, s))
elif kpts_band is None or numpy.array_equal(kpts, kpts_band):
for k in range(len(kpts)):
for i,dm in enumerate(dms):
vk[i,k] += madelung * reduce(numpy.dot, (s[k], dm[k], s[k]))
else:
for k, kpt in enumerate(kpts):
for kp in member(kpt, kpts_band.reshape(-1,3)):
for i,dm in enumerate(dms):
vk[i,kp] += madelung * reduce(numpy.dot, (s[k], dm[k], s[k]))
def _ewald_exxdiv_1d2d(cell, kpts, dms, vk, kpts_band=None):
s = cell.pbc_intor('int1e_ovlp', hermi=1, kpts=kpts)
madelung = tools.pbc.madelung(cell, kpts)
Gv, Gvbase, kws = cell.get_Gv_weights(cell.mesh)
G0idx, SI_on_z = gto.cell._SI_for_uniform_model_charge(cell, Gv)
coulG = 4*numpy.pi /
|
numpy.linalg.norm(Gv[G0idx], axis=1)
|
numpy.linalg.norm
|
import math
import numpy as np
import pygame
import random
import time
'''
LEFT -> button_direction = 0
RIGHT -> button_direction = 1
DOWN -> button_direction = 2
UP -> button_direction = 3
'''
class SnakeGame:
SPEED = 10
SIDE = 500
RED = (255, 100, 100)
WHITE = (255, 255, 255)
YELLOW = (255, 255, 0)
BLACK = (0, 0, 0)
CLOCK = pygame.time.Clock()
OBSTACLES = 0
def __init__(self):
self.reset_game()
self.display = pygame.display.set_mode((self.SIDE, self.SIDE))
def reset_game(self):
self.head = [250,250]
self.body = [[250,250],[240,250],[230,250],[220,250]]
self.apple_position = [random.randrange(1, 50) * 10, random.randrange(1, 50) * 10]
self.obstacles = [[random.randrange(1, 50) * 10, random.randrange(1, 50) * 10] for _ in range(self.OBSTACLES)]
if [250,250] in self.obstacles:
self.obstacles.remove([250,250])
if [260,250] in self.obstacles:
self.obstacles.remove([260,250])
self.score = 0
def __start_game(self):
pygame.init()
self.display.fill(self.BLACK)
pygame.display.update()
def __finish_game(self):
self.display = pygame.display.set_mode((self.SIDE, self.SIDE))
self.display.fill(self.BLACK)
pygame.display.update()
self.__display_score(f'Your Score is: {self.score}')
pygame.quit()
def start_game(self):
self.__start_game()
self.__play_game(1)
self.__finish_game()
def __meet_apple(self):
self.apple_position = [random.randrange(1, 50) * 10, random.randrange(1, 50) * 10]
while self.apple_position in self.obstacles:
self.apple_position = [random.randrange(1, 50) * 10, random.randrange(1, 50) * 10]
self.score += 1
def __meet_boundaries(self, head):
if self.SIDE > head[0] >= 0 and self.SIDE > head[1] >= 0:
return 0
else:
return 1
def __meet_self(self, head):
if head in self.body[1:]:
return 1
else:
return 0
def __meet_obstacles(self, head):
if head in self.obstacles:
return 1
else:
return 0
def meet_obstacle(self, current_direction_vec=None):
if current_direction_vec is None:
# return 1 if self.__meet_boundaries(self.head) or self.__meet_self(self.head) else 0
return 1 if self.__meet_boundaries(self.head) or self.__meet_self(self.head) or self.__meet_obstacles(self.head) else 0
else:
next_step = self.head + current_direction_vec
# return 1 if self.__meet_boundaries(next_step) or self.__meet_self(next_step.tolist()) else 0
            return 1 if self.__meet_boundaries(next_step) or self.__meet_self(next_step.tolist()) or self.__meet_obstacles(next_step.tolist()) else 0
def _generate_snake(self, button_direction):
if button_direction == 1:
self.head[0] += 10
elif button_direction == 0:
self.head[0] -= 10
elif button_direction == 2:
self.head[1] += 10
elif button_direction == 3:
self.head[1] -= 10
else:
pass
if self.head == self.apple_position:
self.__meet_apple()
self.body.insert(0, list(self.head))
else:
self.body.insert(0, list(self.head))
self.body.pop()
def _display_snake(self):
for position in self.body:
pygame.draw.rect(
self.display,
self.WHITE,
pygame.Rect(position[0], position[1], 10, 10)
)
def _display_apple(self):
# image = pygame.image.load('snake_deeplearning/apple.jpg')
# self.display.blit(image, (self.apple_position[0], self.apple_position[1]))
pygame.draw.rect(
self.display,
self.YELLOW,
pygame.Rect(self.apple_position[0], self.apple_position[1], 10, 10)
)
def _display_obstacles(self):
for obs in self.obstacles:
pygame.draw.rect(
self.display,
self.RED,
pygame.Rect(obs[0], obs[1], 10, 10)
)
def __display_score(self, display_text):
largeText = pygame.font.Font('freesansbold.ttf', 35)
TextSurf = largeText.render(display_text, True, self.WHITE)
TextRect = TextSurf.get_rect()
TextRect.center = (self.SIDE / 2, self.SIDE / 2)
self.display.blit(TextSurf, TextRect)
pygame.display.update()
time.sleep(2)
def __play_game(self, button_direction):
crashed = False
prev_button_direction = 1
button_direction = 1
while crashed is not True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
crashed = True
break
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT and prev_button_direction != 1:
button_direction = 0
elif event.key == pygame.K_RIGHT and prev_button_direction != 0:
button_direction = 1
elif event.key == pygame.K_UP and prev_button_direction != 2:
button_direction = 3
elif event.key == pygame.K_DOWN and prev_button_direction != 3:
button_direction = 2
else:
button_direction = button_direction
self.display.fill(self.BLACK)
self._display_apple()
self._display_obstacles()
self._display_snake()
self._generate_snake(button_direction)
pygame.display.set_caption(f'Snake Game SCORE: {self.score}')
pygame.display.update()
prev_button_direction = button_direction
if self.meet_obstacle() == 1:
crashed = True
break
self.CLOCK.tick(SnakeGame.SPEED)
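# Minimal usage sketch (illustrative): play the game manually with the arrow keys.
#   if __name__ == '__main__':
#       game = SnakeGame()
#       game.start_game()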
class SnakeGameForTraining(SnakeGame):
""" Function used by training_data_generator.py """
def blocked_directions(self):
current_direction_vec = np.array(self.body[0]) - np.array(self.body[1])
left_direction_vec = np.array([current_direction_vec[1], -current_direction_vec[0]])
right_direction_vec = np.array([-current_direction_vec[1], current_direction_vec[0]])
is_front_blocked = self.meet_obstacle(current_direction_vec)
is_left_blocked = self.meet_obstacle(left_direction_vec)
is_right_blocked = self.meet_obstacle(right_direction_vec)
return current_direction_vec, is_front_blocked, is_left_blocked, is_right_blocked
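    # Illustrative note: for a current direction (dx, dy) the left/right probes are the 90-degree
    # rotations (dy, -dx) and (-dy, dx); e.g. moving right (10, 0) gives left = (0, -10) (up on
    # screen, since y grows downwards) and right = (0, 10) (down on screen).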
def get_angle_with_apple(self):
apple_direction_vec =
|
np.array(self.apple_position)
|
numpy.array
|
import numpy as np
from .indep_sim import (
linear,
spiral,
exponential,
cubic,
joint_normal,
step,
quadratic,
w_shaped,
uncorrelated_bernoulli,
logarithmic,
fourth_root,
sin_four_pi,
sin_sixteen_pi,
two_parabolas,
circle,
ellipse,
diamond,
multiplicative_noise,
square,
multimodal_independence,
)
_SIMS = [
linear,
spiral,
exponential,
cubic,
joint_normal,
step,
quadratic,
w_shaped,
uncorrelated_bernoulli,
logarithmic,
fourth_root,
sin_four_pi,
sin_sixteen_pi,
two_parabolas,
circle,
ellipse,
diamond,
multiplicative_noise,
square,
multimodal_independence,
]
def _normalize(x, y):
"""Normalize input data matricies."""
x[:, 0] = x[:, 0] / np.max(np.abs(x[:, 0]))
y[:, 0] = y[:, 0] / np.max(np.abs(y[:, 0]))
return x, y
def _2samp_rotate(sim, x, y, p, degree=90, pow_type="samp"):
angle = np.radians(degree)
data = np.hstack([x, y])
same_shape = [
"joint_normal",
"logarithmic",
"sin_four_pi",
"sin_sixteen_pi",
"two_parabolas",
"square",
"diamond",
"circle",
"ellipse",
"multiplicative_noise",
"multimodal_independence",
]
if sim.__name__ in same_shape:
rot_shape = 2 * p
else:
rot_shape = p + 1
rot_mat = np.identity(rot_shape)
if pow_type == "dim":
if sim.__name__ not in [
"exponential",
"cubic",
"spiral",
"uncorrelated_bernoulli",
"fourth_root",
"circle",
]:
for i in range(rot_shape):
mat = np.random.normal(size=(rot_shape, 1))
mat = mat / np.sqrt(np.sum(mat ** 2))
if i == 0:
rot = mat
else:
rot = np.hstack([rot, mat])
rot_mat, _ = np.linalg.qr(rot)
if (p % 2) == 1:
rot_mat[0] *= -1
else:
rot_mat[np.ix_((0, -1), (0, -1))] = np.array(
[[np.cos(angle), -np.sin(angle)], [np.sin(angle), np.cos(angle)]]
)
elif pow_type == "samp":
rot_mat[np.ix_((0, 1), (0, 1))] = np.array(
[[np.cos(angle), -np.sin(angle)], [np.sin(angle), np.cos(angle)]]
)
else:
raise ValueError("pow_type not a valid flag ('dim', 'samp')")
rot_data = (rot_mat @ data.T).T
if sim.__name__ in same_shape:
x_rot, y_rot = np.hsplit(rot_data, 2)
else:
x_rot, y_rot = np.hsplit(rot_data, [-p])
return x_rot, y_rot
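# Note (illustrative): with the default degree=90 and pow_type="samp", the rotation block is
# [[0, -1], [1, 0]], so the first two columns of the stacked data map as (x, y) -> (-y, x).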
def rot_2samp(sim, n, p, noise=True, degree=90):
r"""
Rotates input simulations to produce a 2-sample simulation.
Parameters
----------
sim : callable()
The simulation (from the ``hyppo.sims`` module) that is to be rotated.
n : int
The number of samples desired by the simulation.
p : int
The number of dimensions desired by the simulation.
noise : bool, (default: True)
Whether or not to include noise in the simulation.
degree : float, (default: 90)
The number of degrees to rotate the input simulation by (in first dimension).
Returns
-------
samp1, samp2 : ndarray
Rotated data matrices. `samp1` and `samp2` have shapes `(n, p+1)` and `(n, p+1)`
or `(n, 2p)` and `(n, 2p)` depending on the independence simulation. Here, `n`
is the number of samples and `p` is the number of dimensions.
Examples
--------
>>> from hyppo.sims import rot_2samp, linear
>>> x, y = rot_2samp(linear, 100, 1)
>>> print(x.shape, y.shape)
(100, 2) (100, 2)
"""
if sim not in _SIMS:
raise ValueError("Not valid simulation")
if sim.__name__ == "multimodal_independence":
x, y = sim(n, p)
x_rot, y_rot = sim(n, p)
else:
if sim.__name__ == "multiplicative_noise":
x, y = sim(n, p)
else:
x, y = sim(n, p, noise=noise)
x_rot, y_rot = _2samp_rotate(sim, x, y, p, degree=degree, pow_type="samp")
samp1 = np.hstack([x, y])
samp2 = np.hstack([x_rot, y_rot])
return samp1, samp2
def trans_2samp(sim, n, p, noise=True, degree=90, trans=0.3):
r"""
Translates and rotates input simulations to produce a 2-sample
simulation.
Parameters
----------
    sim : callable()
        The simulation (from the ``hyppo.sims`` module) that is to be translated and rotated.
    n : int
        The number of samples desired by the simulation.
    p : int
        The number of dimensions desired by the simulation.
    noise : bool, (default: True)
        Whether or not to include noise in the simulation.
degree : float, (default: 90)
The number of degrees to rotate the input simulation by (in first dimension).
trans : float, (default: 0.3)
The amount to translate the second simulation by (in first dimension).
Returns
-------
samp1, samp2 : ndarray
Translated/rotated data matrices. `samp1` and `samp2` have shapes `(n, p+1)` and
`(n, p+1)` or `(n, 2p)` and `(n, 2p)` depending on the independence simulation.
Here, `n` is the number of samples and `p` is the number of dimensions.
Examples
--------
>>> from hyppo.sims import trans_2samp, linear
>>> x, y = trans_2samp(linear, 100, 1)
>>> print(x.shape, y.shape)
(100, 2) (100, 2)
"""
if sim not in _SIMS:
raise ValueError("Not valid simulation")
if sim.__name__ == "multimodal_independence":
x, y = sim(n, p)
x_trans, y_trans = sim(n, p)
else:
if sim.__name__ == "multiplicative_noise":
x, y = sim(n, p)
else:
x, y = sim(n, p, noise=noise)
x, y = _normalize(x, y)
x_trans, y_trans = _2samp_rotate(sim, x, y, p, degree=degree, pow_type="dim")
x_trans[:, 0] += trans
y_trans[:, 0] = y_trans[:, -1]
samp1 = np.hstack([x, y])
samp2 = np.hstack([x_trans, y_trans])
return samp1, samp2
def gaussian_3samp(n, epsilon=1, weight=0, case=1):
r"""
Generates 3 sample of gaussians corresponding to 5 cases.
Parameters
----------
n : int
The number of samples desired by the simulation.
epsilon : float, (default: 1)
The amount to translate simulation by (amount depends on case).
    weight : float, (default: 0)
Number between 0 and 1 corresponding to weight of the second Gaussian
(used in case 4 and 5 to produce a mixture of Gaussians)
case : {1, 2, 3, 4, 5}, (default: 1)
The case in which to evaluate statistical power for each test.
Returns
-------
sims : list of ndarray
List of 3 2-dimensional multivariate Gaussian each
corresponding to the desired case.
Examples
--------
>>> from hyppo.sims import gaussian_3samp
>>> sims = gaussian_3samp(100)
>>> print(sims[0].shape, sims[1].shape, sims[2].shape)
(100, 2) (100, 2) (100, 2)
"""
old_case = case
if old_case == 4:
case = 2
elif old_case == 5:
case = 3
sigma = np.identity(2)
mu1 = [0] * 3
mu2 = [0] * 3
if case == 1:
pass
elif case == 2:
mu2 = [0, 0, epsilon]
elif case == 3:
mu1 = [0, -epsilon / 2, epsilon / 2]
mu2 = [
(np.sqrt(3) / 3) * epsilon,
-(
|
np.sqrt(3)
|
numpy.sqrt
|
# !/usr/bin/env python3
import os, sys
crt_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(crt_dir)
# print('sys.path:', sys.path)
import argparse
import numpy as np
import soundfile as sf
from pathlib import Path
import onnxruntime as ort
import SoundSourceLocalization.SpeechEnhancement.code.ns_featurelib as ns_featurelib
class NSnet2Enhancer(object):
"""NSnet2 enhancer class."""
def __init__(self, modelfile, cfg=None):
"""Instantiate NSnet2 given a trained model path."""
self.cfg = {
'winlen' : 0.02,
'hopfrac' : 0.5,
'fs' : 16000,
'mingain' : -80,
'feattype': 'LogPow'
}
self.frameShift = float(self.cfg['winlen']) * float(self.cfg["hopfrac"])
self.fs = int(self.cfg['fs'])
self.Nfft = int(float(self.cfg['winlen']) * self.fs)
self.mingain = 10 ** (self.cfg['mingain'] / 20)
"""load onnx model"""
self.ort = ort.InferenceSession(modelfile)
self.dtype = np.float32
def enhance(self, x):
"""Obtain the estimated filter"""
onnx_inputs = {
self.ort.get_inputs()[0].name: x.astype(self.dtype)
}
out = self.ort.run(None, onnx_inputs)[0][0]
return out
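    # Note (illustrative): `x` is expected as a [batch x time x freq] feature tensor matching the
    # single ONNX input (see __call__ below); the raw output is later transposed and clipped to
    # [self.mingain, 1.0] and applied as a per-bin suppression gain.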
def __call__(self, sigIn, inFs):
"""Enhance a single Audio signal."""
assert inFs == self.fs, "Inconsistent sampling rate!"
inputSpec = ns_featurelib.calcSpec(sigIn, self.cfg)
inputFeature = ns_featurelib.calcFeat(inputSpec, self.cfg)
# shape: [batch x time x freq]
inputFeature = np.expand_dims(np.transpose(inputFeature), axis=0)
# Obtain network output
out = self.enhance(inputFeature)
# limit suppression gain
Gain = np.transpose(out)
Gain =
|
np.clip(Gain, a_min=self.mingain, a_max=1.0)
|
numpy.clip
|
"""
Credits:
Copyright (c) 2017-2019 <NAME>, <NAME>, <NAME>, <NAME> (Sinergise)
Copyright (c) 2017-2019 <NAME>, <NAME>, <NAME>, <NAME>, <NAME> (Sinergise)
Copyright (c) 2017-2019 <NAME>, <NAME>, <NAME>, <NAME> (Sinergise)
This source code is licensed under the MIT license found in the LICENSE
file in the root directory of this source tree.
"""
import unittest
import logging
import datetime
import os
import copy
import numpy as np
from eolearn.core import EOPatch, FeatureType, CRS, CopyTask, DeepCopyTask, AddFeature, RemoveFeature, RenameFeature,\
DuplicateFeature, InitializeFeature, MoveFeature, MergeFeatureTask, MapFeatureTask, ZipFeatureTask,\
ExtractBandsTask, CreateEOPatchTask
logging.basicConfig(level=logging.DEBUG)
class TestCoreTasks(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'../../../example_data', 'TestEOPatch')
cls.patch = EOPatch()
cls.patch.data['bands'] = np.arange(2*3*3*2).reshape(2, 3, 3, 2)
cls.patch.mask_timeless['mask'] = np.arange(3*3*2).reshape(3, 3, 2)
cls.patch.scalar['values'] = np.arange(10*5).reshape(10, 5)
cls.patch.timestamp = [datetime.datetime(2017, 1, 1, 10, 4, 7),
datetime.datetime(2017, 1, 4, 10, 14, 5),
datetime.datetime(2017, 1, 11, 10, 3, 51),
datetime.datetime(2017, 1, 14, 10, 13, 46),
datetime.datetime(2017, 1, 24, 10, 14, 7),
datetime.datetime(2017, 2, 10, 10, 1, 32),
datetime.datetime(2017, 2, 20, 10, 6, 35),
datetime.datetime(2017, 3, 2, 10, 0, 20),
datetime.datetime(2017, 3, 12, 10, 7, 6),
datetime.datetime(2017, 3, 15, 10, 12, 14)]
cls.patch.bbox = (324.54, 546.45, 955.4, 63.43, 3857)
cls.patch.meta_info['something'] = np.random.rand(10, 1)
def test_copy(self):
patch_copy = CopyTask().execute(self.patch)
self.assertEqual(self.patch, patch_copy, 'Copied patch is different')
patch_copy.data['new'] = np.arange(1).reshape(1, 1, 1, 1)
self.assertFalse('new' in self.patch.data, 'Dictionary of features was not copied')
patch_copy.data['bands'][0, 0, 0, 0] += 1
self.assertTrue(
|
np.array_equal(self.patch.data['bands'], patch_copy.data['bands'])
|
numpy.array_equal
|
#
# This file is part of CasADi.
#
# CasADi -- A symbolic framework for dynamic optimization.
# Copyright (C) 2010-2014 <NAME>, <NAME>, <NAME>,
# K.U. Leuven. All rights reserved.
# Copyright (C) 2011-2014 <NAME>
#
# CasADi is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# CasADi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with CasADi; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
#
# -*- coding: utf-8 -*-
"""
@author: <NAME> and <NAME>, K.U. Leuven 2012
"""
from casadi import *
import numpy as np
import matplotlib.pyplot as plt
# -----------------------------------------------------------------------------
# Collocation setup
# -----------------------------------------------------------------------------
nicp = 1 # Number of (intermediate) collocation points per control interval
xref = 0.1 # chariot reference
l = 1. #- -> crane, + -> pendulum
m = 1.
M = 1.
g = 9.81
tf = 5.0
nk = 50
ndstate = 6
nastate = 1
ninput = 1
# Degree of interpolating polynomial
deg = 4
# Radau collocation points
cp = "radau"
# Size of the finite elements
h = tf/nk/nicp
# Coefficients of the collocation equation
C = np.zeros((deg+1,deg+1))
# Coefficients of the continuity equation
D = np.zeros(deg+1)
# Collocation point
tau = SX.sym("tau")
# All collocation time points
tau_root = [0] + collocation_points(deg, cp)
T = np.zeros((nk,deg+1))
for i in range(nk):
for j in range(deg+1):
T[i][j] = h*(i + tau_root[j])
# For all collocation points: eq 10.4 or 10.17 in Biegler's book
# Construct Lagrange polynomials to get the polynomial basis at the collocation point
for j in range(deg+1):
L = 1
for j2 in range(deg+1):
if j2 != j:
L *= (tau-tau_root[j2])/(tau_root[j]-tau_root[j2])
# Evaluate the polynomial at the final time to get the coefficients of the continuity equation
lfcn = Function('lfcn', [tau],[L])
D[j] = lfcn(1.0)
# Evaluate the time derivative of the polynomial at all collocation points to get the coefficients of the continuity equation
tfcn = Function('tfcn', [tau],[tangent(L,tau)])
for j2 in range(deg+1):
C[j][j2] = tfcn(tau_root[j2])
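# Quick sanity check (illustrative): the Lagrange basis is a partition of unity, so the
# continuity coefficients evaluated at the end of the element should sum to one:
#   assert abs(sum(D) - 1.0) < 1e-10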
# -----------------------------------------------------------------------------
# Model setup
# -----------------------------------------------------------------------------
# Declare variables (use scalar graph)
t = SX.sym("t") # time
u = SX.sym("u") # control
xd = SX.sym("xd",ndstate) # differential state
xa = SX.sym("xa",nastate) # algebraic state
xddot = SX.sym("xdot",ndstate) # differential state time derivative
p = SX.sym("p",0,1) # parameters
x = SX.sym("x")
y = SX.sym("y")
w = SX.sym("w")
dx = SX.sym("dx")
dy = SX.sym("dy")
dw = SX.sym("dw")
res = vertcat(xddot[0] - dx,\
xddot[1] - dy,\
xddot[2] - dw,\
m*xddot[3] + (x-w)*xa, \
m*xddot[4] + y*xa - g*m,\
M*xddot[5] + (w-x)*xa + u,\
(x-w)*(xddot[3] - xddot[5]) + y*xddot[4] + dy*dy + (dx-dw)*(dx-dw))
xd[0] = x
xd[1] = y
xd[2] = w
xd[3] = dx
xd[4] = dy
xd[5] = dw
# System dynamics (implicit formulation)
ffcn = Function('ffcn', [t,xddot,xd,xa,u,p],[res])
# Objective function
MayerTerm = Function('mayer', [t,xd,xa,u,p],[(x-xref)*(x-xref) + (w-xref)*(w-xref) + dx*dx + dy*dy])
LagrangeTerm = Function('lagrange', [t,xd,xa,u,p],[(x-xref)*(x-xref) + (w-xref)*(w-xref)])
# Control bounds
u_min = np.array([-2])
u_max = np.array([ 2])
u_init = np.array((nk*nicp*(deg+1))*[[0.0]]) # needs to be specified for every time interval (even though it stays constant)
# Differential state bounds
#Path bounds
xD_min = np.array([-inf, -inf, -inf, -inf, -inf, -inf])
xD_max = np.array([ inf, inf, inf, inf, inf, inf])
#Initial bounds
xDi_min = np.array([ 0.0, l, 0.0, 0.0, 0.0, 0.0])
xDi_max = np.array([ 0.0, l, 0.0, 0.0, 0.0, 0.0])
#Final bounds
xDf_min = np.array([-inf, -inf, -inf, -inf, -inf, -inf])
xDf_max = np.array([ inf, inf, inf, inf, inf, inf])
#Initial guess for differential states
xD_init = np.array((nk*nicp*(deg+1))*[[ 0.0, l, 0.0, 0.0, 0.0, 0.0]]) # needs to be specified for every time interval
# Algebraic state bounds and initial guess
xA_min = np.array([-inf])
xA_max = np.array([ inf])
xAi_min = np.array([-inf])
xAi_max = np.array([ inf])
xAf_min = np.array([-inf])
xAf_max = np.array([ inf])
xA_init = np.array((nk*nicp*(deg+1))*[[sign(l)*9.81]])
# Parameter bounds and initial guess
p_min = np.array([])
p_max = np.array([])
p_init =
|
np.array([])
|
numpy.array
|
import time
import random
import numpy as np
from collections import deque
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
tf.compat.v1.disable_eager_execution()
from matplotlib import pyplot as plt
class DQNAgent:
""" DQN agent """
def __init__(self, states, actions, max_memory, double_q):
self.states = states
self.actions = actions
self.session = tf.Session()
self.build_model()
        self.saver = tf.train.Saver(max_to_keep=10)
        self.session.run(tf.global_variables_initializer())
self.memory = deque(maxlen=max_memory)
self.eps = 1
self.eps_decay = 0.99999975
self.eps_min = 0.1
self.gamma = 0.90
self.batch_size = 32
self.burnin = 100000
self.copy = 10000
self.step = 0
self.learn_each = 3
self.learn_step = 0
self.save_each = 500000
self.double_q = double_q
def build_model(self):
""" Model builder function """
self.input = tf.placeholder(dtype=tf.float32, shape=(None, ) + self.states, name='input')
self.q_true = tf.placeholder(dtype=tf.float32, shape=[None], name='labels')
self.a_true = tf.placeholder(dtype=tf.int32, shape=[None], name='actions')
self.reward = tf.placeholder(dtype=tf.float32, shape=[], name='reward')
self.input_float = tf.to_float(self.input) / 255.
# Online network
with tf.variable_scope('online'):
self.conv_1 = tf.layers.conv2d(inputs=self.input_float, filters=32, kernel_size=8, strides=4, activation=tf.nn.relu)
self.conv_2 = tf.layers.conv2d(inputs=self.conv_1, filters=64, kernel_size=4, strides=2, activation=tf.nn.relu)
self.conv_3 = tf.layers.conv2d(inputs=self.conv_2, filters=64, kernel_size=3, strides=1, activation=tf.nn.relu)
self.flatten = tf.layers.flatten(inputs=self.conv_3)
self.dense = tf.layers.dense(inputs=self.flatten, units=512, activation=tf.nn.relu)
self.output = tf.layers.dense(inputs=self.dense, units=self.actions, name='output')
# Target network
with tf.variable_scope('target'):
self.conv_1_target = tf.layers.conv2d(inputs=self.input_float, filters=32, kernel_size=8, strides=4, activation=tf.nn.relu)
self.conv_2_target = tf.layers.conv2d(inputs=self.conv_1_target, filters=64, kernel_size=4, strides=2, activation=tf.nn.relu)
self.conv_3_target = tf.layers.conv2d(inputs=self.conv_2_target, filters=64, kernel_size=3, strides=1, activation=tf.nn.relu)
self.flatten_target = tf.layers.flatten(inputs=self.conv_3_target)
self.dense_target = tf.layers.dense(inputs=self.flatten_target, units=512, activation=tf.nn.relu)
self.output_target = tf.stop_gradient(tf.layers.dense(inputs=self.dense_target, units=self.actions, name='output_target'))
# Optimizer
self.action = tf.argmax(input=self.output, axis=1)
self.q_pred = tf.gather_nd(params=self.output, indices=tf.stack([tf.range(tf.shape(self.a_true)[0]), self.a_true], axis=1))
self.loss = tf.losses.huber_loss(labels=self.q_true, predictions=self.q_pred)
self.train = tf.train.AdamOptimizer(learning_rate=0.00025).minimize(self.loss)
# Summaries
self.summaries = tf.summary.merge([
tf.summary.scalar('reward', self.reward),
tf.summary.scalar('loss', self.loss),
tf.summary.scalar('max_q', tf.reduce_max(self.output))
])
self.writer = tf.summary.FileWriter(logdir='./logs', graph=self.session.graph)
def copy_model(self):
""" Copy weights to target network """
self.session.run([tf.assign(new, old) for (new, old) in zip(tf.trainable_variables('target'), tf.trainable_variables('online'))])
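    # Note (illustrative): the target network is only refreshed here (every `self.copy` steps in a
    # typical training loop), so the bootstrapped Q-targets change slowly, which stabilises learning.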
def save_model(self):
""" Saves current model to disk """
self.saver.save(sess=self.session, save_path='./models/model', global_step=self.step)
def add(self, experience):
""" Add observation to experience """
self.memory.append(experience)
def predict(self, model, state):
""" Prediction """
if model == 'online':
return self.session.run(fetches=self.output, feed_dict={self.input: np.array(state)})
if model == 'target':
return self.session.run(fetches=self.output_target, feed_dict={self.input: np.array(state)})
def run(self, state):
""" Perform action """
if
|
np.random.rand()
|
numpy.random.rand
|
import sys
import time
from datetime import datetime
import numpy as np
from simple_pid import PID
# Components
class ProcessVariable(object):
def read(self, unit=None):
return None
class Thermistor(ProcessVariable):
def __init__(
self, reference_pin, thermistor_pin, bias_resistance=1962,
A=1.125308852122e-03, B=2.34711863267e-04, C=8.5663516e-08
):
self.reference_pin = reference_pin
self.thermistor_pin = thermistor_pin
self.reading = None
self.referenced_reading = None
self.bias_resistance = bias_resistance
self.A = A
self.B = B
self.C = C
def calibrate_steinhart_hart(self, temperature_resistance_pairs, unit='C'):
(temperatures, resistances) = zip(*temperature_resistance_pairs)
T =
| np.array(temperatures) | numpy.array |
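# calibrate_steinhart_hart is truncated at this row's boundary (its completion is
# np.array(temperatures)). As a sketch of how the Steinhart-Hart coefficients
# A, B, C stored on the Thermistor above are typically applied (function name and
# units here are illustrative assumptions):
import numpy as np

def steinhart_hart_celsius(resistance_ohms, A, B, C):
    """1/T = A + B*ln(R) + C*ln(R)**3 with T in kelvin; return degrees Celsius."""
    ln_r = np.log(resistance_ohms)
    return 1.0 / (A + B * ln_r + C * ln_r ** 3) - 273.15

# with the default coefficients above, a 10 kOhm reading comes out near 25 C
print(steinhart_hart_celsius(10_000, A=1.125308852122e-03,
                             B=2.34711863267e-04, C=8.5663516e-08))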
"""
This is multiprocessed (parallelized) so (I think) it has to be run from command line inside the `if __name__ == '__main__':` block b/c python won't let subproccesses spawn subproccesses
For multiprocessing it will look up the number of cores on your computer and use n-1 of them.
this code has a single class, AK_predictor()
it takes inputs:
df_u_raw = dataframe with users data --> read in from raw data .csv
df_r_raw = dataframe with ratings data --> read in from raw data .csv
N = positive int, number of samples for montecarlo sim when computing distances over unknown ratings. default is N = 10000
It takes a couple minutes to initiate b/c it builds all needed similarirty matrices
(recommend pickling it after instatiation)
To train it use the self.param_grid_search(n_folds = 20, results_folder = results_folder) method. it'll grid search all possible inputs and write outputs to results_folder.
- this also takes a minute or two.
- this instantiates self.results, and self.optimal_params()
(recommend pickling it after training)
To fill in the missing values of the ratings data use self.fill_r, and pass it parameter values (ideally self.optimal_params)
- This will instantiate the attribute self.df_r_filled (data frame of ratings + predicted ratings instead of nans)
- it will write this to a csv in the passed results_folder.
Basic useage, see the main block at bottom.
---
more info on what it does:
it makes df_r form df_r_raw (raw ratings data) by converting the comments to a float using sentiment analysis tools (average of RoBERTa and VADER)
- sim_user_rankings: It make a matrix of similarities of the users based on the users rankings of aspects (df_u) see citation for Kendall Tau Correlation, bottom of doc block
- sim_user_ratings: it makes a matrix of similarites of the users based on their ratings (df_r). it integrates over unknown (missing) ratings using a uniform prior distribution
- sim_item_ratings: it makes a matrix of similarites of the items based on their ratings (df_r). it integrates over unknown (missing) ratings using a uniform prior distribution
Similarities of users (items) from ratings are simply exp(-|| r(i,:,:) - r(j,: : )||_p )
- we have 8 distances for these similarities given by p in [0, 1, 2, np.inf] x naive_dist in [True, False]
- naive_dist = True means ignores unknown ratings. e.g., if two users have the same ratings on only a few items, it gives them distance 0
- naive_dist = False uses a uniform prior on ratings {1, .., 5} for unknown ratings and computes the expected difference in ratings when computing similarities.
Note that the first similarity on users from aspect ranks is Kendall Tau Correlation
Fix a p and a naive_dist value (so a distance on rating vectors)
From a similarity on users, we define an unseen rating (aspect k) from user u, item j as follows,
r(u,j,k): sum_{v: r(v,j,k) exists} r(v,j,k) sim(u,v) / z_u , w/ z_u = sum_same set sim(u,v)
Analogously for similarities on items.
Now armed with these similaritis and abilities to predict rankings, we defines unknown ratings as r(u,i): = convex sum of three predictions for r(u,i):
- user_similarity_by_rank -- weight is parameter a in [0,1]
- user_similarity_by_ratings -- weight is parameter b in [a, 1]
- item_similarity_by_ratings -- weight is 1-a-b
the param_grid_search() method searches over all values of a, b, p, naive_dist and for each one it computes the 20-fold cross-validation average error (macro-averaged)
the best tuple are our optimal parameters.
Reference for the similarity by ranking the aspects:
^ Kendall Tau Reference: <NAME>, “A Computer Method for Calculating Kendall’s Tau with Ungrouped Data”, Journal of the American Statistical Association, Vol. 61, No. 314, Part 1, pp. 436-439, 1966.
- RAB 2021-04-29
"""
# imports
import pandas as pd
import ast, os, sys
from .utils import *
import numpy as np
from numpy.linalg import norm
from scipy.stats import kendalltau
from itertools import product
from collections import defaultdict
from .sentiment import roBERTa_polarity, vader_polarity
os.environ["TOKENIZERS_PARALLELISM"] = "false" ## this turns off annoying warnings that get printed 1000 times...
from multiprocessing import Pool, cpu_count
from timeit import default_timer as timer
class AK_predictor:
"""
Inputs: df_u_raw = dataframe with users data
df_r_raw = dataframe with ratings data
N = positive int, number of samples for montecarlo sim when computing distances over unknown ratings. default is N = 10000
Attributes:
- df_u_raw, df_r_raw = original inputs with column's added
- df_u, df_r = augmented dataframes with the needed subset of columns for users (includes aspect rankings) and ratings (users x items x aspects + overall ratings)
- _user_id_index = {u: i for i,u in enumerate(self.df_r.user_id.unique())} ## this is the map between {user_id: r's row index}--> same for similarity matrices that are indexed from 0 to num_users.
- _tool_id_index = {v: j for j,v in enumerate(self.df_r.tool_id.unique())} ## this is the map between {tool_id: r's column index}--> same for similarity matrices that are indexed from 0 to num_items.
- df_r_filled = same as df_r but has all nan values filled in. this is blank until self.fill_r() is run.
- num_users, num_tools = number of users, tools
- r0 = numpy array with ratings, nan values for missing ratings. of shape num_users x num_tools x 8 ratings.
r0[user, tool, 0] = overall ratings, r0[user, tool, 1:] = aspect ratings
- nan_dists = dict giving probability distributions of distances between ratings a and b where at least one of a,b are unseen (nan values). It uses a uniform prior on unseen ratings in the {1, .., 5}
- sim_u_rank = numpy array of shape (num_users x num_users), symmetric, 1's on diag. Gives the Kendall tau similarity (0 to 1 scale) of users based on their aspect rankings in the user table df_u_raw
- sim_u_ratings = dict of form { (p, bool): similarity matrix of users based on ratings with Lp distance, and naive_dist = bool} (p in [0, 1, 2, np.inf])
- sim_i_ratings = dict of form { (p, bool): similarity matrix of items based on ratings with Lp distance, and naive_dist = bool} (p in [0, 1, 2, np.inf])
- p_optimal = None until grid search is run to train it
Methods: see each method's code block
"""
def __init__(self, df_u_raw, df_r_raw, N = 10000):
print("Beginning initialization...")
self.df_u_raw = df_u_raw
self.df_r_raw = df_r_raw
self.N = N
self.optimal_params = {} ## not instantiated here--found when running self.param_grid_search
self.results = {} ## not instantiated here--adds a key (n_folds), and values (results for all combinations of parameters)
self.df_u, self.df_r = self._make_dfs_from_raw()
self.num_users = len(self.df_r.user_id.unique())
self.num_tools = len(self.df_r.tool_id.unique())
## making r, numpy array of shape (num_users, num_tools, num_aspects + 1 (overall)),
self._user_id_index = {u: i for i,u in enumerate(self.df_r.user_id.unique())} ## this is the map between {user_id: r's row index}--> same for similarity matrices that are indexed from 0 to num_users.
self._tool_id_index = {v: j for j,v in enumerate(self.df_r.tool_id.unique())} ## this is the map between {tool_id: r's column index}--> same for similarity matrices that are indexed from 0 to num_items.
self.r0 = self._make_r0() ## numpy array of ratings, r0[user, tool, 0] = overall rating
self.df_r_filled = None ## place holder to be filled by running self.fill_df_r
## similarity and user conditional probabilities based on how they ranked the aspect questions:
self.sim_u_rank = self._make_user_similarity_by_rank()
self.nan_dists = self._make_nan_dist_probs() ## needed for distance computations
## for multiprocessing need to define the pool object:
processes = cpu_count()-1
pool = Pool(processes=processes)
## making similarity matrices from ratings for differing p-norms, and naive/Bayesian distance methods...
args = [ (p, naive_dist) for p in [ 0, 1, 2, np.inf]
for naive_dist in [ True, False] ]
start = timer()
print(f'\t\tInstantiating similarity matrix attributes from ratings. This is multiprocessed using {processes} cores. \n\t\t This step takes a minute or two ...')
sim_u_results = pool.starmap( self.make_sim_u_ratings, args )
sim_i_results = pool.starmap( self.make_sim_i_ratings, args )
end = timer()
print(f'\t\t Done, elapsed time: {end - start}')
## put the results into the desired attributes:
self.sim_u_ratings = { args[i] : sim_u_results[i] for i in range(len(args))}
self.sim_i_ratings = { args[i] : sim_i_results[i] for i in range(len(args))}
print("\tDone initializing!")
@staticmethod
def sentiment_score(text_string, verbose = True): ## need to double check this works with real data--> are we inputting the right kind of text/preprocessing is ok, etc.
"""
Takes average of Vader and roBERTa_polarity (in [-1,1]), maps that to [0,5], then uses ceiling to map to {1,2,...,5}
"""
try:
if np.isnan(text_string): return np.nan
except: pass
try:
text_string = text_string.strip()
if not text_string:
return np.nan
except: pass
if not text_string:
return np.nan
x = roBERTa_polarity(text_string)
y = vader_polarity(text_string)
if verbose and (np.abs(x-y) > .5):
print(f'assign score manually, roBERTa_polarity = {x} vader_polarity = {y}, text is: \n\t{text_string}')
sentiment = (x + y)/2 ## In [-1,1]
sentiment = np.ceil( 5 * (sentiment + 1)/2 ) ## into [0,1], into [0,5], into {1,2,..,5}
if sentiment == 0: sentiment = 1
if sentiment == 6: sentiment = 5
return sentiment
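# A quick worked check of the mapping in sentiment_score: an average polarity
# s in [-1, 1] is sent to ceil(5*(s+1)/2) and clamped into {1, ..., 5}
# (the example polarities below are made up):
import numpy as np

s = (0.4 + 0.2) / 2                       # average of the two polarity scores = 0.3
rating = int(np.ceil(5 * (s + 1) / 2))    # ceil(3.25) = 4
rating = min(max(rating, 1), 5)           # same effect as the 0/6 edge-case fixes above
print(rating)                             # 4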
def _make_dfs_from_raw(self, verbose = False):
"""
adds cols to self.df_r_raw with numeric sentiment analysis of comments
adds cols to self.df_u_raw with rankings converted from string(tuple) to just a tuple.
returns corresponding dataframes with only the cols needed:
df_r = ratings dataframe with aspect responses sorted, and free response converted to a float
df_u = user dataframe with rankings converted to tuples (not strings)
"""
for col in ["aspect_rank", "tool_rank"]: ## convert strings of tuples into just tuples:
self.df_u_raw[col] = self.df_u_raw.apply(lambda row: ast.literal_eval(row[col + "ing"]), axis = 1)
## make attribute df_u
df_u = self.df_u_raw[['user_id', 'familiarity', 'occupation', 'experience', 'name', 'aspect_rank', 'tool_rank']]
self.df_r_raw["aspect_id_6"] = self.df_r_raw.apply(lambda row: self.sentiment_score(row["Free Response"], verbose = verbose), axis = 1) ## add column w/ numeric sentiment score
## make attribute df_r
print(self.df_r_raw.index)
df_r = self.df_r_raw[["user_id", "tool_id", "Overall Score"] + [f'aspect_id_{k}' for k in range(7)]] ## keep only the cols we want in df_r
return df_u, df_r
def _make_r0(self):
"""
uses only self.num_users, .num_tools, and .df_r, dataframe with user_id, tool_id, and all questions answered on a 1-5 scale
**Important that df_r has a row for each user x each tool (w/ nan rating values if that user didn't rank that tool)**
Output: r = n,m,num_questions array of results. nan values in missing ratings.
r[user, tool, 0] = overall ratings,
r[user, tool, 1:] = aspect ratings
"""
m = self.num_users
n = self.num_tools
r = np.empty((m,n,8)) ## ratings matrix, with 0th rating the overall rating (Q3), and ratings 1-7 the aspect ratings
r[:] = np.nan
for index, row in self.df_r.iterrows():
i = self._user_id_index[row.user_id] ## converts from user_id to index
j = self._tool_id_index[row.tool_id] ## converts from tool_id to index
cols = list(self.df_r.columns)
cols.remove("user_id")
cols.remove("tool_id")
for k, c in enumerate(cols):
r[i,j,k] = row[c]
return r
def _make_user_similarity_by_rank(self):
"""
Only uses self.df_u = dataframe with user data. specifically it requires a column named "aspect_rank"
Kendall Tau correlation is used.
kt(x,y) gives values in [-1,1]: 1 if x=y and -1 if x = inverse(y). Here we normalize it to be between 0 and 1.
Output: sim_rank - array, mxm, symmetric, 1's on diagonals, gives similarity of users based on their rankings of the aspects
"""
m = self.num_users
sim_rank = np.ones((m,m))
inverse_lookup = {i:u for u,i in self._user_id_index.items()} ## need to map the indices i = 0,.., num_users to the u = user_ids
l = sorted(inverse_lookup.keys())
for ind, i in enumerate(l[:-1]): ## run thru i in l except the last element
x = self.df_u[self.df_u.user_id == inverse_lookup[i]].iloc[0]["aspect_rank"]
for j in l[ind+1:]: ## run thru j in l with j>i
y = self.df_u[self.df_u.user_id == inverse_lookup[j]].iloc[0]["aspect_rank"]
sim_rank[i,j] = (1 + kendalltau(x,y)[0] )/2
sim_rank[j,i] = sim_rank[i,j]
return sim_rank
@staticmethod
def distance_naive(x_,y_, p = 2):
"""
Inputs: x_, y_ two numpy arrays of the same shape
p = lp norm parameter (p = 0 counts non-zero entries, p = 1,2 are the usual lp norms, p = np.inf for the infinity norm)
Computes ||x_-y_||_p / len(x_) (as though x_ and y_ are vectors) but ignores any components of these vectors that have a nan value.
if all values are nan it returns 1.
Output: float in [0,1]
"""
def _remove_nans(x,y):
"""
Input: x,y: arrays of same shapes
Output: x1, y1: 1-d arrays with the corresponding entries of x,y that are both not nan.
(ignore any component where x or y is nan, then flatten what's left into a vector and return)
"""
x = np.array(x)
y = np.array(y)
assert x.shape == y.shape
return x[~np.isnan(x+y)], y[~np.isnan(x+y)]
x = x_.copy().flatten()
y = y_.copy().flatten()
denom = len(x)
(x1, y1) = _remove_nans(x,y)
# if len(x1)>0: return norm(x1-y1, p ) / len(x1)
if len(x1)>0: return norm(x1-y1, p ) / denom
else: return 1 ## if no mutual ratings, return max distance.
@staticmethod
def _make_nan_dist_probs():
"""
Returns dict of form {(a,b): { k: prob(|a-b| = k)} } where either a or b are nan
"""
nan_dists = { ("nan", "nan") : {0 : (5/25), 1: (8/25), 2: (6/25), 3: (4/25), 4: (2/25)}}
for a in range(1,6):
nan_dists[(a,"nan")] = defaultdict(float)
nan_dists[("nan", a)] = nan_dists[(a,"nan")] # symmetric
for i in range(1,6):
nan_dists[(a,"nan")][np.abs(a-i)] += 1/5
return nan_dists
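# The hard-coded ("nan", "nan") entry above is the distribution of |a - b| when a and
# b are independent and uniform on {1, ..., 5}; a short enumeration reproduces the
# 5/25, 8/25, 6/25, 4/25, 2/25 probabilities:
from collections import Counter
from itertools import product

counts = Counter(abs(a - b) for a, b in product(range(1, 6), repeat=2))
print({d: c / 25 for d, c in sorted(counts.items())})
# {0: 0.2, 1: 0.32, 2: 0.24, 3: 0.16, 4: 0.08}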
def distance(self, x_, y_ , p = 2, N = None):
"""
Input: x_, y_: arrays of same shapes
p = lp norm parameter (p = 0 counts non-zero entries, p = 1,2 are the usual lp norms, p = np.inf for the infinity norm)
N = how many samples to use for the monte carlo simulation, if nothing passed, uses self.N
Output: ||x-y||_p / len(x) (should be in the interval [0,4]), but we take the expected value over any unseen components (nan values) w/ uniform prior (x[i] \in {1,2,3,4,5} each w/ probability 1/5).
This is intractable (e.g., if there are 20 missing values and each has 5 different values it can take, there are 5^20 vectors to compute distances on)
We use a monte carlo simulation
"""
if not N:
N = self.N
x = x_.copy().flatten()
y = y_.copy().flatten()
I = np.where(np.isnan(x+y))[0] ## components where x[i] or y[i] is nan
if len(I)==0: ## no nan values!
return norm( x - y, p ) /len(x)
# if not self.nan_dists:
# self.nan_dists = _make_nan_dist_probs()
w = (x-y).reshape(1, x.shape[0] ) ## column vector with x-y
W = np.concatenate([w]*N, axis = 0) ## N rows, each are w.
for i in I: # sample N values for w[i]
if np.isnan(x[i]): a = "nan"
else: a = x[i]
if np.isnan(y[i]): b = "nan"
else: b = y[i]
vals, probs = list(zip(*self.nan_dists[ (a,b) ].items()))
counts = np.random.multinomial(N, probs)
samples = []
for j in range(len(vals)): samples += [vals[j]] * counts[j]
np.random.shuffle(samples)
W[:,i] = samples ## replaces nan values with the N random samples for w[i] in W
return sum([norm( W[i,:], p)/N for i in range(N)])/len(x)
def make_sim_u_ratings(self, p = 2, naive_dist = False, N = None, r = None):
"""
Input: r = np.array of shape num_users, num_tools, num_ratings (0'th rating index for overall ratings. 1: indices for aspects)
usually r = self.r0
if r is not passed (r = None), then it uses self.r0
p = lp norm parameter (p = 0 counts non-zero entries, p = 1,2 are the usual lp norms, p = np.inf for the infinity norm)
naive_dist = bool. True uses self.distance_naive(), False uses self.distance()
N = positive int, if nothing passed, uses self.N
only used if naive_dist = False, in which case it is the monte carlo simulation number.
Note, this is all working in the matrix indices space, so indexes go from 0 to num_users. self._user_id_index maps from user_id to the indices in these matrices.
Output: sim_u_ratings = np.array of shape num_users x num_users, giving the similarity (np.exp(-dist(u,v))) of users (positive definite, 1's on diag)
"""
# if not naive_dist and not nan_dists: nan_dists = _make_nan_dist_probs()
if r is None:
r = self.r0
if not N:
N = self.N
m = self.num_users
sim_u_ratings =
| np.ones((m,m)) | numpy.ones |
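# make_sim_u_ratings is truncated at this row's boundary (completion: np.ones((m,m))).
# A minimal sketch of the construction it documents, sim(u,v) = exp(-dist(u,v)), using
# the naive nan-ignoring distance on a toy ratings array (all names and numbers here
# are illustrative, not the class's real attributes):
import numpy as np
from numpy.linalg import norm

def naive_distance(x, y, p=2):
    """||x - y||_p / len(x) over components where both ratings are observed."""
    x, y = x.ravel(), y.ravel()
    mask = ~np.isnan(x + y)
    if not mask.any():
        return 1.0                      # no mutual ratings -> maximum distance
    return norm(x[mask] - y[mask], p) / len(x)

r = np.array([[5, 4, np.nan, 2],        # toy ratings: 3 users x 4 items, nan = unrated
              [5, np.nan, 1, 2],
              [1, 1, 5, np.nan]], dtype=float)
m = len(r)
sim = np.ones((m, m))
for i in range(m):
    for j in range(i + 1, m):
        sim[i, j] = sim[j, i] = np.exp(-naive_distance(r[i], r[j]))
print(np.round(sim, 3))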
"""
Limited dependent variable and qualitative variables.
Includes binary outcomes, count data, (ordered) ordinal data and limited
dependent variables.
General References
--------------------
A.C. Cameron and P.K. Trivedi. `Regression Analysis of Count Data`.
Cambridge, 1998
G.S. Maddala. `Limited-Dependent and Qualitative Variables in Econometrics`.
Cambridge, 1983.
W. Greene. `Econometric Analysis`. Prentice Hall, 5th. edition. 2003.
"""
__all__ = ["Poisson", "Logit", "Probit", "MNLogit", "NegativeBinomial",
"GeneralizedPoisson", "NegativeBinomialP", "CountModel"]
from statsmodels.compat.pandas import Appender
import warnings
import numpy as np
from pandas import MultiIndex, get_dummies
from scipy import special, stats
from scipy.special import digamma, gammaln, loggamma, polygamma
from scipy.stats import nbinom
from statsmodels.base.data import handle_data # for mnlogit
from statsmodels.base.l1_slsqp import fit_l1_slsqp
import statsmodels.base.model as base
import statsmodels.base.wrapper as wrap
from statsmodels.distributions import genpoisson_p
import statsmodels.regression.linear_model as lm
from statsmodels.tools import data as data_tools, tools
from statsmodels.tools.decorators import cache_readonly
from statsmodels.tools.numdiff import approx_fprime_cs
from statsmodels.tools.sm_exceptions import (
PerfectSeparationError,
SpecificationWarning,
)
try:
import cvxopt # noqa:F401
have_cvxopt = True
except ImportError:
have_cvxopt = False
# TODO: When we eventually get user-settable precision, we need to change
# this
FLOAT_EPS = np.finfo(float).eps
# Limit for exponentials to avoid overflow
EXP_UPPER_LIMIT = np.log(np.finfo(np.float64).max) - 1.0
# TODO: add options for the parameter covariance/variance
# ie., OIM, EIM, and BHHH see Green 21.4
_discrete_models_docs = """
"""
_discrete_results_docs = """
%(one_line_description)s
Parameters
----------
model : A DiscreteModel instance
params : array_like
The parameters of a fitted model.
hessian : array_like
The hessian of the fitted model.
scale : float
A scale parameter for the covariance matrix.
Attributes
----------
df_resid : float
See model definition.
df_model : float
See model definition.
llf : float
Value of the loglikelihood
%(extra_attr)s"""
_l1_results_attr = """ nnz_params : int
The number of nonzero parameters in the model. Train with
trim_params == True or else numerical error will distort this.
trimmed : bool array
trimmed[i] == True if the ith parameter was trimmed from the model."""
_get_start_params_null_docs = """
Compute one-step moment estimator for null (constant-only) model
This is a preliminary estimator used as start_params.
Returns
-------
params : ndarray
parameter estimate based on one-step moment matching
"""
_check_rank_doc = """
check_rank : bool
Check exog rank to determine model degrees of freedom. Default is
True. Setting to False reduces model initialization time when
exog.shape[1] is large.
"""
# helper for MNLogit (will be generally useful later)
def _numpy_to_dummies(endog):
if endog.ndim == 2 and endog.dtype.kind not in ["S", "O"]:
endog_dummies = endog
ynames = range(endog.shape[1])
else:
dummies = get_dummies(endog, drop_first=False)
ynames = {i: dummies.columns[i] for i in range(dummies.shape[1])}
endog_dummies = np.asarray(dummies, dtype=float)
return endog_dummies, ynames
def _pandas_to_dummies(endog):
if endog.ndim == 2:
if endog.shape[1] == 1:
yname = endog.columns[0]
endog_dummies = get_dummies(endog.iloc[:, 0])
else: # series
yname = 'y'
endog_dummies = endog
else:
yname = endog.name
endog_dummies = get_dummies(endog)
ynames = endog_dummies.columns.tolist()
return endog_dummies, ynames, yname
def _validate_l1_method(method):
"""
As of 0.10.0, the supported values for `method` in `fit_regularized`
are "l1" and "l1_cvxopt_cp". If an invalid value is passed, raise
with a helpful error message
Parameters
----------
method : str
Raises
------
ValueError
"""
if method not in ['l1', 'l1_cvxopt_cp']:
raise ValueError('`method` = {method} is not supported, use either '
'"l1" or "l1_cvxopt_cp"'.format(method=method))
#### Private Model Classes ####
class DiscreteModel(base.LikelihoodModel):
"""
Abstract class for discrete choice models.
This class does not do anything itself but lays out the methods and
call signature expected of child classes in addition to those of
statsmodels.model.LikelihoodModel.
"""
def __init__(self, endog, exog, check_rank=True, **kwargs):
self._check_rank = check_rank
super().__init__(endog, exog, **kwargs)
self.raise_on_perfect_prediction = True
def initialize(self):
"""
Initialize is called by
statsmodels.model.LikelihoodModel.__init__
and should contain any preprocessing that needs to be done for a model.
"""
if self._check_rank:
# assumes constant
rank = tools.matrix_rank(self.exog, method="qr")
else:
# If rank check is skipped, assume full
rank = self.exog.shape[1]
self.df_model = float(rank - 1)
self.df_resid = float(self.exog.shape[0] - rank)
def cdf(self, X):
"""
The cumulative distribution function of the model.
"""
raise NotImplementedError
def pdf(self, X):
"""
The probability density (mass) function of the model.
"""
raise NotImplementedError
def _check_perfect_pred(self, params, *args):
endog = self.endog
fittedvalues = self.cdf(np.dot(self.exog, params[:self.exog.shape[1]]))
if (self.raise_on_perfect_prediction and
np.allclose(fittedvalues - endog, 0)):
msg = "Perfect separation detected, results not available"
raise PerfectSeparationError(msg)
@Appender(base.LikelihoodModel.fit.__doc__)
def fit(self, start_params=None, method='newton', maxiter=35,
full_output=1, disp=1, callback=None, **kwargs):
"""
Fit the model using maximum likelihood.
The rest of the docstring is from
statsmodels.base.model.LikelihoodModel.fit
"""
if callback is None:
callback = self._check_perfect_pred
else:
pass # TODO: make a function factory to have multiple call-backs
mlefit = super().fit(start_params=start_params,
method=method,
maxiter=maxiter,
full_output=full_output,
disp=disp,
callback=callback,
**kwargs)
return mlefit # It is up to subclasses to wrap results
def fit_regularized(self, start_params=None, method='l1',
maxiter='defined_by_method', full_output=1, disp=True,
callback=None, alpha=0, trim_mode='auto',
auto_trim_tol=0.01, size_trim_tol=1e-4, qc_tol=0.03,
qc_verbose=False, **kwargs):
"""
Fit the model using a regularized maximum likelihood.
The regularization method AND the solver used is determined by the
argument method.
Parameters
----------
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is an array of zeros.
method : 'l1' or 'l1_cvxopt_cp'
See notes for details.
maxiter : {int, 'defined_by_method'}
Maximum number of iterations to perform.
If 'defined_by_method', then use method defaults (see notes).
full_output : bool
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
disp : bool
Set to True to print convergence messages.
fargs : tuple
Extra arguments passed to the likelihood function, i.e.,
loglike(x,*args).
callback : callable callback(xk)
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
retall : bool
Set to True to return list of solutions at each iteration.
Available in Results object's mle_retvals attribute.
alpha : non-negative scalar or numpy array (same size as parameters)
The weight multiplying the l1 penalty term.
trim_mode : 'auto, 'size', or 'off'
If not 'off', trim (set to zero) parameters that would have been
zero if the solver reached the theoretical minimum.
If 'auto', trim params using the Theory above.
If 'size', trim params if they have very small absolute value.
size_trim_tol : float or 'auto' (default = 'auto')
Tolerance used when trim_mode == 'size'.
auto_trim_tol : float
Tolerance used when trim_mode == 'auto'.
qc_tol : float
Print warning and do not allow auto trim when (ii) (above) is
violated by this much.
qc_verbose : bool
If true, print out a full QC report upon failure.
**kwargs
Additional keyword arguments used when fitting the model.
Returns
-------
Results
A results instance.
Notes
-----
Using 'l1_cvxopt_cp' requires the cvxopt module.
Extra parameters are not penalized if alpha is given as a scalar.
An example is the shape parameter in NegativeBinomial `nb1` and `nb2`.
Optional arguments for the solvers (available in Results.mle_settings)::
'l1'
acc : float (default 1e-6)
Requested accuracy as used by slsqp
'l1_cvxopt_cp'
abstol : float
absolute accuracy (default: 1e-7).
reltol : float
relative accuracy (default: 1e-6).
feastol : float
tolerance for feasibility conditions (default: 1e-7).
refinement : int
number of iterative refinement steps when solving KKT
equations (default: 1).
Optimization methodology
With :math:`L` the negative log likelihood, we solve the convex but
non-smooth problem
.. math:: \\min_\\beta L(\\beta) + \\sum_k\\alpha_k |\\beta_k|
via the transformation to the smooth, convex, constrained problem
in twice as many variables (adding the "added variables" :math:`u_k`)
.. math:: \\min_{\\beta,u} L(\\beta) + \\sum_k\\alpha_k u_k,
subject to
.. math:: -u_k \\leq \\beta_k \\leq u_k.
With :math:`\\partial_k L` the derivative of :math:`L` in the
:math:`k^{th}` parameter direction, theory dictates that, at the
minimum, exactly one of two conditions holds:
(i) :math:`|\\partial_k L| = \\alpha_k` and :math:`\\beta_k \\neq 0`
(ii) :math:`|\\partial_k L| \\leq \\alpha_k` and :math:`\\beta_k = 0`
"""
_validate_l1_method(method)
# Set attributes based on method
cov_params_func = self.cov_params_func_l1
### Bundle up extra kwargs for the dictionary kwargs. These are
### passed through super(...).fit() as kwargs and unpacked at
### appropriate times
alpha = np.array(alpha)
assert alpha.min() >= 0
try:
kwargs['alpha'] = alpha
except TypeError:
kwargs = dict(alpha=alpha)
kwargs['alpha_rescaled'] = kwargs['alpha'] / float(self.endog.shape[0])
kwargs['trim_mode'] = trim_mode
kwargs['size_trim_tol'] = size_trim_tol
kwargs['auto_trim_tol'] = auto_trim_tol
kwargs['qc_tol'] = qc_tol
kwargs['qc_verbose'] = qc_verbose
### Define default keyword arguments to be passed to super(...).fit()
if maxiter == 'defined_by_method':
if method == 'l1':
maxiter = 1000
elif method == 'l1_cvxopt_cp':
maxiter = 70
## Parameters to pass to super(...).fit()
# For the 'extra' parameters, pass all that are available,
# even if we know (at this point) we will only use one.
extra_fit_funcs = {'l1': fit_l1_slsqp}
if have_cvxopt and method == 'l1_cvxopt_cp':
from statsmodels.base.l1_cvxopt import fit_l1_cvxopt_cp
extra_fit_funcs['l1_cvxopt_cp'] = fit_l1_cvxopt_cp
elif method.lower() == 'l1_cvxopt_cp':
raise ValueError("Cannot use l1_cvxopt_cp as cvxopt "
"was not found (install it, or use method='l1' instead)")
if callback is None:
callback = self._check_perfect_pred
else:
pass # make a function factory to have multiple call-backs
mlefit = super().fit(start_params=start_params,
method=method,
maxiter=maxiter,
full_output=full_output,
disp=disp,
callback=callback,
extra_fit_funcs=extra_fit_funcs,
cov_params_func=cov_params_func,
**kwargs)
return mlefit # up to subclasses to wrap results
def cov_params_func_l1(self, likelihood_model, xopt, retvals):
"""
Computes cov_params on a reduced parameter space
corresponding to the nonzero parameters resulting from the
l1 regularized fit.
Returns a full cov_params matrix, with entries corresponding
to zero'd values set to np.nan.
"""
H = likelihood_model.hessian(xopt)
trimmed = retvals['trimmed']
nz_idx = np.nonzero(~trimmed)[0]
nnz_params = (~trimmed).sum()
if nnz_params > 0:
H_restricted = H[nz_idx[:, None], nz_idx]
# Covariance estimate for the nonzero params
H_restricted_inv = np.linalg.inv(-H_restricted)
else:
H_restricted_inv = np.zeros(0)
cov_params = np.nan * np.ones(H.shape)
cov_params[nz_idx[:, None], nz_idx] = H_restricted_inv
return cov_params
def predict(self, params, exog=None, linear=False):
"""
Predict response variable of a model given exogenous variables.
"""
raise NotImplementedError
def _derivative_exog(self, params, exog=None, dummy_idx=None,
count_idx=None):
"""
This should implement the derivative of the non-linear function
"""
raise NotImplementedError
def _derivative_exog_helper(self, margeff, params, exog, dummy_idx,
count_idx, transform):
"""
Helper for _derivative_exog to wrap results appropriately
"""
from .discrete_margins import _get_count_effects, _get_dummy_effects
if count_idx is not None:
margeff = _get_count_effects(margeff, exog, count_idx, transform,
self, params)
if dummy_idx is not None:
margeff = _get_dummy_effects(margeff, exog, dummy_idx, transform,
self, params)
return margeff
class BinaryModel(DiscreteModel):
_continuous_ok = False
def __init__(self, endog, exog, check_rank=True, **kwargs):
# unconditional check, requires no extra kwargs added by subclasses
self._check_kwargs(kwargs)
super().__init__(endog, exog, check_rank, **kwargs)
if not issubclass(self.__class__, MultinomialModel):
if not np.all((self.endog >= 0) & (self.endog <= 1)):
raise ValueError("endog must be in the unit interval.")
if (not self._continuous_ok and
np.any(self.endog != np.round(self.endog))):
raise ValueError("endog must be binary, either 0 or 1")
def predict(self, params, exog=None, linear=False):
"""
Predict response variable of a model given exogenous variables.
Parameters
----------
params : array_like
Fitted parameters of the model.
exog : array_like
1d or 2d array of exogenous values. If not supplied, the
whole exog attribute of the model is used.
linear : bool, optional
If True, returns the linear predictor dot(exog,params). Else,
returns the value of the cdf at the linear predictor.
Returns
-------
array
Fitted values at exog.
"""
if exog is None:
exog = self.exog
if not linear:
return self.cdf(np.dot(exog, params))
else:
return np.dot(exog, params)
@Appender(DiscreteModel.fit_regularized.__doc__)
def fit_regularized(self, start_params=None, method='l1',
maxiter='defined_by_method', full_output=1, disp=1, callback=None,
alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4,
qc_tol=0.03, **kwargs):
_validate_l1_method(method)
bnryfit = super().fit_regularized(start_params=start_params,
method=method,
maxiter=maxiter,
full_output=full_output,
disp=disp,
callback=callback,
alpha=alpha,
trim_mode=trim_mode,
auto_trim_tol=auto_trim_tol,
size_trim_tol=size_trim_tol,
qc_tol=qc_tol,
**kwargs)
discretefit = L1BinaryResults(self, bnryfit)
return L1BinaryResultsWrapper(discretefit)
def _derivative_predict(self, params, exog=None, transform='dydx'):
"""
For computing marginal effects standard errors.
This is used only in the case of discrete and count regressors to
get the variance-covariance of the marginal effects. It returns
[d F / d params] where F is the predict.
Transform can be 'dydx' or 'eydx'. Checking is done in margeff
computations for appropriate transform.
"""
if exog is None:
exog = self.exog
dF = self.pdf(np.dot(exog, params))[:,None] * exog
if 'ey' in transform:
dF /= self.predict(params, exog)[:,None]
return dF
def _derivative_exog(self, params, exog=None, transform='dydx',
dummy_idx=None, count_idx=None):
"""
For computing marginal effects returns dF(XB) / dX where F(.) is
the predicted probabilities
transform can be 'dydx', 'dyex', 'eydx', or 'eyex'.
Not all of these make sense in the presence of discrete regressors,
but checks are done in the results in get_margeff.
"""
# Note: this form should be appropriate for
# group 1 probit, logit, logistic, cloglog, heckprob, xtprobit
if exog is None:
exog = self.exog
margeff = np.dot(self.pdf(np.dot(exog, params))[:, None],
params[None, :])
if 'ex' in transform:
margeff *= exog
if 'ey' in transform:
margeff /= self.predict(params, exog)[:, None]
return self._derivative_exog_helper(margeff, params, exog,
dummy_idx, count_idx, transform)
class MultinomialModel(BinaryModel):
def _handle_data(self, endog, exog, missing, hasconst, **kwargs):
if data_tools._is_using_ndarray_type(endog, None):
endog_dummies, ynames = _numpy_to_dummies(endog)
yname = 'y'
elif data_tools._is_using_pandas(endog, None):
endog_dummies, ynames, yname = _pandas_to_dummies(endog)
else:
endog = np.asarray(endog)
endog_dummies, ynames = _numpy_to_dummies(endog)
yname = 'y'
if not isinstance(ynames, dict):
ynames = dict(zip(range(endog_dummies.shape[1]), ynames))
self._ynames_map = ynames
data = handle_data(endog_dummies, exog, missing, hasconst, **kwargs)
data.ynames = yname # overwrite this to single endog name
data.orig_endog = endog
self.wendog = data.endog
# repeating from upstream...
for key in kwargs:
if key in ['design_info', 'formula']: # leave attached to data
continue
try:
setattr(self, key, data.__dict__.pop(key))
except KeyError:
pass
return data
def initialize(self):
"""
Preprocesses the data for MNLogit.
"""
super().initialize()
# This is also a "whiten" method in other models (eg regression)
self.endog = self.endog.argmax(1) # turn it into an array of col idx
self.J = self.wendog.shape[1]
self.K = self.exog.shape[1]
self.df_model *= (self.J-1) # for each J - 1 equation.
self.df_resid = self.exog.shape[0] - self.df_model - (self.J-1)
def predict(self, params, exog=None, linear=False):
"""
Predict response variable of a model given exogenous variables.
Parameters
----------
params : array_like
2d array of fitted parameters of the model. Should be in the
order returned from the model.
exog : array_like
1d or 2d array of exogenous values. If not supplied, the
whole exog attribute of the model is used. If a 1d array is given
it assumed to be 1 row of exogenous variables. If you only have
one regressor and would like to do prediction, you must provide
a 2d array with shape[1] == 1.
linear : bool, optional
If True, returns the linear predictor dot(exog,params). Else,
returns the value of the cdf at the linear predictor.
Notes
-----
Column 0 is the base case, the rest conform to the rows of params
shifted up one for the base case.
"""
if exog is None: # do here to accommodate user-given exog
exog = self.exog
if exog.ndim == 1:
exog = exog[None]
pred = super().predict(params, exog, linear)
if linear:
pred = np.column_stack((np.zeros(len(exog)), pred))
return pred
@Appender(DiscreteModel.fit.__doc__)
def fit(self, start_params=None, method='newton', maxiter=35,
full_output=1, disp=1, callback=None, **kwargs):
if start_params is None:
start_params = np.zeros((self.K * (self.J-1)))
else:
start_params = np.asarray(start_params)
callback = lambda x : None # placeholder until check_perfect_pred
# skip calling super to handle results from LikelihoodModel
mnfit = base.LikelihoodModel.fit(self, start_params = start_params,
method=method, maxiter=maxiter, full_output=full_output,
disp=disp, callback=callback, **kwargs)
mnfit.params = mnfit.params.reshape(self.K, -1, order='F')
mnfit = MultinomialResults(self, mnfit)
return MultinomialResultsWrapper(mnfit)
@Appender(DiscreteModel.fit_regularized.__doc__)
def fit_regularized(self, start_params=None, method='l1',
maxiter='defined_by_method', full_output=1, disp=1, callback=None,
alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4,
qc_tol=0.03, **kwargs):
if start_params is None:
start_params = np.zeros((self.K * (self.J-1)))
else:
start_params = np.asarray(start_params)
mnfit = DiscreteModel.fit_regularized(
self, start_params=start_params, method=method, maxiter=maxiter,
full_output=full_output, disp=disp, callback=callback,
alpha=alpha, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,
size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs)
mnfit.params = mnfit.params.reshape(self.K, -1, order='F')
mnfit = L1MultinomialResults(self, mnfit)
return L1MultinomialResultsWrapper(mnfit)
def _derivative_predict(self, params, exog=None, transform='dydx'):
"""
For computing marginal effects standard errors.
This is used only in the case of discrete and count regressors to
get the variance-covariance of the marginal effects. It returns
[d F / d params] where F is the predicted probabilities for each
choice. dFdparams is of shape nobs x (J*K) x (J-1)*K.
The zero derivatives for the base category are not included.
Transform can be 'dydx' or 'eydx'. Checking is done in margeff
computations for appropriate transform.
"""
if exog is None:
exog = self.exog
if params.ndim == 1: # will get flatted from approx_fprime
params = params.reshape(self.K, self.J-1, order='F')
eXB = np.exp(np.dot(exog, params))
sum_eXB = (1 + eXB.sum(1))[:,None]
J = int(self.J)
K = int(self.K)
repeat_eXB = np.repeat(eXB, J, axis=1)
X = np.tile(exog, J-1)
# this is the derivative wrt the base level
F0 = -repeat_eXB * X / sum_eXB ** 2
# this is the derivative wrt the other levels when
# dF_j / dParams_j (ie., own equation)
#NOTE: this computes too much, any easy way to cut down?
F1 = eXB.T[:,:,None]*X * (sum_eXB - repeat_eXB) / (sum_eXB**2)
F1 = F1.transpose((1,0,2)) # put the nobs index first
# other equation index
other_idx = ~np.kron(np.eye(J-1), np.ones(K)).astype(bool)
F1[:, other_idx] = (-eXB.T[:,:,None]*X*repeat_eXB / \
(sum_eXB**2)).transpose((1,0,2))[:, other_idx]
dFdX = np.concatenate((F0[:, None,:], F1), axis=1)
if 'ey' in transform:
dFdX /= self.predict(params, exog)[:, :, None]
return dFdX
def _derivative_exog(self, params, exog=None, transform='dydx',
dummy_idx=None, count_idx=None):
"""
For computing marginal effects returns dF(XB) / dX where F(.) is
the predicted probabilities
transform can be 'dydx', 'dyex', 'eydx', or 'eyex'.
Not all of these make sense in the presence of discrete regressors,
but checks are done in the results in get_margeff.
For Multinomial models the marginal effects are
P[j] * (params[j] - sum_k P[k]*params[k])
It is returned unshaped, so that each row contains each of the J
equations. This makes it easier to take derivatives of this for
standard errors. If you want average marginal effects you can do
margeff.reshape(nobs, K, J, order='F').mean(0) and the marginal effects
for choice J are in column J
"""
J = int(self.J) # number of alternative choices
K = int(self.K) # number of variables
# Note: this form should be appropriate for
# group 1 probit, logit, logistic, cloglog, heckprob, xtprobit
if exog is None:
exog = self.exog
if params.ndim == 1: # will get flatted from approx_fprime
params = params.reshape(K, J-1, order='F')
zeroparams = np.c_[np.zeros(K), params] # add base in
cdf = self.cdf(np.dot(exog, params))
# TODO: meaningful interpretation for `iterm`?
iterm = np.array([cdf[:, [i]] * zeroparams[:, i]
for i in range(int(J))]).sum(0)
margeff = np.array([cdf[:, [j]] * (zeroparams[:, j] - iterm)
for j in range(J)])
# swap the axes to make sure margeff are in order nobs, K, J
margeff = np.transpose(margeff, (1, 2, 0))
if 'ex' in transform:
margeff *= exog
if 'ey' in transform:
margeff /= self.predict(params, exog)[:,None,:]
margeff = self._derivative_exog_helper(margeff, params, exog,
dummy_idx, count_idx, transform)
return margeff.reshape(len(exog), -1, order='F')
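# The docstring above gives the multinomial marginal effect as
# P[j] * (params[j] - sum_k P[k]*params[k]). A small numpy check of that identity
# against a finite-difference derivative of softmax probabilities (toy numbers,
# independent of the statsmodels classes):
import numpy as np

rng = np.random.default_rng(0)
K, J = 3, 4                                               # K regressors, J choices
beta = np.c_[np.zeros(K), rng.normal(size=(K, J - 1))]    # base column of zeros
x = rng.normal(size=K)

def probs(x):
    e = np.exp(x @ beta)
    return e / e.sum()

p = probs(x)
analytic = p[:, None] * (beta.T - p @ beta.T)             # row j = dP_j / dx
numeric = np.array([(probs(x + 1e-6 * np.eye(K)[k]) - probs(x)) / 1e-6
                    for k in range(K)]).T
print(np.allclose(analytic, numeric, atol=1e-4))          # True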
class CountModel(DiscreteModel):
def __init__(self, endog, exog, offset=None, exposure=None, missing='none',
check_rank=True, **kwargs):
self._check_kwargs(kwargs)
super().__init__(endog, exog, check_rank, missing=missing,
offset=offset, exposure=exposure, **kwargs)
if exposure is not None:
self.exposure = np.asarray(self.exposure)
self.exposure = np.log(self.exposure)
if offset is not None:
self.offset = np.asarray(self.offset)
self._check_inputs(self.offset, self.exposure, self.endog)
if offset is None:
delattr(self, 'offset')
if exposure is None:
delattr(self, 'exposure')
# promote dtype to float64 if needed
dt = np.promote_types(self.endog.dtype, np.float64)
self.endog = np.asarray(self.endog, dt)
dt = np.promote_types(self.exog.dtype, np.float64)
self.exog = np.asarray(self.exog, dt)
def _check_inputs(self, offset, exposure, endog):
if offset is not None and offset.shape[0] != endog.shape[0]:
raise ValueError("offset is not the same length as endog")
if exposure is not None and exposure.shape[0] != endog.shape[0]:
raise ValueError("exposure is not the same length as endog")
def _get_init_kwds(self):
# this is a temporary fixup because exposure has been transformed
# see #1609
kwds = super()._get_init_kwds()
if 'exposure' in kwds and kwds['exposure'] is not None:
kwds['exposure'] = np.exp(kwds['exposure'])
return kwds
def predict(self, params, exog=None, exposure=None, offset=None,
linear=False):
"""
Predict response variable of a count model given exogenous variables
Parameters
----------
params : array_like
Model parameters
exog : array_like, optional
Design / exogenous data. If exog is None, model exog is used.
exposure : array_like, optional
Log(exposure) is added to the linear prediction with
coefficient equal to 1. If exposure is not provided and exog
is None, uses the model's exposure if present. If not, uses
0 as the default value.
offset : array_like, optional
Offset is added to the linear prediction with coefficient
equal to 1. If offset is not provided and exog
is None, uses the model's offset if present. If not, uses
0 as the default value.
linear : bool
If True, returns the linear predicted values. If False,
returns the exponential of the linear predicted value.
Notes
-----
If exposure is specified, then it will be logged by the method.
The user does not need to log it first.
"""
# the following is copied from GLM predict (without family/link check)
# Use fit offset if appropriate
if offset is None and exog is None and hasattr(self, 'offset'):
offset = self.offset
elif offset is None:
offset = 0.
# Use fit exposure if appropriate
if exposure is None and exog is None and hasattr(self, 'exposure'):
# Already logged
exposure = self.exposure
elif exposure is None:
exposure = 0.
else:
exposure = np.log(exposure)
if exog is None:
exog = self.exog
fitted = np.dot(exog, params[:exog.shape[1]])
linpred = fitted + exposure + offset
if not linear:
return np.exp(linpred) # not cdf
else:
return linpred
def _derivative_predict(self, params, exog=None, transform='dydx'):
"""
For computing marginal effects standard errors.
This is used only in the case of discrete and count regressors to
get the variance-covariance of the marginal effects. It returns
[d F / d params] where F is the predict.
Transform can be 'dydx' or 'eydx'. Checking is done in margeff
computations for appropriate transform.
"""
if exog is None:
exog = self.exog
#NOTE: this handles offset and exposure
dF = self.predict(params, exog)[:,None] * exog
if 'ey' in transform:
dF /= self.predict(params, exog)[:,None]
return dF
def _derivative_exog(self, params, exog=None, transform="dydx",
dummy_idx=None, count_idx=None):
"""
For computing marginal effects. These are the marginal effects
d F(XB) / dX
For the Poisson model F(XB) is the predicted counts rather than
the probabilities.
transform can be 'dydx', 'dyex', 'eydx', or 'eyex'.
Not all of these make sense in the presence of discrete regressors,
but checks are done in the results in get_margeff.
"""
# group 3 poisson, nbreg, zip, zinb
if exog is None:
exog = self.exog
k_extra = getattr(self, 'k_extra', 0)
params_exog = params if k_extra == 0 else params[:-k_extra]
margeff = self.predict(params, exog)[:,None] * params_exog[None,:]
if 'ex' in transform:
margeff *= exog
if 'ey' in transform:
margeff /= self.predict(params, exog)[:,None]
return self._derivative_exog_helper(margeff, params, exog,
dummy_idx, count_idx, transform)
@Appender(DiscreteModel.fit.__doc__)
def fit(self, start_params=None, method='newton', maxiter=35,
full_output=1, disp=1, callback=None, **kwargs):
cntfit = super().fit(start_params=start_params,
method=method,
maxiter=maxiter,
full_output=full_output,
disp=disp,
callback=callback,
**kwargs)
discretefit = CountResults(self, cntfit)
return CountResultsWrapper(discretefit)
@Appender(DiscreteModel.fit_regularized.__doc__)
def fit_regularized(self, start_params=None, method='l1',
maxiter='defined_by_method', full_output=1, disp=1, callback=None,
alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4,
qc_tol=0.03, **kwargs):
_validate_l1_method(method)
cntfit = super().fit_regularized(start_params=start_params,
method=method,
maxiter=maxiter,
full_output=full_output,
disp=disp,
callback=callback,
alpha=alpha,
trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,
size_trim_tol=size_trim_tol,
qc_tol=qc_tol,
**kwargs)
discretefit = L1CountResults(self, cntfit)
return L1CountResultsWrapper(discretefit)
# Public Model Classes
class Poisson(CountModel):
__doc__ = """
Poisson Model
%(params)s
%(extra_params)s
Attributes
----------
endog : ndarray
A reference to the endogenous response variable
exog : ndarray
A reference to the exogenous design.
""" % {'params': base._model_params_doc,
'extra_params':
"""offset : array_like
Offset is added to the linear prediction with coefficient equal to 1.
exposure : array_like
Log(exposure) is added to the linear prediction with coefficient
equal to 1.
""" + base._missing_param_doc + _check_rank_doc}
@property
def family(self):
from statsmodels.genmod import families
return families.Poisson()
def cdf(self, X):
"""
Poisson model cumulative distribution function
Parameters
----------
X : array_like
`X` is the linear predictor of the model. See notes.
Returns
-------
The value of the Poisson CDF at each point.
Notes
-----
The CDF is defined as
.. math:: \\exp\\left(-\\lambda\\right)\\sum_{i=0}^{y}\\frac{\\lambda^{i}}{i!}
where :math:`\\lambda` assumes the loglinear model. I.e.,
.. math:: \\ln\\lambda_{i}=X\\beta
The parameter `X` is :math:`X\\beta` in the above formula.
"""
y = self.endog
return stats.poisson.cdf(y, np.exp(X))
def pdf(self, X):
"""
Poisson model probability mass function
Parameters
----------
X : array_like
`X` is the linear predictor of the model. See notes.
Returns
-------
pdf : ndarray
The value of the Poisson probability mass function, PMF, for each
point of X.
Notes
--------
The PMF is defined as
.. math:: \\frac{e^{-\\lambda_{i}}\\lambda_{i}^{y_{i}}}{y_{i}!}
where :math:`\\lambda` assumes the loglinear model. I.e.,
.. math:: \\ln\\lambda_{i}=x_{i}\\beta
The parameter `X` is :math:`x_{i}\\beta` in the above formula.
"""
y = self.endog
return np.exp(stats.poisson.logpmf(y, np.exp(X)))
def loglike(self, params):
"""
Loglikelihood of Poisson model
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
loglike : float
The log-likelihood function of the model evaluated at `params`.
See notes.
Notes
--------
.. math:: \\ln L=\\sum_{i=1}^{n}\\left[-\\lambda_{i}+y_{i}x_{i}^{\\prime}\\beta-\\ln y_{i}!\\right]
"""
offset = getattr(self, "offset", 0)
exposure = getattr(self, "exposure", 0)
XB = np.dot(self.exog, params) + offset + exposure
endog = self.endog
return np.sum(
-np.exp(np.clip(XB, None, EXP_UPPER_LIMIT))
+ endog * XB
- gammaln(endog + 1)
)
def loglikeobs(self, params):
"""
Loglikelihood for observations of Poisson model
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
loglike : array_like
The log likelihood for each observation of the model evaluated
at `params`. See Notes
Notes
--------
.. math:: \\ln L_{i}=\\left[-\\lambda_{i}+y_{i}x_{i}^{\\prime}\\beta-\\ln y_{i}!\\right]
for observations :math:`i=1,...,n`
"""
offset = getattr(self, "offset", 0)
exposure = getattr(self, "exposure", 0)
XB = np.dot(self.exog, params) + offset + exposure
endog = self.endog
#np.sum(stats.poisson.logpmf(endog, np.exp(XB)))
return -np.exp(XB) + endog*XB - gammaln(endog+1)
@Appender(_get_start_params_null_docs)
def _get_start_params_null(self):
offset = getattr(self, "offset", 0)
exposure = getattr(self, "exposure", 0)
const = (self.endog / np.exp(offset + exposure)).mean()
params = [np.log(const)]
return params
@Appender(DiscreteModel.fit.__doc__)
def fit(self, start_params=None, method='newton', maxiter=35,
full_output=1, disp=1, callback=None, **kwargs):
if start_params is None and self.data.const_idx is not None:
# k_params or k_exog not available?
start_params = 0.001 * np.ones(self.exog.shape[1])
start_params[self.data.const_idx] = self._get_start_params_null()[0]
kwds = {}
if kwargs.get('cov_type') is not None:
kwds['cov_type'] = kwargs.get('cov_type')
kwds['cov_kwds'] = kwargs.get('cov_kwds', {})
cntfit = super(CountModel, self).fit(start_params=start_params,
method=method,
maxiter=maxiter,
full_output=full_output,
disp=disp,
callback=callback,
**kwargs)
discretefit = PoissonResults(self, cntfit, **kwds)
return PoissonResultsWrapper(discretefit)
@Appender(DiscreteModel.fit_regularized.__doc__)
def fit_regularized(self, start_params=None, method='l1',
maxiter='defined_by_method', full_output=1, disp=1, callback=None,
alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4,
qc_tol=0.03, **kwargs):
_validate_l1_method(method)
cntfit = super(CountModel, self).fit_regularized(
start_params=start_params, method=method, maxiter=maxiter,
full_output=full_output, disp=disp, callback=callback,
alpha=alpha, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,
size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs)
discretefit = L1PoissonResults(self, cntfit)
return L1PoissonResultsWrapper(discretefit)
def fit_constrained(self, constraints, start_params=None, **fit_kwds):
"""fit the model subject to linear equality constraints
The constraints are of the form `R params = q`
where R is the constraint_matrix and q is the vector of
constraint_values.
The estimation creates a new model with transformed design matrix,
exog, and converts the results back to the original parameterization.
Parameters
----------
constraints : formula expression or tuple
If it is a tuple, then the constraint needs to be given by two
arrays (constraint_matrix, constraint_value), i.e. (R, q).
Otherwise, the constraints can be given as strings or list of
strings.
see t_test for details
start_params : None or array_like
starting values for the optimization. `start_params` needs to be
given in the original parameter space and are internally
transformed.
**fit_kwds : keyword arguments
fit_kwds are used in the optimization of the transformed model.
Returns
-------
results : Results instance
"""
#constraints = (R, q)
# TODO: temporary trailing underscore to not overwrite the monkey
# patched version
# TODO: decide whether to move the imports
from patsy import DesignInfo
from statsmodels.base._constraints import (fit_constrained,
LinearConstraints)
# same pattern as in base.LikelihoodModel.t_test
lc = DesignInfo(self.exog_names).linear_constraint(constraints)
R, q = lc.coefs, lc.constants
# TODO: add start_params option, need access to transformation
# fit_constrained needs to do the transformation
params, cov, res_constr = fit_constrained(self, R, q,
start_params=start_params,
fit_kwds=fit_kwds)
#create dummy results Instance, TODO: wire up properly
res = self.fit(maxiter=0, method='nm', disp=0,
warn_convergence=False) # we get a wrapper back
res.mle_retvals['fcall'] = res_constr.mle_retvals.get('fcall', np.nan)
res.mle_retvals['iterations'] = res_constr.mle_retvals.get(
'iterations', np.nan)
res.mle_retvals['converged'] = res_constr.mle_retvals['converged']
res._results.params = params
res._results.cov_params_default = cov
cov_type = fit_kwds.get('cov_type', 'nonrobust')
if cov_type != 'nonrobust':
res._results.normalized_cov_params = cov # assume scale=1
else:
res._results.normalized_cov_params = None
k_constr = len(q)
res._results.df_resid += k_constr
res._results.df_model -= k_constr
res._results.constraints = LinearConstraints.from_patsy(lc)
res._results.k_constr = k_constr
res._results.results_constrained = res_constr
return res
def score(self, params):
"""
Poisson model score (gradient) vector of the log-likelihood
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
score : ndarray, 1-D
The score vector of the model, i.e. the first derivative of the
loglikelihood function, evaluated at `params`
Notes
-----
.. math:: \\frac{\\partial\\ln L}{\\partial\\beta}=\\sum_{i=1}^{n}\\left(y_{i}-\\lambda_{i}\\right)x_{i}
where the loglinear model is assumed
.. math:: \\ln\\lambda_{i}=x_{i}\\beta
"""
offset = getattr(self, "offset", 0)
exposure = getattr(self, "exposure", 0)
X = self.exog
L = np.exp(np.dot(X,params) + offset + exposure)
return np.dot(self.endog - L, X)
def score_obs(self, params):
"""
Poisson model Jacobian of the log-likelihood for each observation
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
score : array_like
The score vector (nobs, k_vars) of the model evaluated at `params`
Notes
-----
.. math:: \\frac{\\partial\\ln L_{i}}{\\partial\\beta}=\\left(y_{i}-\\lambda_{i}\\right)x_{i}
for observations :math:`i=1,...,n`
where the loglinear model is assumed
.. math:: \\ln\\lambda_{i}=x_{i}\\beta
"""
offset = getattr(self, "offset", 0)
exposure = getattr(self, "exposure", 0)
X = self.exog
L = np.exp(np.dot(X,params) + offset + exposure)
return (self.endog - L)[:,None] * X
def score_factor(self, params):
"""
Poisson model score_factor for each observation
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
score : array_like
The score factor (nobs, ) of the model evaluated at `params`
Notes
-----
.. math:: \\frac{\\partial\\ln L_{i}}{\\partial\\beta}=\\left(y_{i}-\\lambda_{i}\\right)
for observations :math:`i=1,...,n`
where the loglinear model is assumed
.. math:: \\ln\\lambda_{i}=x_{i}\\beta
"""
offset = getattr(self, "offset", 0)
exposure = getattr(self, "exposure", 0)
X = self.exog
L = np.exp(np.dot(X,params) + offset + exposure)
return (self.endog - L)
def hessian(self, params):
"""
Poisson model Hessian matrix of the loglikelihood
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
hess : ndarray, (k_vars, k_vars)
The Hessian, second derivative of loglikelihood function,
evaluated at `params`
Notes
-----
.. math:: \\frac{\\partial^{2}\\ln L}{\\partial\\beta\\partial\\beta^{\\prime}}=-\\sum_{i=1}^{n}\\lambda_{i}x_{i}x_{i}^{\\prime}
where the loglinear model is assumed
.. math:: \\ln\\lambda_{i}=x_{i}\\beta
"""
offset = getattr(self, "offset", 0)
exposure = getattr(self, "exposure", 0)
X = self.exog
L = np.exp(np.dot(X,params) + exposure + offset)
return -np.dot(L*X.T, X)
def hessian_factor(self, params):
"""
Poisson model Hessian factor
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
hess : ndarray, (nobs,)
The Hessian factor, second derivative of loglikelihood function
with respect to the linear predictor evaluated at `params`
Notes
-----
.. math:: \\frac{\\partial^{2}\\ln L}{\\partial\\beta\\partial\\beta^{\\prime}}=-\\sum_{i=1}^{n}\\lambda_{i}
where the loglinear model is assumed
.. math:: \\ln\\lambda_{i}=x_{i}\\beta
"""
offset = getattr(self, "offset", 0)
exposure = getattr(self, "exposure", 0)
X = self.exog
L = np.exp(np.dot(X,params) + exposure + offset)
return L
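# The Poisson class above implements loglike, score, and hessian for the log-linear
# count model ln(lambda_i) = x_i' beta. A short usage sketch on synthetic data, going
# through the usual statsmodels entry points (sm.Poisson, sm.add_constant):
import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
x = rng.normal(size=(500, 2))
X = sm.add_constant(x)                            # prepend the intercept column
beta_true = np.array([0.5, 0.8, -0.3])
y = rng.poisson(np.exp(X @ beta_true))            # counts with ln(lambda) = X @ beta

res = sm.Poisson(y, X).fit(disp=0)
print(res.params)                                 # should land close to beta_true
print(res.model.score(res.params))                # score (gradient) ~ 0 at the MLE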
class GeneralizedPoisson(CountModel):
__doc__ = """
Generalized Poisson Model
%(params)s
%(extra_params)s
Attributes
----------
endog : ndarray
A reference to the endogenous response variable
exog : ndarray
A reference to the exogenous design.
""" % {'params': base._model_params_doc,
'extra_params':
"""
p : scalar
P denotes parameterizations for GP regression. p=1 for GP-1 and
p=2 for GP-2. Default is p=1.
offset : array_like
Offset is added to the linear prediction with coefficient equal to 1.
exposure : array_like
Log(exposure) is added to the linear prediction with coefficient
equal to 1.""" + base._missing_param_doc + _check_rank_doc}
def __init__(self, endog, exog, p=1, offset=None,
exposure=None, missing='none', check_rank=True, **kwargs):
super().__init__(endog,
exog,
offset=offset,
exposure=exposure,
missing=missing,
check_rank=check_rank,
**kwargs)
self.parameterization = p - 1
self.exog_names.append('alpha')
self.k_extra = 1
self._transparams = False
def _get_init_kwds(self):
kwds = super()._get_init_kwds()
kwds['p'] = self.parameterization + 1
return kwds
def loglike(self, params):
"""
Loglikelihood of Generalized Poisson model
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
loglike : float
The log-likelihood function of the model evaluated at `params`.
See notes.
Notes
--------
.. math:: \\ln L=\\sum_{i=1}^{n}\\left[\\mu_{i}+(y_{i}-1)*ln(\\mu_{i}+
\\alpha*\\mu_{i}^{p-1}*y_{i})-y_{i}*ln(1+\\alpha*\\mu_{i}^{p-1})-
ln(y_{i}!)-\\frac{\\mu_{i}+\\alpha*\\mu_{i}^{p-1}*y_{i}}{1+\\alpha*
\\mu_{i}^{p-1}}\\right]
"""
return np.sum(self.loglikeobs(params))
def loglikeobs(self, params):
"""
Loglikelihood for observations of Generalized Poisson model
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
loglike : ndarray
The log likelihood for each observation of the model evaluated
at `params`. See Notes
Notes
--------
.. math:: \\ln L=\\sum_{i=1}^{n}\\left[\\mu_{i}+(y_{i}-1)*ln(\\mu_{i}+
\\alpha*\\mu_{i}^{p-1}*y_{i})-y_{i}*ln(1+\\alpha*\\mu_{i}^{p-1})-
ln(y_{i}!)-\\frac{\\mu_{i}+\\alpha*\\mu_{i}^{p-1}*y_{i}}{1+\\alpha*
\\mu_{i}^{p-1}}\\right]
for observations :math:`i=1,...,n`
"""
if self._transparams:
alpha = np.exp(params[-1])
else:
alpha = params[-1]
params = params[:-1]
p = self.parameterization
endog = self.endog
mu = self.predict(params)
mu_p = np.power(mu, p)
a1 = 1 + alpha * mu_p
a2 = mu + (a1 - 1) * endog
return (np.log(mu) + (endog - 1) * np.log(a2) - endog *
np.log(a1) - gammaln(endog + 1) - a2 / a1)
@Appender(_get_start_params_null_docs)
def _get_start_params_null(self):
offset = getattr(self, "offset", 0)
exposure = getattr(self, "exposure", 0)
const = (self.endog / np.exp(offset + exposure)).mean()
params = [np.log(const)]
mu = const * np.exp(offset + exposure)
resid = self.endog - mu
a = self._estimate_dispersion(mu, resid, df_resid=resid.shape[0] - 1)
params.append(a)
return np.array(params)
def _estimate_dispersion(self, mu, resid, df_resid=None):
q = self.parameterization
if df_resid is None:
df_resid = resid.shape[0]
a = ((np.abs(resid) / np.sqrt(mu) - 1) * mu**(-q)).sum() / df_resid
return a
@Appender(
"""
        use_transparams : bool
            This parameter enables an internal transformation that imposes
            non-negativity on the dispersion parameter. True to enable.
            Default is False. use_transparams=True imposes the constraint
            alpha > 0 (no underdispersion). If use_transparams=True and
            method is "newton" or "ncg", the transformation is ignored.
""")
@Appender(DiscreteModel.fit.__doc__)
def fit(self, start_params=None, method='bfgs', maxiter=35,
full_output=1, disp=1, callback=None, use_transparams=False,
cov_type='nonrobust', cov_kwds=None, use_t=None, optim_kwds_prelim=None,
**kwargs):
if use_transparams and method not in ['newton', 'ncg']:
self._transparams = True
else:
if use_transparams:
warnings.warn('Parameter "use_transparams" is ignored',
RuntimeWarning)
self._transparams = False
if start_params is None:
offset = getattr(self, "offset", 0) + getattr(self, "exposure", 0)
if np.size(offset) == 1 and offset == 0:
offset = None
kwds_prelim = {'disp': 0, 'skip_hessian': True,
'warn_convergence': False}
if optim_kwds_prelim is not None:
kwds_prelim.update(optim_kwds_prelim)
mod_poi = Poisson(self.endog, self.exog, offset=offset)
with warnings.catch_warnings():
warnings.simplefilter("always")
res_poi = mod_poi.fit(**kwds_prelim)
start_params = res_poi.params
a = self._estimate_dispersion(res_poi.predict(), res_poi.resid,
df_resid=res_poi.df_resid)
start_params = np.append(start_params, max(-0.1, a))
if callback is None:
# work around perfect separation callback #3895
callback = lambda *x: x
mlefit = super().fit(start_params=start_params,
maxiter=maxiter,
method=method,
disp=disp,
full_output=full_output,
callback=callback,
**kwargs)
if optim_kwds_prelim is not None:
mlefit.mle_settings["optim_kwds_prelim"] = optim_kwds_prelim
if use_transparams and method not in ["newton", "ncg"]:
self._transparams = False
mlefit._results.params[-1] = np.exp(mlefit._results.params[-1])
gpfit = GeneralizedPoissonResults(self, mlefit._results)
result = GeneralizedPoissonResultsWrapper(gpfit)
if cov_kwds is None:
cov_kwds = {}
result._get_robustcov_results(cov_type=cov_type,
use_self=True, use_t=use_t, **cov_kwds)
return result
@Appender(DiscreteModel.fit_regularized.__doc__)
def fit_regularized(self, start_params=None, method='l1',
maxiter='defined_by_method', full_output=1, disp=1, callback=None,
alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4,
qc_tol=0.03, **kwargs):
_validate_l1_method(method)
if np.size(alpha) == 1 and alpha != 0:
k_params = self.exog.shape[1] + self.k_extra
alpha = alpha * np.ones(k_params)
alpha[-1] = 0
alpha_p = alpha[:-1] if (self.k_extra and np.size(alpha) > 1) else alpha
self._transparams = False
if start_params is None:
offset = getattr(self, "offset", 0) + getattr(self, "exposure", 0)
if np.size(offset) == 1 and offset == 0:
offset = None
mod_poi = Poisson(self.endog, self.exog, offset=offset)
with warnings.catch_warnings():
warnings.simplefilter("always")
start_params = mod_poi.fit_regularized(
start_params=start_params, method=method, maxiter=maxiter,
full_output=full_output, disp=0, callback=callback,
alpha=alpha_p, trim_mode=trim_mode,
auto_trim_tol=auto_trim_tol, size_trim_tol=size_trim_tol,
qc_tol=qc_tol, **kwargs).params
start_params = np.append(start_params, 0.1)
cntfit = super(CountModel, self).fit_regularized(
start_params=start_params, method=method, maxiter=maxiter,
full_output=full_output, disp=disp, callback=callback,
alpha=alpha, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,
size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs)
discretefit = L1GeneralizedPoissonResults(self, cntfit)
return L1GeneralizedPoissonResultsWrapper(discretefit)
def score_obs(self, params):
if self._transparams:
alpha = np.exp(params[-1])
else:
alpha = params[-1]
params = params[:-1]
p = self.parameterization
exog = self.exog
y = self.endog[:,None]
mu = self.predict(params)[:,None]
mu_p = np.power(mu, p)
a1 = 1 + alpha * mu_p
a2 = mu + alpha * mu_p * y
a3 = alpha * p * mu ** (p - 1)
a4 = a3 * y
dmudb = mu * exog
dalpha = (mu_p * (y * ((y - 1) / a2 - 2 / a1) + a2 / a1**2))
dparams = dmudb * (-a4 / a1 +
a3 * a2 / (a1 ** 2) +
(1 + a4) * ((y - 1) / a2 - 1 / a1) +
1 / mu)
return np.concatenate((dparams, np.atleast_2d(dalpha)),
axis=1)
def score(self, params):
score = np.sum(self.score_obs(params), axis=0)
        if self._transparams:
            # note: '==' is a comparison, not an assignment, so the next line is a
            # no-op as written (an in-place rescaling of the alpha score was
            # presumably intended)
            score[-1] == score[-1] ** 2
            return score
else:
return score
def _score_p(self, params):
"""
Generalized Poisson model derivative of the log-likelihood by p-parameter
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
dldp : float
dldp is first derivative of the loglikelihood function,
evaluated at `p-parameter`.
"""
if self._transparams:
alpha = np.exp(params[-1])
else:
alpha = params[-1]
params = params[:-1]
p = self.parameterization
exog = self.exog
y = self.endog[:,None]
mu = self.predict(params)[:,None]
mu_p = np.power(mu, p)
a1 = 1 + alpha * mu_p
a2 = mu + alpha * mu_p * y
dp = np.sum((np.log(mu) * ((a2 - mu) * ((y - 1) / a2 - 2 / a1) +
(a1 - 1) * a2 / a1 ** 2)))
return dp
def hessian(self, params):
"""
Generalized Poisson model Hessian matrix of the loglikelihood
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
hess : ndarray, (k_vars, k_vars)
The Hessian, second derivative of loglikelihood function,
evaluated at `params`
"""
if self._transparams:
alpha = np.exp(params[-1])
else:
alpha = params[-1]
params = params[:-1]
p = self.parameterization
exog = self.exog
y = self.endog[:,None]
mu = self.predict(params)[:,None]
mu_p = np.power(mu, p)
a1 = 1 + alpha * mu_p
a2 = mu + alpha * mu_p * y
a3 = alpha * p * mu ** (p - 1)
a4 = a3 * y
a5 = p * mu ** (p - 1)
dmudb = mu * exog
# for dl/dparams dparams
dim = exog.shape[1]
hess_arr = np.empty((dim+1,dim+1))
for i in range(dim):
for j in range(i + 1):
hess_arr[i,j] = np.sum(mu * exog[:,i,None] * exog[:,j,None] *
(mu * (a3 * a4 / a1**2 -
2 * a3**2 * a2 / a1**3 +
2 * a3 * (a4 + 1) / a1**2 -
a4 * p / (mu * a1) +
a3 * p * a2 / (mu * a1**2) +
(y - 1) * a4 * (p - 1) / (a2 * mu) -
(y - 1) * (1 + a4)**2 / a2**2 -
a4 * (p - 1) / (a1 * mu)) +
((y - 1) * (1 + a4) / a2 -
(1 + a4) / a1)), axis=0)
tri_idx = np.triu_indices(dim, k=1)
hess_arr[tri_idx] = hess_arr.T[tri_idx]
# for dl/dparams dalpha
dldpda = np.sum((2 * a4 * mu_p / a1**2 -
2 * a3 * mu_p * a2 / a1**3 -
mu_p * y * (y - 1) * (1 + a4) / a2**2 +
mu_p * (1 + a4) / a1**2 +
a5 * y * (y - 1) / a2 -
2 * a5 * y / a1 +
a5 * a2 / a1**2) * dmudb,
axis=0)
hess_arr[-1,:-1] = dldpda
hess_arr[:-1,-1] = dldpda
# for dl/dalpha dalpha
dldada = mu_p**2 * (3 * y / a1**2 -
(y / a2)**2. * (y - 1) -
2 * a2 / a1**3)
hess_arr[-1,-1] = dldada.sum()
return hess_arr
def predict(self, params, exog=None, exposure=None, offset=None,
which='mean'):
"""
Predict response variable of a count model given exogenous variables.
Notes
-----
If exposure is specified, then it will be logged by the method.
The user does not need to log it first.
"""
if exog is None:
exog = self.exog
if exposure is None:
exposure = getattr(self, 'exposure', 0)
elif exposure != 0:
exposure = np.log(exposure)
if offset is None:
offset = getattr(self, 'offset', 0)
fitted = np.dot(exog, params[:exog.shape[1]])
linpred = fitted + exposure + offset
if which == 'mean':
return np.exp(linpred)
elif which == 'linear':
return linpred
elif which =='prob':
counts = np.atleast_2d(np.arange(0, np.max(self.endog)+1))
mu = self.predict(params, exog=exog, exposure=exposure,
offset=offset)[:,None]
return genpoisson_p.pmf(counts, mu, params[-1],
self.parameterization + 1)
else:
raise ValueError('keyword \'which\' not recognized')
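# A hedged, illustrative sketch (not original library code): fit the GeneralizedPoisson
# model defined above on synthetic equidispersed counts; the estimated dispersion
# parameter alpha should then come out close to zero. All names are placeholders.
def _demo_generalized_poisson_fit():
    import numpy as np
    rng = np.random.default_rng(1)
    x = rng.normal(size=(500, 1))
    X = np.column_stack((np.ones(len(x)), x))
    y = rng.poisson(np.exp(X @ np.array([0.3, 0.5])))
    res = GeneralizedPoisson(y, X, p=1).fit(disp=0)
    print(res.params)         # [const, x1, alpha]; alpha should be near 0 here
    print(res.predict()[:5])  # first five fitted conditional means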
class Logit(BinaryModel):
__doc__ = """
Logit Model
%(params)s
%(extra_params)s
Attributes
----------
endog : ndarray
A reference to the endogenous response variable
exog : ndarray
A reference to the exogenous design.
""" % {'params': base._model_params_doc,
'extra_params': base._missing_param_doc + _check_rank_doc}
_continuous_ok = True
def cdf(self, X):
"""
The logistic cumulative distribution function
Parameters
----------
X : array_like
`X` is the linear predictor of the logit model. See notes.
Returns
-------
1/(1 + exp(-X))
Notes
-----
In the logit model,
.. math:: \\Lambda\\left(x^{\\prime}\\beta\\right)=
\\text{Prob}\\left(Y=1|x\\right)=
\\frac{e^{x^{\\prime}\\beta}}{1+e^{x^{\\prime}\\beta}}
"""
X = np.asarray(X)
return 1/(1+np.exp(-X))
def pdf(self, X):
"""
The logistic probability density function
Parameters
----------
X : array_like
`X` is the linear predictor of the logit model. See notes.
Returns
-------
pdf : ndarray
The value of the Logit probability mass function, PMF, for each
point of X. ``np.exp(-x)/(1+np.exp(-X))**2``
Notes
-----
In the logit model,
.. math:: \\lambda\\left(x^{\\prime}\\beta\\right)=\\frac{e^{-x^{\\prime}\\beta}}{\\left(1+e^{-x^{\\prime}\\beta}\\right)^{2}}
"""
X = np.asarray(X)
return np.exp(-X)/(1+np.exp(-X))**2
def loglike(self, params):
"""
Log-likelihood of logit model.
Parameters
----------
params : array_like
The parameters of the logit model.
Returns
-------
loglike : float
The log-likelihood function of the model evaluated at `params`.
See notes.
Notes
-----
.. math::
\\ln L=\\sum_{i}\\ln\\Lambda
\\left(q_{i}x_{i}^{\\prime}\\beta\\right)
Where :math:`q=2y-1`. This simplification comes from the fact that the
logistic distribution is symmetric.
"""
q = 2*self.endog - 1
X = self.exog
return np.sum(np.log(self.cdf(q*np.dot(X,params))))
def loglikeobs(self, params):
"""
Log-likelihood of logit model for each observation.
Parameters
----------
params : array_like
The parameters of the logit model.
Returns
-------
loglike : ndarray
The log likelihood for each observation of the model evaluated
at `params`. See Notes
Notes
-----
.. math::
\\ln L=\\sum_{i}\\ln\\Lambda
\\left(q_{i}x_{i}^{\\prime}\\beta\\right)
for observations :math:`i=1,...,n`
where :math:`q=2y-1`. This simplification comes from the fact that the
logistic distribution is symmetric.
"""
q = 2*self.endog - 1
X = self.exog
return np.log(self.cdf(q*np.dot(X,params)))
def score(self, params):
"""
Logit model score (gradient) vector of the log-likelihood
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
score : ndarray, 1-D
The score vector of the model, i.e. the first derivative of the
loglikelihood function, evaluated at `params`
Notes
-----
.. math:: \\frac{\\partial\\ln L}{\\partial\\beta}=\\sum_{i=1}^{n}\\left(y_{i}-\\Lambda_{i}\\right)x_{i}
"""
y = self.endog
X = self.exog
L = self.cdf(np.dot(X,params))
return np.dot(y - L,X)
def score_obs(self, params):
"""
Logit model Jacobian of the log-likelihood for each observation
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
jac : array_like
The derivative of the loglikelihood for each observation evaluated
at `params`.
Notes
-----
.. math:: \\frac{\\partial\\ln L_{i}}{\\partial\\beta}=\\left(y_{i}-\\Lambda_{i}\\right)x_{i}
for observations :math:`i=1,...,n`
"""
y = self.endog
X = self.exog
L = self.cdf(np.dot(X, params))
return (y - L)[:,None] * X
def hessian(self, params):
"""
Logit model Hessian matrix of the log-likelihood
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
hess : ndarray, (k_vars, k_vars)
The Hessian, second derivative of loglikelihood function,
evaluated at `params`
Notes
-----
.. math:: \\frac{\\partial^{2}\\ln L}{\\partial\\beta\\partial\\beta^{\\prime}}=-\\sum_{i}\\Lambda_{i}\\left(1-\\Lambda_{i}\\right)x_{i}x_{i}^{\\prime}
"""
X = self.exog
L = self.cdf(np.dot(X,params))
return -np.dot(L*(1-L)*X.T,X)
@Appender(DiscreteModel.fit.__doc__)
def fit(self, start_params=None, method='newton', maxiter=35,
full_output=1, disp=1, callback=None, **kwargs):
bnryfit = super().fit(start_params=start_params,
method=method,
maxiter=maxiter,
full_output=full_output,
disp=disp,
callback=callback,
**kwargs)
discretefit = LogitResults(self, bnryfit)
return BinaryResultsWrapper(discretefit)
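# A hedged, illustrative sketch: the logistic pdf above is the derivative of the cdf;
# verify that numerically, then fit the Logit model defined above on synthetic data.
# The sample sizes and coefficients are placeholder assumptions.
def _demo_logit_fit():
    import numpy as np
    x = np.linspace(-3.0, 3.0, 7)
    pdf = np.exp(-x) / (1.0 + np.exp(-x)) ** 2
    eps = 1e-6
    fd = (1.0 / (1.0 + np.exp(-(x + eps))) -
          1.0 / (1.0 + np.exp(-(x - eps)))) / (2 * eps)   # numerical d/dx of the cdf
    assert np.allclose(pdf, fd, atol=1e-8)

    rng = np.random.default_rng(2)
    z = rng.normal(size=(300, 1))
    X = np.column_stack((np.ones(len(z)), z))
    beta = np.array([-0.5, 1.0])
    y = rng.binomial(1, 1.0 / (1.0 + np.exp(-(X @ beta))))
    res = Logit(y, X).fit(disp=0)
    print(res.params)  # roughly [-0.5, 1.0] for a large enough sample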
class Probit(BinaryModel):
__doc__ = """
Probit Model
%(params)s
%(extra_params)s
Attributes
----------
endog : ndarray
A reference to the endogenous response variable
exog : ndarray
A reference to the exogenous design.
""" % {'params': base._model_params_doc,
'extra_params': base._missing_param_doc + _check_rank_doc}
def cdf(self, X):
"""
Probit (Normal) cumulative distribution function
Parameters
----------
X : array_like
The linear predictor of the model (XB).
Returns
-------
cdf : ndarray
The cdf evaluated at `X`.
Notes
-----
This function is just an alias for scipy.stats.norm.cdf
"""
return stats.norm._cdf(X)
def pdf(self, X):
"""
Probit (Normal) probability density function
Parameters
----------
X : array_like
The linear predictor of the model (XB).
Returns
-------
pdf : ndarray
The value of the normal density function for each point of X.
Notes
-----
This function is just an alias for scipy.stats.norm.pdf
"""
X = np.asarray(X)
return stats.norm._pdf(X)
def loglike(self, params):
"""
Log-likelihood of probit model (i.e., the normal distribution).
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
loglike : float
The log-likelihood function of the model evaluated at `params`.
See notes.
Notes
-----
.. math:: \\ln L=\\sum_{i}\\ln\\Phi\\left(q_{i}x_{i}^{\\prime}\\beta\\right)
Where :math:`q=2y-1`. This simplification comes from the fact that the
normal distribution is symmetric.
"""
q = 2*self.endog - 1
X = self.exog
return np.sum(np.log(np.clip(self.cdf(q*np.dot(X,params)),
FLOAT_EPS, 1)))
def loglikeobs(self, params):
"""
Log-likelihood of probit model for each observation
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
loglike : array_like
The log likelihood for each observation of the model evaluated
at `params`. See Notes
Notes
-----
.. math:: \\ln L_{i}=\\ln\\Phi\\left(q_{i}x_{i}^{\\prime}\\beta\\right)
for observations :math:`i=1,...,n`
where :math:`q=2y-1`. This simplification comes from the fact that the
normal distribution is symmetric.
"""
q = 2*self.endog - 1
X = self.exog
return np.log(np.clip(self.cdf(q*np.dot(X,params)), FLOAT_EPS, 1))
def score(self, params):
"""
Probit model score (gradient) vector
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
score : ndarray, 1-D
The score vector of the model, i.e. the first derivative of the
loglikelihood function, evaluated at `params`
Notes
-----
.. math:: \\frac{\\partial\\ln L}{\\partial\\beta}=\\sum_{i=1}^{n}\\left[\\frac{q_{i}\\phi\\left(q_{i}x_{i}^{\\prime}\\beta\\right)}{\\Phi\\left(q_{i}x_{i}^{\\prime}\\beta\\right)}\\right]x_{i}
Where :math:`q=2y-1`. This simplification comes from the fact that the
normal distribution is symmetric.
"""
y = self.endog
X = self.exog
XB = np.dot(X,params)
q = 2*y - 1
# clip to get rid of invalid divide complaint
L = q*self.pdf(q*XB)/np.clip(self.cdf(q*XB), FLOAT_EPS, 1 - FLOAT_EPS)
return np.dot(L,X)
def score_obs(self, params):
"""
Probit model Jacobian for each observation
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
jac : array_like
The derivative of the loglikelihood for each observation evaluated
at `params`.
Notes
-----
.. math:: \\frac{\\partial\\ln L_{i}}{\\partial\\beta}=\\left[\\frac{q_{i}\\phi\\left(q_{i}x_{i}^{\\prime}\\beta\\right)}{\\Phi\\left(q_{i}x_{i}^{\\prime}\\beta\\right)}\\right]x_{i}
for observations :math:`i=1,...,n`
Where :math:`q=2y-1`. This simplification comes from the fact that the
normal distribution is symmetric.
"""
y = self.endog
X = self.exog
XB = np.dot(X,params)
q = 2*y - 1
# clip to get rid of invalid divide complaint
L = q*self.pdf(q*XB)/np.clip(self.cdf(q*XB), FLOAT_EPS, 1 - FLOAT_EPS)
return L[:,None] * X
def hessian(self, params):
"""
Probit model Hessian matrix of the log-likelihood
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
hess : ndarray, (k_vars, k_vars)
The Hessian, second derivative of loglikelihood function,
evaluated at `params`
Notes
-----
.. math:: \\frac{\\partial^{2}\\ln L}{\\partial\\beta\\partial\\beta^{\\prime}}=-\\lambda_{i}\\left(\\lambda_{i}+x_{i}^{\\prime}\\beta\\right)x_{i}x_{i}^{\\prime}
where
.. math:: \\lambda_{i}=\\frac{q_{i}\\phi\\left(q_{i}x_{i}^{\\prime}\\beta\\right)}{\\Phi\\left(q_{i}x_{i}^{\\prime}\\beta\\right)}
and :math:`q=2y-1`
"""
X = self.exog
XB = np.dot(X,params)
q = 2*self.endog - 1
L = q*self.pdf(q*XB)/self.cdf(q*XB)
return np.dot(-L*(L+XB)*X.T,X)
@Appender(DiscreteModel.fit.__doc__)
def fit(self, start_params=None, method='newton', maxiter=35,
full_output=1, disp=1, callback=None, **kwargs):
bnryfit = super().fit(start_params=start_params,
method=method,
maxiter=maxiter,
full_output=full_output,
disp=disp,
callback=callback,
**kwargs)
discretefit = ProbitResults(self, bnryfit)
return BinaryResultsWrapper(discretefit)
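# A hedged, illustrative sketch: fit the Probit model defined above on data simulated
# through a normal-CDF link; scipy.stats supplies the cdf used for the simulation.
# Coefficients and sample size are placeholder assumptions.
def _demo_probit_fit():
    import numpy as np
    from scipy import stats
    rng = np.random.default_rng(3)
    z = rng.normal(size=(400, 1))
    X = np.column_stack((np.ones(len(z)), z))
    beta = np.array([0.2, -0.7])
    y = rng.binomial(1, stats.norm.cdf(X @ beta))
    res = Probit(y, X).fit(disp=0)
    print(res.params)  # roughly [0.2, -0.7]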
class MNLogit(MultinomialModel):
__doc__ = """
Multinomial Logit Model
Parameters
----------
endog : array_like
        `endog` is a 1-d vector of the endogenous response. `endog` can
contain strings, ints, or floats or may be a pandas Categorical Series.
Note that if it contains strings, every distinct string will be a
category. No stripping of whitespace is done.
exog : array_like
A nobs x k array where `nobs` is the number of observations and `k`
is the number of regressors. An intercept is not included by default
and should be added by the user. See `statsmodels.tools.add_constant`.
%(extra_params)s
Attributes
----------
endog : ndarray
A reference to the endogenous response variable
exog : ndarray
A reference to the exogenous design.
J : float
The number of choices for the endogenous variable. Note that this
is zero-indexed.
K : float
The actual number of parameters for the exogenous design. Includes
the constant if the design has one.
names : dict
A dictionary mapping the column number in `wendog` to the variables
in `endog`.
wendog : ndarray
An n x j array where j is the number of unique categories in `endog`.
Each column of j is a dummy variable indicating the category of
each observation. See `names` for a dictionary mapping each column to
its category.
Notes
-----
See developer notes for further information on `MNLogit` internals.
""" % {'extra_params': base._missing_param_doc + _check_rank_doc}
def __init__(self, endog, exog, check_rank=True, **kwargs):
super().__init__(endog, exog, check_rank=check_rank, **kwargs)
# Override cov_names since multivariate model
yname = self.endog_names
ynames = self._ynames_map
ynames = MultinomialResults._maybe_convert_ynames_int(ynames)
# use range below to ensure sortedness
ynames = [ynames[key] for key in range(int(self.J))]
idx = MultiIndex.from_product((ynames[1:], self.data.xnames),
names=(yname, None))
self.data.cov_names = idx
def pdf(self, eXB):
"""
NotImplemented
"""
raise NotImplementedError
def cdf(self, X):
"""
Multinomial logit cumulative distribution function.
Parameters
----------
X : ndarray
The linear predictor of the model XB.
Returns
-------
cdf : ndarray
The cdf evaluated at `X`.
Notes
-----
In the multinomial logit model.
.. math:: \\frac{\\exp\\left(\\beta_{j}^{\\prime}x_{i}\\right)}{\\sum_{k=0}^{J}\\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)}
"""
eXB = np.column_stack((np.ones(len(X)),
| completion: np.exp(X)
| api: numpy.exp
|
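# A hedged, numpy-only sketch of what the MNLogit cdf above computes: exponentiate the
# linear predictors of the J-1 non-base alternatives, prepend a column of ones for the
# base alternative (exp(0) = 1), and row-normalize. Shapes and values are illustrative.
import numpy as np

XB = np.array([[0.2, -1.0],
               [1.5, 0.3]])                            # (nobs, J-1) linear predictors
eXB = np.column_stack((np.ones(len(XB)), np.exp(XB)))  # base category contributes exp(0)
probs = eXB / eXB.sum(axis=1, keepdims=True)           # (nobs, J) choice probabilities
assert np.allclose(probs.sum(axis=1), 1.0)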
import sys
sys.path.append("../")
from Utils import Obj
from sklearn.neighbors import NearestNeighbors
from extension.arap.cuda.arap import Arap
from extension.arap_closed.cuda.arap import ClosedArap as ArapSolveRhs
from extension.grad_arap.cuda.arap import ArapGrad
from extension.bending.cuda.arap import Bending
import numpy as np
import torch
import header
import os
import time
import trimesh
def find_nearest(array, value):
array = np.asarray(array)
idx = np.mean(np.abs(array - np.expand_dims(value,0)),1).argmin()
return idx
hi_arap = Arap()
hi_arap_solve_rhs = ArapSolveRhs()
bending = Bending()
from sklearn.neighbors import NearestNeighbors
clf = NearestNeighbors(n_neighbors=1,p=1,n_jobs=15)
'''
def norm_points(vertices,ref_verts):
bmin = np.min(ref_verts,axis=0)
bmax = np.max(ref_verts,axis=0)
diagsq = np.sum(np.power(bmax-bmin,2))
ref_diag = np.sqrt(diagsq)
bmin = np.min(vertices,axis=0)
bmax = np.max(vertices,axis=0)
diagsq = np.sum(np.power(bmax-bmin,2))
diag = np.sqrt(diagsq)
s = np.eye(3)
s *= (ref_diag/diag)
vertices = np.dot(vertices,s)
bmin = np.min(ref_verts,axis=0)
bmax = np.max(ref_verts,axis=0)
ref_bcenter = (bmin+bmax)/2.0
bmin = np.min(vertices,axis=0)
bmax = np.max(vertices,axis=0)
bcenter = (bmin+bmax)/2.0
#vertices = vertices + (ref_bcenter-bcenter)
return vertices
'''
def norm_points(vertices):
bmin = np.min(vertices,axis=0)
bmax =
| completion: np.max(vertices,axis=0)
| api: numpy.max
|
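# A hedged numpy sketch of the scaling idea in norm_points above: rescale a vertex set
# so its bounding-box diagonal matches a reference diagonal. Names and the random data
# are illustrative only.
import numpy as np

def _bbox_diag(v):
    return np.sqrt(np.sum((v.max(axis=0) - v.min(axis=0)) ** 2))

verts = np.random.rand(100, 3) * 5.0
ref_verts = np.random.rand(80, 3)
scaled = verts * (_bbox_diag(ref_verts) / _bbox_diag(verts))
assert np.isclose(_bbox_diag(scaled), _bbox_diag(ref_verts))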
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2021 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
from itertools import combinations
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import networkx as nx
import pandapower.topology as top
def plot_voltage_profile(net, plot_transformers=True, ax=None, xlabel="Distance from Slack [km]",
ylabel="Voltage [pu]", x0=0, trafocolor="r", bus_colors=None,
line_loading_weight=False, voltage_column=None, bus_size=3, lines=None,
**kwargs):
if ax is None:
plt.figure(facecolor="white", dpi=120)
ax = plt.gca()
if not net.converged:
raise ValueError("no results in this pandapower network")
if voltage_column is None:
voltage_column = net.res_bus.vm_pu
if lines is None:
lines = net.line.index
for eg in net.ext_grid[net.ext_grid.in_service == True].bus:
d = top.calc_distance_to_bus(net, eg)
for lix, line in net.line[(net.line.in_service == True) & net.line.index.isin(lines)].iterrows():
if line.from_bus not in d.index:
continue
if not ((net.switch.element == line.name) & ~net.switch.closed & (
net.switch.et == 'l')).any():
from_bus = line.from_bus
to_bus = line.to_bus
x = [x0 + d.at[from_bus], x0 + d.at[to_bus]]
                try:
                    y = [voltage_column.at[from_bus], voltage_column.at[to_bus]]
                except KeyError:
                    raise UserWarning("no voltage result available for bus %s or %s" %
                                      (from_bus, to_bus))
if "linewidth" in kwargs or not line_loading_weight:
ax.plot(x, y, **kwargs)
else:
ax.plot(x, y, linewidth=0.4 * np.sqrt(net.res_line.loading_percent.at[lix]),
**kwargs)
if bus_colors is not None:
for bus, x, y in zip((from_bus, to_bus), x, y):
if bus in bus_colors:
ax.plot(x, y, 'or', color=bus_colors[bus], ms=bus_size)
kwargs = {k: v for k, v in kwargs.items() if not k == "label"}
# if plot_transformers:
# if hasattr(plot_transformers, "__iter__"): # if is a list for example
# transformers = net.trafo.loc[list(plot_transformers)]
# else:
# transformers = net.trafo[net.trafo.in_service == True]
# for _, transformer in transformers.iterrows():
# if transformer.hv_bus not in d.index:
# continue
# ax.plot([x0 + d.loc[transformer.hv_bus],
# x0 + d.loc[transformer.lv_bus]],
# [voltage_column.loc[transformer.hv_bus],
# voltage_column.loc[transformer.lv_bus]], color=trafocolor,
# **{k: v for k, v in kwargs.items() if not k == "color"})
# trafo geodata
if plot_transformers:
for trafo_table in ['trafo', 'trafo3w']:
if trafo_table not in net.keys():
continue
transformers = net[trafo_table].query('in_service')
for tid, tr in transformers.iterrows():
t_buses = [tr[b_col] for b_col in ('lv_bus', 'mv_bus', 'hv_bus') if
b_col in tr.index]
if any([b not in d.index.values or b not in net.res_bus.index.values for b in
t_buses]):
# logger.info('cannot add trafo %d to plot' % tid)
continue
for bi, bj in combinations(t_buses, 2):
tr_coords = ([x0 + d.loc[bi], x0 + d.loc[bj]],
[net.res_bus.at[bi, 'vm_pu'], net.res_bus.at[bj, 'vm_pu']])
ax.plot(*tr_coords, color=trafocolor,
**{k: v for k, v in kwargs.items() if not k == "color"})
if xlabel:
ax.set_xlabel(xlabel, fontweight="bold", color=(.4, .4, .4))
if ylabel:
ax.set_ylabel(ylabel, fontweight="bold", color=(.4, .4, .4))
return ax
def plot_loading(net, element="line", boxcolor="b", mediancolor="r", whiskercolor="k", ax=None, index_subset=None):
if ax is None:
plt.figure(facecolor="white", dpi=80)
ax = plt.gca()
if index_subset is None:
index_subset = net[element].index
loadings = net["res_%s" % element].loading_percent.values[net["res_%s" % element].index.isin(index_subset)]
boxplot = ax.boxplot(loadings[~
| completion: np.isnan(loadings)
| api: numpy.isnan
|
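# A hedged sketch of the NaN handling in plot_loading above: drop NaN loadings with
# np.isnan before handing the values to matplotlib's boxplot. The numbers are synthetic.
import numpy as np
import matplotlib.pyplot as plt

loadings = np.array([55.0, np.nan, 72.5, 40.1, np.nan, 90.3])
fig, ax = plt.subplots()
ax.boxplot(loadings[~np.isnan(loadings)])  # boxplot does not handle NaNs directly
ax.set_ylabel("loading [%]")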
import os
import sys
import time
import glob
import cv2
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import scipy.io
import tifffile as sktiff  # was: from skimage.external import tifffile (removed from skimage); needed by tiff() below
import skimage.color
import skimage.io
from contextlib import suppress
import tensorflow as tf
def min_max_norm(images, norm_axis = 'channel_wise'):
"""
Min max normalization of images
Parameters:
images: Input stacked image list
norm_axis: axis where the normalize should be computed,
'channel_wise': min max norm along the channel
'frame_wise': min max norm frame wise
Return:
Image list after min max normalization
"""
assert norm_axis=='channel_wise' or norm_axis=='frame_wise',\
"Please input 'channel_wise' or 'frame_wise'"
if norm_axis == 'channel_wise':
m = np.max(images) #max val along the channel
mi = np.min(images) #min val along the channel
output = (images - mi)/ (m - mi)
elif norm_axis == 'frame_wise':
#tile the tensor with respect to input image
# so that the substaction with max and min val can be broadcasted
tile_coef = tf.constant([1,100,100,1], tf.int32)
#tile max
#reduce max val along the axis 1 & 2
#(images.shape[0], 1, 1,1) images.shape[0]=>max val per frames
max_tensor = tf.reshape(tf.math.reduce_max(tf.math.reduce_max(images, 1),1), (-1,1,1,1))
tile_max = tf.tile(max_tensor, tile_coef)
#tile min
#reduce min val along the axis 1 & 2
#(images.shape[0], 1, 1,1) images.shape[0]=>min val per frames
min_tensor = tf.reshape(tf.math.reduce_min(tf.math.reduce_min(images, 1),1), (-1,1,1,1))
tile_min = tf.tile(min_tensor, tile_coef)
#compute min max frame wise
output = (images-tile_min)/(tile_max-tile_min)
return output
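# A hedged, numpy-only illustration of the two modes of min_max_norm above:
# 'channel_wise' uses one global min/max for the whole stack, while 'frame_wise'
# normalizes every frame by its own min/max; broadcasting plays the role of the
# tf.tile calls used above. The random data and names are illustrative.
def _demo_min_max_norm_numpy():
    import numpy as np
    stack = np.random.rand(4, 100, 100, 1) * 255.0
    global_norm = (stack - stack.min()) / (stack.max() - stack.min())
    f_min = stack.min(axis=(1, 2, 3), keepdims=True)
    f_max = stack.max(axis=(1, 2, 3), keepdims=True)
    frame_norm = (stack - f_min) / (f_max - f_min)
    assert np.isclose(global_norm.min(), 0.0) and np.isclose(global_norm.max(), 1.0)
    assert frame_norm.shape == stack.shape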
def resize(img_list, NEW_SIZE, interpolation=cv2.INTER_LINEAR):
"""
Resize image
Parameter:
image list, new size for image
Return:
resize image list
"""
new_img_list = []
for img in img_list:
new_img = cv2.resize(img, (NEW_SIZE, NEW_SIZE), interpolation=interpolation)
new_img_list.append(new_img)
return np.asarray(new_img_list)
def tiff(dir_path):
'''
Read .tif extension
:param dir_path: directory path where data is stored
:return:
shape of the particular tif file, arrays of the tif file
'''
im = sktiff.imread(dir_path)
return im.shape, im
def append_tiff(path, verbose = True, timer = False):
'''
Append tiff image from path
:param path: data directory
:param verbose: output directory info
:param timer: time measurement
:return:
list of tiff images, list of directories of tiff images
'''
start = time.time()
dir_list = []
image_stack = []
for main_dir in sorted(os.listdir(path)):
if verbose:
print('Directory of mice index:', main_dir)
print('Directory of .tif files stored:')
merge_dir = os.path.join(path + main_dir)
for file in sorted(os.listdir(merge_dir)):
tif = glob.glob('{}/*.tif'.format(os.path.join(merge_dir + '/' + file)))
shape, im = tiff(tif)
dir_list.append(main_dir + '/' + file)
image_stack.append(im)
if verbose:
print('{}, {}'.format(tif, shape))
images = np.asarray(image_stack)
end = time.time()
if timer == True:
print('Total time elapsed: ', end - start)
return images, dir_list
def mat_2_npy(input_path, save_path):
'''
convert arrays in .mat to numpy array .npy
    input_path: path where the LIN data files are stored; there is no need to point to a
                specific .mat file. The input path must be located on the Desktop.
    save_path: where the .npy files are saved
'''
for main_dir in sorted(os.listdir(input_path)):
print('Directory of mice index:',main_dir)
merge_dir = os.path.join(input_path + main_dir)
print('Directory of .mat files stored:')
print()
for file in sorted(os.listdir(merge_dir )):
mat_list = glob.glob('{}/*.mat'.format(os.path.join(merge_dir + '/'+ file)))
for mat in mat_list:
print(mat)
#obtain file name .mat for new file name during the conversion
mat_dir_split = mat.split(os.sep)
mat_name = mat_dir_split[-1]
#print(mat_name)
date_dir_split = file.split(os.sep)
date_name = date_dir_split[-1]
#print('{}_{}'.format(date_name, mat_name))
#returns dict
with suppress(Exception): #ignore exception caused by preprocessedFvid.mat
data = scipy.io.loadmat(mat)
for i in data:
if '__' not in i and 'readme' not in i:
print(data[i].shape)
save_file = (save_path + date_name + '/')
if not os.path.exists(save_file):
os.makedirs(save_file)
#save matlab arrays into .npy file
np.save(save_file + "{}_{}_{}.npy".format(date_name, mat_name, i), data[i])
print()
def vid_2_frames(vid_path, output_path, extension='.jpg', verbose = False):
'''
Converting video to image sequences with specified extension
Params:
vid_path: Path where video is stored
output_path: Path where the converted image should be stored
extension: Desired image extension, by DEFAULT .jpg
verbose: Print progress of image creating
Example:
vid_path = '7-12-17-preprocessed.avi'
output_path = retrieve_filename(vid_path)
vid_2_frames(vid_path, '/' + output_path, extension = '.jpg', verbose = True)
Return:
>> For: 7-12-17-preprocessed.avi
>> Creating..../7-12-17-preprocessed/frame_0000.jpg
>> Creating..../7-12-17-preprocessed/frame_0001.jpg
...
'''
# Read the video from specified path
cam = cv2.VideoCapture(vid_path)
try:
# creating a folder named output_path
if not os.path.exists(output_path):
os.makedirs(output_path)
# if not created then raise error
except OSError:
print('Error: Creating directory of output path')
# frame
currentframe = 0
print('For: ', vid_path)
print()
while (True):
# reading from frame
ret, frame = cam.read()
if ret:
# if video is still left continue creating images
# name = ('./'+ output_path +'/frame_' + str(currentframe) + extension)
name = ('{}/frame_{:04d}{}').format(output_path, currentframe, extension)
if verbose:
print('Creating...' + name)
# writing the extracted images
cv2.imwrite(name, frame)
# increasing counter so that it will
# show how many frames are created
currentframe += 1
else:
break
# Release all space and windows once done
cam.release()
cv2.destroyAllWindows()
def retrieve_filename(file_path):
'''
Retrieve file name from path and remove file extension
Example:
file_path = 'home/user/Desktop/test.txt'
retrieve_filename(file_path)
Return:
>> test
'''
base_name = os.path.basename(file_path)
# extract base name without extension
base_name = os.path.splitext(base_name)[0]
# print(base_name)
return base_name
def vid2frames_from_files(input_path, save_path):
'''
Extension of vid_2_frames, which extract .avi from files
:param input_path: Directory where all the .avi files is stored
    :param save_path: Specify save path
'''
for main_dir in sorted(os.listdir(input_path)):
print('Directory of mice index:', main_dir)
merge_dir = os.path.join(input_path + main_dir)
print('Directory of .avi files stored:')
print()
for file in sorted(os.listdir(merge_dir)):
avi_list = glob.glob('{}/*.avi'.format(os.path.join(merge_dir + '/' + file)))
for avi in avi_list:
#print(avi)
# obtain file name .mat for new file name during the conversion
avi_dir_split = avi.split(os.sep)
avi_name = avi_dir_split[-1]
# print(avi_name)
date_dir_split = file.split(os.sep)
date_name = date_dir_split[-1]
# print('{}_{}'.format(date_name, avi_name))
vid_name = retrieve_filename(avi)
save_dir = (save_path + '{}_{}_{}'.format(main_dir,date_name, vid_name))
vid_2_frames(avi, save_dir, extension='.jpg')
print()
def img_to_array(inp_img, RGB=True):
'''
Convert single image from RGB or from Grayscale to array
Params:
inp_img: Desire image to convert to array
RGB: Convert RGB image to grayscale if FALSE
'''
if RGB:
return skimage.io.imread(inp_img)
else:
img = skimage.io.imread(inp_img)
grayscale = skimage.color.rgb2gray(img)
return grayscale
def imgs_to_arrays(inp_imgs, extension='.jpg', RGB=True, save_as_npy=False, img_resize = None, save_path=None):
'''
Convert image stacks from RGB or from Grayscale to array
Params:
inp_imgs: Desire image stacks to convert to array
extension: input images extension, by DEFAULT '.jpg'
RGB: Convert RGB image to grayscale if FALSE
save_as_npy: Save as .npy extension
save_path: Specify save path
'''
    imgs_list = []
    for imgs in sorted(glob.glob('{}/*{}'.format(inp_imgs, extension))):
        img_array = img_to_array(imgs, RGB)
        if img_resize is not None:
            # only resize when a target size is given; the original code referenced an
            # undefined IMG_SIZE whenever img_resize was None
            img_array = cv2.resize(img_array, (img_resize, img_resize))
        imgs_list.append(img_array)
imgs_list = np.asarray(imgs_list)
if save_as_npy:
assert save_path != None, "Save path not specified!"
# by default
if not os.path.exists(save_path):
os.makedirs(save_path)
save_name = retrieve_filename(inp_imgs)
np.save(save_path + '{}.npy'.format(save_name), imgs_list)
return imgs_list
def masked_img(mean_imgs, mean_roi):
'''
Plot masked image of an input mean image
'''
# operations require dtype = uint8 for bitwise comparison
scr1 = (mean_imgs * 255).astype(np.uint8) # scr image needs to be int(0,250)
scr2 = mean_roi # mask image needs to be float (0,1)
masked_output = scr1 * scr2
return masked_output.astype(np.uint8)
def dice_coef_py(y_true, y_pred):
'''
Dice coefficient for numpy
'''
eps = 1e-07
y_true_f = y_true.flatten()
y_pred_f = y_pred.flatten()
intersection = np.sum(y_true_f * y_pred_f)
return (2. * intersection + eps) /(np.sum(y_true_f) + np.sum(y_pred_f) + eps)
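# A hedged sketch: dice_coef_py above on two small binary masks. Identical masks give
# 1.0, disjoint masks give ~0.0, and eps keeps the ratio finite. Values are illustrative.
def _demo_dice_coef():
    import numpy as np
    a = np.array([[1, 1, 0],
                  [0, 1, 0]], dtype=float)
    b = np.array([[1, 0, 0],
                  [0, 1, 1]], dtype=float)
    print(dice_coef_py(a, b))  # 2*2 / (3 + 3) ~= 0.667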
def retrieve_centroid(inp_img, centroid_rad=3):
'''
Estimate centroid from contour and plot centroids on mask image
Parameters:
inp_img: binarized input image
centroid_rad: specify centroid radius during plot by DEFAULT 3
Return:
centres list and img with centroids
'''
assert inp_img.max() == 1.0, "Image not binarized!"
# image needs to be binarized and of type int!
cast_img = (inp_img).astype(np.uint8)
print('Shape:{}, Min:{}, Max:{}, Type:{}'.format(cast_img.shape, cast_img.min(),
cast_img.max(), cast_img.dtype))
contours, a = cv2.findContours(cast_img.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_TC89_L1)
print('Number of detected ROIs:', len(contours))
centres = []
for i in range(len(contours)):
moments = cv2.moments(contours[i])
centres.append((int(moments['m10'] / moments['m00']), int(moments['m01'] / moments['m00'])))
# cv2.circle(img, (x,y), radius, (b, g, r), -1)
img_with_centroids = cv2.circle(cast_img, centres[-1], centroid_rad, (0, 0, 0), -1)
return centres, img_with_centroids
def mean_image(imgs, img_size):
'''
:param imgs: Image list
:param img_size: specify image size
:return:
Mean image of shape (img_size, img_size)
'''
sums = np.zeros((img_size, img_size))
total_index = 0
for i in range(len(imgs)):
sums += np.squeeze(imgs[i])
total_index += 1
mean_img_ori = sums / total_index
return mean_img_ori
def MSE_image(img1, img2, IMG_SIZE):
'''
:param img1: True image
:param img2: Predicted image
:return:
Mean squared error of two images
'''
img1, img2 = np.squeeze(img1), np.squeeze(img2)
sq_error = (img1 - img2) ** 2
sums = np.sum(sq_error)
return sums / (IMG_SIZE * IMG_SIZE)
def MAE_image(img1, img2, IMG_SIZE):
'''
:param img1: True image
:param img2: Predicted image
:return:
Mean absoluate error of two images
'''
img1, img2 = np.squeeze(img1), np.squeeze(img2)
ab_error = np.abs(img1 - img2)
sums = np.sum(ab_error)
return sums / (IMG_SIZE * IMG_SIZE)
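# A hedged sketch: MSE_image / MAE_image above evaluated on two tiny 2x2 "images";
# the numbers are illustrative only.
def _demo_mse_mae_image():
    import numpy as np
    a = np.array([[0.0, 1.0], [1.0, 0.0]])
    b = np.array([[0.0, 0.5], [1.0, 0.0]])
    print(MSE_image(a, b, IMG_SIZE=2))  # 0.5**2 / 4 = 0.0625
    print(MAE_image(a, b, IMG_SIZE=2))  # 0.5 / 4 = 0.125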
def max_in_pro(img_stacks, n_imgs, n_rows, n_cols, norm=False):
'''
Calculate the maximum intensity projection of image stacks
(not optimized for tensorflow!)
'''
pixel_flat = []
mip = []
std_dev = []
# (i, j ,k) # of images, # of rows, # of cols
for j in range(n_rows):
for k in range(n_cols):
for i in range(n_imgs):
# print(i, j, k)
if img_stacks.ndim == 4:
pixel_flat.append(img_stacks[i, j, k, :])
else:
pixel_flat.append(img_stacks[i, j, k])
# acts as max. window of size n_imgs and strides of n_imgs
for n in range(n_cols * n_rows):
start = n * n_imgs
end = (start) + (n_imgs)
# print(start, end)
max_pixel = np.max(pixel_flat[start:end])
mip.append(max_pixel)
if norm:
# print('Normalizing!')
std_pixel = np.std(pixel_flat[start:end])
std_dev.append(std_pixel)
mip = np.asarray(mip)
if norm:
# print('Normalizing!')
std_dev = np.asarray(std_dev)
# mip /= std_dev
mip = np.multiply(mip, std_dev) # weight by std.dev
mip_re = np.reshape(mip, (n_rows, n_cols))
return np.expand_dims(mip_re, -1)
def batch_dataset(inp_imgs, BATCH_SIZE, IMG_SIZE):
'''
Custom function for creating mini-batch of dataset
:param inp_imgs: Input image list
:param BATCH_SIZE: batch size
:param IMG_SIZE: input image size
:return:
Batched dataset of dimension (n_batch, BATCH_SIZE, IMG_SIZE, IMG_SIZE, channel)
'''
n_batch = int(len(inp_imgs) / BATCH_SIZE)
mod = len(inp_imgs) % BATCH_SIZE
if mod == 0:
batch_imgs = np.reshape(inp_imgs, (n_batch, BATCH_SIZE, IMG_SIZE, IMG_SIZE, 1)).astype('float32')
else:
# divisible part
divisible = inp_imgs[:(len(inp_imgs) - mod)]
divisible_re = np.reshape(divisible, (n_batch, BATCH_SIZE, IMG_SIZE, IMG_SIZE, 1))
# remainder part
remainder = inp_imgs[(len(inp_imgs) - mod):]
# remainder shape must be padded to be the same as divisible shape
# else python will return array of type "object" which tensorflow
# cannot convert it to tensor
pad_dim = int(BATCH_SIZE - mod)
pad_array = np.zeros((pad_dim, IMG_SIZE, IMG_SIZE, 1))
remainder_pad = np.concatenate((remainder, pad_array), axis=0)
# normalize trick for remainder to balance the mean of zeros array padding
# such that in tf.reduce_mean, mean of remainder_pad = remainder_pad/BATCH_SIZE
# which in this case, the true mean becomes remainder_pad/len(remainder)
remainder_pad *= (BATCH_SIZE / len(remainder))
remainder_pad = np.expand_dims(remainder_pad, 0)
# stack divisible and remainder
batch_imgs = np.concatenate((divisible_re, remainder_pad), 0).astype('float32')
return batch_imgs
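# A hedged sketch of the remainder handling in batch_dataset above: 10 images with
# BATCH_SIZE=4 give two full batches plus a remainder of 2 that is zero-padded to 4
# (and rescaled so a later mean over the batch axis is not diluted by the padding).
# The data is synthetic.
def _demo_batch_dataset_padding():
    import numpy as np
    imgs = np.random.rand(10, 8, 8, 1).astype('float32')
    batched = batch_dataset(imgs, BATCH_SIZE=4, IMG_SIZE=8)
    print(batched.shape)  # (3, 4, 8, 8, 1)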
def stack4plot(one_hot_imgs):
'''
Functions to sum all one hot images along axis=0 for easy plot
'''
return tf.squeeze(tf.reduce_sum(one_hot_imgs, axis = 0))
# def similarity_multi(n_neurons, one_hot_imgs, similarity_score, img_size):
# '''
# @param n_neurons: number of neurons
# @param one_hot_imgs: one hot images generated by deconve model (100,100,1)
# @param similarity_scores: similarity scores after dot product
# @param img_size: image size
#
# This function multiply the similarity scores with the one hot image generate by a particular
# coordinate
#
# return:
# the sum of all the one hot image activations along the last channel
# '''
# stack_imgs = np.zeros((img_size, img_size))
# for idx in range(n_neurons):
# activations = similarity_score[idx] * np.squeeze(one_hot_imgs[idx])
# stack_imgs += activations
#
# return stack_imgs # (batch_size, img_size, img_size)
def similarity_multi(one_hot_imgs, similarity_score, thr=None):
'''
@param one_hot_imgs: one hot images generated by decoord-conv model (100,100,1) #(n_neurons, img_size, img_size, 1)
@param similarity_scores: similarity scores after dot product #(batch_size, n_neurons)
@param thr: threshold for sim scores multipied one hot pixel
    This function multiplies the similarity scores with the one-hot image generated by a
    particular coordinate
return:
the sum of all the one hot image activations along the last channel
'''
onehot_multi_sim = tf.einsum('ij,jklm->ijklm', similarity_score, one_hot_imgs) #(batch_size, n_neurons, img_size, img_size, 1)
onehot_multi_sim = tf.squeeze(tf.reduce_sum(onehot_multi_sim, axis=1))
if thr=='mean':
ta = tf.TensorArray(tf.float32, size=0, dynamic_size=True)
for i in tf.range(len(onehot_multi_sim)):
ta = ta.write(i, tf.where(onehot_multi_sim[i]<tf.math.reduce_mean(onehot_multi_sim[i]),0.0,onehot_multi_sim[i]))
onehot_multi_sim = tf.convert_to_tensor(ta.stack())
elif type(thr)==float:
onehot_multi_sim = tf.where(onehot_multi_sim<thr,0.0,onehot_multi_sim)
return onehot_multi_sim# (batch_size, img_size, img_size)
def concat_recursive(a, b, max_count, count):
'''
Recursively concatenate the image stacks with the next image stacks
@param a: Top first image stacks
@param b: Following image stacks
'''
if count < max_count - 1:
if (count == 0):
c = np.concatenate((a[count], b[count + 1]), axis=0)
else:
c = np.concatenate((a, b[count + 1]), axis=0)
a = c
count += 1
return concat_recursive(a, b, max_count, count)
if count == max_count - 1:
return a
def concat_batch(stack_batch_imgs):
if tf.rank(tf.convert_to_tensor(stack_batch_imgs[0]))>=3:
stack_list = []
for i in range(len(stack_batch_imgs)):
slices = stack_batch_imgs[i]
slices = tf.convert_to_tensor(slices, tf.float32)
concat_imgs = concat_recursive(slices, slices, len(slices), 0)
stack_list.append(concat_imgs)
return stack_list
else:
stack_batch_imgs = tf.convert_to_tensor(stack_batch_imgs, tf.float32)
concat_imgs = concat_recursive(stack_batch_imgs, stack_batch_imgs, len(stack_batch_imgs), 0)
return concat_imgs
def similarity_multiplication(similarity_list_npy, one_hot_imgs_list_npy, n_neurons, epoch_pos, img_size, threshold):
stack_batch_imgs = []
stack_batch_imgs_thr = []
for batch_similarity in similarity_list_npy[epoch_pos]:
stack_imgs = np.zeros((img_size,img_size))
for idx in range(n_neurons):
test = batch_similarity[idx]*
| completion: np.squeeze(one_hot_imgs_list_npy[epoch_pos, idx])
| api: numpy.squeeze
|
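# A hedged numpy sketch of the weighting done in similarity_multi /
# similarity_multiplication above: scale each neuron's one-hot image by its similarity
# score and sum over neurons. Shapes and values are illustrative only.
import numpy as np

n_neurons, img_size = 3, 5
one_hot = np.zeros((n_neurons, img_size, img_size))
one_hot[0, 1, 1] = one_hot[1, 2, 3] = one_hot[2, 4, 0] = 1.0
scores = np.array([0.2, 0.9, 0.5])                  # one similarity score per neuron
weighted = np.einsum('n,nij->ij', scores, one_hot)  # sum_n scores[n] * one_hot[n]
assert weighted[2, 3] == 0.9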
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import ctypes
import hashlib
import inspect
import struct
import zlib
import numpy as np
from typing import Any
from typing import Tuple
from typing import TypeVar
from typing import Generic
class constant:
"""Class to declare compile-time constants accessible from Warp kernels
Args:
x: Compile-time constant value, can be any of the built-in math types.
"""
def __init__(self, x):
self.val = x
constant._hash.update(bytes(str(x), 'utf-8'))
def __eq__(self, other):
return self.val == other
# shared hash for all constants
_hash = hashlib.sha256()
#----------------------
# built-in types
class vec2(ctypes.Array):
_length_ = 2
_shape_ = (2,)
_type_ = ctypes.c_float
class vec3(ctypes.Array):
_length_ = 3
_shape_ = (3,)
_type_ = ctypes.c_float
class vec4(ctypes.Array):
_length_ = 4
_shape_ = (4,)
_type_ = ctypes.c_float
class quat(ctypes.Array):
_length_ = 4
_shape_ = (4,)
_type_ = ctypes.c_float
class mat22(ctypes.Array):
_length_ = 4
_shape_ = (2,2)
_type_ = ctypes.c_float
class mat33(ctypes.Array):
_length_ = 9
_shape_ = (3,3)
_type_ = ctypes.c_float
class mat44(ctypes.Array):
_length_ = 16
_shape_ = (4,4)
_type_ = ctypes.c_float
class spatial_vector(ctypes.Array):
_length_ = 6
_shape_ = (6,)
_type_ = ctypes.c_float
class spatial_matrix(ctypes.Array):
_length_ = 36
_shape_ = (6,6)
_type_ = ctypes.c_float
class transform(ctypes.Array):
_length_ = 7
_shape_ = (7,)
_type_ = ctypes.c_float
def __init__(self, p=(0.0, 0.0, 0.0), q=(0.0, 0.0, 0.0, 1.0)):
self[0:3] = vec3(*p)
self[3:7] = quat(*q)
@property
def p(self):
return self[0:3]
@property
def q(self):
return self[3:7]
class void:
def __init__(self):
pass
class float32:
_length_ = 1
_type_ = ctypes.c_float
def __init__(self, x=0.0):
self.value = x
class float64:
_length_ = 1
_type_ = ctypes.c_double
def __init__(self, x=0.0):
self.value = x
class int8:
_length_ = 1
_type_ = ctypes.c_int8
def __init__(self, x=0):
self.value = x
class uint8:
_length_ = 1
_type_ = ctypes.c_uint8
def __init__(self, x=0):
self.value = x
class int16:
_length_ = 1
_type_ = ctypes.c_int16
def __init__(self, x=0):
self.value = x
class uint16:
_length_ = 1
_type_ = ctypes.c_uint16
def __init__(self, x=0):
self.value = x
class int32:
_length_ = 1
_type_ = ctypes.c_int32
def __init__(self, x=0):
self.value = x
class uint32:
_length_ = 1
_type_ = ctypes.c_uint32
def __init__(self, x=0):
self.value = x
class int64:
_length_ = 1
_type_ = ctypes.c_int64
def __init__(self, x=0):
self.value = x
class uint64:
_length_ = 1
_type_ = ctypes.c_uint64
def __init__(self, x=0):
self.value = x
scalar_types = [int8, uint8, int16, uint16, int32, uint32, int64, uint64, float32, float64]
vector_types = [vec2, vec3, vec4, mat22, mat33, mat44, quat, transform, spatial_vector, spatial_matrix]
np_dtype_to_warp_type = {
| completion: np.dtype(np.int8)
| api: numpy.dtype
|
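# A hedged, illustrative-only sketch (the contents of Warp's actual np_dtype_to_warp_type
# table are elided above): a dict keyed by np.dtype objects can dispatch from an array's
# dtype to a target scalar type, here plain ctypes types used purely as placeholders.
import ctypes
import numpy as np

_example_dtype_map = {
    np.dtype(np.int8): ctypes.c_int8,
    np.dtype(np.float32): ctypes.c_float,
}
arr = np.zeros(4, dtype=np.float32)
print(_example_dtype_map[arr.dtype])  # <class 'ctypes.c_float'>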
#!/usr/bin/env python
# coding: utf-8
# # NaiveBayes_LogisticRegression_kNearestNeighbors
# ## Beta-Binomial Naive Bayes Classifier Full Implementation
# ## Gaussian Naive Bayes Classifier Full Implementation
# ## Logistic Regression(BP + Newton's Method Update) Full Implementation
# ## K-Nearest Neighbours Classifier Full Implementation
###########################################################################
# Author: <NAME>
# Date: 02/2019
# License: MIT License
# Copyright (c) 2020 <NAME>
###########################################################################
# Python imports
import numpy as np # Matrix and vector computation package
import matplotlib.pyplot as plt # Plotting library
import time
# Set the seed of the numpy random number generator
np.random.seed(seed=1)
from sklearn import metrics # data and evaluation utils
from matplotlib.colors import colorConverter, ListedColormap # some plotting functions
import scipy.io
EMAIL_DATASET_DICT = scipy.io.loadmat('spamData.mat')
X_TRAIN = np.array(EMAIL_DATASET_DICT["Xtrain"])
T_TRAIN = np.array(EMAIL_DATASET_DICT["ytrain"])
T_TRAIN = T_TRAIN.reshape(T_TRAIN.shape[0])
X_TEST = np.array(EMAIL_DATASET_DICT["Xtest"])
T_TEST = np.array(EMAIL_DATASET_DICT["ytest"])
T_TEST = T_TEST.reshape(T_TEST.shape[0])
EMAIL_DATASET = np.column_stack((np.vstack((X_TRAIN, X_TEST)),np.hstack((T_TRAIN, T_TEST))))
print(">>>>>> EMAIL DATASET FEATURE SPACE <<<<<<")
print(">>> FULL <<<")
print(np.array(EMAIL_DATASET).shape)
print(">>> TRAIN <<<")
print(X_TRAIN.shape)
print(T_TRAIN.shape)
print(">>> TEST <<<")
print(X_TEST.shape)
print(T_TEST.shape)
def log_transform(x):
"""Transform x to natural_log(x)."""
return np.log(x+0.1)
def binary_transform(x):
"""Transform x to binary based on sign."""
return np.where(x>0, 1, 0)
X_TRAIN_LOG = log_transform(X_TRAIN)
X_TEST_LOG = log_transform(X_TEST)
X_TRAIN_BNY = binary_transform(X_TRAIN)
X_TEST_BNY = binary_transform(X_TEST)
# SHARED HELPER FUNCTIONS FOR METRICS EXTRACTION & CONFUSION TABLE PLOT
def get_accuracy(targets, predictions):
"""Helper Function for calculating the (%) accuracy of 'predictions' compared to 'targets'."""
return (np.abs(targets - predictions) < 1e-10 ).mean() * 100.0
def get_errors(outputs, targets):
"""Helper Function for calculating the error rates of 'outputs' compared to 'targets'.
error - overall error rate.
    false_negatives_error - the % of samples wrongly predicted as '0' instead of '1'.
    false_positives_error - the % of samples wrongly predicted as '1' instead of '0'."""
error_per_sample = []
error = []
false_negatives_error = []
false_positives_error = []
if(np.array(outputs).ndim == 2):
for idx, outputs_per_par in enumerate(outputs):
error_per_sample.append(list(np.array(outputs_per_par) - np.array(targets)))
error.append(sum(abs(i) for i in error_per_sample[idx])/len(error_per_sample[idx])*100)
false_negatives_error.append(abs(sum(i for i in error_per_sample[idx] if i < 0)))
false_positives_error.append(sum(i for i in error_per_sample[idx] if i > 0))
elif(np.array(outputs).ndim == 1):
error_per_sample = (list(np.array(outputs) - np.array(targets)))
error = (sum(abs(i) for i in error_per_sample)/len(error_per_sample)*100)
false_negatives_error = (abs(sum(i for i in error_per_sample if i < 0)))
false_positives_error = (sum(i for i in error_per_sample if i > 0))
return error, false_negatives_error, false_positives_error
def get_cost(outputs, targets):
"""Return the cost/error rate at the output."""
cost_per_sample = []
cost = []
if(np.array(outputs).ndim == 2):
for idx, outputs_per_epoch in enumerate(outputs):
cost_per_sample.append(list(np.array(outputs_per_epoch) - np.array(targets)))
cost.append(sum(abs(i) for i in cost_per_sample[idx])/len(cost_per_sample[idx])*100)
elif(np.array(outputs).ndim == 1):
cost_per_sample = (list(np.array(outputs) - np.array(targets)))
cost = (sum(abs(i) for i in cost_per_sample)/len(cost_per_sample)*100)
return cost
def plot_confusion_table(y_true, y_pred, title):
"""Helper Function for displaying a confusion table of targets vs predictions."""
# Show confusion table
    conf_matrix = metrics.confusion_matrix(y_true, y_pred, labels=None) # Get confusion matrix
# Plot the confusion table
class_names = ['${:d}$'.format(x) for x in range(0, 2)] # Binary class names
fig = plt.figure()
ax = fig.add_subplot(111)
# Show class labels on each axis
ax.xaxis.tick_top()
major_ticks = range(0,2)
minor_ticks = [x + 0.5 for x in range(0, 2)]
ax.xaxis.set_ticks(major_ticks, minor=False)
ax.yaxis.set_ticks(major_ticks, minor=False)
ax.xaxis.set_ticks(minor_ticks, minor=True)
ax.yaxis.set_ticks(minor_ticks, minor=True)
ax.xaxis.set_ticklabels(class_names, minor=False, fontsize=15)
ax.yaxis.set_ticklabels(class_names, minor=False, fontsize=15)
# Set plot labels
ax.yaxis.set_label_position("right")
ax.set_xlabel('Predicted Label', fontsize=15)
ax.set_ylabel('True Label', fontsize=15)
fig.suptitle(title, y=1.03, fontsize=15)
    # Show a grid to separate digits
ax.grid(b=True, which=u'minor')
# Color each grid cell according to the number classes predicted
ax.imshow(conf_matrix, interpolation='nearest', cmap='binary')
# Show the number of samples in each cell
for x in range(conf_matrix.shape[0]):
for y in range(conf_matrix.shape[1]):
color = 'w' if x == y else 'k'
ax.text(x, y, conf_matrix[y,x], ha="center", va="center", color=color)
plt.show()
# # Q1 Beta-Binomial Naive Bayes Classifier
class BetaBinomialNaiveBayes(object):
"""Beta-binomial naive Bayes classifier implementation."""
def __init__(self, n_features):
"""Initialize the log probability parameters.
n_features - the number of features per sample."""
self.C_Prior = np.zeros(2)
self.C_Cond = np.zeros((2,2,n_features))
def train(self, X_train, T_train, beta_par_1, beta_par_2, beta_prior = False, beta_cond = False):
"""Train the classifier by calculating the log class-prior & class-conditional probabilities.
X_train - training input samples.
T_train - training input sample labels.
beta_par_(1/2) - Beta Prior Distribution hyperparameters.
beta_prior - is a prior assumed for the class-prior probability.
beta_cond - is a prior assumed for the class-conditional probability."""
############################################
        # Calculating Class Prior Probabilities
############################################
class_1_prior = (T_train.sum() + beta_prior*beta_par_1)/(T_train.shape[0] + beta_prior*(beta_par_1+beta_par_2))
self.C_Prior[1] = np.log(class_1_prior)
self.C_Prior[0] = np.log(1-class_1_prior)
#############################################
# Calculating Class_Conditional Probabilities
#############################################
#Setting all samples from class 0 to (0)
tmp = X_train*T_train[:, np.newaxis]
#Removing all samples from class 0
tmp = tmp[~np.all(tmp == 0, axis=1)]
#Calculating the maximum likelihood of the features to be 1 for class 1
class_1_1_cond = (np.sum(tmp, axis=0) + beta_cond*beta_par_1)/(tmp.shape[0] + beta_cond*(beta_par_1+beta_par_2))
#Calculating the log class=1 conditional probabilities
self.C_Cond[1][1] = np.log(class_1_1_cond)
self.C_Cond[1][0] = np.log(1-class_1_1_cond)
#Setting all samples from class 1 to (0)
tmp = X_train*np.logical_not(T_train).astype(int)[:, np.newaxis]
        #Removing all samples from class 1
tmp = tmp[~np.all(tmp == 0, axis=1)]
#Calculating the maximum likelihood of the features to be 1 for class 0
class_0_1_cond = (np.sum(tmp, axis=0) + beta_cond*beta_par_1)/(tmp.shape[0] + beta_cond*(beta_par_1+beta_par_2))
#Calculating the log class=0 conditional probabilities
self.C_Cond[0][1] = np.log(class_0_1_cond)
self.C_Cond[0][0] = np.log(1-class_0_1_cond)
def predict(self, X_pred):
"""The classifier makes a prediction about the input-samples' labels, based on its model.
X_pred - input samples."""
log_prob = []
#Calculating the probabilities for each sample to be of class 0
log_prob.append(self.C_Prior[0] + X_pred.dot(self.C_Cond[0][1]) + np.logical_not(X_pred).astype(int).dot(self.C_Cond[0][0]))
#Calculating the probabilities for each sample to be of class 1
log_prob.append(self.C_Prior[1] + X_pred.dot(self.C_Cond[1][1]) + np.logical_not(X_pred).astype(int).dot(self.C_Cond[1][0]))
return np.argmax(log_prob, axis=0)
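# A hedged sketch: train the BetaBinomialNaiveBayes above on a tiny hand-made binary
# dataset and predict it back; Beta(1,1) with both priors enabled corresponds to
# Laplace-style smoothing. The toy data is illustrative only.
def _demo_beta_binomial_nb():
    import numpy as np
    X = np.array([[1, 0, 1],
                  [1, 1, 1],
                  [0, 0, 1],
                  [0, 1, 0]])
    t = np.array([1, 1, 0, 0])
    clf = BetaBinomialNaiveBayes(n_features=3)
    clf.train(X, t, beta_par_1=1, beta_par_2=1, beta_prior=True, beta_cond=True)
    print(clf.predict(X))  # [1 1 0 0] on this toy data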
def fit_beta_classifier(classifier, X_train, T_train, X_test, T_test):
"""Function which performs classifier training, prediction and data gathering.
classifier - a BetaBinomialNaiveBayes classifier object.
INPUTS:
X_train - train dataset samples.
T_train - train dataset targets.
X_test - test dataset samples.
T_train - test dataset targets.
OUTPUTS:
for train, test:
    outputs - 2-D prediction matrix with all samples' predicted labels for all beta distributions.
error - 1-D error rates matrix for each beta distribution.
false_negative/positive - 1-D matrices for each beta distribution.
accuracy - 1-D accuracy matrix for each beta distribution.
t_time - time for performing the fit."""
outputs_train = []
outputs_test = []
accuracy_train = []
accuracy_test = []
start_time = time.time()
# Fit for all beta distributions
for beta_1, beta_2 in zip(beta_par_1, beta_par_2):
print("Classifier with Beta({},{})".format(beta_1, beta_2))
#Train
classifier.train(X_train, T_train, beta_1, beta_2, beta_prior, beta_cond)
#Predict Train
outputs_train.append(classifier.predict(X_train).reshape(X_train.shape[0]))
accuracy_train.append(get_accuracy(T_train, outputs_train[-1]))
#Predict Test
outputs_test.append(classifier.predict(X_test).reshape(X_test.shape[0]))
accuracy_test.append(get_accuracy(T_test, outputs_test[-1]))
end_time = time.time()
t_time = end_time - start_time
error_train, false_negatives_error_train, false_positives_error_train = get_errors(outputs_train, T_train)
error_test, false_negatives_error_test, false_positives_error_test = get_errors(outputs_test, T_test)
return classifier, outputs_train, outputs_test, error_train, error_test, false_negatives_error_train, false_negatives_error_test, false_positives_error_train, false_positives_error_test, accuracy_train, accuracy_test, t_time
def show_results_beta(outputs_train, outputs_test, T_train, T_test, beta_par, error_train, error_test, false_negatives_error_train, false_negatives_error_test, false_positives_error_train, false_positives_error_test, accuracy_train, accuracy_test, t_time):
"""Helper Function for displaying all data gathered from the fit."""
#PLOT TRAIN FALSE CLASSIFICATION
width = 0.35 # the width of the bars
p1 = plt.bar(beta_par, np.array(false_negatives_error_train)/T_train.shape[0]*100, width, color=(0.2588,0.4433,1.0))
p2 = plt.bar(beta_par, np.array(false_positives_error_train)/T_train.shape[0]*100, width, color=(1.0,0.5,0.62),
bottom=np.array(false_negatives_error_train)/T_train.shape[0]*100)
plt.ylabel('Rate (%)', fontsize=15)
plt.xlabel('Beta(a,a) Hyperparameter', fontsize=15)
plt.title('False Train Negatives/Positives Per Beta Distribution')
plt.yticks(np.arange(0, 51, 5))
plt.legend((p1[0], p2[0]), ('False Train Negatives', 'False Train Positives'))
plt.show()
#PLOT TEST FALSE CLASSIFICATION
width = 0.35 # the width of the bars
p1 = plt.bar(beta_par, np.array(false_negatives_error_test)/T_test.shape[0]*100, width, color=(0.2588,0.4433,1.0))
p2 = plt.bar(beta_par, np.array(false_positives_error_test)/T_test.shape[0]*100, width, color=(1.0,0.5,0.62),
bottom=np.array(false_negatives_error_test)/T_test.shape[0]*100)
plt.ylabel('Rate (%)', fontsize=15)
plt.xlabel('Beta(a,a) Hyperparameter', fontsize=15)
plt.title('False Test Negatives/Positives Per Beta Distribution')
plt.yticks(np.arange(0, 51, 5))
plt.legend((p1[0], p2[0]), ('False Test Negatives', 'False Test Positives'))
plt.show()
#PLOT OVERALL ERROR RATE
plt.figure()
plt.plot(beta_par, error_train, 'b-', linewidth=2, label='Train Error Rate')
plt.plot(beta_par, error_test, 'r-', linewidth=2, label='Test Error Rate')
# Add labels to the plot
plt.xlabel('Beta(a,a) Hyperparameter', fontsize=15)
plt.ylabel('Error Rate (%)', fontsize=15)
plt.title('Train/Test Error Rates Per Beta Distribution')
plt.legend()
x1,x2,y1,y2 = plt.axis()
plt.grid()
plt.show()
#PLOT ACCURACY
plt.figure()
plt.plot(beta_par, accuracy_train, 'b-', linewidth=2, label='Train Accuracy')
plt.plot(beta_par, accuracy_test, 'r-', linewidth=3, label='Test Accuracy')
# Add labels to the plot
plt.xlabel('Beta(a,a) Hyperparameter', fontsize=15)
plt.ylabel('Accuracy (%)', fontsize=15)
plt.title('Accuracy Per Beta Distribution')
plt.legend()
x1,x2,y1,y2 = plt.axis()
plt.grid()
plt.show()
#DISPLAY DATA
for idx, beta in enumerate(beta_par):
if beta in (1,10,100):
print("BAYES CLASSIFIER(Beta({},{}))".format(beta, beta))
print("TRAIN ACCURACY: {}% | TEST ACCURACY: {}%".format(accuracy_train[idx], accuracy_test[idx]))
print("TRAIN ERROR RATE: {:.2f}% | TEST ERROR RATE: {:.2f}%".format(error_train[idx], error_test[idx]))
print("TRAIN FALSE NEGATIVES ERROR RATE: {:.2f}% | TEST FALSE NEGATIVES ERROR RATE: {:.2f}%".format(false_negatives_error_train[idx]*100/T_train.shape[0], false_negatives_error_test[idx]*100/T_test.shape[0]))
print("TRAIN FALSE POSITIVES ERROR RATE: {:.2f}% | TEST FALSE POSITIVES ERROR RATE: {:.2f}%".format(false_positives_error_train[idx]*100/T_train.shape[0], false_positives_error_test[idx]*100/T_test.shape[0]))
print("---------------------------------------------------------------------------------------")
#Print time for experiment
m, s = divmod(t_time, 60)
h, m = divmod(m, 60)
print("OVERALL TIME FOR CLASSIFIER FIT: {}h:{}m:{:.5f}s".format(h,m,s))
#Display data about the optimal beta
best_beta_idx = np.argmax(accuracy_test)
print("==============================================================================================")
print("BEST PERFORMANCE ACHIEVED FOR: Beta({},{})".format(beta_par_1[best_beta_idx], beta_par_2[best_beta_idx]))
print("TRAIN ACCURACY: {}% | TEST ACCURACY: {}%".format(accuracy_train[best_beta_idx], accuracy_test[best_beta_idx]))
print("TRAIN ERROR RATE: {:.2f}% | TEST ERROR RATE: {:.2f}%".format(error_train[best_beta_idx], error_test[best_beta_idx]))
print("TRAIN FALSE NEGATIVES ERROR RATE: {:.2f}% | TEST FALSE NEGATIVES ERROR RATE: {:.2f}%".format(false_negatives_error_train[best_beta_idx]*100/T_train.shape[0], false_negatives_error_test[best_beta_idx]*100/T_test.shape[0]))
print("TRAIN FALSE POSITIVES ERROR RATE: {:.2f}% | TEST FALSE POSITIVES ERROR RATE: {:.2f}%".format(false_positives_error_train[best_beta_idx]*100/T_train.shape[0], false_positives_error_test[best_beta_idx]*100/T_test.shape[0]))
print("==============================================================================================")
plot_confusion_table(T_train, outputs_train[best_beta_idx], "Optimal Beta({},{}) Train Confusion Table".format(beta_par_1[best_beta_idx], beta_par_2[best_beta_idx]))
plot_confusion_table(T_test, outputs_test[best_beta_idx], "Optimal Beta({},{}) Test Confusion Table".format(beta_par_1[best_beta_idx], beta_par_2[best_beta_idx]))
# Control Hyperparameters
beta_prior = False
beta_cond = True
beta_par_1 = np.linspace(0,100,num=201)
beta_par_2 = np.linspace(0,100,num=201)
# Define the classifier to be trained
BBNB = BetaBinomialNaiveBayes(X_TRAIN_BNY.shape[1])
# Display Architecture
print('>>>>>> BETA-BINOMIAL NAIVE BAYES ARCHITECTURE <<<<<<')
print('LOG CLASS PRIORS: {}'.format(BBNB.C_Prior.shape))
print("LOG CLASS CONDITIONAL PROBABILITIES: {}".format(BBNB.C_Cond.shape))
# RUN
BBNB, OUTPUTS_TRAIN, OUTPUTS_TEST, ERROR_TRAIN, ERROR_TEST, FALSE_NEGATIVES_ERROR_TRAIN, FALSE_NEGATIVES_ERROR_TEST, FALSE_POSITIVES_ERROR_TRAIN, FALSE_POSITIVES_ERROR_TEST, ACCURACY_TRAIN, ACCURACY_TEST, T_TIME = fit_beta_classifier(BBNB, X_TRAIN_BNY, T_TRAIN, X_TEST_BNY, T_TEST)
show_results_beta(OUTPUTS_TRAIN, OUTPUTS_TEST, T_TRAIN, T_TEST, beta_par_1, ERROR_TRAIN, ERROR_TEST, FALSE_NEGATIVES_ERROR_TRAIN, FALSE_NEGATIVES_ERROR_TEST, FALSE_POSITIVES_ERROR_TRAIN, FALSE_POSITIVES_ERROR_TEST, ACCURACY_TRAIN, ACCURACY_TEST, T_TIME)
# # Q2 Gaussian Naive Bayes Classifier
class GaussianNaiveBayes(object):
"""Gaussian Naive Bayes classifier implementation."""
def __init__(self, n_features):
"""Initialize the log prior & class-cond mean/variance parameters.
n_features is the number of features per sample."""
self.C_Prior = np.zeros(2)
self.C_Mean = np.zeros((2, n_features))
self.C_Var = np.zeros((2, n_features))
def train(self, X_train, T_train):
"""Train the classifier by calculating the log class-prior & class mean/variance using Maximum Likelihood.
X_train - training input samples.
T_train - training input sample labels."""
############################################
# Calculating Class Prior Probabilities
############################################
class_1_prior = T_train.sum()/(T_train.shape[0])
self.C_Prior[1] = np.log(class_1_prior)
self.C_Prior[0] = np.log(1-class_1_prior)
############################################
# Calculating Class-Conditional Mean & Variance
############################################
#Setting all samples from class 0 to (0)
tmp = X_train*T_train[:, np.newaxis]
#Removing all samples from class 0
tmp = tmp[~np.all(tmp == 0, axis=1)]
#Maximum Likelihood estimation of the mean and variance of each feature based on all training samples from class 1
self.C_Mean[1] = np.mean(tmp,axis=0)
self.C_Var[1] = np.var(tmp,axis=0)
#Setting all samples from class 1 to (0)
tmp = X_train*np.logical_not(T_train).astype(int)[:, np.newaxis]
#Removing all samples from class 1
tmp = tmp[~np.all(tmp == 0, axis=1)]
#Maximum Likelihood estimation of the mean and variance of each feature based on all training samples from class 0
self.C_Mean[0] = np.mean(tmp,axis=0)
self.C_Var[0] = np.var(tmp,axis=0)
def calculate_log_probability(self, X_pred):
"""Plug-in estimator of the natural log of the class-conditional probabilities.
X_pred - input samples."""
#Calculating the class-conditional probability of the features for class 1
exponent = np.exp(-((X_pred - self.C_Mean[1])**2 / (2 * self.C_Var[1])))
#Calculating the log class=1 conditional probabilities
class_1_log_cond = np.log((1 / (np.sqrt(2 * np.pi * self.C_Var[1]))) * exponent)
#Calculating the class-conditional probability of the features for class 0
exponent = np.exp(-((X_pred - self.C_Mean[0])**2 / (2 * self.C_Var[0])))
#Calculating the log class=0 conditional probabilities
class_0_log_cond = np.log((1 / (np.sqrt(2 * np.pi * self.C_Var[0]))) * exponent)
return class_0_log_cond, class_1_log_cond
def predict(self, X_pred):
"""The classifier makes a prediction about the inputs samples'labels, based on the trained priors
and plugged-in input samples.
X_pred - input samples."""
log_prob = []
class_0_log_cond, class_1_log_cond = self.calculate_log_probability(X_pred)
#Calculating the probabilities for each sample to be of class 0
log_prob.append(self.C_Prior[0] + np.sum(class_0_log_cond,axis=1))
#Calculating the probabilities for each sample to be of class 1
log_prob.append(self.C_Prior[1] + np.sum(class_1_log_cond,axis=1))
return np.argmax(log_prob, axis=0)
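# A minimal check (not part of the original class) that the per-feature Gaussian
# log-density in calculate_log_probability() matches scipy.stats.norm.logpdf.
# scipy is assumed to be available here; the values are illustrative only.
import numpy as np
from scipy.stats import norm
_x, _mu, _var = 1.3, 0.5, 2.0
_hand = np.log((1 / np.sqrt(2 * np.pi * _var)) * np.exp(-((_x - _mu) ** 2) / (2 * _var)))
assert np.isclose(_hand, norm.logpdf(_x, loc=_mu, scale=np.sqrt(_var)))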
def fit_gaussian_classifier(classifier, X_train, T_train, X_test, T_test):
"""Function which performs classifier training, prediction and data gathering.
classifier - a GaussianNaiveBayes classifier object.
INPUTS:
X_train - train dataset samples.
T_train - train dataset targets.
X_test - test dataset samples.
T_test - test dataset targets.
OUTPUTS:
for train, test:
outputs - 1-D prediction matrix with all samples' predicted labels.
error - scalar error rate for the fit.
false_negative/positive - scalar error rates for the fit.
accuracy - scalar rate for the fit.
t_time - time for performing the fit."""
start_time = time.time()
print("Starting Gaussian Naive Bayes Classifier Fit")
#Train
classifier.train(X_train, T_train)
#Predict train dataset
outputs_train = classifier.predict(X_train).reshape(X_train.shape[0])
accuracy_train = get_accuracy(T_train, outputs_train)
#Predict test dataset
outputs_test = classifier.predict(X_test).reshape(X_test.shape[0])
accuracy_test = get_accuracy(T_test, outputs_test)
end_time = time.time()
t_time = end_time - start_time
error_train, false_negatives_error_train, false_positives_error_train = get_errors(outputs_train, T_train)
error_test, false_negatives_error_test, false_positives_error_test = get_errors(outputs_test, T_test)
return classifier, outputs_train, outputs_test, error_train, error_test, false_negatives_error_train, false_negatives_error_test, false_positives_error_train, false_positives_error_test, accuracy_train, accuracy_test, t_time
def show_results_gaussian(outputs_train, outputs_test, T_train, T_test, error_train, error_test, false_negatives_error_train, false_negatives_error_test, false_positives_error_train, false_positives_error_test, accuracy_train, accuracy_test, t_time):
"""Helper Function for displaying all data gathered from the fit."""
print(">>>>> GAUSSIAN NAIVE BAYES CLASSIFIER <<<<<")
print("TRAIN ACCURACY: {}% | TEST ACCURACY: {}%".format(accuracy_train, accuracy_test))
print("TRAIN ERROR RATE: {:.2f}% | TEST ERROR RATE: {:.2f}%".format(error_train, error_test))
print("TRAIN FALSE NEGATIVES ERROR RATE: {:.2f}% | TEST FALSE NEGATIVES ERROR RATE: {:.2f}%".format(false_negatives_error_train*100/T_train.shape[0], false_negatives_error_test*100/T_test.shape[0]))
print("TRAIN FALSE POSITIVES ERROR RATE: {:.2f}% | TEST FALSE POSITIVES ERROR RATE: {:.2f}%".format(false_positives_error_train*100/T_train.shape[0], false_positives_error_test*100/T_test.shape[0]))
#Print time for experiment
m, s = divmod(t_time, 60)
h, m = divmod(m, 60)
print("OVERALL TIME FOR CLASSIFIER FIT: {}h:{}m:{:.5f}s".format(h,m,s))
plot_confusion_table(T_train, outputs_train, "Gaussian Naive Bayes Train Confusion Table")
plot_confusion_table(T_test, outputs_test, "Gaussian Naive Bayes Test Confusion Table")
# Define the classifier to be trained
GNB = GaussianNaiveBayes(X_TRAIN_LOG.shape[1])
# Display Architecture
print('>>>>>> GAUSSIAN NAIVE BAYES ARCHITECTURE <<<<<<')
print('LOG CLASS PRIORS: {}'.format(GNB.C_Prior.shape))
print("CLASS-CONDITIONAL MEANS: {}".format(GNB.C_Mean.shape))
print("CLASS-CONDITIONAL VARIANCES: {}".format(GNB.C_Var.shape))
# RUN
GNB, OUTPUTS_TRAIN, OUTPUTS_TEST, ERROR_TRAIN, ERROR_TEST, FALSE_NEGATIVES_ERROR_TRAIN, FALSE_NEGATIVES_ERROR_TEST, FALSE_POSITIVES_ERROR_TRAIN, FALSE_POSITIVES_ERROR_TEST, ACCURACY_TRAIN, ACCURACY_TEST, T_TIME = fit_gaussian_classifier(GNB, X_TRAIN_LOG, T_TRAIN, X_TEST_LOG, T_TEST)
show_results_gaussian(OUTPUTS_TRAIN, OUTPUTS_TEST, T_TRAIN, T_TEST, ERROR_TRAIN, ERROR_TEST, FALSE_NEGATIVES_ERROR_TRAIN, FALSE_NEGATIVES_ERROR_TEST, FALSE_POSITIVES_ERROR_TRAIN, FALSE_POSITIVES_ERROR_TEST, ACCURACY_TRAIN, ACCURACY_TEST, T_TIME)
# # Q3 Logistic Regression
def logistic(z):
"""Logistic/Sigmoid/Squashing function. Can take scalars and matrices."""
return 1 / (1 + np.exp(-z))
def logistic_deriv(y):
"""Logistic/Sigmoid/Squashing function 1st derivative. Can take scalars and matrices."""
return np.multiply(y, (1 - y))
def step(x):
"""Step function with threhold 0.5. Can take scalars and matrices."""
return 1 * (x >= 0.5)
def step_Der(y):
"""Step function 1st derivative. Can take scalars and matrices."""
return np.full((y.shape), 1)
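# Quick sanity checks (illustrative only, not part of the original module) for
# the activation helpers defined above.
import numpy as np
assert logistic(0) == 0.5                     # the sigmoid is centred at 0.5
assert np.isclose(logistic_deriv(0.5), 0.25)  # its derivative peaks at y = 0.5
assert step(np.array([0.2, 0.5, 0.9])).tolist() == [0, 1, 1]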
class LogisticNeuron(object):
"""Logistic Regression Neuron class implementation."""
def __init__(self, n_features):
"""Initialize the parameters.
n_features is the number of features per sample."""
self.weights = np.random.randn(n_features) * 0.1
self.bias = np.zeros(1)
def get_log_odds(self, X_train):
"""Calculating the net activation (In the Logistic Regression case - the log-odds estimation).
Equivalent to a linear forward pass.
X_train - input samples."""
return X_train.dot(self.weights) + self.bias
def get_posterior(self, log_odds):
"""Calculating the activation function output.
(In the Logistic Regression case - the posterior probabilities estimation)."""
return logistic(log_odds)
def get_output(self, posterior):
"""Calcualting the output prediction.
For Logistic Regression - it is a step function with decision boundary at 0.5."""
return step(posterior)
def update_params_back_prop(self, Posterior_train, T_train, X_train, L2_rate, regularization = False):
""""Function to calculate the Back Propagation Gradient Descent costs
& update the parameters of the neuron by given learning rate.
Weight decay (L2 regularization) is also implemented.
Posterior_train - neuron posterior probability predictions.
T_train - targets.
X_train - input samples.
L2_rate - regularization hyperparameter (lambda).
regularization - switch for the weight decay."""
#Single input sample
if(np.isscalar(T_train)):
self.weights = self.weights - learning_rate*(X_train*logistic_deriv(Posterior_train)*(Posterior_train - T_train) - regularization*L2_rate*self.weights)
self.bias = self.bias - learning_rate*logistic_deriv(Posterior_train)*(Posterior_train - T_train)
#Matrix of input samples
else:
cost = (Posterior_train - T_train) / Posterior_train.shape[0]
self.weights = self.weights - learning_rate*(X_train.T.dot(np.multiply(logistic_deriv(Posterior_train), cost)) - regularization*L2_rate*self.weights)
self.bias = self.bias - learning_rate*np.sum(np.multiply(logistic_deriv(Posterior_train), cost))
def update_params_Newton(self, Posterior_train, T_train, X_train, L2_rate, regularization = False):
""""Function to calculate the Newton's Method (2nd derivative estimation) costs
& update the parameters of the neuron.
Weight decay (L2 regularization) is also implemented.
Posterior_train - neuron posterior probabilities predictions.
T_train - targets.
X_train - input samples.
L2_rate - regularization hyperparameter (lambda).
regularization - switch for the weight decay."""
#Adding a bias input to X_train
bias_inputs = np.full((X_train.shape[0]), 1)
X = np.column_stack((bias_inputs, X_train))
#Calculating the S matrix
S = np.diag(Posterior_train*(1-Posterior_train))
#Building the I matrix
I = np.diag(np.concatenate([np.array([0]),np.full((self.weights.shape[0]), 1)]))
#Gradient
G = X.T.dot(Posterior_train-T_train) + regularization*L2_rate*np.concatenate([np.array([0]),self.weights])
#Hessian
H = X.T.dot(S).dot(X) + regularization*L2_rate*I
try:
H_inv = np.linalg.inv(H)
except np.linalg.LinAlgError:
# Not invertible; fall back to the Moore-Penrose pseudo-inverse.
H_inv = np.linalg.pinv(H)
#Calculate the cost
COST = np.dot(H_inv, G)
#Update
self.weights = self.weights - COST[1:]
self.bias = self.bias - COST[0]
def forward_step(LogisticNeuron, X_pred):
"""Compute and return the posterior probabilities and output predictions.
LogisticNeuron - LogisticNeuron object.
X_pred - input samples."""
log_odds = LogisticNeuron.get_log_odds(X_pred)
posterior = LogisticNeuron.get_posterior(log_odds)
output = LogisticNeuron.get_output(posterior)
return posterior, output
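# For reference (not part of the original class), update_params_Newton above
# performs the standard Newton/IRLS step for (optionally L2-regularized)
# logistic regression:
#     g = X^T (y_hat - t) + lambda * w              (bias excluded from decay)
#     H = X^T S X + lambda * I,  S = diag(y_hat * (1 - y_hat))
#     [b, w] <- [b, w] - H^{-1} g
# A minimal one-step sketch on toy data (values are illustrative only):
import numpy as np
_X_demo = np.array([[0.0], [1.0], [2.0], [3.0]])   # 4 samples, 1 feature
_t_demo = np.array([0, 0, 1, 1])
_neuron_demo = LogisticNeuron(_X_demo.shape[1])
_post_demo, _ = forward_step(_neuron_demo, _X_demo)
_neuron_demo.update_params_Newton(_post_demo, _t_demo, _X_demo, L2_rate=0.0)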
def train_logistic(LogisticNeuron, X_train, T_train, X_val, T_val, L2_rate, NewtonUpdate=True):
"""Function to perform the forward propagation & update the model parameters until convergence.
It also gathers data for the training process. It is good practice for the model's fit to be assessed
on a validation set. Sequential/minibatch/batch learning is implemented.
INPUTS:
X_train - train dataset samples.
T_train - train dataset targets.
X_val - test dataset samples.
T_val - test dataset targets.
L2_rate - regularization parameter.
OUTPUTS:
for train, validation:
error - 1-D matrix of error rates per epoch.
false_negative/positive - 1-D matrix of error rates per epoch.
accuracy - 1-D matrix of rates per epoch.
nb_of_epochs - number of training epochs to convergence.
nb_of_batches - number of minibatches.
minibatch_costs - the error rate per minibatch.
t_time - time for performing the fit."""
# Create batches (X,T) from the training set
nb_of_batches = int(X_train.shape[0] / batch_size) # Number of batches (kept integer for np.array_split / np.linspace)
XT_batches = list(zip(
np.array_split(X_train, nb_of_batches, axis=0), # X samples
np.array_split(T_train, nb_of_batches, axis=0))) # T targets
cost_per_epoch = []
minibatch_costs = []
accuracy_t = []
outputs_train = []
outputs_validation = []
accuracy_v = []
start_time = time.time()
# Train for the maximum number of epochs
for epoch in range(max_nb_of_epochs):
for X, T in XT_batches: # For each minibatch sub-iteration
posterior, outputs = forward_step(LogisticNeuron, X) # Get the predictions
minibatch_costs.append(get_cost(outputs.reshape(outputs.shape[0]), T))
if NewtonUpdate:
LogisticNeuron.update_params_Newton(posterior, T, X, L2_rate, regularization) #Newton update
else:
LogisticNeuron.update_params_back_prop(posterior, T, X, L2_rate, regularization) #Back Prop update
# Get full training cost for future analysis (plots)
outputs = forward_step(LogisticNeuron, X_train)[1]
cost_per_epoch.append(get_cost(outputs.reshape(X_train.shape[0]), T_train.reshape(T_train.shape[0])))
outputs_train.append(outputs.reshape(outputs.shape[0]))
accuracy_t.append(get_accuracy(T_train, outputs_train[-1]))
# Get full validation cost for future analysis (plots)
outputs = forward_step(LogisticNeuron, X_val)[1]
outputs_validation.append(outputs.reshape(outputs.shape[0]))
accuracy_v.append(get_accuracy(T_val, outputs_validation[-1]))
if accuracy_v[-1] == 100:
print("CONVERGENCE ACHIEVED!")
break
if learning_cost_stop and len(cost_per_epoch) >= 3:
# Stop training if the cost doesn't decrease for 2 epochs
if cost_per_epoch[-1] >= cost_per_epoch[-2] >= cost_per_epoch[-3]:
print("CONVERGENCE ACHIEVED!")
break
end_time = time.time()
t_time = end_time - start_time
error_t, false_negatives_error_t, false_positives_error_t = get_errors(outputs_train, T_train)
error_v, false_negatives_error_v, false_positives_error_v = get_errors(outputs_validation, T_val)
nb_of_epochs = epoch + 1
return LogisticNeuron, nb_of_epochs, nb_of_batches, error_t, minibatch_costs, false_negatives_error_t, false_positives_error_t, accuracy_t, error_v, false_negatives_error_v, false_positives_error_v, accuracy_v, t_time
def show_results_logistic(X_train, X_val, nb_of_epochs, nb_of_batches, error_t, minibatch_costs, false_negatives_error_t, false_positives_error_t, accuracy_t, error_v, false_negatives_error_v, false_positives_error_v, accuracy_v, t_time):
"""Helper Function for displaying all data gathered during training."""
#PLOT TRAIN FALSE CLASSIFICATION
ind = np.linspace(1, nb_of_epochs, num=nb_of_epochs) # the x locations for the groups
width = 0.35 # the width of the bars
p1 = plt.bar(ind, np.array(false_negatives_error_t)/X_train.shape[0]*100, width, color=(0.2588,0.4433,1.0))
p2 = plt.bar(ind, np.array(false_positives_error_t)/X_train.shape[0]*100, width, color=(1.0,0.5,0.62),
bottom=np.array(false_negatives_error_t)/X_train.shape[0]*100)
plt.ylabel('Rate %', fontsize=15)
plt.xlabel('Epoch', fontsize=15)
plt.title('False Train Negatives/Positives Per Training Epoch')
plt.xticks(np.arange(1, nb_of_epochs+1))
plt.legend((p1[0], p2[0]), ('False Train Negatives', 'False Train Positives'))
plt.show()
#PLOT VALIDATION FALSE CLASSIFICATION
p1 = plt.bar(ind, np.array(false_negatives_error_v)/X_val.shape[0]*100, width, color=(0.2588,0.4433,1.0))
p2 = plt.bar(ind, np.array(false_positives_error_v)/X_val.shape[0]*100, width, color=(1.0,0.5,0.62),
bottom=np.array(false_negatives_error_v)/X_val.shape[0]*100)
plt.ylabel('Rate %', fontsize=15)
plt.xlabel('Epoch', fontsize=15)
plt.title('False Validation Negatives/Positives Per Training Epoch')
plt.xticks(np.arange(1, nb_of_epochs+1))
plt.legend((p1[0], p2[0]), ('False Validation Negatives', 'False Validation Positives'))
plt.show()
#PLOT OVERALL/MINIBATCH ERROR RATES
x_minibatch = np.linspace(1, nb_of_epochs, num=nb_of_epochs*nb_of_batches)
x = np.linspace(1, nb_of_epochs, num=nb_of_epochs)
plt.figure()
if NewtonUpdate:
plt.plot(x_minibatch, minibatch_costs, 'k-', linewidth=0.5, label='Minibatches Error Rates')
plt.plot(x, error_t, 'b-', linewidth=2, label='Train Error Rate')
plt.plot(x, error_v, 'r-', linewidth=2, label='Validation Error Rate')
# Add labels to the plot
plt.xlabel('Epoch', fontsize=15)
plt.ylabel('Error Rate (%)', fontsize=15)
plt.title('Error Rates Over Training Epochs')
plt.legend()
x1,x2,y1,y2 = plt.axis()
plt.grid()
plt.show()
#PLOT ACCURACY
x = np.linspace(1, nb_of_epochs, num=nb_of_epochs)
plt.figure()
plt.plot(x, accuracy_t, 'b-', linewidth=2, label='Accuracy Training Set')
plt.plot(x, accuracy_v, 'r-', linewidth=3, label='Accuracy Validation Set')
# Add labels to the plot
plt.xlabel('Epoch', fontsize=15)
plt.ylabel('Accuracy (%)', fontsize=15)
plt.title('Accuracy Over Training Epochs')
plt.legend()
x1,x2,y1,y2 = plt.axis()
plt.grid()
plt.show()
#DISPLAY DATA
m, s = divmod(t_time, 60)
h, m = divmod(m, 60)
print("NUMBER OF TRAINING EPOCHS: {}".format(nb_of_epochs))
print("OVERALL TIME FOR TRAINING: {}h:{}m:{:.5f}s".format(h,m,s))
print("LAST TWO VALIDATION COSTS: {:.5f} {:.5f}".format(error_v[-2], error_v[-1]))
def show_results_logistic_per_regularization(X_train, X_test, reg_rates, error_train_per_regrate, false_negatives_train_per_regrate, false_positives_train_per_regrate, accuracy_train_per_regrate, error_test_per_regrate, false_negatives_test_per_regrate, false_positives_test_per_regrate, accuracy_test_per_regrate, fit_time):
"""Helper Function for displaying all data gathered from the fit."""
#PLOT TRAIN FALSE CLASSIFICATION
if NewtonUpdate:
width = 0.7
else:
width = 0.0008 # the width of the bars
p1 = plt.bar(reg_rates, np.array(false_negatives_train_per_regrate)/X_train.shape[0]*100, width, color=(0.2588,0.4433,1.0))
p2 = plt.bar(reg_rates, np.array(false_positives_train_per_regrate)/X_train.shape[0]*100, width, color=(1.0,0.5,0.62),
bottom=np.array(false_negatives_train_per_regrate)/X_train.shape[0]*100)
plt.ylabel('Rate (%)', fontsize=15)
plt.xlabel('Regularization Rate', fontsize=15)
plt.title('False Train Negatives/Positives Per Regularization Rate')
plt.legend((p1[0], p2[0]), ('False Train Negatives', 'False Train Positives'))
plt.ylim(0, 13)
plt.show()
#PLOT TEST FALSE CLASSIFICATION
p1 = plt.bar(reg_rates, np.array(false_negatives_test_per_regrate)/X_test.shape[0]*100, width, color=(0.2588,0.4433,1.0))
p2 = plt.bar(reg_rates, np.array(false_positives_test_per_regrate)/X_test.shape[0]*100, width, color=(1.0,0.5,0.62),
bottom=np.array(false_negatives_test_per_regrate)/X_test.shape[0]*100)
plt.ylabel('Rate (%)')
plt.xlabel('Regularization Rate', fontsize=15)
plt.title('False Test Negatives/Positives Per Regularization Rate')
plt.legend((p1[0], p2[0]), ('False Test Negatives', 'False Test Positives'))
plt.ylim(0, 13)
plt.show()
#PLOT ERROR RATES
plt.figure()
plt.plot(reg_rates, error_train_per_regrate, 'b-', linewidth=2, label='Train Error Rate')
plt.plot(reg_rates, error_test_per_regrate, 'r-', linewidth=2, label='Test Error Rate')
# Add labels to the plot
plt.xlabel('Regularization Rate', fontsize=15)
plt.ylabel('Error Rate (%)', fontsize=15)
plt.title('Error Rate Per Regularization Rate')
plt.legend()
x1,x2,y1,y2 = plt.axis()
plt.grid()
plt.show()
#PLOT ACCURACY
plt.figure()
plt.plot(reg_rates, accuracy_train_per_regrate, 'b-', linewidth=2, label='Train Accuracy')
plt.plot(reg_rates, accuracy_test_per_regrate, 'r-', linewidth=3, label='Test Accuracy')
# Add labels to the plot
plt.xlabel('Regularization Rate', fontsize=15)
plt.ylabel('Accuracy (%)', fontsize=15)
plt.title('Accuracy Per Regularization Rate')
plt.legend()
x1,x2,y1,y2 = plt.axis()
plt.grid()
plt.show()
#DISPLAY DATA
m, s = divmod(fit_time, 60)
h, m = divmod(m, 60)
print("OVERALL TIME FOR FIT: {}h:{}m:{:.5f}s".format(h,m,s))
#Display data about the optimal regularization rate
best_regrate_idx = np.argmax(accuracy_test_per_regrate)
print("======================================================================================")
print("BEST PERFORMANCE ACHIEVED FOR: L2_rate = {}".format(reg_rates[best_regrate_idx]))
print("TRAIN ACCURACY: {}% | TEST ACCURACY: {}%".format(accuracy_train_per_regrate[best_regrate_idx], accuracy_test_per_regrate[best_regrate_idx]))
print("TRAIN ERROR RATE: {:.2f}% | TEST ERROR RATE: {:.2f}%".format(error_train_per_regrate[best_regrate_idx], error_test_per_regrate[best_regrate_idx]))
print("TRAIN FALSE NEGATIVES ERROR RATE: {:.2f}% | TEST FALSE NEGATIVES ERROR RATE: {:.2f}%".format(false_negatives_train_per_regrate[best_regrate_idx]*100/X_train.shape[0], false_negatives_test_per_regrate[best_regrate_idx]*100/X_test.shape[0]))
print("TRAIN FALSE POSITIVES ERROR RATE: {:.2f}% | TEST FALSE POSITIVES ERROR RATE: {:.2f}%".format(false_positives_train_per_regrate[best_regrate_idx]*100/X_train.shape[0], false_positives_test_per_regrate[best_regrate_idx]*100/X_test.shape[0]))
print("======================================================================================")
def test_logistic(LogisticNeuron, X_test, T_test):
"""Function to test the learnt fit; gather data about its performance."""
outputs_test = forward_step(LogisticNeuron, X_test)[1]
accuracy_test = get_accuracy(T_test, outputs_test)
error_test, false_negatives_error_test, false_positives_error_test = get_errors(outputs_test, T_test)
return outputs_test, accuracy_test, error_test, false_negatives_error_test, false_positives_error_test
def fit_logistic(Regularization_rates, X_train, T_train, X_val, T_val, X_test, T_test):
"""A Wrapper Function which encapsulates training, testing, data gathering & data display
for all regularization rates.
INPUTS:
X_train - train dataset samples.
T_train - train dataset targets.
X_val - validation dataset samples.
T_val - validation dataset targets.
X_test - test dataset samples.
T_test - test dataset targets."""
accuracy_train_per_l2rate = []
accuracy_test_per_l2rate = []
error_train_per_l2rate = []
error_test_per_l2rate = []
false_negatives_train_per_l2rate = []
false_negatives_test_per_l2rate = []
false_positives_train_per_l2rate = []
false_positives_test_per_l2rate = []
start_time = time.time()
for L2_rate in Regularization_rates:
#INITIALISE THE LOGISTIC NEURON
LP = LogisticNeuron(X_train.shape[1])
print("STARTING LOGISTIC FIT(L2_RATE: {})".format(L2_rate))
#TRAINING
LP, NB_OF_EPOCHS, NB_OF_BATCHES, ERROR_T, MINIBATCH_COST, FALSE_NEGATIVES_ERROR_T, FALSE_POSITIVES_ERROR_T, ACCURACY_T, ERROR_V, FALSE_NEGATIVES_ERROR_V, FALSE_PSOTIVES_ERROR_V, ACCURACY_V, T_TIME = train_logistic(LP, X_train, T_train, X_val, T_val, L2_rate, NewtonUpdate)
show_results_logistic(X_train, X_val, NB_OF_EPOCHS, NB_OF_BATCHES, ERROR_T, MINIBATCH_COST, FALSE_NEGATIVES_ERROR_T, FALSE_POSITIVES_ERROR_T, ACCURACY_T, ERROR_V, FALSE_NEGATIVES_ERROR_V, FALSE_PSOTIVES_ERROR_V, ACCURACY_V, T_TIME)
#TESTING
OUTPUTS_TRAIN, ACCURACY_TRAIN, ERROR_TRAIN, FALSE_NEGATIVES_ERROR_TRAIN, FALSE_POSTIVES_ERROR_TRAIN = test_logistic(LP, X_train, T_train)
OUTPUTS_TEST, ACCURACY_TEST, ERROR_TEST, FALSE_NEGATIVES_ERROR_TEST, FALSE_POSTIVES_ERROR_TEST = test_logistic(LP, X_test, T_test)
print("---------------------------------------------------------------------------------------")
print(">>>>> LOGISTIC REGRESSION NEURON(L2_rate = {}) <<<<<".format(L2_rate))
print("TRAIN ACCURACY: {:.2f}% | TEST ACCURACY: {:.2f}%".format(ACCURACY_TRAIN, ACCURACY_TEST))
print("TRAIN ERROR RATE: {:.2f}% | TEST ERROR RATE: {:.2f}%".format(ERROR_TRAIN, ERROR_TEST))
print("TRAIN FALSE NEGATIVES ERROR RATE: {:.2f}% | TEST FALSE NEGATIVES ERROR RATE: {:.2f}%".format(FALSE_NEGATIVES_ERROR_TRAIN*100/T_train.shape[0], FALSE_NEGATIVES_ERROR_TEST*100/T_test.shape[0]))
print("TRAIN FALSE POSITIVES ERROR RATE: {:.2f}% | TEST FALSE POSITIVES ERROR RATE: {:.2f}%".format(FALSE_POSTIVES_ERROR_TRAIN*100/T_train.shape[0], FALSE_POSTIVES_ERROR_TEST*100/T_test.shape[0]))
print("---------------------------------------------------------------------------------------")
plot_confusion_table(T_train, OUTPUTS_TRAIN, "Logistic Fit(L2_rate: {}) Train Confusion Table".format(L2_rate))
plot_confusion_table(T_test, OUTPUTS_TEST, "Logistic Fit(L2_rate: {}) Test Confusion Table".format(L2_rate))
#GATHERING DATA FOR PLOTS OVER REGULARIZATION RATES
accuracy_train_per_l2rate.append(ACCURACY_TRAIN)
accuracy_test_per_l2rate.append(ACCURACY_TEST)
error_train_per_l2rate.append(ERROR_TRAIN)
error_test_per_l2rate.append(ERROR_TEST)
false_negatives_train_per_l2rate.append(FALSE_NEGATIVES_ERROR_TRAIN)
false_negatives_test_per_l2rate.append(FALSE_NEGATIVES_ERROR_TEST)
false_positives_train_per_l2rate.append(FALSE_POSTIVES_ERROR_TRAIN)
false_positives_test_per_l2rate.append(FALSE_POSTIVES_ERROR_TEST)
fit_time = time.time() - start_time
print("DATA FOR THE FIT")
show_results_logistic_per_regularization(X_train, X_test, Regularization_rates, error_train_per_l2rate, false_negatives_train_per_l2rate, false_positives_train_per_l2rate, accuracy_train_per_l2rate, error_test_per_l2rate, false_negatives_test_per_l2rate, false_positives_test_per_l2rate, accuracy_test_per_l2rate, fit_time)
# Control Hyperparameters
print(">>> Would You Like to Learn with Newton's Method or Backpropagation? <<<")
print(">>>>> Type 'newton' or 'backprop' <<<<<")
USER_I = input()
if USER_I == "newton":
NewtonUpdate = True
elif USER_I == "backprop":
NewtonUpdate = False
else:
NewtonUpdate = False
print("Did not get that! Will run Backpropagation!")
learning_cost_stop = True
learning_rate = 0.002
if NewtonUpdate:
L2_rates = np.concatenate((np.linspace(1,10,num=10), np.linspace(15,100,num=18)))
batch_size = X_TRAIN_LOG.shape[0] # Full-batch learning: all N samples per batch
else:
L2_rates = np.concatenate((np.linspace(1,10,num=10), np.linspace(15,100,num=18)))/1000
batch_size = 1 # Sequential learning: one sample per batch
regularization = True
max_nb_of_epochs = 30
# RUN
fit_logistic(L2_rates, X_TRAIN_LOG, T_TRAIN, X_TEST_LOG, T_TEST, X_TEST_LOG, T_TEST)
# # Q4 K-Nearest Neighbors
class KNearestNeighbors(object):
"""k-Nearest Neighbors classifier with L2(Euclidean) distance."""
def __init__(self):
"""Declare the classifier fields."""
self.X_train = None
self.T_train = None
def train(self, X_train, T_train):
"""Initialize the classifier fields with the training dataset."""
self.X_train = X_train
self.T_train = T_train
def euclidean_distance(self, X_pred):
"""Calculate the Euclidean distances between the input samples (X_pred) and the training samples."""
return np.sqrt(-2*np.dot(X_pred, self.X_train.T) + np.sum(self.X_train**2,axis=1) + np.sum(X_pred**2, axis=1)[:, np.newaxis])
def predict(self, X_pred, k=1):
"""Predicts the labels of the input samples by calculating the Euclidean distances between them
& the training samples, and picking the most common label in the k-nearest training samples."""
num_pred = X_pred.shape[0]
num_train = self.X_train.shape[0]
#Calculating Euclidean distances
dists = np.zeros((num_pred, num_train))
dists = self.euclidean_distance(X_pred)
Y_pred = np.zeros(num_pred)
#Picking the indexes of the k-nearest training samples
KNN_idx = np.argpartition(dists, k, axis=1)[:, :k]
#Getting the k-nearest training samples' labels
KNN_lab = self.T_train[KNN_idx.reshape(num_pred*k)].reshape(num_pred,k)
#Making a prediction by picking the most common label from the k-nearest labels
Y_pred = np.apply_along_axis(lambda x: np.bincount(x, minlength=10), axis=1, arr=KNN_lab).argmax(axis=1)
return Y_pred
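# A minimal check (illustrative only, not part of the original class) of the
# vectorized identity used by euclidean_distance() above:
#     ||x - y||^2 = ||x||^2 + ||y||^2 - 2 * x . y
import numpy as np
_knn_demo = KNearestNeighbors()
_knn_demo.train(np.array([[0.0, 0.0], [3.0, 4.0]]), np.array([0, 1]))
_d_demo = _knn_demo.euclidean_distance(np.array([[0.0, 0.0]]))
assert np.allclose(_d_demo, [[0.0, 5.0]])   # distances to (0,0) and (3,4)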
def cross_validate(KNearestNeighbors, X_train, T_train, K_values):
"""Perform a 5-fold cross-validation using the train dataset to pick the optimal K in between the K_values."""
accuracy_per_k = []
#Perform cross-validation across all Ks
for k in K_values:
print('Cross-Validation for K=',k)
k_accuracy = []
#Fit 5 folds for each K
for fold in range(5):
print('Processing Fold',fold+1)
#Create the sub-fold train dataset, consisting of 4 folds of input samples
fold_X_train = np.delete(X_train,range(int(fold*len(X_train)/5),int((fold+1)*len(X_train)/5)),axis=0)
fold_T_train = np.delete(T_train,range(int(fold*len(T_train)/5),int((fold+1)*len(T_train)/5)),axis=0)
#Create the sub-fold validation dataset, consisting of 1 fold of input samples
fold_X_val = X_train[int(fold*len(X_train)/5):int((fold+1)*len(X_train)/5)]
fold_T_val = T_train[int(fold*len(T_train)/5):int((fold+1)*len(T_train)/5)]
#Train
KNearestNeighbors.train(fold_X_train, fold_T_train)
#Predict
Y_pred = KNearestNeighbors.predict(fold_X_val, k)
#Calculate accuracy
k_accuracy.append(get_accuracy(fold_T_val, Y_pred))
accuracy_per_k.append(np.mean(k_accuracy))
return accuracy_per_k
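# A small illustration (not part of the original module) of the fold slicing
# used above: with 10 samples and 5 folds, fold index 2 holds out samples 4..5.
_n_demo, _fold_demo = 10, 2
assert list(range(int(_fold_demo*_n_demo/5), int((_fold_demo+1)*_n_demo/5))) == [4, 5]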
def show_results_KNN(X_train, X_test, K_values, cross_val_accuracy, error_train_per_k, false_negatives_train_per_k, false_positives_train_per_k, accuracy_train_per_k, error_test_per_k, false_negatives_test_per_k, false_positives_test_per_k, accuracy_test_per_k, fit_time):
"""Helper Function for displaying all data gathered from the fit."""
#PLOT TRAIN FALSE CLASSIFICATION
width = 0.35 # the width of the bars
p1 = plt.bar(K_values, np.array(false_negatives_train_per_k)/X_train.shape[0]*100, width, color=(0.2588,0.4433,1.0))
p2 = plt.bar(K_values, np.array(false_positives_train_per_k)/X_train.shape[0]*100, width, color=(1.0,0.5,0.62),
bottom=np.array(false_negatives_train_per_k)/X_train.shape[0]*100)
"""
This module is a collection of metrics to assess the similarity between two images.
Currently implemented metrics are FSIM, ISSM, PSNR, RMSE, SAM, SRE, SSIM, UIQ.
"""
import math
import numpy as np
from skimage.metrics import structural_similarity
import phasepack.phasecong as pc
import cv2
def _assert_image_shapes_equal(org_img: np.ndarray, pred_img: np.ndarray, metric: str):
# shape of the image should be like this (rows, cols, bands)
# Please note that: The interpretation of a 3-dimension array read from rasterio is: (bands, rows, columns) while
# image processing software like scikit-image, pillow and matplotlib are generally ordered: (rows, columns, bands)
# in order to efficiently swap the axis order, one can use reshape_as_raster / reshape_as_image from rasterio.plot
msg = (
f"Cannot calculate {metric}. Input shapes not identical. y_true shape ="
f"{str(org_img.shape)}, y_pred shape = {str(pred_img.shape)}"
)
assert org_img.shape == pred_img.shape, msg
def rmse(org_img: np.ndarray, pred_img: np.ndarray, max_p: int = 4095) -> float:
"""
Root Mean Squared Error
Calculated individually for all bands, then averaged
"""
_assert_image_shapes_equal(org_img, pred_img, "RMSE")
rmse_bands = []
for i in range(org_img.shape[2]):
dif = np.subtract(org_img[:, :, i], pred_img[:, :, i])
m = np.mean(np.square(dif / max_p))
s = np.sqrt(m)
rmse_bands.append(s)
return np.mean(rmse_bands)
def psnr(org_img: np.ndarray, pred_img: np.ndarray, max_p: int = 4095) -> float:
"""
Peak Signal-to-Noise Ratio, implemented as mean squared error converted to dB.
It can be calculated as
PSNR = 20 * log10(MAXp) - 10 * log10(MSE)
When using 12-bit imagery MaxP is 4095, for 8-bit imagery 255. For floating point imagery using values between
0 and 1 (e.g. unscaled reflectance) the first logarithmic term can be dropped as it becomes 0
"""
_assert_image_shapes_equal(org_img, pred_img, "PSNR")
mse_bands = []
for i in range(org_img.shape[2]):
mse_bands.append(np.mean(np.square(org_img[:, :, i] - pred_img[:, :, i])))
return 20 * np.log10(max_p) - 10. * np.log10(np.mean(mse_bands))
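# A minimal usage sketch (not part of the original module), assuming the
# reconstructed return statement of psnr() above: identical images give an
# RMSE of 0, while a slightly perturbed copy gives a finite PSNR.
import numpy as np
_a_img = np.zeros((4, 4, 3), dtype=np.float64)
_b_img = _a_img.copy()
_b_img[0, 0, 0] = 10.0
print(rmse(_a_img, _a_img, max_p=255))   # 0.0
print(rmse(_a_img, _b_img, max_p=255))   # small positive value
print(psnr(_a_img, _b_img, max_p=255))   # roughly 45 dB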
# -------------------------------------#
#   Run detection with the camera or a video file
#   For the camera, simply run this script
#   For a video file, pass its path to cv2.VideoCapture()
#   Saving the output video is straightforward; a quick web search shows how
# -------------------------------------#
import time
import cv2
import numpy as np
from PIL import Image
import mediapipe as mp
from yolo import YOLO
yolo = YOLO()
mp_drawing = mp.solutions.drawing_utils
mp_holistic = mp.solutions.holistic
# -------------------------------------#
#   Use the camera
# capture=cv2.VideoCapture("1.mp4")
# -------------------------------------#
with mp_holistic.Holistic(
min_detection_confidence=0.5,
min_tracking_confidence=0.5) as holistic:
# capture = cv2.VideoCapture(0)
capture = cv2.VideoCapture('C:\\Users\86151\Desktop\比赛资料\服创比赛说明材料\【A12】基于手势识别的会议控制系统【长安计算】-5 种基本动作示例视频\\2.平移.mp4')
fps = 0.0
while (True):
t1 = time.time()
# Read one frame
ref, frame = capture.read()
# Convert the format from BGR to RGB
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# Convert to a PIL Image
frame = Image.fromarray(np.uint8(frame))
"""
DEPRECATED: please use MultiModeSystem instead.
Driven Double-Mode Linear System with Beam-Splitter Coupling
"""
import warnings
from typing import Dict
import numpy as np
from quantumnetworks.systems.base import SystemSolver
class DoubleModeSystem(SystemSolver):
def __init__(self, params: Dict[str, float], A_in=None, B_in=None) -> None:
"""
Arguments:
A_in (function): takes in time t and returns np.ndarray
"""
warnings.warn(
"Please use MultiModeSystem instead.", DeprecationWarning, stacklevel=2
)
super().__init__(params)
self._A = None
self._B = None
self.A_in = A_in if A_in else lambda t: 0
self.B_in = B_in if B_in else lambda t: 0
def _param_validation(self):
super()._param_validation()
if "omega_a" not in self.params:
self.params["omega_a"] = 2 * np.pi * 1 # 2pi * GHz
if "omega_b" not in self.params:
self.params["omega_b"] = 2 * np.pi * 2 # 2pi * GHz
if "kappa_a" not in self.params:
self.params["kappa_a"] = 2 * np.pi * 0.001 # 2pi * GHz
if "kappa_b" not in self.params:
self.params["kappa_b"] = 2 * np.pi * 0.005 # 2pi * GHz
if "gamma_a" not in self.params:
self.params["gamma_a"] = 2 * np.pi * 0.002 # 2pi * GHz
if "gamma_b" not in self.params:
self.params["gamma_b"] = 2 * np.pi * 0.002 # 2pi * GHz
if "kerr_a" not in self.params:
self.params["kerr_a"] = 2 * np.pi * 0.001 # 2pi * GHz
if "kerr_b" not in self.params:
self.params["kerr_b"] = 2 * np.pi * 0.001 # 2pi * GHz
if "g_ab" not in self.params:
self.params["g_ab"] = 2 * np.pi * 0.002 # 2pi * GHz
# Known System Parameters and Load
# =================================
@property
def A(self):
if self._A is None:
omega_a = self.params["omega_a"]
kappa_a = self.params["kappa_a"]
gamma_a = self.params["gamma_a"]
omega_b = self.params["omega_b"]
kappa_b = self.params["kappa_b"]
gamma_b = self.params["gamma_b"]
g_ab = self.params["g_ab"]
A = np.zeros((4, 4))
A[0, 0] = -kappa_a / 2 - gamma_a / 2
A[1, 1] = -kappa_a / 2 - gamma_a / 2
A[2, 2] = -kappa_b / 2 - gamma_b / 2
A[3, 3] = -kappa_b / 2 - gamma_b / 2
A[0, 1] = omega_a
A[1, 0] = -omega_a
A[2, 3] = omega_b
A[3, 2] = -omega_b
A[0, 3] = g_ab
A[1, 2] = -g_ab
A[2, 1] = g_ab
A[3, 0] = -g_ab
self._A = A
return self._A
@property
def B(self):
if self._B is None:
kappa_a = self.params["kappa_a"]
kappa_b = self.params["kappa_b"]
B = np.zeros((4, 4))
B[0, 0] = -np.sqrt(kappa_a)
B[1, 1] = -np.sqrt(kappa_a)
B[2, 2] = -np.sqrt(kappa_b)
B[3, 3] = -np.sqrt(kappa_b)
self._B = B
return self._B
# Nonlinear
# =================================
def f_nl(self, x: np.ndarray):
"""
Nonlinear part of eq of motion
"""
K_a = self.params["kerr_a"]
K_b = self.params["kerr_b"]
Ks = [K_a, K_b]
non_linearity = np.zeros_like(x)
for mode in [0, 1]:
qi = 0 + mode * 2
pi = 1 + mode * 2
q = x[qi]
p = x[pi]
K = Ks[mode]
non_linearity[qi] = 2 * K * (q ** 2 + p ** 2) * p
non_linearity[pi] = -2 * K * (q ** 2 + p ** 2) * q
return non_linearity
def Jf_nl(self, x: np.ndarray):
"""
Jacobian of nonlinear part of eq of motion
"""
K_a = self.params["kerr_a"]
K_b = self.params["kerr_b"]
Ks = [K_a, K_b]
nonlinear_Jf = np.zeros((x.size, x.size))
for mode in [0, 1]:
qi = 0 + mode * 2
pi = 1 + mode * 2
q = x[qi]
p = x[pi]
K = Ks[mode]
nonlinear_Jf[qi][qi] = 4 * K * q * p
nonlinear_Jf[qi][pi] = 2 * K * (q ** 2 + p ** 2) + 4 * K * p ** 2
nonlinear_Jf[pi][qi] = -2 * K * (q ** 2 + p ** 2) - 4 * K * q ** 2
nonlinear_Jf[pi][pi] = -4 * K * q * p
return nonlinear_Jf
# Eval
# =================================
def eval_f(self, x: np.ndarray, u: np.ndarray) -> np.ndarray:
f = self.A.dot(x) + self.f_nl(x) + u
return f
def eval_u(self, t: float):
A_in = self.A_in(t)
B_in = self.B_in(t)
in_vec = np.array([np.real(A_in), np.imag(A_in), np.real(B_in), np.imag(B_in)])
return self.B.dot(in_vec)
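# A minimal finite-difference check (illustrative only, not part of the original
# class) that Jf_nl() above is consistent with f_nl() at a given state vector.
import numpy as np
def _check_kerr_jacobian(system, x, eps=1e-6):
    """Compare the analytic Kerr Jacobian with a central finite difference."""
    J_num = np.zeros((x.size, x.size))
    for j in range(x.size):
        dx = np.zeros_like(x)
        dx[j] = eps
        J_num[:, j] = (system.f_nl(x + dx) - system.f_nl(x - dx)) / (2 * eps)
    return np.allclose(J_num, system.Jf_nl(x), atol=1e-6)
# Example usage (assumes the default parameters are acceptable):
#     system = DoubleModeSystem(params={})
#     assert _check_kerr_jacobian(system, np.array([0.1, -0.2, 0.3, 0.05]))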
# coding=utf-8
# Copyright 2018 The DisentanglementLib Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interventional Robustness Score.
Based on the paper https://arxiv.org/abs/1811.00007.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
from disentanglement_lib.evaluation.metrics import utils
import numpy as np
import gin.tf
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
import tensorflow_hub as hub
import copy
import matplotlib.pyplot as plt
import disentanglement_lib.evaluation.metrics.dci as dci
from tensorflow.python.keras import losses
import time
import datetime
import os
from sklearn.manifold import TSNE
rho = 7
user = os.environ.get("USER")
path_to_save_images = '/home/{}/disentanglement_lib_cg/examples/s2_factor_vae_cg/'.format(user)
reconstructions_path = '/home/{}/disentanglement_lib_cg/examples/causal_dataset_s2_factor_vae_cg_output/vae/model/tfhub'.format(user)
if not os.path.exists(path_to_save_images):
os.mkdir(path_to_save_images)
def plot_latent_space(z_mu, z_label, i,dim=2):
tsne = TSNE(n_components=dim, random_state=0, perplexity=50, learning_rate=500, n_iter=300)
z_tsne = tsne.fit_transform(z_mu)
dic = {}
dic['dim1']= z_tsne[:, 0]
dic['dim2']= z_tsne[:, 1]
dic['label'] = z_label
np.save(path_to_save_images+str(i)+'.npy', dic)
def get_reconstructions(x):
module_path = reconstructions_path
with hub.eval_function_for_module(module_path) as f:
output = f(dict(latent_vectors=x), signature="decoder", as_dict=True)
return {key: np.array(values) for key, values in output.items()}
@gin.configurable(
"irs",
blacklist=["ground_truth_data", "representation_function", "random_state",
"artifact_dir"])
def compute_irs(ground_truth_data,
representation_function,
random_state,
artifact_dir=None,
diff_quantile=0.99,
num_train=gin.REQUIRED,
batch_size=gin.REQUIRED):
"""Computes the Interventional Robustness Score.
Args:
ground_truth_data: GroundTruthData to be sampled from.
representation_function: Function that takes observations as input and
outputs a dim_representation sized representation for each observation.
random_state: Numpy random state used for randomness.
artifact_dir: Optional path to directory where artifacts can be saved.
diff_quantile: Float value between 0 and 1 to decide what quantile of diffs
to select (use 1.0 for the version in the paper).
num_train: Number of points used for training.
batch_size: Batch size for sampling.
Returns:
Dict with IRS and number of active dimensions.
"""
del artifact_dir
logging.info("Generating training set.")
dci_d=dci.compute_dci(ground_truth_data, representation_function, random_state,
artifact_dir=None,
num_train=gin.REQUIRED,
num_test=gin.REQUIRED,
batch_size=16)
mus, ys = utils.generate_batch_factor_code(ground_truth_data,
representation_function, num_train,
random_state, batch_size)
assert mus.shape[1] == num_train
discretizer = utils.make_discretizer(ys)
ys_discrete = discretizer
active_mus = _drop_constant_dims(mus)
if not active_mus.any():
irs_score = 0.0
else:
irs_score = scalable_disentanglement_score(ys_discrete.T, active_mus.T, np.transpose(ys), # active mus
diff_quantile)["avg_score"]
score_dict = {}
score_dict["IRS"] = irs_score
score_dict["num_active_dims"] = np.sum(active_mus) # active mus
print(dci_d)
return score_dict
def get_max_deviations(dims, latents):
indices_ = [[] for _ in range(latents.shape[0])]
for idx, dim in enumerate(dims):
for indx in range(latents.shape[0]):
max_deviation = np.argmax(np.abs(latents[:, dim] - latents[indx, dim]))
indices_[indx].append(latents[:, dim][max_deviation])
return indices_
def normalize(x):
x[:,:,0] = (x[:,:,0] - np.min(x[:,:,0])) / (np.max(x[:,:,0]) - np.min(x[:,:,0]))
x[:,:,1] = (x[:,:,1] - np.min(x[:,:,1])) / (np.max(x[:,:,1]) - np.min(x[:,:,1]))
x[:,:,2] = (x[:,:,2] - np.min(x[:,:,2])) / (np.max(x[:,:,2]) - np.min(x[:,:,2]))
return x
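# Tiny check (not part of the original module): after normalize() each of the
# three channels spans exactly [0, 1]. Values are illustrative only.
import numpy as np
_img_demo = np.arange(24, dtype=np.float64).reshape(2, 4, 3)
_norm_demo = normalize(_img_demo.copy())
assert np.isclose(_norm_demo[:, :, 0].min(), 0.0) and np.isclose(_norm_demo[:, :, 0].max(), 1.0)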
def _drop_constant_dims(ys):
"""Returns a view of the matrix `ys` with dropped constant rows."""
ys = np.asarray(ys)
if ys.ndim != 2:
raise ValueError("Expecting a matrix.")
variances = ys.var(axis=1)
active_mask = variances > 0.
return ys[active_mask, :]
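# A tiny illustration (not part of the original module) of _drop_constant_dims:
# rows with zero variance are dropped, the remaining rows are kept unchanged.
import numpy as np
_m_demo = np.array([[1., 1., 1.],    # constant row  -> dropped
                    [0., 1., 2.]])   # varying row   -> kept
assert _drop_constant_dims(_m_demo).shape == (1, 3)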
def scalable_disentanglement_score(gen_factors, latents, factors, diff_quantile=0.99):
"""Computes IRS scores of a dataset.
Assumes no noise in X and crossed generative factors (i.e. one sample per
combination of gen_factors). Assumes each g_i is an equally probable
realization of g_i and all g_i are independent.
Args:
gen_factors: Numpy array of shape (num samples, num generative factors),
matrix of ground truth generative factors.
latents: Numpy array of shape (num samples, num latent dimensions), matrix
of latent variables.
factors: Numpy array of ground-truth factor values, used here only for the t-SNE latent-space plots.
diff_quantile: Float value between 0 and 1 to decide what quantile of diffs
to select (use 1.0 for the version in the paper).
Returns:
Dictionary with IRS scores.
"""
k = gen_factors.shape[1]
l = latents.shape[1]
for i in range(factors.shape[1]):
plot_latent_space(latents, factors[:,i],i)
# Compute normalizer EMPIDA.
max_deviations = np.max(np.abs(latents - latents.mean(axis=0)), axis=0)
cum_deviations = np.zeros([l, k])
for i in range(k):
unique_factors = np.unique(gen_factors[:, i], axis=0)
assert unique_factors.ndim == 1
num_distinct_factors = unique_factors.shape[0]
for k1 in range(num_distinct_factors):
# Compute E[Z | g_i].
match = gen_factors[:, i] == unique_factors[k1]
e_loc = np.mean(latents[match, :], axis=0)
# Difference of each value within that group of constant g_i to its mean.
# PIDA
diffs = np.abs(latents[match, :] - e_loc)
""" image processing:
class AIT for full sky
ZEA for square region
TSplot special adapter for ZEA
author: <NAME> <EMAIL>
$Header: /nfs/slac/g/glast/ground/cvs/pointlike/python/uw/utilities/image.py,v 1.47 2017/02/09 19:04:37 burnett Exp $
"""
version = '$Revision: 1.47 $'.split()[1]
import sys, pylab, types, os
import math
import numpy as np
import pylab as pl
import pylab as plt
from matplotlib import pyplot, ticker
import matplotlib as mpl
from skymaps import SkyImage, SkyDir, double2, SkyProj,PySkyFunction,Hep3Vector
from math import exp
from numpy.fft import fft2,ifft2,fftshift
from scipy import optimize
import keyword_options
SkyImage.setNaN(np.nan)
class Ellipse(object):
def __init__(self, q):
""" q: ellipical parameters
a, b, phi
"""
self.q = q
def contour(self, r=1, count=50):
""" return set of points in around closed figure"""
s,c = math.sin(-self.q[2]), math.cos(self.q[2])
a,b = self.q[0],self.q[1]
x = []
y = []
for t in np.linspace(0, 2*math.pi, count):
ct,st = math.cos(t), math.sin(t)
x.append( r*(a*ct*s - b*st*c))
y.append( r*(a*ct*c + b*st*s))
return x,y
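# A minimal usage sketch (not part of the original module). With phi = 0 the
# parameterization above reduces to x = -r*b*sin(t), y = r*a*cos(t), so the
# r = 1 contour of Ellipse((2.0, 1.0, 0.0)) is bounded by |x| <= 1 and |y| <= 2.
_ellipse_demo = Ellipse((2.0, 1.0, 0.0))
_ex_demo, _ey_demo = _ellipse_demo.contour(r=1, count=100)
assert max(abs(v) for v in _ex_demo) <= 1.0 + 1e-9
assert max(abs(v) for v in _ey_demo) <= 2.0 + 1e-9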
class Rescale(object):
def __init__(self, image, nticks=5, galactic=False):
""" image: a SkyImage object
nticks: suggested number of ticks for the ticker
warning: fails if the north pole is in the image (TODO: figure out a sensible approach)
"""
# get ra range from top, dec range along center of SkyImage
nx,ny = image.nx, image.ny
self.nx=nx
self.ny=ny
# convenient lat, lon functions for pixel coords
lat = lambda x,y: image.skydir(x,y).l() if galactic else image.skydir(x,y).ra()
lon = lambda x,y: image.skydir(x,y).b() if galactic else image.skydir(x,y).dec()
xl,xr = lat(0,0), lat(nx,0)
if xl<xr: # did it span the boundary?
xr = xr-360
self.vmin, self.vmax = lon(0,0), lon(nx/2.,ny)
ticklocator = ticker.MaxNLocator(nticks, steps=[1,2,5])
self.uticks = [ix if ix>-1e-6 else ix+360\
#for ix in ticklocator.bin_boundaries(xr,xl)[::-1]] #reverse
for ix in ticklocator.tick_values(xr,xl)[::-1]] #reverse
self.ul = xl
self.ur = xr
#self.vticks = ticklocator.bin_boundaries(self.vmin,self.vmax)
self.vticks = ticklocator.tick_values(self.vmin,self.vmax)
if len(self.vticks)==0: # protect against a rare situation
self.vticks = [self.vmin,self.vmax]
# extract positions in image coords, text labels
self.xticks = [image.pixel(SkyDir(x,self.vmin,SkyDir.GALACTIC if galactic else SkyDir.EQUATORIAL))[0]\
for x in self.uticks]
#self.yticks = [image.pixel(SkyDir(xl,v))[1] for v in self.vticks]
# proportional is usually good?
try:
yscale = ny/(lon(0,ny)-self.vmin)
self.yticks = [ (v-self.vmin)*yscale for v in self.vticks]
self.xticklabels = self.formatter(self.uticks)
self.yticklabels = self.formatter(self.vticks)
except Exception as msg:
print ('formatting failure in image.py: {}'.format(msg))
self.xticks=self.yticks=None
def formatter(self, t):
n=0
s = np.abs(np.array(t))+1e-9
for i in range(4):
#print (s, s-np.floor(s), (s-np.floor(s)).max())
if (s-np.floor(s)).max()<1e-3: break
s = s*10
n+=1
fmt = '%%5.%df'%n
return [(fmt% x).strip() for x in t]
def apply(self, axes):
if self.xticks is None:
return
#note remove outer ones
if len(self.xticks)>=3:
axes.set_xticks(self.xticks[1:-1])
axes.set_xticklabels(self.xticklabels[1:-1])
axes.xaxis.set_ticks_position('bottom')
#axes.set_xlim((0.5,self.nx+0.5)) # have to do again?
if len(self.yticks)>=3:
axes.set_yticks(self.yticks[1:-1])
axes.set_yticklabels(self.yticklabels[1:-1])
axes.yaxis.set_ticks_position('left')
#axes.set_ylim((0.5,self.ny+0.5)) # have to do again?
class AITproj(object):
def __init__(self, proj):
self.proj = proj
self.center = proj(0,0)
self.scale = ((proj(180,0)[0]-self.center[0])/180., (proj(0,90)[1]-self.center[1])/90)
def __call__(self,l,b):
r = self.proj(l,b)
return [(r[i]-self.center[i])/self.scale[i] for i in range(2)]
def draw_grid(self, labels=True, color='gray', pixelsize=0.5, textsize=8):
label_offset = 5/pixelsize
#my_axes = pylab.axes() #creates figure and axes if not set
# # pylab.matplotlib.interactive(False)
#my_axes.set_autoscale_on(False)
#my_axes.set_xlim(0, 360/pixelsize)
#my_axes.set_ylim(0, 180/pixelsize)
#my_axes.set_axis_off()
#my_axes.set_aspect('equal')
ait = AITproj(self.projector.sph2pix) # the projector to use
axes = self.axes
bs = np.arange(-90, 91, 5)
for l in np.hstack((np.arange(0, 360, 45),[180.01])):
lstyle = '-' if int(l)==180 or int(l)==0 else '--'
# axes.plot([ait(l,b)[0] for b in bs], [ait(l,b)[1] for b in bs], lstyle, color=color)
axes.plot([ait(l,b)[0] for b in bs], [ait(l,b)[1] for b in bs], lstyle, color=color)
if labels:
x,y = ait(l, 45)
axes.text(x,y, '%3.0f'%l ,size=textsize, ha='center')
ls = np.hstack((np.arange(180, 0, -5), np.arange(355, 180,-5), [180.01]))
for b in np.arange(-60, 61, 30):
lstyle = '-' if int(b)==0 else '--'
axes.plot([ait(l,b)[0] for l in ls], [ait(l,b)[1] for l in ls], lstyle, color=color)
if labels:
x,y = ait(180.1, b)
axes.text(x+label_offset,y+b/60*label_offset, '%+3.0f'%b, size=textsize, ha='center',va='center')
if labels:
for b in [90,-90]:
x,y = ait(0,b)
axes.text(x,y+b/90*label_offset,'%+3.0f'%b, size=textsize, ha='center',va='center')
class AIT_grid():
def __init__(self, fignum=20, axes=None, labels=True, color='gray', pixelsize=0.5, textsize=8, linestyle='-'):
"""Draws gridlines and labels for map.
"""
if axes is None:
fig=plt.figure(fignum, figsize=(12,6))
fig.clf()
self.axes = fig.gca()
else: self.axes = axes
self.pixelsize = pixelsize
xsize,ysize = 325,162
crpix = double2(xsize/pixelsize/2., ysize/pixelsize/2.)
crval = double2(0,0)
cdelt = double2(-pixelsize, pixelsize)
self.proj = SkyProj('AIT', crpix, crval, cdelt, 0, True)
self.axes.set_autoscale_on(False)
self.axes.set_xlim(0, 360/self.pixelsize)
self.axes.set_ylim(0, 180/self.pixelsize)
self.axes.set_axis_off()
self.axes.set_aspect('equal')
self.extent= (self.ait(180,0)[0],self.ait(180.001,0)[0], self.ait(0,-90)[1], self.ait(0,90)[1])
label_offset = 5/self.pixelsize
bs = np.arange(-90, 91, 5)
for l in np.hstack((np.arange(0, 360, 45),[180.01])):
self.axes.plot([self.ait(l,b)[0] for b in bs], [self.ait(l,b)[1] for b in bs], linestyle, color=color)
if labels:
x,y = self.ait(l, 45)
self.axes.text(x,y, '%3.0f'%l ,size=textsize, ha='center')
ls = np.hstack((np.arange(180, 0, -5), np.arange(355, 180,-5), [180.01]))
from __future__ import print_function
import os.path as op
from nose.tools import assert_true, assert_raises
import warnings
from copy import deepcopy
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_allclose, assert_equal)
import pytest
from scipy.fftpack import fft
from mne.datasets import testing
from mne import (stats, SourceEstimate, VectorSourceEstimate,
VolSourceEstimate, Label, read_source_spaces,
read_evokeds, MixedSourceEstimate, find_events, Epochs,
read_source_estimate, morph_data, extract_label_time_course,
spatio_temporal_tris_connectivity,
spatio_temporal_src_connectivity,
spatial_inter_hemi_connectivity,
spatial_src_connectivity, spatial_tris_connectivity)
from mne.source_estimate import (compute_morph_matrix, grade_to_vertices,
grade_to_tris, _get_vol_mask)
from mne.minimum_norm import (read_inverse_operator, apply_inverse,
apply_inverse_epochs)
from mne.label import read_labels_from_annot, label_sign_flip
from mne.utils import (_TempDir, requires_pandas, requires_sklearn,
requires_h5py, run_tests_if_main, requires_nibabel)
from mne.io import read_raw_fif
warnings.simplefilter('always') # enable b/c these tests throw warnings
data_path = testing.data_path(download=False)
subjects_dir = op.join(data_path, 'subjects')
fname_inv = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-6-meg-inv.fif')
fname_evoked = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-ave.fif')
fname_raw = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif')
fname_t1 = op.join(data_path, 'subjects', 'sample', 'mri', 'T1.mgz')
fname_src = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
fname_src_fs = op.join(data_path, 'subjects', 'fsaverage', 'bem',
'fsaverage-ico-5-src.fif')
fname_src_3 = op.join(data_path, 'subjects', 'sample', 'bem',
'sample-oct-4-src.fif')
fname_stc = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-meg')
fname_smorph = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg')
fname_fmorph = op.join(data_path, 'MEG', 'sample',
'fsaverage_audvis_trunc-meg')
fname_vol = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-grad-vol-7-fwd-sensmap-vol.w')
fname_vsrc = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-vol-7-fwd.fif')
rng = np.random.RandomState(0)
@testing.requires_testing_data
def test_spatial_inter_hemi_connectivity():
"""Test spatial connectivity between hemispheres."""
# trivial cases
conn = spatial_inter_hemi_connectivity(fname_src_3, 5e-6)
assert_equal(conn.data.size, 0)
conn = spatial_inter_hemi_connectivity(fname_src_3, 5e6)
assert_equal(conn.data.size, np.prod(conn.shape) // 2)
# actually interesting case (1cm), should be between 2 and 10% of verts
src = read_source_spaces(fname_src_3)
conn = spatial_inter_hemi_connectivity(src, 10e-3)
conn = conn.tocsr()
n_src = conn.shape[0]
assert_true(n_src * 0.02 < conn.data.size < n_src * 0.10)
assert_equal(conn[:src[0]['nuse'], :src[0]['nuse']].data.size, 0)
assert_equal(conn[-src[1]['nuse']:, -src[1]['nuse']:].data.size, 0)
c = (conn.T + conn) / 2. - conn
c.eliminate_zeros()
assert_equal(c.data.size, 0)
# check locations
upper_right = conn[:src[0]['nuse'], src[0]['nuse']:].toarray()
assert_equal(upper_right.sum(), conn.sum() // 2)
good_labels = ['S_pericallosal', 'Unknown', 'G_and_S_cingul-Mid-Post',
'G_cuneus']
for hi, hemi in enumerate(('lh', 'rh')):
has_neighbors = src[hi]['vertno'][np.where(np.any(upper_right,
axis=1 - hi))[0]]
labels = read_labels_from_annot('sample', 'aparc.a2009s', hemi,
subjects_dir=subjects_dir)
use_labels = [l.name[:-3] for l in labels
if np.in1d(l.vertices, has_neighbors).any()]
assert_true(set(use_labels) - set(good_labels) == set())
@pytest.mark.slowtest
@testing.requires_testing_data
def test_volume_stc():
"""Test volume STCs."""
tempdir = _TempDir()
N = 100
data = np.arange(N)[:, np.newaxis]
datas = [data, data, np.arange(2)[:, np.newaxis]]
vertno = np.arange(N)
vertnos = [vertno, vertno[:, np.newaxis], np.arange(2)[:, np.newaxis]]
vertno_reads = [vertno, vertno, np.arange(2)]
for data, vertno, vertno_read in zip(datas, vertnos, vertno_reads):
stc = VolSourceEstimate(data, vertno, 0, 1)
fname_temp = op.join(tempdir, 'temp-vl.stc')
stc_new = stc
for _ in range(2):
stc_new.save(fname_temp)
stc_new = read_source_estimate(fname_temp)
assert_true(isinstance(stc_new, VolSourceEstimate))
assert_array_equal(vertno_read, stc_new.vertices)
assert_array_almost_equal(stc.data, stc_new.data)
# now let's actually read a MNE-C processed file
stc = read_source_estimate(fname_vol, 'sample')
assert_true(isinstance(stc, VolSourceEstimate))
assert_true('sample' in repr(stc))
stc_new = stc
assert_raises(ValueError, stc.save, fname_vol, ftype='whatever')
for _ in range(2):
fname_temp = op.join(tempdir, 'temp-vol.w')
stc_new.save(fname_temp, ftype='w')
stc_new = read_source_estimate(fname_temp)
assert_true(isinstance(stc_new, VolSourceEstimate))
assert_array_equal(stc.vertices, stc_new.vertices)
assert_array_almost_equal(stc.data, stc_new.data)
# save the stc as a nifti file and export
try:
import nibabel as nib
with warnings.catch_warnings(record=True):
warnings.simplefilter('always')
src = read_source_spaces(fname_vsrc)
vol_fname = op.join(tempdir, 'stc.nii.gz')
stc.save_as_volume(vol_fname, src,
dest='surf', mri_resolution=False)
with warnings.catch_warnings(record=True): # nib<->numpy
img = nib.load(vol_fname)
assert_true(img.shape == src[0]['shape'] + (len(stc.times),))
with warnings.catch_warnings(record=True): # nib<->numpy
t1_img = nib.load(fname_t1)
stc.save_as_volume(op.join(tempdir, 'stc.nii.gz'), src,
dest='mri', mri_resolution=True)
with warnings.catch_warnings(record=True): # nib<->numpy
img = nib.load(vol_fname)
assert_true(img.shape == t1_img.shape + (len(stc.times),))
assert_array_almost_equal(img.affine, t1_img.affine, decimal=5)
# export without saving
img = stc.as_volume(src, dest='mri', mri_resolution=True)
assert_true(img.shape == t1_img.shape + (len(stc.times),))
assert_array_almost_equal(img.affine, t1_img.affine, decimal=5)
except ImportError:
print('Save as nifti test skipped, needs NiBabel')
@testing.requires_testing_data
def test_expand():
"""Test stc expansion."""
stc_ = read_source_estimate(fname_stc, 'sample')
vec_stc_ = VectorSourceEstimate(np.zeros((stc_.data.shape[0], 3,
stc_.data.shape[1])),
stc_.vertices, stc_.tmin, stc_.tstep,
stc_.subject)
for stc in [stc_, vec_stc_]:
assert_true('sample' in repr(stc))
labels_lh = read_labels_from_annot('sample', 'aparc', 'lh',
subjects_dir=subjects_dir)
new_label = labels_lh[0] + labels_lh[1]
stc_limited = stc.in_label(new_label)
stc_new = stc_limited.copy()
stc_new.data.fill(0)
for label in labels_lh[:2]:
stc_new += stc.in_label(label).expand(stc_limited.vertices)
assert_raises(TypeError, stc_new.expand, stc_limited.vertices[0])
assert_raises(ValueError, stc_new.expand, [stc_limited.vertices[0]])
# make sure we can't add unless vertno agree
assert_raises(ValueError, stc.__add__, stc.in_label(labels_lh[0]))
def _fake_stc(n_time=10):
verts = [np.arange(10), np.arange(90)]
return SourceEstimate(np.random.rand(100, n_time), verts, 0, 1e-1, 'foo')
def _fake_vec_stc(n_time=10):
verts = [np.arange(10), np.arange(90)]
return VectorSourceEstimate(np.random.rand(100, 3, n_time), verts, 0, 1e-1,
'foo')
def _real_vec_stc():
inv = read_inverse_operator(fname_inv)
evoked = read_evokeds(fname_evoked, baseline=(None, 0))[0].crop(0, 0.01)
return apply_inverse(evoked, inv, pick_ori='vector')
def _test_stc_integrity(stc):
"""Test consistency of tmin, tstep, data.shape[-1] and times."""
n_times = len(stc.times)
assert_equal(stc._data.shape[-1], n_times)
assert_array_equal(stc.times, stc.tmin + np.arange(n_times) * stc.tstep)
def test_stc_attributes():
"""Test STC attributes."""
stc = _fake_stc(n_time=10)
vec_stc = _fake_vec_stc(n_time=10)
    _test_stc_integrity(stc)
assert_array_almost_equal(
stc.times, [0., 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
def attempt_times_mutation(stc):
stc.times -= 1
def attempt_assignment(stc, attr, val):
setattr(stc, attr, val)
# .times is read-only
assert_raises(ValueError, attempt_times_mutation, stc)
assert_raises(ValueError, attempt_assignment, stc, 'times', [1])
# Changing .tmin or .tstep re-computes .times
stc.tmin = 1
assert_true(type(stc.tmin) == float)
assert_array_almost_equal(
stc.times, [1., 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9])
stc.tstep = 1
assert_true(type(stc.tstep) == float)
assert_array_almost_equal(
stc.times, [1., 2., 3., 4., 5., 6., 7., 8., 9., 10.])
# tstep <= 0 is not allowed
assert_raises(ValueError, attempt_assignment, stc, 'tstep', 0)
assert_raises(ValueError, attempt_assignment, stc, 'tstep', -1)
# Changing .data re-computes .times
stc.data = np.random.rand(100, 5)
assert_array_almost_equal(
stc.times, [1., 2., 3., 4., 5.])
# .data must match the number of vertices
assert_raises(ValueError, attempt_assignment, stc, 'data', [[1]])
assert_raises(ValueError, attempt_assignment, stc, 'data', None)
    # .data must match the number of dimensions
assert_raises(ValueError, attempt_assignment, stc, 'data', np.arange(100))
assert_raises(ValueError, attempt_assignment, vec_stc, 'data',
[np.arange(100)])
assert_raises(ValueError, attempt_assignment, vec_stc, 'data',
[[[np.arange(100)]]])
# .shape attribute must also work when ._data is None
stc._kernel = np.zeros((2, 2))
stc._sens_data = np.zeros((2, 3))
stc._data = None
assert_equal(stc.shape, (2, 3))
def test_io_stc():
"""Test IO for STC files."""
tempdir = _TempDir()
stc = _fake_stc()
stc.save(op.join(tempdir, "tmp.stc"))
stc2 = read_source_estimate(op.join(tempdir, "tmp.stc"))
assert_array_almost_equal(stc.data, stc2.data)
assert_array_almost_equal(stc.tmin, stc2.tmin)
assert_equal(len(stc.vertices), len(stc2.vertices))
for v1, v2 in zip(stc.vertices, stc2.vertices):
assert_array_almost_equal(v1, v2)
assert_array_almost_equal(stc.tstep, stc2.tstep)
@requires_h5py
def test_io_stc_h5():
"""Test IO for STC files using HDF5."""
for stc in [_fake_stc(), _fake_vec_stc()]:
tempdir = _TempDir()
assert_raises(ValueError, stc.save, op.join(tempdir, 'tmp'),
ftype='foo')
out_name = op.join(tempdir, 'tmp')
stc.save(out_name, ftype='h5')
stc.save(out_name, ftype='h5') # test overwrite
stc3 = read_source_estimate(out_name)
stc4 = read_source_estimate(out_name + '-stc')
stc5 = read_source_estimate(out_name + '-stc.h5')
assert_raises(RuntimeError, read_source_estimate, out_name,
subject='bar')
for stc_new in stc3, stc4, stc5:
assert_equal(stc_new.subject, stc.subject)
assert_array_equal(stc_new.data, stc.data)
assert_array_equal(stc_new.tmin, stc.tmin)
assert_array_equal(stc_new.tstep, stc.tstep)
assert_equal(len(stc_new.vertices), len(stc.vertices))
for v1, v2 in zip(stc_new.vertices, stc.vertices):
assert_array_equal(v1, v2)
def test_io_w():
"""Test IO for w files."""
tempdir = _TempDir()
stc = _fake_stc(n_time=1)
w_fname = op.join(tempdir, 'fake')
stc.save(w_fname, ftype='w')
src = read_source_estimate(w_fname)
src.save(op.join(tempdir, 'tmp'), ftype='w')
src2 = read_source_estimate(op.join(tempdir, 'tmp-lh.w'))
assert_array_almost_equal(src.data, src2.data)
assert_array_almost_equal(src.lh_vertno, src2.lh_vertno)
assert_array_almost_equal(src.rh_vertno, src2.rh_vertno)
def test_stc_arithmetic():
"""Test arithmetic for STC files."""
stc = _fake_stc()
data = stc.data.copy()
vec_stc = _fake_vec_stc()
vec_data = vec_stc.data.copy()
out = list()
for a in [data, stc, vec_data, vec_stc]:
a = a + a * 3 + 3 * a - a ** 2 / 2
a += a
a -= a
with warnings.catch_warnings(record=True):
warnings.simplefilter('always')
a /= 2 * a
a *= -a
a += 2
a -= 1
a *= -1
a /= 2
b = 2 + a
b = 2 - a
b = +a
assert_array_equal(b.data, a.data)
with warnings.catch_warnings(record=True):
warnings.simplefilter('always')
a **= 3
out.append(a)
assert_array_equal(out[0], out[1].data)
assert_array_equal(out[2], out[3].data)
assert_array_equal(stc.sqrt().data, np.sqrt(stc.data))
assert_array_equal(vec_stc.sqrt().data, np.sqrt(vec_stc.data))
assert_array_equal(abs(stc).data, abs(stc.data))
assert_array_equal(abs(vec_stc).data, abs(vec_stc.data))
stc_mean = stc.mean()
assert_array_equal(stc_mean.data, np.mean(stc.data, 1)[:, None])
vec_stc_mean = vec_stc.mean()
assert_array_equal(vec_stc_mean.data,
np.mean(vec_stc.data, 2)[:, :, None])
@pytest.mark.slowtest
@testing.requires_testing_data
def test_stc_methods():
"""Test stc methods lh_data, rh_data, bin(), resample()."""
stc_ = read_source_estimate(fname_stc)
# Make a vector version of the above source estimate
x = stc_.data[:, np.newaxis, :]
yz = np.zeros((x.shape[0], 2, x.shape[2]))
vec_stc_ = VectorSourceEstimate(
np.concatenate((x, yz), 1),
stc_.vertices, stc_.tmin, stc_.tstep, stc_.subject
)
for stc in [stc_, vec_stc_]:
# lh_data / rh_data
assert_array_equal(stc.lh_data, stc.data[:len(stc.lh_vertno)])
assert_array_equal(stc.rh_data, stc.data[len(stc.lh_vertno):])
# bin
binned = stc.bin(.12)
a = np.mean(stc.data[..., :np.searchsorted(stc.times, .12)], axis=-1)
assert_array_equal(a, binned.data[..., 0])
stc = read_source_estimate(fname_stc)
stc.subject = 'sample'
label_lh = read_labels_from_annot('sample', 'aparc', 'lh',
subjects_dir=subjects_dir)[0]
label_rh = read_labels_from_annot('sample', 'aparc', 'rh',
subjects_dir=subjects_dir)[0]
label_both = label_lh + label_rh
for label in (label_lh, label_rh, label_both):
assert_true(isinstance(stc.shape, tuple) and len(stc.shape) == 2)
stc_label = stc.in_label(label)
if label.hemi != 'both':
if label.hemi == 'lh':
verts = stc_label.vertices[0]
else: # label.hemi == 'rh':
verts = stc_label.vertices[1]
n_vertices_used = len(label.get_vertices_used(verts))
assert_equal(len(stc_label.data), n_vertices_used)
stc_lh = stc.in_label(label_lh)
assert_raises(ValueError, stc_lh.in_label, label_rh)
label_lh.subject = 'foo'
assert_raises(RuntimeError, stc.in_label, label_lh)
stc_new = deepcopy(stc)
o_sfreq = 1.0 / stc.tstep
# note that using no padding for this STC reduces edge ringing...
stc_new.resample(2 * o_sfreq, npad=0, n_jobs=2)
assert_true(stc_new.data.shape[1] == 2 * stc.data.shape[1])
assert_true(stc_new.tstep == stc.tstep / 2)
stc_new.resample(o_sfreq, npad=0)
assert_true(stc_new.data.shape[1] == stc.data.shape[1])
assert_true(stc_new.tstep == stc.tstep)
assert_array_almost_equal(stc_new.data, stc.data, 5)
@testing.requires_testing_data
def test_center_of_mass():
"""Test computing the center of mass on an stc."""
stc = read_source_estimate(fname_stc)
assert_raises(ValueError, stc.center_of_mass, 'sample')
stc.lh_data[:] = 0
vertex, hemi, t = stc.center_of_mass('sample', subjects_dir=subjects_dir)
assert_true(hemi == 1)
# XXX Should design a fool-proof test case, but here were the
# results:
assert_equal(vertex, 124791)
assert_equal(np.round(t, 2), 0.12)
@testing.requires_testing_data
def test_extract_label_time_course():
"""Test extraction of label time courses from stc."""
n_stcs = 3
n_times = 50
src = read_inverse_operator(fname_inv)['src']
vertices = [src[0]['vertno'], src[1]['vertno']]
n_verts = len(vertices[0]) + len(vertices[1])
# get some labels
labels_lh = read_labels_from_annot('sample', hemi='lh',
subjects_dir=subjects_dir)
labels_rh = read_labels_from_annot('sample', hemi='rh',
subjects_dir=subjects_dir)
labels = list()
labels.extend(labels_lh[:5])
labels.extend(labels_rh[:4])
n_labels = len(labels)
label_means = np.arange(n_labels)[:, None] * np.ones((n_labels, n_times))
label_maxs = np.arange(n_labels)[:, None] * np.ones((n_labels, n_times))
# compute the mean with sign flip
label_means_flipped = np.zeros_like(label_means)
for i, label in enumerate(labels):
label_means_flipped[i] = i * np.mean(label_sign_flip(label, src))
# generate some stc's with known data
stcs = list()
for i in range(n_stcs):
data = np.zeros((n_verts, n_times))
# set the value of the stc within each label
for j, label in enumerate(labels):
if label.hemi == 'lh':
idx = np.intersect1d(vertices[0], label.vertices)
idx = np.searchsorted(vertices[0], idx)
elif label.hemi == 'rh':
idx = np.intersect1d(vertices[1], label.vertices)
idx = len(vertices[0]) + np.searchsorted(vertices[1], idx)
data[idx] = label_means[j]
this_stc = SourceEstimate(data, vertices, 0, 1)
stcs.append(this_stc)
# test some invalid inputs
assert_raises(ValueError, extract_label_time_course, stcs, labels,
src, mode='notamode')
# have an empty label
empty_label = labels[0].copy()
empty_label.vertices += 1000000
assert_raises(ValueError, extract_label_time_course, stcs, empty_label,
src, mode='mean')
# but this works:
with warnings.catch_warnings(record=True): # empty label
tc = extract_label_time_course(stcs, empty_label, src, mode='mean',
allow_empty=True)
for arr in tc:
assert_true(arr.shape == (1, n_times))
assert_array_equal(arr, np.zeros((1, n_times)))
# test the different modes
modes = ['mean', 'mean_flip', 'pca_flip', 'max']
for mode in modes:
label_tc = extract_label_time_course(stcs, labels, src, mode=mode)
label_tc_method = [stc.extract_label_time_course(labels, src,
mode=mode) for stc in stcs]
assert_true(len(label_tc) == n_stcs)
assert_true(len(label_tc_method) == n_stcs)
for tc1, tc2 in zip(label_tc, label_tc_method):
assert_true(tc1.shape == (n_labels, n_times))
assert_true(tc2.shape == (n_labels, n_times))
assert_true(np.allclose(tc1, tc2, rtol=1e-8, atol=1e-16))
if mode == 'mean':
assert_array_almost_equal(tc1, label_means)
if mode == 'mean_flip':
assert_array_almost_equal(tc1, label_means_flipped)
if mode == 'max':
assert_array_almost_equal(tc1, label_maxs)
# test label with very few vertices (check SVD conditionals)
label = Label(vertices=src[0]['vertno'][:2], hemi='lh')
x = label_sign_flip(label, src)
assert_true(len(x) == 2)
label = Label(vertices=[], hemi='lh')
x = label_sign_flip(label, src)
assert_true(x.size == 0)
@pytest.mark.slowtest
@testing.requires_testing_data
def test_morph_data():
"""Test morphing of data."""
tempdir = _TempDir()
subject_from = 'sample'
subject_to = 'fsaverage'
stc_from = read_source_estimate(fname_smorph, subject='sample')
stc_to = read_source_estimate(fname_fmorph)
# make sure we can specify grade
stc_from.crop(0.09, 0.1) # for faster computation
stc_to.crop(0.09, 0.1) # for faster computation
assert_array_equal(stc_to.time_as_index([0.09, 0.1], use_rounding=True),
[0, len(stc_to.times) - 1])
assert_raises(ValueError, stc_from.morph, subject_to, grade=3, smooth=-1,
subjects_dir=subjects_dir)
stc_to1 = stc_from.morph(subject_to, grade=3, smooth=12, buffer_size=1000,
subjects_dir=subjects_dir)
stc_to1.save(op.join(tempdir, '%s_audvis-meg' % subject_to))
# Morphing to a density that is too high should raise an informative error
# (here we need to push to grade=6, but for some subjects even grade=5
# will break)
assert_raises(ValueError, stc_to1.morph, subject_from, grade=6,
subjects_dir=subjects_dir)
# make sure we can specify vertices
vertices_to = grade_to_vertices(subject_to, grade=3,
subjects_dir=subjects_dir)
stc_to2 = morph_data(subject_from, subject_to, stc_from,
grade=vertices_to, smooth=12, buffer_size=1000,
subjects_dir=subjects_dir)
# make sure we can use different buffer_size
stc_to3 = morph_data(subject_from, subject_to, stc_from,
grade=vertices_to, smooth=12, buffer_size=3,
subjects_dir=subjects_dir)
# make sure we get a warning about # of steps
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
morph_data(subject_from, subject_to, stc_from,
grade=vertices_to, smooth=1, buffer_size=3,
subjects_dir=subjects_dir)
assert_equal(len(w), 2)
assert_array_almost_equal(stc_to.data, stc_to1.data, 5)
assert_array_almost_equal(stc_to1.data, stc_to2.data)
assert_array_almost_equal(stc_to1.data, stc_to3.data)
# make sure precomputed morph matrices work
morph_mat = compute_morph_matrix(subject_from, subject_to,
stc_from.vertices, vertices_to,
smooth=12, subjects_dir=subjects_dir)
stc_to3 = stc_from.morph_precomputed(subject_to, vertices_to, morph_mat)
assert_array_almost_equal(stc_to1.data, stc_to3.data)
assert_raises(ValueError, stc_from.morph_precomputed,
subject_to, vertices_to, 'foo')
assert_raises(ValueError, stc_from.morph_precomputed,
subject_to, [vertices_to[0]], morph_mat)
assert_raises(ValueError, stc_from.morph_precomputed,
subject_to, [vertices_to[0][:-1], vertices_to[1]], morph_mat)
assert_raises(ValueError, stc_from.morph_precomputed, subject_to,
vertices_to, morph_mat, subject_from='foo')
# steps warning
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
compute_morph_matrix(subject_from, subject_to,
stc_from.vertices, vertices_to,
smooth=1, subjects_dir=subjects_dir)
assert_equal(len(w), 2)
mean_from = stc_from.data.mean(axis=0)
mean_to = stc_to1.data.mean(axis=0)
assert_true(np.corrcoef(mean_to, mean_from).min() > 0.999)
# make sure we can fill by morphing
stc_to5 = morph_data(subject_from, subject_to, stc_from, grade=None,
smooth=12, buffer_size=3, subjects_dir=subjects_dir)
assert_true(stc_to5.data.shape[0] == 163842 + 163842)
# Morph sparse data
# Make a sparse stc
stc_from.vertices[0] = stc_from.vertices[0][[100, 500]]
stc_from.vertices[1] = stc_from.vertices[1][[200]]
stc_from._data = stc_from._data[:3]
assert_raises(RuntimeError, stc_from.morph, subject_to, sparse=True,
grade=5, subjects_dir=subjects_dir)
stc_to_sparse = stc_from.morph(subject_to, grade=None, sparse=True,
subjects_dir=subjects_dir)
assert_array_almost_equal(np.sort(stc_from.data.sum(axis=1)),
np.sort(stc_to_sparse.data.sum(axis=1)))
assert_equal(len(stc_from.rh_vertno), len(stc_to_sparse.rh_vertno))
assert_equal(len(stc_from.lh_vertno), len(stc_to_sparse.lh_vertno))
assert_equal(stc_to_sparse.subject, subject_to)
assert_equal(stc_from.tmin, stc_from.tmin)
assert_equal(stc_from.tstep, stc_from.tstep)
stc_from.vertices[0] = np.array([], dtype=np.int64)
stc_from._data = stc_from._data[:1]
stc_to_sparse = stc_from.morph(subject_to, grade=None, sparse=True,
subjects_dir=subjects_dir)
assert_array_almost_equal(np.sort(stc_from.data.sum(axis=1)),
np.sort(stc_to_sparse.data.sum(axis=1)))
assert_equal(len(stc_from.rh_vertno), len(stc_to_sparse.rh_vertno))
assert_equal(len(stc_from.lh_vertno), len(stc_to_sparse.lh_vertno))
assert_equal(stc_to_sparse.subject, subject_to)
assert_equal(stc_from.tmin, stc_from.tmin)
assert_equal(stc_from.tstep, stc_from.tstep)
# Morph vector data
stc_vec = _real_vec_stc()
# Ignore warnings about number of steps
stc_vec_to1 = stc_vec.morph(subject_to, grade=3, smooth=12,
buffer_size=1000, subjects_dir=subjects_dir)
stc_vec_to2 = stc_vec.morph_precomputed(subject_to, vertices_to, morph_mat)
assert_array_almost_equal(stc_vec_to1.data, stc_vec_to2.data)
def _my_trans(data):
"""FFT that adds an additional dimension by repeating result."""
data_t = fft(data)
data_t = np.concatenate([data_t[:, :, None], data_t[:, :, None]], axis=2)
return data_t, None
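# Shape note (inferred from the code above, not a claim from the original authors):
# for an input of shape (n, t), _my_trans returns an array of shape (n, t, 2) plus None.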
def test_transform_data():
"""Test applying linear (time) transform to data."""
# make up some data
n_sensors, n_vertices, n_times = 10, 20, 4
kernel = rng.randn(n_vertices, n_sensors)
sens_data = rng.randn(n_sensors, n_times)
vertices = np.arange(n_vertices)
data = np.dot(kernel, sens_data)
for idx, tmin_idx, tmax_idx in\
zip([None, np.arange(n_vertices // 2, n_vertices)],
[None, 1], [None, 3]):
if idx is None:
idx_use = slice(None, None)
else:
idx_use = idx
data_f, _ = _my_trans(data[idx_use, tmin_idx:tmax_idx])
for stc_data in (data, (kernel, sens_data)):
stc = VolSourceEstimate(stc_data, vertices=vertices,
tmin=0., tstep=1.)
stc_data_t = stc.transform_data(_my_trans, idx=idx,
tmin_idx=tmin_idx,
tmax_idx=tmax_idx)
assert_allclose(data_f, stc_data_t)
def test_transform():
"""Test applying linear (time) transform to data."""
# make up some data
n_verts_lh, n_verts_rh, n_times = 10, 10, 10
vertices = [np.arange(n_verts_lh), n_verts_lh + np.arange(n_verts_rh)]
data = rng.randn(n_verts_lh + n_verts_rh, n_times)
stc = SourceEstimate(data, vertices=vertices, tmin=-0.1, tstep=0.1)
# data_t.ndim > 2 & copy is True
stcs_t = stc.transform(_my_trans, copy=True)
assert_true(isinstance(stcs_t, list))
assert_array_equal(stc.times, stcs_t[0].times)
assert_equal(stc.vertices, stcs_t[0].vertices)
data = np.concatenate((stcs_t[0].data[:, :, None],
stcs_t[1].data[:, :, None]), axis=2)
data_t = stc.transform_data(_my_trans)
assert_array_equal(data, data_t) # check against stc.transform_data()
# data_t.ndim > 2 & copy is False
assert_raises(ValueError, stc.transform, _my_trans, copy=False)
# data_t.ndim = 2 & copy is True
tmp = deepcopy(stc)
stc_t = stc.transform(np.abs, copy=True)
assert_true(isinstance(stc_t, SourceEstimate))
assert_array_equal(stc.data, tmp.data) # xfrm doesn't modify original?
# data_t.ndim = 2 & copy is False
times = np.round(1000 * stc.times)
verts = np.arange(len(stc.lh_vertno),
len(stc.lh_vertno) + len(stc.rh_vertno), 1)
verts_rh = stc.rh_vertno
tmin_idx = np.searchsorted(times, 0)
tmax_idx = np.searchsorted(times, 501) # Include 500ms in the range
data_t = stc.transform_data(np.abs, idx=verts, tmin_idx=tmin_idx,
tmax_idx=tmax_idx)
stc.transform(np.abs, idx=verts, tmin=-50, tmax=500, copy=False)
assert_true(isinstance(stc, SourceEstimate))
assert_equal(stc.tmin, 0.)
assert_equal(stc.times[-1], 0.5)
assert_equal(len(stc.vertices[0]), 0)
assert_equal(stc.vertices[1], verts_rh)
assert_array_equal(stc.data, data_t)
times = np.round(1000 * stc.times)
tmin_idx, tmax_idx = np.searchsorted(times, 0), np.searchsorted(times, 250)
data_t = stc.transform_data(np.abs, tmin_idx=tmin_idx, tmax_idx=tmax_idx)
stc.transform(np.abs, tmin=0, tmax=250, copy=False)
assert_equal(stc.tmin, 0.)
assert_equal(stc.times[-1], 0.2)
assert_array_equal(stc.data, data_t)
@requires_sklearn
def test_spatio_temporal_tris_connectivity():
"""Test spatio-temporal connectivity from triangles."""
tris = np.array([[0, 1, 2], [3, 4, 5]])
connectivity = spatio_temporal_tris_connectivity(tris, 2)
x = [1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]
components = stats.cluster_level._get_components(np.array(x), connectivity)
# _get_components works differently now...
old_fmt = [0, 0, -2, -2, -2, -2, 0, -2, -2, -2, -2, 1]
new_fmt = np.array(old_fmt)
new_fmt = [np.nonzero(new_fmt == v)[0]
for v in np.unique(new_fmt[new_fmt >= 0])]
    assert_equal(len(new_fmt), len(components))
for c, n in zip(components, new_fmt):
assert_array_equal(c, n)
@testing.requires_testing_data
def test_spatio_temporal_src_connectivity():
"""Test spatio-temporal connectivity from source spaces."""
tris = np.array([[0, 1, 2], [3, 4, 5]])
src = [dict(), dict()]
connectivity = spatio_temporal_tris_connectivity(tris, 2)
src[0]['use_tris'] = np.array([[0, 1, 2]])
src[1]['use_tris'] = np.array([[0, 1, 2]])
src[0]['vertno'] = np.array([0, 1, 2])
src[1]['vertno'] = np.array([0, 1, 2])
src[0]['type'] = 'surf'
src[1]['type'] = 'surf'
connectivity2 = spatio_temporal_src_connectivity(src, 2)
assert_array_equal(connectivity.todense(), connectivity2.todense())
# add test for dist connectivity
src[0]['dist'] = np.ones((3, 3)) - np.eye(3)
src[1]['dist'] = np.ones((3, 3)) - np.eye(3)
src[0]['vertno'] = [0, 1, 2]
src[1]['vertno'] = [0, 1, 2]
src[0]['type'] = 'surf'
src[1]['type'] = 'surf'
connectivity3 = spatio_temporal_src_connectivity(src, 2, dist=2)
assert_array_equal(connectivity.todense(), connectivity3.todense())
# add test for source space connectivity with omitted vertices
inverse_operator = read_inverse_operator(fname_inv)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
src_ = inverse_operator['src']
connectivity = spatio_temporal_src_connectivity(src_, n_times=2)
assert_equal(len(w), 1)
a = connectivity.shape[0] / 2
b = sum([s['nuse'] for s in inverse_operator['src']])
assert_true(a == b)
assert_equal(grade_to_tris(5).shape, [40960, 3])
@requires_pandas
def test_to_data_frame():
"""Test stc Pandas exporter."""
n_vert, n_times = 10, 5
vertices = [np.arange(n_vert, dtype=np.int), np.empty(0, dtype=np.int)]
data = rng.randn(n_vert, n_times)
stc_surf = SourceEstimate(data, vertices=vertices, tmin=0, tstep=1,
subject='sample')
stc_vol = VolSourceEstimate(data, vertices=vertices[0], tmin=0, tstep=1,
subject='sample')
for stc in [stc_surf, stc_vol]:
assert_raises(ValueError, stc.to_data_frame, index=['foo', 'bar'])
for ncat, ind in zip([1, 0], ['time', ['subject', 'time']]):
df = stc.to_data_frame(index=ind)
            assert_true(df.index.names ==
                        (ind if isinstance(ind, list) else [ind]))
assert_array_equal(df.values.T[ncat:], stc.data)
            # test that non-indexed data are present as categorical variables
assert_true(all([c in ['time', 'subject'] for c in
df.reset_index().columns][:2]))
def test_get_peak():
"""Test peak getter."""
n_vert, n_times = 10, 5
vertices = [np.arange(n_vert, dtype=np.int), np.empty(0, dtype=np.int)]
data = rng.randn(n_vert, n_times)
stc_surf = SourceEstimate(data, vertices=vertices, tmin=0, tstep=1,
subject='sample')
stc_vol = VolSourceEstimate(data, vertices=vertices[0], tmin=0, tstep=1,
subject='sample')
for ii, stc in enumerate([stc_surf, stc_vol]):
assert_raises(ValueError, stc.get_peak, tmin=-100)
assert_raises(ValueError, stc.get_peak, tmax=90)
assert_raises(ValueError, stc.get_peak, tmin=0.002, tmax=0.001)
vert_idx, time_idx = stc.get_peak()
vertno = np.concatenate(stc.vertices) if ii == 0 else stc.vertices
assert_true(vert_idx in vertno)
assert_true(time_idx in stc.times)
ch_idx, time_idx = stc.get_peak(vert_as_index=True,
time_as_index=True)
assert_true(vert_idx < stc.data.shape[0])
assert_true(time_idx < len(stc.times))
@testing.requires_testing_data
def test_mixed_stc():
"""Test source estimate from mixed source space."""
N = 90 # number of sources
T = 2 # number of time points
S = 3 # number of source spaces
data = rng.randn(N, T)
vertno = S * [np.arange(N // S)]
# make sure error is raised if vertices are not a list of length >= 2
assert_raises(ValueError, MixedSourceEstimate, data=data,
vertices=[np.arange(N)])
stc = MixedSourceEstimate(data, vertno, 0, 1)
vol = read_source_spaces(fname_vsrc)
# make sure error is raised for plotting surface with volume source
assert_raises(ValueError, stc.plot_surface, src=vol)
def test_vec_stc():
"""Test vector source estimate."""
nn = np.array([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[np.sqrt(1 / 3.)] * 3
])
src = [dict(nn=nn[:2]), dict(nn=nn[2:])]
verts = [np.array([0, 1]), np.array([0, 1])]
data = np.array([
[1, 0, 0],
[0, 2, 0],
[3, 0, 0],
[1, 1, 1],
])[:, :, np.newaxis]
stc = VectorSourceEstimate(data, verts, 0, 1, 'foo')
# Magnitude of the vectors
assert_array_equal(stc.magnitude().data[:, 0], [1, 2, 3, np.sqrt(3)])
# Vector components projected onto the vertex normals
normal = stc.normal(src)
assert_array_equal(normal.data[:, 0], [1, 2, 0,
|
np.sqrt(3)
|
numpy.sqrt
|
# -*- coding: utf-8 -*-
# Copyright 2018 the HERA Project
# Licensed under the MIT License
import hera_cal.redcal as om
import numpy as np
import unittest
from copy import deepcopy
from hera_cal.utils import split_pol, conj_pol
np.random.seed(0)
def build_linear_array(nants, sep=14.7):
antpos = {i: np.array([sep * i, 0, 0]) for i in range(nants)}
return antpos
def build_hex_array(hexNum, sep=14.7):
antpos, i = {}, 0
for row in range(hexNum - 1, -(hexNum), -1):
for col in range(2 * hexNum - abs(row) - 1):
xPos = ((-(2 * hexNum - abs(row)) + 2) / 2.0 + col) * sep
yPos = row * sep * 3**.5 / 2
antpos[i] = np.array([xPos, yPos, 0])
i += 1
return antpos
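# Worked example (simple arithmetic, not from the original file): build_hex_array(2)
# returns 7 antenna positions (rows of 2, 3, 2) and build_hex_array(3) returns 19
# (rows of 3, 4, 5, 4, 3), with nearest-neighbour spacing `sep`.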
class TestMethods(unittest.TestCase):
def test_noise(self):
n = om.noise((1024, 1024))
self.assertEqual(n.shape, (1024, 1024))
self.assertAlmostEqual(np.var(n), 1, 2)
def test_sim_red_data(self):
antpos = build_linear_array(10)
reds = om.get_reds(antpos, pols=['XX'], pol_mode='1pol')
gains, true_vis, data = om.sim_red_data(reds)
self.assertEqual(len(gains), 10)
self.assertEqual(len(data), 45)
for bls in reds:
bl0 = bls[0]
ai, aj, pol = bl0
ans0 = data[bl0] / (gains[(ai, 'jxx')] * gains[(aj, 'jxx')].conj())
for bl in bls[1:]:
ai, aj, pol = bl
ans = data[bl] / (gains[(ai, 'jxx')] * gains[(aj, 'jxx')].conj())
np.testing.assert_almost_equal(ans0, ans, 7)
reds = om.get_reds(antpos, pols=['XX', 'YY', 'XY', 'YX'], pol_mode='4pol')
gains, true_vis, data = om.sim_red_data(reds)
self.assertEqual(len(gains), 20)
self.assertEqual(len(data), 4 * (45))
for bls in reds:
bl0 = bls[0]
ai, aj, pol = bl0
ans0xx = data[(ai, aj, 'XX',)] / (gains[(ai, 'jxx')] * gains[(aj, 'jxx')].conj())
ans0xy = data[(ai, aj, 'XY',)] / (gains[(ai, 'jxx')] * gains[(aj, 'jyy')].conj())
ans0yx = data[(ai, aj, 'YX',)] / (gains[(ai, 'jyy')] * gains[(aj, 'jxx')].conj())
ans0yy = data[(ai, aj, 'YY',)] / (gains[(ai, 'jyy')] * gains[(aj, 'jyy')].conj())
for bl in bls[1:]:
ai, aj, pol = bl
ans_xx = data[(ai, aj, 'XX',)] / (gains[(ai, 'jxx')] * gains[(aj, 'jxx')].conj())
ans_xy = data[(ai, aj, 'XY',)] / (gains[(ai, 'jxx')] * gains[(aj, 'jyy')].conj())
ans_yx = data[(ai, aj, 'YX',)] / (gains[(ai, 'jyy')] * gains[(aj, 'jxx')].conj())
ans_yy = data[(ai, aj, 'YY',)] / (gains[(ai, 'jyy')] * gains[(aj, 'jyy')].conj())
np.testing.assert_almost_equal(ans0xx, ans_xx, 7)
np.testing.assert_almost_equal(ans0xy, ans_xy, 7)
np.testing.assert_almost_equal(ans0yx, ans_yx, 7)
np.testing.assert_almost_equal(ans0yy, ans_yy, 7)
        reds = om.get_reds(antpos, pols=['XX', 'YY', 'XY', 'YX'], pol_mode='4pol_minV')
gains, true_vis, data = om.sim_red_data(reds)
self.assertEqual(len(gains), 20)
self.assertEqual(len(data), 4 * (45))
for bls in reds:
bl0 = bls[0]
ai, aj, pol = bl0
ans0xx = data[(ai, aj, 'XX',)] / (gains[(ai, 'jxx')] * gains[(aj, 'jxx')].conj())
ans0xy = data[(ai, aj, 'XY',)] / (gains[(ai, 'jxx')] * gains[(aj, 'jyy')].conj())
ans0yx = data[(ai, aj, 'YX',)] / (gains[(ai, 'jyy')] * gains[(aj, 'jxx')].conj())
ans0yy = data[(ai, aj, 'YY',)] / (gains[(ai, 'jyy')] * gains[(aj, 'jyy')].conj())
np.testing.assert_almost_equal(ans0xy, ans0yx, 7)
for bl in bls[1:]:
ai, aj, pol = bl
ans_xx = data[(ai, aj, 'XX',)] / (gains[(ai, 'jxx')] * gains[(aj, 'jxx')].conj())
ans_xy = data[(ai, aj, 'XY',)] / (gains[(ai, 'jxx')] * gains[(aj, 'jyy')].conj())
ans_yx = data[(ai, aj, 'YX',)] / (gains[(ai, 'jyy')] * gains[(aj, 'jxx')].conj())
ans_yy = data[(ai, aj, 'YY',)] / (gains[(ai, 'jyy')] * gains[(aj, 'jyy')].conj())
np.testing.assert_almost_equal(ans0xx, ans_xx, 7)
np.testing.assert_almost_equal(ans0xy, ans_xy, 7)
np.testing.assert_almost_equal(ans0yx, ans_yx, 7)
np.testing.assert_almost_equal(ans0yy, ans_yy, 7)
def test_check_polLists_minV(self):
polLists = [['XY']]
self.assertFalse(om.check_polLists_minV(polLists))
polLists = [['XX', 'XY']]
self.assertFalse(om.check_polLists_minV(polLists))
polLists = [['XX', 'XY', 'YX']]
self.assertFalse(om.check_polLists_minV(polLists))
polLists = [['XY', 'YX'], ['XX'], ['YY'], ['XX'], ['YX', 'XY'], ['YY']]
self.assertTrue(om.check_polLists_minV(polLists))
def test_parse_pol_mode(self):
reds = [[(0, 1, 'XX')]]
self.assertEqual(om.parse_pol_mode(reds), '1pol')
reds = [[(0, 1, 'XX')], [(0, 1, 'YY')]]
self.assertEqual(om.parse_pol_mode(reds), '2pol')
reds = [[(0, 1, 'XX')], [(0, 1, 'XY')], [(0, 1, 'YX')], [(0, 1, 'YY')]]
self.assertEqual(om.parse_pol_mode(reds), '4pol')
reds = [[(0, 1, 'XX')], [(0, 1, 'XY'), (0, 1, 'YX')], [(0, 1, 'YY')]]
self.assertEqual(om.parse_pol_mode(reds), '4pol_minV')
reds = [[(0, 1, 'XX')], [(0, 1, 'XY'), (0, 1, 'YX')], [(0, 1, 'LR')]]
self.assertEqual(om.parse_pol_mode(reds), 'unrecognized_pol_mode')
reds = [[(0, 1, 'XX')], [(0, 1, 'XY')]]
self.assertEqual(om.parse_pol_mode(reds), 'unrecognized_pol_mode')
reds = [[(0, 1, 'XY')]]
self.assertEqual(om.parse_pol_mode(reds), 'unrecognized_pol_mode')
reds = [[(0, 1, 'XX')], [(0, 1, 'XY'), (0, 1, 'YY')], [(0, 1, 'YX')]]
self.assertEqual(om.parse_pol_mode(reds), 'unrecognized_pol_mode')
def test_get_pos_red(self):
pos = build_hex_array(3, sep=14.7)
self.assertEqual(len(om.get_pos_reds(pos)), 30)
pos = build_hex_array(7, sep=14.7)
self.assertEqual(len(om.get_pos_reds(pos)), 234)
for ant, r in pos.items():
pos[ant] += [0, 0, 1 * r[0] - .5 * r[1]]
self.assertEqual(len(om.get_pos_reds(pos)), 234)
pos = build_hex_array(7, sep=1)
self.assertLess(len(om.get_pos_reds(pos)), 234)
self.assertEqual(len(om.get_pos_reds(pos, bl_error_tol=.1)), 234)
pos = build_hex_array(7, sep=14.7)
blerror = 1.0 - 1e-12
error = blerror / 4
for key, val in pos.items():
th = np.random.choice([0, np.pi / 2, np.pi])
phi = np.random.choice([0, np.pi / 2, np.pi, 3 * np.pi / 2])
pos[key] = val + error * np.array([np.sin(th) * np.cos(phi), np.sin(th) * np.sin(phi), np.cos(th)])
self.assertEqual(len(om.get_pos_reds(pos, bl_error_tol=1.0)), 234)
self.assertGreater(len(om.get_pos_reds(pos, bl_error_tol=.99)), 234)
pos = {0: np.array([0, 0, 0]), 1: np.array([20, 0, 0]), 2: np.array([10, 0, 0])}
self.assertEqual(om.get_pos_reds(pos), [[(0, 2), (2, 1)], [(0, 1)]])
self.assertEqual(om.get_pos_reds(pos, low_hi=True), [[(0, 2), (1, 2)], [(0, 1)]])
def test_add_pol_reds(self):
reds = [[(1, 2)]]
polReds = om.add_pol_reds(reds, pols=['XX'], pol_mode='1pol')
self.assertEqual(polReds, [[(1, 2, 'XX')]])
polReds = om.add_pol_reds(reds, pols=['XX', 'YY'], pol_mode='2pol')
self.assertEqual(polReds, [[(1, 2, 'XX')], [(1, 2, 'YY')]])
polReds = om.add_pol_reds(reds, pols=['XX', 'XY', 'YX', 'YY'], pol_mode='4pol')
self.assertEqual(polReds, [[(1, 2, 'XX')], [(1, 2, 'XY')], [(1, 2, 'YX')], [(1, 2, 'YY')]])
polReds = om.add_pol_reds(reds, pols=['XX', 'XY', 'YX', 'YY'], pol_mode='4pol_minV')
self.assertEqual(polReds, [[(1, 2, 'XX')], [(1, 2, 'XY'), (1, 2, 'YX')], [(1, 2, 'YY')]])
polReds = om.add_pol_reds(reds, pols=['XX', 'YY'], pol_mode='2pol', ex_ants=[(2, 'jyy')])
self.assertEqual(polReds, [[(1, 2, 'XX')], []])
polReds = om.add_pol_reds(reds, pols=['XX', 'XY', 'YX', 'YY'], pol_mode='4pol', ex_ants=[(2, 'jyy')])
self.assertEqual(polReds, [[(1, 2, 'XX')], [], [(1, 2, 'YX')], []])
polReds = om.add_pol_reds(reds, pols=['XX', 'XY', 'YX', 'YY'], pol_mode='4pol_minV', ex_ants=[(2, 'jyy')])
self.assertEqual(polReds, [[(1, 2, 'XX')], [(1, 2, 'YX')], []])
def test_multiply_by_gains(self):
vis_in = {(1, 2, 'XX'): 1.6 + 2.3j}
gains = {(1, 'jxx'): .3 + 2.6j, (2, 'jxx'): -1.2 - 7.3j}
vis_out = om.multiply_by_gains(vis_in, gains, target_type='vis')
self.assertAlmostEqual(1.6 + 2.3j, vis_in[(1, 2, 'XX')], 10)
self.assertAlmostEqual(-28.805 - 45.97j, vis_out[(1, 2, 'XX')], 10)
gains_out = om.multiply_by_gains(gains, gains, target_type='gain')
self.assertAlmostEqual(.3 + 2.6j, gains[(1, 'jxx')], 10)
self.assertAlmostEqual(-6.67 + 1.56j, gains_out[(1, 'jxx')], 10)
def test_divide_by_gains(self):
vis_in = {(1, 2, 'XX'): 1.6 + 2.3j}
gains = {(1, 'jxx'): .3 + 2.6j, (2, 'jxx'): -1.2 - 7.3j}
vis_out = om.divide_by_gains(vis_in, gains, target_type='vis')
self.assertAlmostEqual(1.6 + 2.3j, vis_in[(1, 2, 'XX')], 10)
self.assertAlmostEqual(-0.088244747606364887 - 0.11468109538397521j, vis_out[(1, 2, 'XX')], 10)
gains_out = om.divide_by_gains(gains, gains, target_type='gain')
self.assertAlmostEqual(.3 + 2.6j, gains[(1, 'jxx')], 10)
self.assertAlmostEqual(1.0, gains_out[(1, 'jxx')], 10)
class TestRedundantCalibrator(unittest.TestCase):
def test_build_eq(self):
antpos = build_linear_array(3)
reds = om.get_reds(antpos, pols=['XX'], pol_mode='1pol')
gains, true_vis, data = om.sim_red_data(reds)
info = om.RedundantCalibrator(reds)
eqs = info.build_eqs(data.keys())
self.assertEqual(len(eqs), 3)
self.assertEqual(eqs['g_0_jxx * g_1_jxx_ * u_0_XX'], (0, 1, 'XX'))
self.assertEqual(eqs['g_1_jxx * g_2_jxx_ * u_0_XX'], (1, 2, 'XX'))
self.assertEqual(eqs['g_0_jxx * g_2_jxx_ * u_1_XX'], (0, 2, 'XX'))
reds = om.get_reds(antpos, pols=['XX', 'YY', 'XY', 'YX'], pol_mode='4pol')
gains, true_vis, data = om.sim_red_data(reds)
info = om.RedundantCalibrator(reds)
eqs = info.build_eqs(data.keys())
self.assertEqual(len(eqs), 3 * 4)
self.assertEqual(eqs['g_0_jxx * g_1_jyy_ * u_4_XY'], (0, 1, 'XY'))
self.assertEqual(eqs['g_1_jxx * g_2_jyy_ * u_4_XY'], (1, 2, 'XY'))
self.assertEqual(eqs['g_0_jxx * g_2_jyy_ * u_5_XY'], (0, 2, 'XY'))
self.assertEqual(eqs['g_0_jyy * g_1_jxx_ * u_6_YX'], (0, 1, 'YX'))
self.assertEqual(eqs['g_1_jyy * g_2_jxx_ * u_6_YX'], (1, 2, 'YX'))
self.assertEqual(eqs['g_0_jyy * g_2_jxx_ * u_7_YX'], (0, 2, 'YX'))
reds = om.get_reds(antpos, pols=['XX', 'YY', 'XY', 'YX'], pol_mode='4pol_minV')
gains, true_vis, data = om.sim_red_data(reds)
info = om.RedundantCalibrator(reds)
eqs = info.build_eqs(data.keys())
self.assertEqual(len(eqs), 3 * 4)
self.assertEqual(eqs['g_0_jxx * g_1_jyy_ * u_4_XY'], (0, 1, 'XY'))
self.assertEqual(eqs['g_1_jxx * g_2_jyy_ * u_4_XY'], (1, 2, 'XY'))
self.assertEqual(eqs['g_0_jxx * g_2_jyy_ * u_5_XY'], (0, 2, 'XY'))
self.assertEqual(eqs['g_0_jyy * g_1_jxx_ * u_4_XY'], (0, 1, 'YX'))
self.assertEqual(eqs['g_1_jyy * g_2_jxx_ * u_4_XY'], (1, 2, 'YX'))
self.assertEqual(eqs['g_0_jyy * g_2_jxx_ * u_5_XY'], (0, 2, 'YX'))
def test_solver(self):
antpos = build_linear_array(3)
reds = om.get_reds(antpos, pols=['XX'], pol_mode='1pol')
info = om.RedundantCalibrator(reds)
gains, true_vis, d = om.sim_red_data(reds)
w = {}
w = dict([(k, 1.) for k in d.keys()])
def solver(data, wgts, sparse, **kwargs):
np.testing.assert_equal(data['g_0_jxx * g_1_jxx_ * u_0_XX'], d[0, 1, 'XX'])
np.testing.assert_equal(data['g_1_jxx * g_2_jxx_ * u_0_XX'], d[1, 2, 'XX'])
np.testing.assert_equal(data['g_0_jxx * g_2_jxx_ * u_1_XX'], d[0, 2, 'XX'])
if len(wgts) == 0:
return
np.testing.assert_equal(wgts['g_0_jxx * g_1_jxx_ * u_0_XX'], w[0, 1, 'XX'])
np.testing.assert_equal(wgts['g_1_jxx * g_2_jxx_ * u_0_XX'], w[1, 2, 'XX'])
np.testing.assert_equal(wgts['g_0_jxx * g_2_jxx_ * u_1_XX'], w[0, 2, 'XX'])
return
info._solver(solver, d)
info._solver(solver, d, w)
def test_logcal(self):
NANTS = 18
antpos = build_linear_array(NANTS)
reds = om.get_reds(antpos, pols=['XX'], pol_mode='1pol')
info = om.RedundantCalibrator(reds)
gains, true_vis, d = om.sim_red_data(reds, gain_scatter=.05)
w = dict([(k, 1.) for k in d.keys()])
sol = info.logcal(d)
        for i in range(NANTS):
self.assertEqual(sol[(i, 'jxx')].shape, (10, 10))
for bls in reds:
ubl = sol[bls[0]]
self.assertEqual(ubl.shape, (10, 10))
for bl in bls:
d_bl = d[bl]
mdl = sol[(bl[0], 'jxx')] * sol[(bl[1], 'jxx')].conj() * ubl
np.testing.assert_almost_equal(np.abs(d_bl), np.abs(mdl), 10)
np.testing.assert_almost_equal(np.angle(d_bl * mdl.conj()), 0, 10)
def test_lincal(self):
NANTS = 18
antpos = build_linear_array(NANTS)
reds = om.get_reds(antpos, pols=['XX'], pol_mode='1pol')
info = om.RedundantCalibrator(reds)
gains, true_vis, d = om.sim_red_data(reds, gain_scatter=.0099999)
w = dict([(k, 1.) for k in d.keys()])
sol0 = dict([(k, np.ones_like(v)) for k, v in gains.items()])
sol0.update(info.compute_ubls(d, sol0))
#sol0 = info.logcal(d)
#for k in sol0: sol0[k] += .01*capo.oqe.noise(sol0[k].shape)
meta, sol = info.lincal(d, sol0)
        for i in range(NANTS):
self.assertEqual(sol[(i, 'jxx')].shape, (10, 10))
for bls in reds:
ubl = sol[bls[0]]
self.assertEqual(ubl.shape, (10, 10))
for bl in bls:
d_bl = d[bl]
mdl = sol[(bl[0], 'jxx')] * sol[(bl[1], 'jxx')].conj() * ubl
np.testing.assert_almost_equal(np.abs(d_bl), np.abs(mdl), 10)
np.testing.assert_almost_equal(np.angle(d_bl * mdl.conj()), 0, 10)
def test_lincal_hex_end_to_end_1pol_with_remove_degen_and_firstcal(self):
antpos = build_hex_array(3)
reds = om.get_reds(antpos, pols=['XX'], pol_mode='1pol')
rc = om.RedundantCalibrator(reds)
freqs = np.linspace(.1, .2, 10)
gains, true_vis, d = om.sim_red_data(reds, gain_scatter=.1, shape=(1, len(freqs)))
fc_delays = {ant: 100 * np.random.randn() for ant in gains.keys()} # in ns
fc_gains = {ant: np.reshape(np.exp(-2.0j * np.pi * freqs * delay), (1, len(freqs))) for ant, delay in fc_delays.items()}
for ant1, ant2, pol in d.keys():
d[(ant1, ant2, pol)] *= fc_gains[(ant1, split_pol(pol)[0])] * np.conj(fc_gains[(ant2, split_pol(pol)[1])])
for ant in gains.keys():
gains[ant] *= fc_gains[ant]
w = dict([(k, 1.) for k in d.keys()])
sol0 = rc.logcal(d, sol0=fc_gains, wgts=w)
meta, sol = rc.lincal(d, sol0, wgts=w)
np.testing.assert_array_less(meta['iter'], 50 * np.ones_like(meta['iter']))
np.testing.assert_almost_equal(meta['chisq'], np.zeros_like(meta['chisq']), decimal=10)
np.testing.assert_almost_equal(meta['chisq'], 0, 10)
        for i in range(len(antpos)):
self.assertEqual(sol[(i, 'jxx')].shape, (1, len(freqs)))
for bls in reds:
ubl = sol[bls[0]]
self.assertEqual(ubl.shape, (1, len(freqs)))
for bl in bls:
d_bl = d[bl]
mdl = sol[(bl[0], 'jxx')] * sol[(bl[1], 'jxx')].conj() * ubl
np.testing.assert_almost_equal(np.abs(d_bl), np.abs(mdl), 10)
np.testing.assert_almost_equal(np.angle(d_bl * mdl.conj()), 0, 10)
sol_rd = rc.remove_degen(antpos, sol)
g, v = om.get_gains_and_vis_from_sol(sol_rd)
ants = [key for key in sol_rd.keys() if len(key) == 2]
gainSols = np.array([sol_rd[ant] for ant in ants])
meanSqAmplitude = np.mean([np.abs(g[key1] * g[key2]) for key1 in g.keys()
for key2 in g.keys() if key1[1] == 'jxx' and key2[1] == 'jxx' and key1[0] != key2[0]], axis=0)
np.testing.assert_almost_equal(meanSqAmplitude, 1, 10)
#np.testing.assert_almost_equal(np.mean(np.angle(gainSols), axis=0), 0, 10)
for bls in reds:
ubl = sol_rd[bls[0]]
self.assertEqual(ubl.shape, (1, len(freqs)))
for bl in bls:
d_bl = d[bl]
mdl = sol_rd[(bl[0], 'jxx')] * sol_rd[(bl[1], 'jxx')].conj() * ubl
np.testing.assert_almost_equal(np.abs(d_bl), np.abs(mdl), 10)
np.testing.assert_almost_equal(np.angle(d_bl * mdl.conj()), 0, 10)
sol_rd = rc.remove_degen(antpos, sol, degen_sol=gains)
g, v = om.get_gains_and_vis_from_sol(sol_rd)
meanSqAmplitude = np.mean([np.abs(g[key1] * g[key2]) for key1 in g.keys()
for key2 in g.keys() if key1[1] == 'jxx' and key2[1] == 'jxx' and key1[0] != key2[0]], axis=0)
degenMeanSqAmplitude = np.mean([np.abs(gains[key1] * gains[key2]) for key1 in g.keys()
for key2 in g.keys() if key1[1] == 'jxx' and key2[1] == 'jxx' and key1[0] != key2[0]], axis=0)
np.testing.assert_almost_equal(meanSqAmplitude, degenMeanSqAmplitude, 10)
#np.testing.assert_almost_equal(np.mean(np.angle(gainSols), axis=0), 0, 10)
for key, val in sol_rd.items():
if len(key) == 2:
np.testing.assert_almost_equal(val, gains[key], 10)
if len(key) == 3:
np.testing.assert_almost_equal(val, true_vis[key], 10)
rc.pol_mode = 'unrecognized_pol_mode'
with self.assertRaises(ValueError):
sol_rd = rc.remove_degen(antpos, sol)
def test_lincal_hex_end_to_end_4pol_with_remove_degen_and_firstcal(self):
antpos = build_hex_array(3)
reds = om.get_reds(antpos, pols=['XX', 'XY', 'YX', 'YY'], pol_mode='4pol')
rc = om.RedundantCalibrator(reds)
freqs = np.linspace(.1, .2, 10)
gains, true_vis, d = om.sim_red_data(reds, gain_scatter=.09, shape=(1, len(freqs)))
fc_delays = {ant: 100 * np.random.randn() for ant in gains.keys()} # in ns
fc_gains = {ant: np.reshape(np.exp(-2.0j * np.pi * freqs * delay), (1, len(freqs))) for ant, delay in fc_delays.items()}
for ant1, ant2, pol in d.keys():
d[(ant1, ant2, pol)] *= fc_gains[(ant1, split_pol(pol)[0])] * np.conj(fc_gains[(ant2, split_pol(pol)[1])])
for ant in gains.keys():
gains[ant] *= fc_gains[ant]
w = dict([(k, 1.) for k in d.keys()])
sol0 = rc.logcal(d, sol0=fc_gains, wgts=w)
meta, sol = rc.lincal(d, sol0, wgts=w)
np.testing.assert_array_less(meta['iter'], 50 * np.ones_like(meta['iter']))
np.testing.assert_almost_equal(meta['chisq'], np.zeros_like(meta['chisq']), decimal=10)
np.testing.assert_almost_equal(meta['chisq'], 0, 10)
        for i in range(len(antpos)):
self.assertEqual(sol[(i, 'jxx')].shape, (1, len(freqs)))
self.assertEqual(sol[(i, 'jyy')].shape, (1, len(freqs)))
for bls in reds:
for bl in bls:
ubl = sol[bls[0]]
self.assertEqual(ubl.shape, (1, len(freqs)))
d_bl = d[bl]
mdl = sol[(bl[0], split_pol(bl[2])[0])] * sol[(bl[1], split_pol(bl[2])[1])].conj() * ubl
np.testing.assert_almost_equal(np.abs(d_bl), np.abs(mdl), 10)
np.testing.assert_almost_equal(np.angle(d_bl * mdl.conj()), 0, 10)
sol_rd = rc.remove_degen(antpos, sol)
ants = [key for key in sol_rd.keys() if len(key) == 2]
gainPols = np.array([ant[1] for ant in ants])
bl_pairs = [key for key in sol.keys() if len(key) == 3]
visPols = np.array([[bl[2][0], bl[2][1]] for bl in bl_pairs])
bl_vecs = np.array([antpos[bl_pair[0]] - antpos[bl_pair[1]] for bl_pair in bl_pairs])
gainSols = np.array([sol_rd[ant] for ant in ants])
g, v = om.get_gains_and_vis_from_sol(sol_rd)
meanSqAmplitude = np.mean([np.abs(g[key1] * g[key2]) for key1 in g.keys()
for key2 in g.keys() if key1[1] == 'jxx' and key2[1] == 'jxx' and key1[0] != key2[0]], axis=0)
np.testing.assert_almost_equal(meanSqAmplitude, 1, 10)
meanSqAmplitude = np.mean([np.abs(g[key1] * g[key2]) for key1 in g.keys()
for key2 in g.keys() if key1[1] == 'jyy' and key2[1] == 'jyy' and key1[0] != key2[0]], axis=0)
np.testing.assert_almost_equal(meanSqAmplitude, 1, 10)
#np.testing.assert_almost_equal(np.mean(np.angle(gainSols[gainPols=='jxx']), axis=0), 0, 10)
#np.testing.assert_almost_equal(np.mean(np.angle(gainSols[gainPols=='jyy']), axis=0), 0, 10)
for bls in reds:
for bl in bls:
ubl = sol_rd[bls[0]]
self.assertEqual(ubl.shape, (1, len(freqs)))
d_bl = d[bl]
mdl = sol_rd[(bl[0], split_pol(bl[2])[0])] * sol_rd[(bl[1], split_pol(bl[2])[1])].conj() * ubl
np.testing.assert_almost_equal(np.abs(d_bl), np.abs(mdl), 10)
np.testing.assert_almost_equal(np.angle(d_bl * mdl.conj()), 0, 10)
sol_rd = rc.remove_degen(antpos, sol, degen_sol=gains)
g, v = om.get_gains_and_vis_from_sol(sol_rd)
meanSqAmplitude = np.mean([np.abs(g[key1] * g[key2]) for key1 in g.keys()
for key2 in g.keys() if key1[1] == 'jxx' and key2[1] == 'jxx' and key1[0] != key2[0]], axis=0)
degenMeanSqAmplitude = np.mean([np.abs(gains[key1] * gains[key2]) for key1 in g.keys()
for key2 in g.keys() if key1[1] == 'jxx' and key2[1] == 'jxx' and key1[0] != key2[0]], axis=0)
|
np.testing.assert_almost_equal(meanSqAmplitude, degenMeanSqAmplitude, 10)
|
numpy.testing.assert_almost_equal
|
"""
This is used for rllib training purpose.
"""
from dispatch.location.models import Location
from dispatch.plugins.kandbox_planner.planner_engine.naive_manual_planner_shared_jobs_in_slots import (
NaivePlannerJobsInSlots,
)
from dispatch.plugins.kandbox_planner.util.kandbox_date_util import extract_minutes_from_datetime
from dispatch.plugins.bases.kandbox_planner import (
KandboxEnvPlugin,
KandboxPlannerPluginType,
KandboxEnvProxyPlugin,
)
from dispatch.plugins.kandbox_planner.rule.lunch_break import KandboxRulePluginLunchBreak
from dispatch.plugins.kandbox_planner.rule.travel_time import KandboxRulePluginSufficientTravelTime
from dispatch.plugins.kandbox_planner.rule.working_hour import KandboxRulePluginWithinWorkingHour
from dispatch.plugins.kandbox_planner.rule.requested_skills import KandboxRulePluginRequestedSkills
from dispatch.plugins.kandbox_planner.travel_time_plugin import HaversineTravelTime
# from dispatch.plugins.kandbox_planner.routing.travel_time_routingpy_redis import RoutingPyRedisTravelTime
from dispatch.config import (
REDIS_HOST,
REDIS_PORT,
REDIS_PASSWORD,
NBR_OF_OBSERVED_WORKERS,
MINUTES_PER_DAY,
MAX_NBR_OF_JOBS_PER_DAY_WORKER,
SCORING_FACTOR_STANDARD_TRAVEL_MINUTES,
DATA_START_DAY,
MIN_START_MINUTES_FROM_NOW,
TESTING_MODE,
)
from dispatch import config
from dispatch.service.planner_models import SingleJobDropCheckOutput
from dispatch.plugins.kandbox_planner.env.env_enums import (
EnvRunModeType,
JobPlanningStatus,
)
from dispatch.plugins.kandbox_planner.env.env_enums import *
from dispatch.plugins.kandbox_planner.env.env_models import (
WorkingTimeSlot,
LocationTuple,
JobLocation,
Worker,
Job,
BaseJob,
Appointment,
ActionDict,
Absence,
SingleJobCommitInternalOutput,
ActionEvaluationScore,
JobLocationBase
)
import dataclasses
import dispatch.config as kandbox_config
import dispatch.plugins.kandbox_planner.util.kandbox_date_util as date_util
# from dispatch.plugins.kandbox_planner.env.recommendation_server import RecommendationServer
from dispatch.plugins.kandbox_planner.data_adapter.kafka_adapter import KafkaAdapter
from dispatch.plugins.kandbox_planner.data_adapter.kplanner_db_adapter import KPlannerDBAdapter
from dispatch.plugins.kandbox_planner.env.cache_only_slot_server import CacheOnlySlotServer
from dispatch.plugins.kandbox_planner.env.working_time_slot import (
WorkingTimeSlotServer,
MissingSlotException,
)
from dispatch.config import PLANNER_SERVER_ROLE, MAX_MINUTES_PER_TECH
import socket
import logging
from redis.exceptions import LockError
import redis
import pandas as pd
import pprint
import math
import random
import numpy as np
import sys
from itertools import combinations
from typing import List
import copy
import time
from datetime import datetime, timedelta
import json
from gym import spaces
from ray.rllib.utils.spaces.repeated import Repeated
# This version works on top of JSON input and produces JSON output.
# Observation: each worker has multiple working days; each day is divided into slots, and each slot has a start and end time.
# import holidays
hostname = socket.gethostname()
log = logging.getLogger("rllib_env_job2slot")
# log.setLevel(logging.ERROR)
# log.setLevel(logging.WARN)
# log.setLevel(logging.DEBUG)
RULE_PLUGIN_DICT = {
"kandbox_rule_within_working_hour": KandboxRulePluginWithinWorkingHour,
"kandbox_rule_sufficient_travel_time": KandboxRulePluginSufficientTravelTime,
"kandbox_rule_requested_skills": KandboxRulePluginRequestedSkills,
"kandbox_rule_lunch_break": KandboxRulePluginLunchBreak,
#
}
def min_max_normalize(x, input_min, input_max):
y = (x - input_min) / (input_max - input_min)
return y
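# Worked numbers (illustrative only): min_max_normalize(150, 0, 300) == 0.5 and
# min_max_normalize(0, 0, 1440) == 0.0, i.e. x is rescaled linearly into [0, 1]
# over [input_min, input_max].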
class KPlannerJob2SlotEnv(KandboxEnvPlugin):
"""
Action should be compatible with GYM, then it should be either categorical or numerical. ActionDict can be converted..
New design of actions are:
[
vector of worker1_prob, worker2_prob (for N workers),
start_day_i, job_start_minutes, shared_worker_count (shared = 1..M)
]
# Benefit of using start_minutes is that I can use start minutes to find insert job i
# but it is bigger value space for algorithm to decide.
"""
title = "Kandbox Environment Job2Slot"
slug = "kprl_env_job2slot"
author = "Kandbox"
author_url = "https://github.com/qiyangduan"
description = "Env for GYM for RL."
version = "0.1.0"
metadata = {"render.modes": ["human"]}
NBR_FEATURE_PER_SLOT = 24
NBR_FEATURE_PER_UNPLANNED_JOB = 12
NBR_FEATURE_OVERVIEW = 8
def __del__(self):
try:
del self.kp_data_adapter
except:
log.warn("rl_env: Error when releasing kp_data_adapter.")
def __init__(self, config=None):
#
# each worker and job is a dataclass object, internally transformed
#
env_config = config
kp_data_adapter = None
reset_cache = False
self.workers_dict = {} # Dictionary of all workers, indexed by worker_code
self.workers = [] # List of Workers, pointing to same object in self.workers_dict
        self.jobs_dict = {}  # Dictionary of all jobs, indexed by job_code
self.jobs = [] # List of Jobs
self.locations_dict = {} # Dictionary of JobLocation, indexed by location_code
self.changed_job_codes_set = set()
self.trial_count = 0 # used only for GYM game online training process
self.trial_step_count = 0
self.run_mode = EnvRunModeType.PREDICT
self.unplanned_job_code_list = []
self.job_generation_count = 0
self.all_locations = []
self.current_job_code = None
self.current_job_i = 0
self.total_assigned_job_duration = 0
self.nbr_inplanning = 0
self.expected_travel_time_so_far = 0
self.total_travel_time = 0
self.total_travel_worker_job_count = 0
self.kafka_input_window_offset = 0
self.kafka_slot_changes_offset = 0
self.unplanned_job_codes = []
# self.config["daily_overtime_minutes"] = []
self.national_holidays = [] # TODO, change it to set() @duan
self.weekly_working_days_flag = []
# self.daily_weekday_sequence = [] # Replaced by self.env_encode_day_seq_to_weekday(day_seq)
# 2020-11-09 04:41:16 Duan, changed from [] to {}, since this will keep growing and should be purged regularly
self.daily_working_flag = {}
self.internal_obs_slot_list = []
self.config = {
# "data_start_day": "20200501",
# "nbr_of_days_planning_window": 2,
"planner_code": "rl_job2slot",
"allow_overtime": False,
"nbr_observed_slots": NBR_OF_OBSERVED_WORKERS,
"minutes_per_day": MINUTES_PER_DAY,
"max_nbr_of_jobs_per_day_worker": MAX_NBR_OF_JOBS_PER_DAY_WORKER,
# in minutes, 100 - travel / 1000 as the score
"scoring_factor_standard_travel_minutes": SCORING_FACTOR_STANDARD_TRAVEL_MINUTES,
# Team.flex_form_data will be copied here in the env top level.
# The env does NOT have flex_form_data anymore ... 2021-07-05 12:18:45
# "flex_form_data": {
"holiday_days": "20210325",
"weekly_rest_day": "0",
"travel_speed_km_hour": 40,
"travel_min_minutes": 10,
"planning_working_days": 1,
# },
}
if env_config is None:
log.error("error, no env_config is provided!")
raise ValueError("No env_config is provided!")
if "rules" not in env_config.keys():
rule_set = []
for rule_slug_config in env_config["rules_slug_config_list"]:
rule_plugin = RULE_PLUGIN_DICT[rule_slug_config[0]](config=rule_slug_config[1])
rule_set.append(rule_plugin)
self.rule_set = rule_set
else:
self.rule_set = env_config["rules"]
env_config.pop("rules", None)
for x in env_config.keys():
self.config[x] = env_config[x]
# TODO, 2021-06-14 07:47:06
self.config["nbr_of_days_planning_window"] = int(self.config["nbr_of_days_planning_window"])
self.PLANNING_WINDOW_LENGTH = (
self.config["minutes_per_day"] * self.config["nbr_of_days_planning_window"]
)
evaluate_RequestedSkills = KandboxRulePluginRequestedSkills()
self.rule_set_worker_check = [evaluate_RequestedSkills] # , evaluate_RetainTech
self.data_start_datetime = datetime.strptime(
DATA_START_DAY, kandbox_config.KANDBOX_DATE_FORMAT
)
if kp_data_adapter is None:
log.info("env is creating db_adapter by itslef, not injected.")
kp_data_adapter = KPlannerDBAdapter(team_id=self.config["team_id"])
else:
log.error("INTERNAL:kp_data_adapter is not None for env.__init__")
self.kp_data_adapter = kp_data_adapter
if REDIS_PASSWORD == "":
self.redis_conn = redis.Redis(host=REDIS_HOST, port=REDIS_PORT, password=None)
else:
self.redis_conn = redis.Redis(host=REDIS_HOST, port=REDIS_PORT, password=REDIS_PASSWORD)
        # This team_env_key uniquely identifies this Env by starting date and team key. Multiple instances on different servers share the same key.
self.team_env_key = "env_{}_{}".format(self.config["org_code"], self.config["team_id"])
        # env_inst_seq is numeric and unique inside the team_env_key; it uniquely identifies one instance on a shared Env
self.env_inst_seq = self.redis_conn.incr(self.get_env_inst_counter_redis_key())
        # env_inst_code is a code for an env instance which **globally** and uniquely identifies one instance on a shared Env
self.env_inst_code = "{}_{}_{}".format(self.team_env_key, self.env_inst_seq, hostname)
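        # Example with hypothetical values: org_code="acme", team_id=5, env_inst_seq=3 and
        # hostname="worker-01" give team_env_key="env_acme_5" and
        # env_inst_code="env_acme_5_3_worker-01".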
self.parse_team_flex_form_config()
SharingEfficiencyLogic = "1_1.0;2_1.6;3_2.1"
self._reset_horizon_start_minutes()
self.efficiency_dict = {}
for day_eff in SharingEfficiencyLogic.split(";"):
self.efficiency_dict[int(day_eff.split("_")[0])] = float(day_eff.split("_")[1])
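        # With SharingEfficiencyLogic = "1_1.0;2_1.6;3_2.1" above, this loop yields
        # self.efficiency_dict == {1: 1.0, 2: 1.6, 3: 2.1}.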
self.kp_data_adapter.reload_data_from_db()
if PLANNER_SERVER_ROLE == "trainer":
self.kafka_server = KafkaAdapter(
env=None, role=KafkaRoleType.FAKE, team_env_key=self.team_env_key
)
self.slot_server = CacheOnlySlotServer(env=self, redis_conn=self.redis_conn)
self.kafka_input_window_offset = 0
else:
self.kafka_server = KafkaAdapter(env=self, role=KafkaRoleType.ENV_ADAPTER)
self.slot_server = WorkingTimeSlotServer(env=self, redis_conn=self.redis_conn)
self.kafka_input_window_offset = (
self.kp_data_adapter.get_team_env_window_latest_offset()
)
self.config.update(self.kp_data_adapter.get_team_flex_form_data())
# self.recommendation_server = RecommendationServer(env=self, redis_conn=self.redis_conn)
if "data_end_day" in self.config.keys():
log.error("data_end_day is not supported from config!")
# I don't know why, self.kp_data_adapter loses this workers_dict_by_id after next call.
# 2021-05-06 21:34:01
self.workers_dict_by_id = copy.deepcopy(self.kp_data_adapter.workers_dict_by_id)
# self._reset_data()
self.replay_env()
log.info("replay_env is done, for training purpose")
# To be compatible with GYM Environment
self._set_spaces()
log.info("__init__ done, reloaded data from db, env is ready, please call reset to start")
# Temp fix
self.naive_opti_slot = NaivePlannerJobsInSlots(env=self)
self.env_bootup_datetime = datetime.now()
def reset(self, shuffle_jobs=False):
# print("env reset")
self._reset_data()
# self._reset_gym_appt_data()
self.slot_server.reset()
self.trial_count += 1
self.trial_step_count = 0
self.inplanning_job_count = sum(
[1 if j.planning_status != JobPlanningStatus.UNPLANNED else 0 for j in self.jobs]
)
self.current_job_i = 0
self.total_assigned_job_duration = 0
self.nbr_inplanning = 0
self.expected_travel_time_so_far = 0
self.total_travel_time = 0
self.total_travel_worker_job_count = 0
self.current_observed_worker_list = self._get_sorted_worker_code_list(self.current_job_i)
self.current_appt_i = 0
self._move_to_next_unplanned_job()
return self._get_observation()
def _reset_gym_appt_data(self):
self.appts = list(self.kp_data_adapter.appointment_db_dict.keys())
self.appt_scores = {}
# Used for get_reward, only for gym training
self.job_travel_time_sample_list_static = [
(
self._get_travel_time_2_job_indices(ji, (ji + 1) % len(self.jobs))
+ self._get_travel_time_2_job_indices(ji, (ji + 2) % len(self.jobs))
+ self._get_travel_time_2_job_indices(ji, (ji + 3) % len(self.jobs))
)
/ 3
for ji in range(0, len(self.jobs))
]
self.total_travel_time_static = sum(
[self._get_travel_time_2_job_indices(ji, ji + 1) for ji in range(0, len(self.jobs) - 1)]
)
def normalize(self, x, data_type: str):
if data_type == "duration":
return min_max_normalize(x, 0, 300)
elif data_type == "start_minutes":
return min_max_normalize(x, 0, 1440 * self.config["nbr_of_days_planning_window"])
elif data_type == "day_minutes_1440":
return min_max_normalize(x, 0, 1440 * 1)
elif data_type == "longitude":
return min_max_normalize(
x, self.config["geo_longitude_min"], self.config["geo_longitude_max"]
)
elif data_type == "latitude":
return min_max_normalize(
x, self.config["geo_latitude_min"], self.config["geo_latitude_max"]
)
elif data_type == "max_nbr_shared_workers":
return min_max_normalize(x, 0, 4)
elif data_type == "max_job_in_slot":
return min_max_normalize(x, 0, 10)
else:
# log.error(f"Unknow data_type = {data_type}")
raise ValueError(f"Unknow data_type = {data_type}")
return x
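    # Illustrative usage sketch (assumes min_max_normalize(x, lo, hi) == (x - lo) / (hi - lo),
    # which is not shown in this file):
    #     env.normalize(150, "duration")          -> 150 / 300  == 0.5
    #     env.normalize(720, "day_minutes_1440")  -> 720 / 1440 == 0.5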
def _reset_data(self):
"""It should be used only inside reset ()
Location (self.locations_dict) remain unchanged in this process.
"""
self.workers = self.load_transformed_workers()
self.workers_dict = {} # Dictionary of dict
self.permanent_pairs = set()
#
for ji, x in enumerate(self.workers):
self.workers_dict[x.worker_code] = x
x.worker_index = ji
for ji, x in self.workers_dict.items():
if x.belongs_to_pair is not None:
is_valid_pair = True
for paired_code in x.belongs_to_pair:
if paired_code not in self.workers_dict.keys():
log.error(
f"WORKER:{paired_code}:PAIR_TO:{x.belongs_to_pair}: the paired worker is not found."
)
is_valid_pair = False
if not is_valid_pair:
continue
self.permanent_pairs.add(x.belongs_to_pair)
for paired_code in x.belongs_to_pair:
self.workers_dict[paired_code].belongs_to_pair = x.belongs_to_pair
# This function also alters self.locations_dict
self.jobs = self.load_transformed_jobs()
if len(self.jobs) < 1:
log.debug("No Jobs on initialized Env.")
# This encoding includes appointments for now
        # TODO, separate appointments out? 2020-11-06 14:28:59
self.jobs_dict = {} # Dictionary of dict
for ji, job in enumerate(self.jobs):
if job.job_code in kandbox_config.DEBUGGING_JOB_CODE_SET:
log.debug(f"_reset_data: {job.job_code}")
job.requested_duration_minutes = int(job.requested_duration_minutes)
            # This resets the status and is for training.
# job.planning_status = "U"
self.jobs_dict[job.job_code] = job
job.job_index = ji
log.debug("env _reset_data finished")
# This is a key to map job_code to its sorted set of all recommendations (RecommendedAction)
def get_recommendation_job_key(self, job_code: str, action_day: int) -> str:
if action_day < 0:
return "{}/rec/{}".format(self.team_env_key, job_code)
else:
return "{}/rec/{}/{}".format(self.team_env_key, job_code, action_day)
# This is a key to map a slot to all recommendations (RecommendedAction) which are using this slot
def get_slot_2_recommendation_job_key(self, slot_code: str) -> str:
return "{}/slot_rec/{}".format(self.team_env_key, slot_code)
# This is a key to store latest offset on kafka ENV_WINDOW, which is already replayed.
    # This is different from database team.latest_env_kafka_offset, since that indicates the latest offset in the PG/MySQL DB.
def get_env_window_replay_till_offset_key(self) -> str:
return "{}/env/env_window_replay_till_offset".format(self.team_env_key)
def get_env_out_kafka_offset_key(self) -> str:
return "{}/env/env_out_till_offset".format(self.team_env_key)
def get_env_window_replay_till_offset(self) -> int:
return int(self.redis_conn.get(self.get_env_window_replay_till_offset_key()))
def set_env_window_replay_till_offset(self, offset: int):
return self.redis_conn.set(self.get_env_window_replay_till_offset_key(), offset)
    def get_env_inst_counter_redis_key(self) -> str:
        return "{}/env/counter".format(self.team_env_key)
    def get_env_config_redis_key(self) -> str:
        return "{}/env/config".format(self.team_env_key)
    def get_env_planning_day_redis_key(self) -> str:
        return "{}/env/planning_days".format(self.team_env_key)
    def get_env_replay_lock_redis_key(self) -> str:
        return "lock_env/{}".format(self.team_env_key)
def get_recommened_locked_slots_by_job_code_redis_key(self, job_code: str) -> str:
return "{}/env_lock/by_job/{}".format(self.team_env_key, job_code)
def get_redis_key_commited_slot_lock(self, slot_code: str) -> str:
return "{}/env_lock/after_commit_slot/{}".format(self.team_env_key, slot_code)
def get_recommened_locked_slot_redis_key(self, slot_code: str) -> str:
return "{}/env_lock/slot/{}".format(self.team_env_key, slot_code)
def get_env_planning_horizon_start_minutes(self) -> int:
# worker_id, start_minutes, end_minutes, slot_type, worker_id,start_minutes, end_minutes, slot_type
if self.horizon_start_minutes is not None:
return self.horizon_start_minutes
return int((datetime.now() - self.data_start_datetime).total_seconds() / 60)
def get_env_planning_horizon_end_minutes(self) -> int:
# worker_id, start_minutes, end_minutes, slot_type, worker_id,start_minutes, end_minutes, slot_type
return 1440 * (
int(self.config["nbr_of_days_planning_window"])
+ int(self.get_env_planning_horizon_start_minutes() / 1440)
)
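    # Worked example (sketch): with nbr_of_days_planning_window == 2 and a horizon start of
    # 3000 minutes (i.e. inside day_seq 2), the end is
    #     1440 * (2 + int(3000 / 1440)) == 1440 * 4 == 5760
    # so the planning window always ends on a whole-day boundary relative to data_start_datetime.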
def get_start_gps_for_worker_day(self, w: Worker, day_seq: int) -> LocationTuple:
return w.weekly_start_gps[self.env_encode_day_seq_to_weekday(day_seq)]
def get_end_gps_for_worker_day(self, w: Worker, day_seq: int) -> LocationTuple:
return w.weekly_end_gps[self.env_encode_day_seq_to_weekday(day_seq)]
def get_worker_available_overtime_minutes(self, worker_code: str, day_seq: int) -> int:
worker = self.workers_dict[worker_code]
available_overtime_list = []
for limit_days_key in worker.overtime_limits.keys():
if day_seq in limit_days_key:
total_overtime = sum([worker.used_overtime_minutes[dsq] for dsq in limit_days_key])
available_overtime_list.append(
worker.overtime_limits[limit_days_key] - total_overtime
)
if len(available_overtime_list) < 1:
return 0
available_overtime = min(available_overtime_list)
return available_overtime
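    # Illustrative sketch of the expected data shape (hypothetical values):
    #     worker.overtime_limits       == {(0, 1, 2, 3, 4): 120}   # at most 120 overtime minutes over day_seq 0..4
    #     worker.used_overtime_minutes == {0: 30, 1: 20, 2: 0, 3: 0, 4: 0}
    # get_worker_available_overtime_minutes(worker_code, day_seq=1) then returns 120 - 50 == 70.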
def get_worker_floating_slots(self, worker_code: str, query_start_minutes: int) -> List:
overlap_slots = self.slot_server.get_overlapped_slots(
worker_id=worker_code,
start_minutes=0,
end_minutes=MAX_MINUTES_PER_TECH,
)
floating_slots = []
for a_slot in overlap_slots: #
if self.slot_server.get_time_slot_key(a_slot) in kandbox_config.DEBUGGING_SLOT_CODE_SET:
log.debug("debug atomic_slot_delete_and_add_back")
if (a_slot.slot_type == TimeSlotType.FLOATING) or (
a_slot.start_overtime_minutes + a_slot.end_overtime_minutes > 0
):
if a_slot.start_minutes - query_start_minutes < 0:
continue
# if worker_code == "MY|D|3|CT29":
# print("pause")
floating_slots.append(
[
a_slot.start_minutes - query_start_minutes,
a_slot.end_minutes - query_start_minutes,
a_slot.start_overtime_minutes,
a_slot.end_overtime_minutes,
]
)
return floating_slots
def env_encode_day_seq_to_weekday(self, day_seq: int) -> int:
today_start_date = self.data_start_datetime + timedelta(days=day_seq)
return (today_start_date.weekday() + 1) % 7
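    # Example (sketch): if data_start_datetime falls on a Monday (weekday() == 0), then
    # day_seq 0 -> 1, day_seq 5 (Saturday) -> 6 and day_seq 6 (Sunday) -> 0, i.e. the
    # encoding runs 0 = Sunday .. 6 = Saturday, matching the ["sunday", ..., "saturday"]
    # order used in env_encode_single_worker.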
def env_encode_from_datetime_to_minutes(self, input_datetime: datetime) -> int:
# worker_id, start_minutes, end_minutes, slot_type, worker_id,start_minutes, end_minutes, slot_type
assigned_start_minutes = int(
(input_datetime - self.data_start_datetime).total_seconds() / 60
)
return assigned_start_minutes
def env_encode_from_datetime_to_day_with_validation(self, input_datetime: datetime) -> int:
# worker_id, start_minutes, end_minutes, slot_type, worker_id,start_minutes, end_minutes, slot_type
the_minutes = self.env_encode_from_datetime_to_minutes(input_datetime)
if (the_minutes < self.get_env_planning_horizon_start_minutes()) or (
the_minutes >= self.get_env_planning_horizon_end_minutes()
): # I removed 10 seconds from end of window.
raise ValueError("Out of Planning Window")
return int(the_minutes / self.config["minutes_per_day"])
def env_decode_from_minutes_to_datetime(self, input_minutes: int) -> datetime:
# worker_id, start_minutes, end_minutes, slot_type, worker_id,start_minutes, end_minutes, slot_type
assigned_start_datetime = self.data_start_datetime + timedelta(minutes=input_minutes)
return assigned_start_datetime
def get_encode_shared_duration_by_planning_efficiency_factor(
self, requested_duration_minutes: int, nbr_workers: int
) -> int:
factor = self.efficiency_dict[nbr_workers]
return int(requested_duration_minutes * factor / nbr_workers)
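    # Worked example (sketch, using the default efficiency_dict {1: 1.0, 2: 1.6, 3: 2.1}):
    # a 60-minute job shared by 2 workers becomes int(60 * 1.6 / 2) == 48 minutes per worker,
    # and shared by 3 workers becomes int(60 * 2.1 / 3) == 42 minutes per worker.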
def parse_team_flex_form_config(self):
# TODO team.country_code
# country_code = "CN" team.country_code
# self.national_holidays = holidays.UnitedStates()
# self.national_holidays = holidays.CountryHoliday('US')
if self.config["holiday_days"] is not None:
self.national_holidays = self.config["holiday_days"].split(";")
else:
self.national_holidays = []
if self.config["weekly_rest_day"] is not None:
__split = str(self.config["weekly_rest_day"]).split(";")
else:
__split = []
self.weekly_working_days_flag = [True for _ in range(7)]
for day_s in __split:
self.weekly_working_days_flag[int(day_s)] = False
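        # Illustrative sketch: with a hypothetical weekly_rest_day == "0;6", the flags become
        #     [False, True, True, True, True, True, False]
        # i.e. the days encoded as 0 and 6 (Sunday and Saturday under
        # env_encode_day_seq_to_weekday) are treated as non-working days.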
        # 1. Travel time formula is defined as GPS straight-line distance * 1.5 / (40 KM/Hour), minimum 10 minutes. Numbers like 1.5, 40 and 10 minutes come from config (travel_speed_km_hour, travel_min_minutes).
# self.config["travel_speed_km_hour"] = self.config["flex_form_data"]["travel_speed_km_hour"]
# self.config["travel_min_minutes"] = self.config["flex_form_data"]["travel_min_minutes"]
self.travel_router = HaversineTravelTime(
travel_speed=self.config["travel_speed_km_hour"],
min_minutes=self.config["travel_min_minutes"],
)
# self.travel_router = RoutingPyRedisTravelTime(
# travel_speed=self.config["travel_speed_km_hour"],
# min_minutes=self.config["travel_min_minutes"],
# redis_conn=self.redis_conn, travel_mode="car"
# )
def reload_env_from_redis(self):
# observation = self.reset(shuffle_jobs=False)
self.mutate_refresh_planning_window_from_redis()
self._reset_data()
if len(self.workers) < 1:
log.warn(
"No workers in env, skipped loading slots. Maybe this is the first initialization?"
)
return
for absence_code in self.kp_data_adapter.absence_db_dict:
# This one loads all data first. The env will decide to replay it or not depending on cache_server(redis)
job = self.env_encode_single_absence(self.kp_data_adapter.absence_db_dict[absence_code])
absence_job = self.mutate_create_worker_absence(job, auto_replay=False)
for job_code in self.kp_data_adapter.appointment_db_dict:
# This one loads all data first. The env will decide to replay it or not depending on cache_server(redis)
if job_code in kandbox_config.DEBUGGING_JOB_CODE_SET:
log.debug(f"reload_env_from_redis: {job_code}")
appt = self.env_encode_single_appointment(
self.kp_data_adapter.appointment_db_dict[job_code]
)
job = self.mutate_create_appointment(appt, auto_replay=False)
self.slot_server.reload_from_redis_server()
def mutate_replay_jobs_single_working_day(self, day_seq: int):
for job_code in self.jobs_dict.keys():
this_job = self.jobs_dict[job_code]
if this_job.planning_status == JobPlanningStatus.UNPLANNED:
continue
if (this_job.scheduled_start_minutes < day_seq * 1440) or (
this_job.scheduled_start_minutes > (day_seq + 1) * 1440
):
continue
if this_job.job_code in kandbox_config.DEBUGGING_JOB_CODE_SET:
log.debug("mutate_replay_jobs_single_working_day: DEBUGGING_JOB_CODE_SET ")
action_dict = self.gen_action_dict_from_job(job=this_job, is_forced_action=True)
info = self.mutate_update_job_by_action_dict(
a_dict=action_dict, post_changes_flag=False
)
if info.status_code != ActionScoringResultType.OK:
log.error(
f"Error in job replay , but it will continue. code= {job_code}, info: {info} "
)
def replay_env_to_redis(self):
# self.slot_server.reset()
for key in self.redis_conn.scan_iter(f"{self.team_env_key}/s/*"):
self.redis_conn.delete(key)
observation = self.reset(shuffle_jobs=False)
self.mutate_extend_planning_window()
if len(self.jobs) < 1:
log.error("No jobs in the env, len(self.jobs) < 1:")
return
# raise ValueError("No jobs in the env, len(self.jobs) < 1:")
# we assume sequence of I P U
sorted_jobs = sorted(
self.jobs, key=lambda job__i_: (job__i_.planning_status, job__i_.job_code)
)
self.jobs = sorted_jobs
for ji, job___ in enumerate(self.jobs):
self.jobs_dict[job___.job_code].job_index = ji
self.run_mode = EnvRunModeType.REPLAY
self.current_job_i = 0
# previous_observation = self._get_observation()
for step_job_code in self.jobs_dict.keys():
this_job = self.jobs_dict[step_job_code]
if this_job.job_code in kandbox_config.DEBUGGING_JOB_CODE_SET:
log.debug(
f"JOB:{this_job.job_code}:INDEX:{self.current_job_i}: replaying on pause: starting at {this_job.scheduled_start_minutes}."
)
if (
this_job.scheduled_start_minutes < self.get_env_planning_horizon_start_minutes()
) or (this_job.scheduled_start_minutes > self.get_env_planning_horizon_end_minutes()):
log.warn(
f"JOB:{this_job.job_code}:INDEX:{self.current_job_i}: is out of horizon, starting at {this_job.scheduled_start_minutes} and therefore is skipped."
)
self.current_job_i += 1
continue
if self.current_job_i >= len(self.jobs):
break
if this_job.planning_status not in [
JobPlanningStatus.IN_PLANNING,
JobPlanningStatus.PLANNED,
JobPlanningStatus.COMPLETED,
]:
log.info(
f"Replayed until first U-status, self.current_job_i = {self.current_job_i} "
)
break
# return observation, -1, False, None
# break
if this_job.scheduled_duration_minutes < 1:
log.error(
f"JOB:{this_job.job_code}:scheduled_duration_minutes = {this_job.scheduled_duration_minutes} < 1 , not allowed, skipped from replay"
)
self.current_job_i += 1
continue
# action = self.gen_action_from_one_job(self.jobs[self.current_job_i])
# action_dict = self.decode_action_into_dict(action)
# This should work for both appt and absence.
action_dict = self.gen_action_dict_from_job(job=this_job, is_forced_action=True)
info = self.mutate_update_job_by_action_dict(
a_dict=action_dict, post_changes_flag=False
)
if info.status_code != ActionScoringResultType.OK:
print(
f"Error game replay got error, but it will continue: job_code={this_job.job_code}, current_job_i: {self.current_job_i}, info: {info} "
)
# TODO
self.replay_worker_absence_to_redis()
self.replay_appointment_to_redis()
self.run_mode = EnvRunModeType.PREDICT
def replay_appointment_to_redis(self):
# local_loader.load_batch_local_appointment_TODO(env=self)
for appt_code, value in self.kp_data_adapter.appointment_db_dict.items():
appt = self.env_encode_single_appointment(value)
self.mutate_create_appointment(appt, auto_replay=True)
self.recommendation_server.search_for_recommendations(job_code=appt.job_code)
log.info(f"APPT:{appt.job_code}: SUCCESSFULLY replayed one appointment")
def replay_worker_absence_to_redis(self):
for absence_code in self.kp_data_adapter.absence_db_dict:
job = self.env_encode_single_absence(self.kp_data_adapter.absence_db_dict[absence_code])
absence_job = self.mutate_create_worker_absence(job, auto_replay=True)
def replay_env(self):
global_env_config = self.redis_conn.get(self.get_env_config_redis_key())
if global_env_config is None:
            # This initializes the env dataset in redis for this environment.
            # All subsequent replay_env calls should read from this.
log.info("Trying to get lock over the env key to applying messages")
with self.redis_conn.lock(
self.get_env_replay_lock_redis_key(), timeout=60 * 10
) as lock:
self.redis_conn.set(self.get_env_config_redis_key(), json.dumps(self.config))
self.set_env_window_replay_till_offset(0)
self.replay_env_to_redis()
# code you want executed only after the lock has been acquired
# lock.release()
log.info("Done with redis lock to applying messages")
else:
self.reload_env_from_redis()
# Catch up with recent messages on Kafka. Optional ? TODO
self.mutate_check_env_window_n_replay()
self._move_to_next_unplanned_job()
obs = None # self._get_observation()
reward = -1
done = False
if obs is None:
done = True
info = {"message": f"Replay done"}
return (obs, reward, done, info)
    # Is this timeout in seconds?
    # TODO, replace the two with-lock blocks by this unified locking
def mutate_check_env_window_n_replay(self):
# I need to lock it to protect kafka, generator not threadsafe.
# ValueError: generator already executing
# with lock:
if PLANNER_SERVER_ROLE == "trainer":
return
self.kafka_server.consume_env_messages()
def env_encode_single_worker(self, worker=None):
assert type(worker) == dict, "Wrong type, it must be dict"
if worker["worker_code"] in kandbox_config.DEBUGGING_JOB_CODE_SET:
log.debug("env_encode_single_worker debug MY|D|3|CT07")
# worker_week_day = (self.data_start_datetime.weekday() + 1) % 7
flex_form_data = worker["flex_form_data"].copy()
# db_session = self.cnx
# _workers = pd.read_sql(
# db_session.query(Location).filter(Location.location_code ==
# worker['location_id']).statement,
# db_session.bind,
# )
# worker_info = _workers.set_index("id").to_dict(orient="index")
# Here it makes planner respect initial travel time
if self.config["respect_initial_travel"] == True:
location_type = LocationType.JOB
else:
location_type = LocationType.HOME
home_location = JobLocationBase(
float(worker["geo_longitude"]),
float(worker["geo_latitude"]),
location_type,
worker["location_code"]
)
# working_minutes_array = flex_form_data["StartEndTime"].split(";")
weekly_start_gps = [home_location for _ in range(7)]
weekly_end_gps = [home_location for _ in range(7)]
_weekly_working_slots = [None for _ in range(7)]
week_day_i = 0
for week_day_code in [
"sunday",
"monday",
"tuesday",
"wednesday",
"thursday",
"friday",
"saturday",
]:
# (week_day_i * 1440) +
today_start_minutes = date_util.int_hhmm_to_minutes(
int(worker["business_hour"][week_day_code][0]["open"])
if worker["business_hour"][week_day_code][0]["isOpen"]
else 0
)
today_end_minutes = date_util.int_hhmm_to_minutes(
int(worker["business_hour"][week_day_code][0]["close"])
if worker["business_hour"][week_day_code][0]["isOpen"]
else 0
)
_weekly_working_slots[week_day_i] = (
today_start_minutes,
today_end_minutes,
)
week_day_i += 1
skills = {
"skills": set(flex_form_data["skills"]),
}
# loc = LocationTuple(loc_t[0], loc_t[1], LocationType.HOME, f"worker_loc_{worker['id']}",)
belongs_to_pair = None
if "assistant_to" in flex_form_data.keys():
if flex_form_data["assistant_to"]:
belongs_to_pair = (
flex_form_data["assistant_to"],
worker["code"],
)
if "is_assistant" not in flex_form_data.keys():
flex_form_data["is_assistant"] = False
overtime_minutes = 0
if "max_overtime_minutes" in flex_form_data.keys():
overtime_minutes = int(float(flex_form_data["max_overtime_minutes"]))
w_r = Worker(
worker_id=worker["id"],
worker_code=worker["code"],
flex_form_data=flex_form_data,
# "level= worker.flex_form_data["level"],
skills=skills,
# location=loc,
#
# TODO, @duan, only 1 slot per day? 2020-11-17 15:36:00
# 6 times for _ in range(len(k_worker['result']))
weekly_working_slots=_weekly_working_slots,
weekly_start_gps=weekly_start_gps,
weekly_end_gps=weekly_end_gps,
linear_working_slots=[], # 6 times for _ in range(len(k_worker['result']))
# linear_daily_start_gps=daily_start_gps,
# linear_daily_end_gps=daily_end_gps,
historical_job_location_distribution=worker["job_history_feature_data"],
worker_index=worker["id"],
belongs_to_pair=belongs_to_pair,
is_active=worker["is_active"],
daily_max_overtime_minutes=overtime_minutes,
weekly_max_overtime_minutes=60 * 10,
overtime_limits={},
used_overtime_minutes={},
)
return w_r
def load_transformed_workers(self):
# start_date = self.data_start_datetime
w = []
# w_dict = {}
#
# index = 0
for wk, worker in self.kp_data_adapter.workers_db_dict.items():
active_int = 1 if worker["is_active"] else 0
if active_int != 1:
print(
"worker {} is not active, maybe it shoud be skipped from loading? ",
worker.id,
)
# TODO
            # skipped for now, though maybe there are still jobs assigned to this worker?
continue
# worker_dict = worker.__dict__
# if type(worker_dict["location"]) != dict:
# worker_dict["location"] = worker.location.__dict__
w_r = self.env_encode_single_worker(worker)
w.append(w_r)
# w_dict[w_r.worker_code] = index
# index += 1
sorted_workers = sorted(w, key=lambda x: x.worker_code)
return sorted_workers
def env_encode_single_appointment(self, appointment=None):
assigned_start_minutes = self.env_encode_from_datetime_to_minutes(
appointment["scheduled_start_datetime"]
)
job_form = appointment["flex_form_data"]
try:
abs_location = self.locations_dict[appointment["location_code"]]
        except KeyError:
included_job_code = appointment["included_job_codes"][0]
if included_job_code not in self.jobs_dict.keys():
log.error(
f"{appointment['appointment_code']}: I failed to find the job by code= {included_job_code}, and then failed to find the Location. Skipped this appointment"
)
#
return
# abs_location = None # TODO?
else:
abs_location = self.jobs_dict[included_job_code].location
requested_skills = {}
scheduled_worker_codes = []
for jc in job_form["included_job_codes"]:
if jc not in self.jobs_dict.keys():
log.error(
f"missing included_job_codes, appointment_code= {appointment['appointment_code']}, job_code={jc} "
)
return None
if self.jobs_dict[jc].planning_status != JobPlanningStatus.UNPLANNED:
# Keep last one for now
scheduled_worker_codes = self.jobs_dict[jc].scheduled_worker_codes
if len(scheduled_worker_codes) < 1:
#
log.debug(
f"APPT:{appointment['appointment_code']}:Only u status in appt? I will take requested. "
)
scheduled_worker_codes.append(self.jobs_dict[jc].requested_primary_worker_code)
requested_day_minutes = int(assigned_start_minutes / 1440) * 1440
appt_job = Job(
job_code=appointment["appointment_code"],
job_id=-1,
job_index=appointment["id"],
job_type=JobType.APPOINTMENT,
job_schedule_type=JobScheduleType.FIXED_SCHEDULE, # job_seq: 89, job['job_code']
planning_status=JobPlanningStatus.PLANNED,
location=abs_location,
requested_skills=requested_skills,
#
scheduled_worker_codes=scheduled_worker_codes,
scheduled_start_minutes=assigned_start_minutes,
scheduled_duration_minutes=appointment["scheduled_duration_minutes"],
#
requested_start_min_minutes=(job_form["ToleranceLower"] * 1440) + requested_day_minutes,
requested_start_max_minutes=(job_form["ToleranceUpper"] * 1440) + requested_day_minutes,
requested_start_minutes=requested_day_minutes,
requested_time_slots=[], # job["requested_time_slots"]
#
            # Assumption: the requested primary worker is taken as the first scheduled worker.
            requested_primary_worker_code=scheduled_worker_codes[0] if scheduled_worker_codes else None,
requested_duration_minutes=appointment["scheduled_duration_minutes"],
flex_form_data=job_form,
included_job_codes=job_form["included_job_codes"],
new_job_codes=[],
searching_worker_candidates=[],
appointment_status=job_form["appointment_status"],
#
is_active=True,
is_auto_planning=False,
)
self.set_searching_worker_candidates(appt_job)
return appt_job
def load_transformed_appointments(self):
a = []
a_dict = {}
#
index = 0
for _, appointment in self.kp_data_adapter.appointments_db_dict.items():
            a_r = self.env_encode_single_appointment(appointment)
            if a_r is None:
                continue
            a.append(a_r)
            a_dict[a_r.appointment_code] = index
            index += 1
return a
def _convert_availability_slot(self, x_str, prev_available_slots):
orig_slot = x_str.split(";")
the_result = [] # copy.copy(prev_available_slots)
for slot in orig_slot:
if (slot is None) or len(slot) < 1:
continue
start_time = datetime.strptime(
slot.split("_")[0], kandbox_config.KANDBOX_DATETIME_FORMAT_GTS_SLOT
)
end_time = datetime.strptime(
slot.split("_")[1], kandbox_config.KANDBOX_DATETIME_FORMAT_GTS_SLOT
)
start_minutes = self.env_encode_from_datetime_to_minutes(start_time)
end_minutes = self.env_encode_from_datetime_to_minutes(end_time)
the_result.append((start_minutes, end_minutes))
# if len(prev_available_slots) < 1:
# TODO, i take new one simply as Last Known. Is it Right? 2021-02-20 10:01:44
return the_result
# prev_tree= IntervalTree.from_tuples(prev_available_slots)
# curr_tree = IntervalTree.from_tuples(the_result)
# new_result = [(s[0], s[1]) for s in list(prev_tree)]
# return new_result
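        # Illustrative sketch (the slot format is assumed from KANDBOX_DATETIME_FORMAT_GTS_SLOT;
        # the concrete pattern below, e.g. "%Y%m%d%H%M", is hypothetical):
        #     x_str = "202101040900_202101041200;202101050900_202101051700"
        # would parse into [(540, 720), (1980, 2460)] if data_start_datetime is 2021-01-04 00:00,
        # i.e. (start_minutes, end_minutes) tuples relative to data_start_datetime.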
def _convert_lists_to_slots(self, input_list: list, job):
orig_loc = self.locations_dict[job["location_code"]]
CONSTANT_JOB_LOCATION = JobLocationBase(
geo_longitude=orig_loc.geo_longitude,
geo_latitude=orig_loc.geo_latitude,
location_type=LocationType.HOME,
location_code=orig_loc.location_code,
# historical_serving_worker_distribution=None,
# avg_actual_start_minutes=0,
# avg_days_delay=0,
# stddev_days_delay=0,
# available_slots=tuple(), # list of [start,end] in linear scale
# rejected_slots=job["rejected_slots"],
)
job_available_slots = []
for slot in sorted(list(input_list), key=lambda x: x[0]):
wts = WorkingTimeSlot(
slot_type=TimeSlotType.FLOATING,
start_minutes=slot[0],
end_minutes=slot[1],
prev_slot_code=None,
next_slot_code=None,
start_location=CONSTANT_JOB_LOCATION,
end_location=CONSTANT_JOB_LOCATION,
worker_id="_",
available_free_minutes=0,
assigned_job_codes=[],
)
job_available_slots.append(wts)
return job_available_slots
def env_encode_single_job(self, job: dict) -> Job: # from db dict object to dataclass object
if job is None:
log.error("env_encode_single_job: job is None")
return
if job["code"] in kandbox_config.DEBUGGING_JOB_CODE_SET:
log.debug(f"debug {kandbox_config.DEBUGGING_JOB_CODE_SET}")
flex_form_data = job["flex_form_data"]
if pd.isnull(job["requested_start_datetime"]):
log.error("requested_start_datetime is null, not allowed")
return None
if pd.isnull(job["requested_duration_minutes"]):
log.error("requested_duration_minutes is null, not allowed")
return None
assigned_start_minutes = 0
if job["planning_status"] in (JobPlanningStatus.PLANNED, JobPlanningStatus.IN_PLANNING):
try:
assigned_start_minutes = int(
(job["scheduled_start_datetime"] - self.data_start_datetime).total_seconds()
/ 60
)
except ValueError as ve:
log.error(f"Data error: failed to convert scheduled_start_datetime, job = {job}")
return None
requested_start_minutes = int(
(job["requested_start_datetime"] - self.data_start_datetime).total_seconds() / 60
)
min_minutes = requested_start_minutes
max_minutes = requested_start_minutes
if "tolerance_start_minutes" in flex_form_data.keys():
min_minutes = requested_start_minutes + (flex_form_data["tolerance_start_minutes"])
if "tolerance_end_minutes" in flex_form_data.keys():
max_minutes = requested_start_minutes + (flex_form_data["tolerance_end_minutes"])
historical_serving_worker_distribution = None
if "job_history_feature_data" in job.keys():
if "historical_serving_worker_distribution" in job["job_history_feature_data"].keys():
historical_serving_worker_distribution = job["job_history_feature_data"][
"historical_serving_worker_distribution"
]
if "requested_primary_worker_code" not in job.keys():
log.debug("No request primary code")
if historical_serving_worker_distribution is None:
historical_serving_worker_distribution = {job["requested_primary_worker_code"]: 1}
if job["location_code"] not in self.locations_dict.keys():
job_location = JobLocation(
geo_longitude=job["geo_longitude"],
geo_latitude=job["geo_latitude"],
location_type=LocationType.JOB,
location_code=job["location_code"],
historical_serving_worker_distribution=historical_serving_worker_distribution,
avg_actual_start_minutes=0,
avg_days_delay=0,
stddev_days_delay=0,
# rejected_slots=job["rejected_slots"],
)
self.locations_dict[job_location.location_code] = job_location
else:
job_location = self.locations_dict[job["location_code"]]
# if job_location.geo_longitude < 0:
# log.warn(
# f"Job {job['code']} has invalid location : {job_location.location_code} with geo_longitude = {job_location.geo_longitude}"
# )
# prev_available_slots = [
# (s.start_minutes, s.end_minutes) for s in job_location.available_slots
# ]
net_avail_slots = [
[
self.get_env_planning_horizon_start_minutes(),
self.get_env_planning_horizon_end_minutes(),
]
]
# TODO duan, change job_location from tuple to dataclass.
# job_location.available_slots.clear()
job_available_slots = self._convert_lists_to_slots(net_avail_slots, job)
# job_location.available_slots = sorted(list(net_avail_slots), key=lambda x: x[0])
if "requested_skills" in flex_form_data.keys():
requested_skills = {
"skills": set(flex_form_data["requested_skills"]),
}
else:
requested_skills = set()
log.error(f"job({job['code']}) has no requested_skills")
the_final_status_type = job["planning_status"]
is_appointment_confirmed = False
# if job_schedule_type == JobScheduleType.FIXED_SCHEDULE:
# is_appointment_confirmed = True
primary_workers = []
if not pd.isnull(job["scheduled_primary_worker_id"]):
primary_workers.append(
self.workers_dict_by_id[job["scheduled_primary_worker_id"]]["code"]
)
final_job = Job(
job_code=job["code"],
job_id=job["id"],
job_index=0,
job_type=job["job_type"], # JobType.JOB,
job_schedule_type=JobScheduleType.NORMAL,
planning_status=the_final_status_type,
location=job_location,
requested_skills=requested_skills, # TODO
#
scheduled_worker_codes=primary_workers + job["scheduled_secondary_worker_codes"],
scheduled_start_minutes=assigned_start_minutes,
scheduled_duration_minutes=job["scheduled_duration_minutes"],
#
requested_start_min_minutes=min_minutes,
requested_start_max_minutes=max_minutes,
requested_start_minutes=requested_start_minutes,
requested_time_slots=[[0, 0]],
requested_primary_worker_code=job["requested_primary_worker_code"],
requested_duration_minutes=job["requested_duration_minutes"],
#
available_slots=job_available_slots,
flex_form_data=flex_form_data,
searching_worker_candidates=[],
included_job_codes=flex_form_data["included_job_codes"],
new_job_codes=[],
appointment_status=AppointmentStatus.NOT_APPOINTMENT,
#
is_active=job["is_active"],
is_appointment_confirmed=is_appointment_confirmed,
is_auto_planning=job["auto_planning"],
)
self.set_searching_worker_candidates(final_job)
# TODO, @xingtong.
return final_job
def load_transformed_jobs(self):
# TODO , need data_window
# This function also alters self.locations_dict
#
w = []
for k, row_dict in self.kp_data_adapter.jobs_db_dict.items():
if row_dict["code"] in kandbox_config.DEBUGGING_JOB_CODE_SET:
log.debug(
f"load_transformed_jobs Debug {str(kandbox_config.DEBUGGING_JOB_CODE_SET)} "
)
if pd.isnull(row_dict["requested_primary_worker_id"]):
continue
job = self.env_encode_single_job(job=row_dict)
if job is not None:
w.append(job)
return w
def env_encode_single_absence(self, job: dict) -> Absence:
assigned_start_minutes = int(
(job["scheduled_start_datetime"] - self.data_start_datetime).total_seconds() / 60
)
scheduled_primary_worker_code = job["code"]
job_code = job["absence_code"]
abs_location = LocationTuple(
geo_longitude=job["geo_longitude"],
geo_latitude=job["geo_latitude"],
location_type=LocationType.HOME,
location_code=f"evt_loc_{job_code}",
)
# loc = LocationTuple(loc_t[0], loc_t[1], LocationType.HOME, f"worker_loc_{worker['id']}",)
return Absence(
job_code=job_code,
job_id=job["id"],
job_index=0,
job_type=JobType.ABSENCE,
job_schedule_type=JobScheduleType.FIXED_SCHEDULE, # job_seq: 89, job['job_code'] "FS"
planning_status=JobPlanningStatus.PLANNED,
location=abs_location,
requested_skills={}, # TODO
#
scheduled_worker_codes=[scheduled_primary_worker_code],
scheduled_start_minutes=assigned_start_minutes,
scheduled_duration_minutes=job["scheduled_duration_minutes"],
#
requested_start_min_minutes=0,
requested_start_max_minutes=0,
requested_start_minutes=0,
requested_time_slots=[],
#
requested_primary_worker_code=scheduled_primary_worker_code,
# requested_start_datetime=assigned_start_minutes, # No day, no minutes
requested_duration_minutes=job["scheduled_duration_minutes"], # TODO remove plural?)
searching_worker_candidates=[],
included_job_codes=[],
new_job_codes=[],
available_slots=[],
appointment_status=AppointmentStatus.NOT_APPOINTMENT,
#
is_active=True,
is_replayed=False,
is_auto_planning=False,
flex_form_data=job["flex_form_data"],
)
def set_current_job_i(self, job_code):
self.current_job_i = self.jobs_dict[job_code].job_index
def decode_single_job_to_solution(self, job: BaseJob): # _for_training
all_worker_ids = [
self.workers_dict[wcode].worker_id for wcode in job.scheduled_worker_codes
]
new_job = {
"id": self.jobs_dict[job.job_code].job_id,
"code": job.job_code,
"planning_status": job.planning_status,
"scheduled_start_datetime": None,
"scheduled_duration_minutes": job.scheduled_duration_minutes, # TODO remove plural
"scheduled_primary_worker_id": all_worker_ids[0],
"scheduled_secondary_worker_ids": all_worker_ids[1:],
}
if job.planning_status != JobPlanningStatus.UNPLANNED:
new_job["scheduled_start_datetime"] = self.env_decode_from_minutes_to_datetime(
job.scheduled_start_minutes
)
return new_job
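    # Example return value (sketch with hypothetical values):
    #     {
    #         "id": 101,
    #         "code": "JOB001",
    #         "planning_status": JobPlanningStatus.IN_PLANNING,
    #         "scheduled_start_datetime": datetime(2021, 1, 5, 9, 0),
    #         "scheduled_duration_minutes": 60,
    #         "scheduled_primary_worker_id": 7,
    #         "scheduled_secondary_worker_ids": [8],
    #     }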
def get_solution_dataset(self, query_start_datetime, query_end_datetime):
workers_dimensions = [
"index",
"skills",
"max_conflict_level",
"worker_code",
"geo_longitude",
"geo_latitude",
"weekly_working_minutes",
"selected_flag",
"worker_code",
]
jobs_dimensions = [
"scheduled_worker_index",
"scheduled_start_datetime", # _js
"scheduled_end_datetime", # _js datetime in javascript MilliSecond.
"job_code",
"job_type",
"scheduled_travel_minutes_before",
"scheduled_travel_prev_code",
"conflict_level",
"scheduled_primary_worker_id",
"geo_longitude",
"geo_latitude",
"changed_flag",
"prev_geo_longitude",
"prev_geo_latitude",
"prev_location_type",
]
planned_jobs_list = []
all_jobs_in_env_list = []
print(
datetime.now(),
"worker_job_dataset_json: Started transforming to dataset for web json... ",
)
workers_start_end_dict = {}
workers_dict = {}
workers_list = []
# linear_free_slot = [[510, 610], [720, 840], [1440, 1640], [1840, 1940]]
query_start_minutes = self.env_encode_from_datetime_to_minutes(query_start_datetime)
for index, w in enumerate(self.workers):
linear_free_slot = self.get_worker_floating_slots(
worker_code=w.worker_code, query_start_minutes=query_start_minutes
)
report_string_dict = copy.deepcopy(w.skills)
report_string_dict["pair"] = w.belongs_to_pair
worker1 = {
"index": index,
"skills": str(report_string_dict),
# list(w.skills["product_code"]), # w["skills"], pprint.pformat(
"max_conflict_level": 1,
"worker_code": w.worker_code,
"geo_longitude": 0, # w.location[0],
"geo_latitude": 0, # w.location[1],
"weekly_working_minutes": linear_free_slot, # w.linear_working_slots,
"selected_flag": 0 if w.belongs_to_pair is None else 1,
"worker_code": w.worker_code,
}
workers_dict[w.worker_code] = worker1
workers_list.append(worker1)
new_df = pd.DataFrame(workers_list)
# TODO , async warm up env
df_count = new_df.count().max()
if (pd.isnull(df_count)) or (df_count < 1):
return {
"workers_dimensions": workers_dimensions,
"workers_data": [],
"jobs_dimensions": jobs_dimensions,
"planned_jobs_data": [],
"all_jobs_in_env": [], # all_jobs_in_env
"start_time": query_start_datetime.isoformat(),
"end_time": query_end_datetime.isoformat(),
# TODO: worker_time [[day_start, day_end], [day_start, day_end]], office_hours{[day_start, day_end, day_start, day_end, ...] }.
}
#
# new_df.columns
workers_data = new_df[workers_dimensions].values.tolist()
jobs_json = self.get_solution_json(query_start_datetime, query_end_datetime)
for index, j in enumerate(jobs_json):
if j["job_code"] in kandbox_config.DEBUGGING_JOB_CODE_SET:
print(f"pause for debugging {kandbox_config.DEBUGGING_JOB_CODE_SET }")
if j["planning_status"] in [JobPlanningStatus.UNPLANNED]:
j["scheduled_primary_worker_id"] = workers_data[0][3]
if (j["scheduled_duration_minutes"] is None) or (
np.isnan(j["scheduled_duration_minutes"])
):
j["scheduled_duration_minutes"] = j["requested_duration_minutes"]
j["scheduled_duration_minutes"] = 1
j["scheduled_start_minutes"] = 0
else:
if j["scheduled_primary_worker_id"] not in workers_dict.keys():
# continue
print(
"Internal error, j['scheduled_primary_worker_id'] not in workers_dict.keys(): "
)
# if workers_dict[curr_worker]['nbr_conflicts'] < conflict_level:
# workers_dict[curr_worker]['nbr_conflicts'] = conflict_level
scheduled_start_datetime = self.data_start_datetime + timedelta(
minutes=j["scheduled_start_minutes"]
)
scheduled_end_datetime = scheduled_start_datetime + timedelta(
minutes=j["scheduled_duration_minutes"]
)
scheduled_start_datetime_js = int(
time.mktime(scheduled_start_datetime.timetuple()) * 1000
)
if "requested_start_datetime" not in j.keys():
j["requested_start_datetime"] = self.env_decode_from_minutes_to_datetime(
(j["requested_start_min_minutes"] + j["requested_start_max_minutes"]) / 2
)
# j["requested_duration_minutes"] =
j["requested_primary_worker_id"] = j["requested_primary_worker_code"]
scheduled_end_datetime_js = int(time.mktime(scheduled_end_datetime.timetuple()) * 1000)
# TODO 2020-10-17 21:59:03
if pd.isnull(j["scheduled_primary_worker_id"]):
print("debug: j - scheduled_primary_worker_id is nan, please debug later.")
continue
if pd.isnull(j["scheduled_primary_worker_id"]):
print("debug: j - scheduled_primary_worker_id is nan, please debug later.")
continue
if "job_id" not in j.keys():
print("debug")
j["requested_primary_worker_id"] = j["scheduled_primary_worker_id"]
j["changed_flag"] = False
pass
plot_job_type = (
f"{j['job_type']}"
if j["job_type"] in {JobType.APPOINTMENT, JobType.ABSENCE}
else f"{j['planning_status']}" # f"{j['job_schedule_type']}"
)
# print(f"{j['job_code']} = {plot_job_type}")
if j["planning_status"] in (JobPlanningStatus.UNPLANNED):
j["scheduled_worker_codes"] = [j["requested_primary_worker_code"]]
new_job = {
"job_code": j["job_code"],
"job_type": "{}_1_2_3_4_{}".format(plot_job_type, j["scheduled_share_status"]),
"requested_primary_worker_id": j["requested_primary_worker_code"],
"requested_start_datetime": j[
"requested_start_datetime"
].isoformat(), # requested_start_datetime_js,
"requested_duration_minutes": j["requested_duration_minutes"],
"scheduled_primary_worker_id": self.workers_dict[
j["scheduled_worker_codes"][0]
].worker_id,
"scheduled_worker_codes": j["scheduled_worker_codes"],
"scheduled_worker_index": workers_dict[j["scheduled_primary_worker_id"]]["index"],
# https://stackoverflow.com/questions/455580/json-datetime-between-python-and-javascript
"scheduled_start_datetime_js": scheduled_start_datetime_js, # scheduled_start_datetime.isoformat(),
"scheduled_end_datetime_js": scheduled_end_datetime_js, # scheduled_end_datetime.isoformat(),
"scheduled_start_datetime": scheduled_start_datetime.isoformat(),
"scheduled_end_datetime": scheduled_end_datetime.isoformat(),
# "scheduled_start_minutes": j["scheduled_start_minutes"],
"scheduled_duration_minutes": j["scheduled_duration_minutes"],
"planning_status": j["planning_status"],
"conflict_level": 0, # j["conflict_level"],
"scheduled_travel_minutes_after": 0, # j["scheduled_travel_minutes_after"],
"scheduled_travel_minutes_before": j["scheduled_travel_minutes_before"],
"scheduled_travel_prev_code": j["scheduled_travel_prev_code"],
"geo_longitude": j["location"].geo_longitude,
"geo_latitude": j["location"].geo_latitude,
"changed_flag": j["is_changed"],
"prev_geo_longitude": 0,
"prev_geo_latitude": 0,
"prev_location_type": "J",
}
if j["scheduled_share_status"] != "S": # add only those P, N.
all_jobs_in_env_list.append(new_job)
else:
new_job["job_code"] = "{}_S_{}".format(
j["job_code"], j["scheduled_primary_worker_id"]
)
if j["planning_status"] not in [JobPlanningStatus.UNPLANNED]: # 'I', 'P', 'C'
new_job["prev_geo_longitude"] = j["prev_geo_longitude"]
new_job["prev_geo_latitude"] = j["prev_geo_latitude"]
new_job["prev_location_type"] = j["prev_location_type"]
planned_jobs_list.append(new_job)
planned_jobs_data = []
all_jobs_in_env = all_jobs_in_env_list
# is_NaN = all_jobs_in_env_df.isnull()
# row_has_NaN = is_NaN.any(axis=1)
# rows_with_NaN = all_jobs_in_env_df[row_has_NaN]
if len(planned_jobs_list) > 0:
new_planned_job_df = pd.DataFrame(planned_jobs_list).fillna(0)
planned_jobs_data = new_planned_job_df[jobs_dimensions].values.tolist()
all_jobs_in_env_df = pd.DataFrame(all_jobs_in_env).fillna(0)
# /Users/qiyangduan/temp/nlns_env/lib/python3.7/site-packages/pandas/core/frame.py:1490: FutureWarning: Using short name for 'orient' is deprecated. Only the options: ('dict', list, 'series', 'split', 'records', 'index')
# all_jobs_in_env = all_jobs_in_env_df.to_dict(orient="record")
all_jobs_in_env = all_jobs_in_env_df.to_dict("record")
print(datetime.now(), "worker_job_dataset_json: Finished.")
# https://echarts.apache.org/en/option.html#series-custom.dimensions
# echarts might infer type as number.
# jobs_dimensions[3]= {"name": 'job_code', "type": 'ordinal'}
return {
"workers_dimensions": workers_dimensions,
"workers_data": workers_data,
"jobs_dimensions": jobs_dimensions,
"planned_jobs_data": planned_jobs_data,
"all_jobs_in_env": all_jobs_in_env, # all_jobs_in_env,
"start_time": query_start_datetime.isoformat(),
"end_time": query_end_datetime.isoformat(),
# TODO: worker_time [[day_start, day_end], [day_start, day_end]], office_hours{[day_start, day_end, day_start, day_end, ...] }.
}
def get_solution_json(self, query_start_datetime, query_end_datetime):
query_start_minutes = self.env_encode_from_datetime_to_minutes(query_start_datetime)
query_end_minutes = self.env_encode_from_datetime_to_minutes(query_end_datetime)
job_solution = []
inplanning_job_index_set = set()
shared_prev_jobs = {}
sorted_slot_codes = sorted(list(self.slot_server.time_slot_dict.keys()))
slot_dict = self.slot_server.time_slot_dict
# for worker_code in self.workers_dict.keys():
pre_job_code = "__HOME"
prev_slot = None
        # In this loop, it first collects only the in-planning/planned (I/P) jobs, which are available from working time slots
current_worker_id = "NOT_EXIST"
for slot_code in sorted_slot_codes:
# for work_time_i in range(len( self.workers_dict[worker_code]['assigned_jobs'] ) ): # nth assigned job time unit.
if slot_code in kandbox_config.DEBUGGING_SLOT_CODE_SET:
log.debug("debug atomic_slot_delete_and_add_back")
# for assigned_job in self.workers_dict[worker_code]["assigned_jobs"]:
# for day_i in range(self.config["nbr_of_days_planning_window"]):
# a_slot = slot_dict[slot_code]
# I Loop through all
# if a_slot.slot_type in (TimeSlotType.JOB_FIXED, TimeSlotType.FLOATING):
try:
a_slot = self.slot_server.get_slot(self.redis_conn.pipeline(), slot_code)
the_assigned_codes = sorted(
a_slot.assigned_job_codes,
key=lambda jc: self.jobs_dict[jc].scheduled_start_minutes,
)
except KeyError as ke:
log.error(
f"unknown worker code, or unknown job codes to the env. slot_code = {slot_code}, error = {str(ke)}"
)
print(ke)
continue
if a_slot.worker_id != current_worker_id:
current_start_loc = a_slot.start_location
elif (a_slot.slot_type == TimeSlotType.FLOATING) or (a_slot.prev_slot_code is None):
current_start_loc = a_slot.start_location
current_worker_id = a_slot.worker_id
for assigned_index, assigned_job_code in enumerate(the_assigned_codes):
assigned_job = self.jobs_dict[assigned_job_code]
if assigned_job.job_code in kandbox_config.DEBUGGING_JOB_CODE_SET:
print(f"pause for debugging {kandbox_config.DEBUGGING_JOB_CODE_SET }")
if (
assigned_job.scheduled_start_minutes + assigned_job.scheduled_duration_minutes
< query_start_minutes
) | (assigned_job.scheduled_start_minutes > query_end_minutes):
log.warn(
f"Should be error.In slot but Solution skipped job_code={assigned_job.job_code}, minutes = {query_start_minutes} not in {query_start_minutes} -- {query_end_minutes}"
)
continue
if len(assigned_job.scheduled_worker_codes) < 1:
log.warn(
"Internal BUG, why a job from assigned_jobs has no assigned_workers, but?"
)
continue
prev_travel_minutes = self.travel_router.get_travel_minutes_2locations(
current_start_loc,
assigned_job.location,
)
# Only single worker jobs are collected in this loop.
assigned_job_dict = dataclasses.asdict(assigned_job) # .copy().as_dict()
assigned_job_dict[
"requested_start_datetime"
] = self.env_decode_from_minutes_to_datetime(
assigned_job.requested_start_min_minutes
)
if len(assigned_job.scheduled_worker_codes) == 1:
# changed flag is moved to step()
assigned_job_dict["scheduled_share_status"] = "N"
elif len(assigned_job.scheduled_worker_codes) > 1:
try:
if assigned_job.scheduled_worker_codes.index(a_slot.worker_id) == 0:
assigned_job_dict["scheduled_share_status"] = "P"
else:
assigned_job_dict["scheduled_share_status"] = "S"
except ValueError as ve:
log.error(f"Failed to find worker status, job = {assigned_job_dict}")
print(ve)
continue
else:
log.error(f"Lost assigned worker ID, job = {assigned_job_dict}")
continue
# assigned_job["requested_start_day"] = assigned_job["requested_start_day"]
assigned_job_dict["scheduled_primary_worker_id"] = a_slot.worker_id
assigned_job_dict["scheduled_travel_prev_code"] = pre_job_code
assigned_job_dict["scheduled_travel_minutes_before"] = prev_travel_minutes
assigned_job_dict["prev_geo_longitude"] = current_start_loc[0] # .geo_longitude
assigned_job_dict["prev_geo_latitude"] = current_start_loc[1] # .geo_latitude
assigned_job_dict["prev_location_type"] = current_start_loc[2] # .geo_latitude
current_start_loc = assigned_job.location
if a_slot.next_slot_code is not None: # End of original daily working hour
pre_job_code = assigned_job_code
else:
pre_job_code = "__HOME"
job_solution.append(assigned_job_dict)
inplanning_job_index_set.add(assigned_job_code)
if len(assigned_job_dict["scheduled_worker_codes"]) > 1:
for worker_code in assigned_job_dict["scheduled_worker_codes"]:
shared_prev_jobs[(worker_code, assigned_job_code)] = pre_job_code
pre_job_code = assigned_job.job_code
        # In this loop, it collects only the unplanned (U) jobs, which are not available from working time slots
for job_code, new_job in self.jobs_dict.items():
if new_job.job_code in kandbox_config.DEBUGGING_JOB_CODE_SET:
print(f"pause for debugging {kandbox_config.DEBUGGING_JOB_CODE_SET }")
if job_code in inplanning_job_index_set:
continue
if not new_job.is_active: # events and appt can not be U anyway
log.warn(f"job={ job_code} is skipped from solution because is_active == False")
continue
if new_job.planning_status != JobPlanningStatus.UNPLANNED:
# Not available in slot, a problem to investigate
if (
new_job.scheduled_start_minutes + new_job.scheduled_duration_minutes
< query_start_minutes
) | (new_job.scheduled_start_minutes > query_end_minutes):
# log.debug(
# f"solution skipped job_code={assigned_job.job_code}, minutes = {query_start_minutes} not in {query_start_minutes} -- {query_end_minutes}"
# )
continue
log.error(f"job={job_code} is in planning, but not in inplanning_job_index_set")
continue
new_job_dict = dataclasses.asdict(new_job)
            try:  # TODO, not necessary 2020-10-27 15:55:09
# new_job_dict["scheduled_start_minutes"] = self.jobs[job_index]["requested_start_minutes"]
# new_job_dict["scheduled_start_day"] = self.jobs[job_index]["requested_start_day"]
new_job_dict["scheduled_duration_minutes"] = new_job.requested_duration_minutes
new_job_dict["scheduled_primary_worker_id"] = new_job.requested_primary_worker_code
except:
# TODO, duan, 2020-10-20 16:20:36 , why?
print("debug duan todo, appointment why?")
continue
new_job_dict["scheduled_start_datetime"] = self.env_decode_from_minutes_to_datetime(
new_job_dict["scheduled_start_minutes"]
)
new_job_dict["scheduled_share_status"] = "N"
new_job_dict["scheduled_travel_prev_code"] = "__HOME"
new_job_dict["scheduled_travel_minutes_before"] = 0
job_solution.append(new_job_dict)
return job_solution
def get_planner_score_stats(self, include_slot_details=False):
assigned_job_stats = {}
inplanning_job_index_set = set()
shared_prev_jobs = {}
total_travel_minutes = 0
sorted_slot_codes = sorted(list(self.slot_server.time_slot_dict.keys()))
pre_job_code = "__HOME"
prev_slot = None
current_worker_id = "NOT_EXIST"
slot_details = {}
        # In this loop, it first collects only the in-planning/planned (I/P) jobs, which are available from working time slots
for slot_code in sorted_slot_codes:
# for work_time_i in range(len( self.workers_dict[worker_code]['assigned_jobs'] ) ): # nth assigned job time unit.
if slot_code in kandbox_config.DEBUGGING_SLOT_CODE_SET:
log.debug("debug atomic_slot_delete_and_add_back")
try:
a_slot = self.slot_server.get_slot(self.redis_conn.pipeline(), slot_code)
the_assigned_codes = sorted(
a_slot.assigned_job_codes,
key=lambda jc: self.jobs_dict[jc].scheduled_start_minutes,
)
except KeyError as ke:
log.error(
f"unknown worker code during get_planner_score_stats, or unknown job codes to the env. slot_code = {slot_code}, error = {str(ke)}"
)
print(ke)
continue
if include_slot_details:
slot_duration = a_slot.end_minutes - a_slot.start_minutes
slot_details[slot_code] = {
"travel_n_duration": [],
"free_minutes": slot_duration,
"start_end_duration": [a_slot.start_minutes, a_slot.end_minutes, slot_duration],
"the_assigned_codes": the_assigned_codes,
}
if a_slot.worker_id != current_worker_id:
current_start_loc = a_slot.start_location
elif (a_slot.slot_type == TimeSlotType.FLOATING) or (a_slot.prev_slot_code is None):
current_start_loc = a_slot.start_location
# if a_slot.slot_type in (TimeSlotType.JOB_FIXED, TimeSlotType.FLOATING):
current_worker_id = a_slot.worker_id
try:
the_assigned_codes = sorted(
a_slot.assigned_job_codes,
key=lambda jc: self.jobs_dict[jc].scheduled_start_minutes,
)
except KeyError as ke:
log.error(f"not all job codes are in env. a_slot = {a_slot}, error = {str(ke)}")
print(ke)
continue
for assigned_index, assigned_job_code in enumerate(the_assigned_codes):
assigned_job = self.jobs_dict[assigned_job_code]
if assigned_job.job_code in kandbox_config.DEBUGGING_JOB_CODE_SET:
print(f"pause for debugging {kandbox_config.DEBUGGING_JOB_CODE_SET }")
prev_travel_minutes = self.travel_router.get_travel_minutes_2locations(
current_start_loc,
assigned_job.location,
)
if include_slot_details:
slot_details[slot_code]["free_minutes"] -= (
prev_travel_minutes + assigned_job.requested_duration_minutes)
slot_details[slot_code]["travel_n_duration"] .append(
(prev_travel_minutes, assigned_job.requested_duration_minutes))
if assigned_job_code in assigned_job_stats:
assigned_job_stats[assigned_job_code]["travel"] += prev_travel_minutes
total_travel_minutes += prev_travel_minutes
else:
assigned_job_stats[assigned_job_code] = {
"travel": prev_travel_minutes,
"requested_duration_minutes": assigned_job.requested_duration_minutes,
}
total_travel_minutes += prev_travel_minutes
inplanning_job_index_set.add(assigned_job.job_code)
current_start_loc = assigned_job.location
        # In this loop, it collects only the unplanned (U) jobs, which are not available from working time slots
unplanned_job_count = 0
for job_code, new_job in self.jobs_dict.items():
if job_code in inplanning_job_index_set:
continue
if new_job.job_type == JobType.ABSENCE:
continue
if not new_job.is_active: # events and appt can not be U anyway
log.warn(f"job={ job_code} is skipped from solution because is_active == False")
continue
if new_job.planning_status == JobPlanningStatus.UNPLANNED:
unplanned_job_count += 1
elif new_job.planning_status in (JobPlanningStatus.COMPLETED,):
continue
elif new_job.planning_status in (JobPlanningStatus.PLANNED,):
if (
new_job.scheduled_start_minutes + new_job.scheduled_duration_minutes
< self.get_env_planning_horizon_start_minutes()
) | (new_job.scheduled_start_minutes > self.get_env_planning_horizon_end_minutes()):
log.warn(
f"solution skipped job_code={new_job.job_code}, minutes = {new_job.scheduled_start_minutes} not in planning window"
)
continue
else:
log.error(f"Why not in slots? PLANNED, job_code = {job_code}")
else:
log.error(f"Why not in slots? INPLANNING, job_code = {job_code}")
onsite_working_minutes = sum(
[assigned_job_stats[j]["requested_duration_minutes"] for j in assigned_job_stats]
)
inplanning_job_count = len(assigned_job_stats)
planning_start = datetime.strftime(
self.env_decode_from_minutes_to_datetime(self.get_env_planning_horizon_start_minutes()),
"%m-%d",
)
planning_end = datetime.strftime(
self.env_decode_from_minutes_to_datetime(self.get_env_planning_horizon_end_minutes()),
"%m-%d",
)
total_overtime_minutes = 0
for w in self.workers_dict.keys():
total_overtime_minutes += sum(
[
self.workers_dict[w].used_overtime_minutes[day_seq]
for day_seq in self.workers_dict[w].used_overtime_minutes.keys()
]
)
if inplanning_job_count + unplanned_job_count <= 0 or onsite_working_minutes <= 0:
overall_score = 0
else:
overall_score = (
(onsite_working_minutes / (total_travel_minutes + onsite_working_minutes))
+ (inplanning_job_count / (inplanning_job_count + unplanned_job_count))
- (total_overtime_minutes / onsite_working_minutes)
)
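        # Worked example (sketch): with onsite_working_minutes = 900, total_travel_minutes = 100,
        # inplanning_job_count = 18, unplanned_job_count = 2 and total_overtime_minutes = 0:
        #     overall_score = 900 / 1000 + 18 / 20 - 0 / 900 == 0.9 + 0.9 - 0.0 == 1.8
        # so the score rewards low travel and a high in-planning ratio, and penalizes overtime.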
planner_score_stats = {
"overall_score": overall_score,
"score": "{:.3f}".format(overall_score),
"total_travel_minutes": int(total_travel_minutes),
"inplanning_job_count": inplanning_job_count,
"unplanned_job_count": unplanned_job_count,
"visible_job_count": len(self.jobs_dict),
"onsite_working_minutes": onsite_working_minutes,
"planning_window": f"{planning_start} ~ {planning_end}",
"total_overtime_minutes": total_overtime_minutes,
"slot_details": slot_details,
}
return planner_score_stats
def get_planning_window_days_from_redis(self) -> set:
days_on_redis = self.redis_conn.hgetall(self.get_env_planning_day_redis_key())
existing_working_days = set([int(di) for di in days_on_redis.keys()])
return existing_working_days
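    # Illustrative sketch (hypothetical redis content): if the planning-day hash holds
    #     {"3": "2021-01-07_4320->5760", "4": "2021-01-08_5760->7200"}
    # (keys may be bytes depending on the redis client's decode_responses setting),
    # this returns {3, 4}.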
# **----------------------------------------------------------------------------
# ## Extended functions
# **----------------------------------------------------------------------------
def _reset_horizon_start_minutes(self):
if TESTING_MODE == "yes":
# if "env_start_day" in self.config.keys():
# Report exception if there is no env_start_day
if self.config["env_start_day"] == "_":
self.horizon_start_minutes = None
else:
# start_date = datetime.strptime(
# self.config["env_start_day"], kandbox_config.KANDBOX_DATE_FORMAT
# )
# self.horizon_start_minutes = self.env_encode_from_datetime_to_minutes(start_date) + 600
self.horizon_start_minutes = self.config["horizon_start_minutes"]
else:
self.horizon_start_minutes = None
def mutate_refresh_planning_window_from_redis(self) -> bool:
existing_working_days = self.get_planning_window_days_from_redis()
self.existing_working_days = existing_working_days
if len(existing_working_days) < 1:
min_horizon_start_seq = 0
max_horizon_start_seq = 4
else:
min_horizon_start_seq = min(existing_working_days)
max_horizon_start_seq = max(existing_working_days)
self._reset_horizon_start_minutes()
# min_planning_day_seq = int(self.get_env_planning_horizon_start_minutes() / 1440)
# max_planning_day_seq = max(existing_working_days)
self.config["nbr_of_days_planning_window"] = (
max_horizon_start_seq - min_horizon_start_seq + 1
)
log.info(
f"Refresh planning window done, min_horizon_start_seq= {min_horizon_start_seq}, max_horizon_start_seq = {max_horizon_start_seq}"
)
def mutate_extend_planning_window(self) -> bool:
existing_working_days = self.get_planning_window_days_from_redis()
# Hold this value and do not call it multiple times in a loop
today_start_date = self.env_decode_from_minutes_to_datetime(
self.get_env_planning_horizon_start_minutes()
)
horizon_day_seq = self.env_encode_from_datetime_to_day_with_validation(today_start_date)
# Now find out all current days in planning windows.
new_working_days = set()
for day_i in range(100):
new_start_date = today_start_date + timedelta(days=day_i)
day_seq = (new_start_date - self.data_start_datetime).days
new_weekday = self.env_encode_day_seq_to_weekday(day_seq)
new_start_date_str = datetime.strftime(
new_start_date, kandbox_config.KANDBOX_DATE_FORMAT
)
if new_start_date_str in self.national_holidays:
# This is a holiday. It overwrites weekly settings from weekly_working_days_flag.
continue
if self.weekly_working_days_flag[new_weekday]:
# This is a working day
self.daily_working_flag[day_seq] = True
new_working_days.add(day_seq)
else:
self.daily_working_flag[day_seq] = False
if len(new_working_days) >= self.config["planning_working_days"]:
break
        # The loop above may skip over several holiday / rest days
# 2020-11-17 06:27:47
# If planning window start with Sunday, new_working_days does not contain it, horizon_day_seq should still point to Sunday
self.config["nbr_of_days_planning_window"] = max(new_working_days) - horizon_day_seq + 1
if new_working_days <= existing_working_days:
log.warn(
f"There is no change in planning day when it is called at {datetime.now()}). existing_working_days={existing_working_days}. Maybe we have passed a weekend? "
)
return False
        # Update nbr_of_days_planning_window, which may be larger than the specified self.config["DaysForPlanning"]
        # because nbr_of_days_planning_window may also contain non-working days (holidays / weekly rest days) inside the window.
self.existing_working_days = new_working_days
days_to_add = new_working_days - existing_working_days
days_to_delete = existing_working_days - new_working_days
for day_seq in days_to_delete:
self._mutate_planning_window_delete_day(day_seq)
log.info(f"A planning day (day_seq = {day_seq}) is purged out of planning window. ")
if len(days_to_add) > 0:
self.kp_data_adapter.reload_data_from_db()
for day_seq in days_to_add:
if self._mutate_planning_window_add_day(day_seq):
# log.info(
# f"A planning day (day_seq = {day_seq}) is added into the planning window. "
# )
pass
else:
log.error(f"Failed to add a planning day (day_seq = {day_seq}) . ")
self.mutate_replay_jobs_single_working_day(day_seq)
self.kafka_server.post_refresh_planning_window()
return True
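# Illustrative sketch (assumption, not original code) of the day_seq arithmetic used above.
# Assuming data_start_datetime = 2020-11-01 and planning_working_days = 5:
#   new_start_date = 2020-11-17  ->  day_seq = (2020-11-17 - 2020-11-01).days = 16
#   new_weekday = env_encode_day_seq_to_weekday(16); the day joins new_working_days only if it is
#   neither a national holiday nor a weekly rest day, until 5 working days are collected.
#   nbr_of_days_planning_window = max(new_working_days) - horizon_day_seq + 1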
# fmt:off
def _mutate_planning_window_delete_day(self, day_seq: int) -> bool:
""" Right now, I delete all slots before this day. """
today_start_date = self.data_start_datetime + timedelta(days=day_seq)
today_start_date_str = datetime.strftime(today_start_date, kandbox_config.KANDBOX_DATE_FORMAT)
today_weekday = self.env_encode_day_seq_to_weekday(day_seq)
self.redis_conn.hdel(self.get_env_planning_day_redis_key(), *[day_seq])
for worker_code in self.workers_dict.keys():
#working_day_start = day_seq * 1440 if self.workers_dict[worker_code].weekly_working_slots[0] > 0 else day_seq * 1440 + self.workers_dict[worker_code].weekly_working_slots[0]
if self.workers_dict[worker_code].weekly_working_slots[today_weekday][1] < 1440:
working_day_end = (day_seq) * 1440 + \
self.workers_dict[worker_code].weekly_working_slots[today_weekday][1]
else:
working_day_end = (day_seq + 1) * 1440
# Do not extend the deletion to the whole day when the working slot ends before midnight,
# because there may be time slots spilling over from the neighbouring day (even with start time < 0).
# However, this may accidentally delete forced actions placed outside of working hours.
self.slot_server.delete_slots_from_worker(
worker_code=worker_code,
start_minutes=0, # day_seq * 1440,
end_minutes=working_day_end,
)
# TODO
# w.linear_working_slots.remove(slot)
# fmt:on
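# Worked example (illustrative values): deleting day_seq = 10 for a worker whose
# weekly_working_slots[weekday] = (480, 1050) removes all of that worker's slots in
# [0, 10 * 1440 + 1050) = [0, 15450); if the slot ended at 1440 the range would be
# [0, 11 * 1440) = [0, 15840) instead.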
def _mutate_planning_window_add_day(self, day_seq: int) -> bool:
today_start_date = self.data_start_datetime + timedelta(days=day_seq)
today_start_date_str = datetime.strftime(
today_start_date, kandbox_config.KANDBOX_DATE_FORMAT
)
today_weekday = self.env_encode_day_seq_to_weekday(day_seq)
"""
Of course it must be inside this.
if day_seq in self.daily_working_flag.keys():
log.error(
f"The planning_window slot (day_seq = {day_seq}) is aready added. It is skipped this time."
)
return False
"""
# self.redis_conn.hset(self.get_env_planning_day_redis_key(), mapping={day_seq: 0})
if today_start_date_str in self.national_holidays:
# This is a holiday. It overwrites weekly settings.
log.error(f"The planning_window slot (day_seq = {day_seq}) is in holiday.")
return False
if not self.weekly_working_days_flag[today_weekday]:
log.error(f"The planning_window slot (day_seq = {day_seq}) is in weekly rest day .")
return False
self.redis_conn.hset(
self.get_env_planning_day_redis_key(),
key=day_seq,
value=f"{today_start_date_str}_{day_seq*1440}->{(day_seq+1)*1440}"
# mapping={day_seq: f"{today_start_date_str}_{day_seq*1440}->{(day_seq+1)*1440}"},
)
for w in self.workers:
self._mutate_planning_window_add_worker_day(worker_code=w.worker_code, day_seq=day_seq)
log.info(
f"The planning window slot (day_seq = {day_seq}, today_start_date_str = {today_start_date_str}) is added succesfully!"
)
return True
def _mutate_planning_window_add_worker_day(self, worker_code: str, day_seq: int) -> bool:
# I first calculate the working slot in this day
min_day_seq = min(self.existing_working_days)
w = self.workers_dict[worker_code]
today_weekday = self.env_encode_day_seq_to_weekday(day_seq)
slot = (
day_seq * 1440 + w.weekly_working_slots[today_weekday][0],
day_seq * 1440 + w.weekly_working_slots[today_weekday][1],
)
if slot[1] - slot[0] < 1:
return False
# Check where there are existing slots overlapping with this working slot from redis.
overlap_slots = self.slot_server.get_overlapped_slots(
worker_id=worker_code,
start_minutes=slot[0],
end_minutes=slot[1], # (day_seq + 1) * 1440,
)
w.linear_working_slots.append(slot)
if len(overlap_slots) > 0:
overlap_slots_sorted = sorted(overlap_slots, key=lambda x: x.start_minutes)
new_slot_start = slot[0]
# new_slot_end = overlap_slots_sorted[0].end_minutes
new_start_location = w.weekly_start_gps[today_weekday]
# new_end_location = w.weekly_end_gps[today_weekday]
for aslot_i, aslot_ in enumerate(overlap_slots_sorted):
if aslot_.slot_type == TimeSlotType.FLOATING:
log.error(
f"WORKER:{worker_code}:DAY:{day_seq}:SLOT:{slot}, Encountered existing floating working time slot ({aslot_.start_minutes}->{aslot_.end_minutes}) while trying to add working slot. Aborted."
)
return False
log.warn(
f"WORKER:{worker_code}:DAY:{day_seq}:SLOT:{slot}, Encountered existing working time slot ({aslot_.start_minutes}->{aslot_.end_minutes}) while trying to add working slot {slot}"
)
new_slot_end = aslot_.start_minutes
new_end_location = aslot_.start_location
curr_slot = self.slot_server.add_single_working_time_slot(
worker_code=w.worker_code,
start_minutes=new_slot_start,
end_minutes=new_slot_end,
start_location=new_start_location,
end_location=new_end_location,
)
if aslot_i == 0:
if min_day_seq == day_seq:
self.workers_dict[w.worker_code].curr_slot = curr_slot
new_slot_start = aslot_.end_minutes
new_start_location = aslot_.end_location
# Add the last time slot if present
if new_slot_start < slot[1]:
self.slot_server.add_single_working_time_slot(
worker_code=w.worker_code,
start_minutes=new_slot_start,
end_minutes=slot[1],
start_location=new_start_location,
end_location=w.weekly_end_gps[today_weekday],
)
return False
else:
curr_slot = self.slot_server.add_single_working_time_slot(
worker_code=w.worker_code,
start_minutes=slot[0],
end_minutes=slot[1],
start_location=w.weekly_start_gps[today_weekday],
end_location=w.weekly_end_gps[today_weekday],
)
if min_day_seq == day_seq:
self.workers_dict[w.worker_code].curr_slot = curr_slot
log.info(
f"The worker_day slot (start_minutes = {slot[0]}, w.worker_code = {w.worker_code}) is added succesfully!"
)
return True
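# Worked example (illustrative values): for day_seq = 12 and weekly_working_slots[weekday] = (480, 1050),
# the candidate working slot is (12 * 1440 + 480, 12 * 1440 + 1050) = (17760, 18330).
# If redis already holds a fixed slot 17900->18000 inside that range, the day is split into
# two floating slots 17760->17900 and 18000->18330 around it.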
# ***************************************************************
# Mutation functions, lock?
# They are named after C-R-U-D: Create adds the object to the ENV, Update modifies an existing one.
# ***************************************************************
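# A minimal usage sketch (assumption, not part of the original code): the mutation functions
# are typically driven by external messages, e.g.
#   env.mutate_create_worker(new_worker)                       # Create
#   env.mutate_update_worker(new_worker)                       # Update
#   env.mutate_create_job(new_job, auto_replay=True)           # Create + replay its schedule
#   env.mutate_update_job_by_action_dict(a_dict=action_dict)   # apply a scheduling action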
def mutate_worker_add_overtime_minutes(
self, worker_code: str, day_seq: int, net_overtime_minutes: int, post_changes_flag=False
) -> bool:
added_successful = True
if day_seq not in self.workers_dict[worker_code].used_overtime_minutes.keys():
log.warn(
f"rejected, day_seq={day_seq} not in self.workers_dict[{worker_code}].used_overtime_minutes.keys()"
)
return False
self.workers_dict[worker_code].used_overtime_minutes[day_seq] += net_overtime_minutes
if self.workers_dict[worker_code].used_overtime_minutes[day_seq] < 0:
self.workers_dict[worker_code].used_overtime_minutes[day_seq] = 0
added_successful = False
for limit_days_key in self.workers_dict[worker_code].overtime_limits.keys():
total_overtime = 0
all_days_valid = True
for dsq in limit_days_key:
if dsq in self.workers_dict[worker_code].used_overtime_minutes.keys():
total_overtime += self.workers_dict[worker_code].used_overtime_minutes[dsq]
else:
log.info(
f"day_seq={dsq} is not longer in self.workers_dict[{worker_code}].used_overtime_minutes.keys() = {self.workers_dict[worker_code].used_overtime_minutes}. I will remove limit_days_key = {limit_days_key}"
)
dsq_error = False
break
if not all_days_valid:
# del self.workers_dict[worker_code].overtime_limits[limit_days_key]
continue
if total_overtime > self.workers_dict[worker_code].overtime_limits[limit_days_key]:
log.warn(
f"Overtime={total_overtime} is larger than limit for worker={worker_code}, key={limit_days_key}"
)
added_successful = False
if post_changes_flag:
self.kafka_server.post_env_message(
message_type=KafkaMessageType.UPDATE_WORKER_ATTRIBUTES,
payload=[
{
worker_code: {
"used_overtime_minutes": self.workers_dict[
worker_code
].used_overtime_minutes
}
}
],
)
# if worker_code == "MY|D|3|CT02":
# log.debug(
# f"{worker_code} - {net_overtime_minutes} = {self.workers_dict[worker_code].used_overtime_minutes}"
# )
return added_successful
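# Illustrative sketch (made-up values): overtime_limits is keyed by tuples of day_seq and is
# checked against the sum of used_overtime_minutes over those days, e.g.
#   worker.overtime_limits = {(15, 16, 17, 18, 19): 120}    # at most 120 extra minutes over these days
#   worker.used_overtime_minutes = {15: 60, 16: 30, ...}
#   mutate_worker_add_overtime_minutes("W001", 16, 40)      # 60 + 70 = 130 > 120 -> returns False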
def mutate_create_worker(self, new_worker: Worker):
# For now it does not trigger re-planning affected, 2020-10-25 10:09:19
if new_worker.worker_code in self.workers_dict.keys():
log.error(
f"mutate_create_worker: error, worker already existed: {new_worker.worker_code}"
)
return False
# raise ValueError("duplicate worker code '{0}' found".format(new_worker.worker_code))
self.workers.append(new_worker)
self.workers_dict[new_worker.worker_code] = new_worker
self.workers_dict[new_worker.worker_code].worker_index = len(self.workers) - 1
for day_seq in self.daily_working_flag.keys():
self._mutate_planning_window_add_worker_day(
worker_code=new_worker.worker_code, day_seq=day_seq
)
log.info(
f"mutate_create_worker: successfully added worker, code = {new_worker.worker_code}"
)
def mutate_update_worker(self, new_worker: Worker):
# I assume it is Unplanned for now, 2020-04-24 07:22:53.
# No replay
if new_worker.worker_code not in self.workers_dict.keys():
log.error(
"mutate_update_worker: The worker code '{0}' is not found".format(
new_worker.worker_code
)
)
return
# raise ValueError("The worker code '{0}' is not found".format(new_worker.worker_code))
w_idx = self.workers_dict[new_worker.worker_code].worker_index
if self.workers[w_idx].worker_code != new_worker.worker_code:
log.error(
f"worker_index mismatch during mutate_update_worker {new_worker}. Failed to update"
)
# TODO, clean up and reset index.
return
self.workers_dict[new_worker.worker_code] = new_worker
self.workers_dict[new_worker.worker_code].worker_index = w_idx
self.workers[w_idx] = self.workers_dict[new_worker.worker_code]
log.info(f"mutate_update_worker: success, worker is updated with new profile: {new_worker}")
def mutate_create_appointment(self, new_job: Appointment, auto_replay=True):
# I assume it is Unplanned for now, 2020-04-24 07:22:53.
# No replay
if new_job.job_code in kandbox_config.DEBUGGING_JOB_CODE_SET:
log.debug(f"debug appt {kandbox_config.DEBUGGING_JOB_CODE_SET}")
if new_job.job_code in self.jobs_dict.keys():
log.warn(
f"APPT:{new_job.job_code}: mutate_create_appointment, job already existed",
)
# raise ValueError("duplicate job code '{0}' found".format(new_job.job_code))
return
# new_job.job_index = job_index
self.jobs.append(new_job)
job_index = len(self.jobs) - 1
new_job.job_index = job_index
self.jobs_dict[new_job.job_code] = new_job
if new_job.included_job_codes is None:
new_job.included_job_codes = []
# First return occupied slots from included jobs
for existing_job_code in new_job.flex_form_data["included_scheduled_visit_ids"].split(";"):
# remove the job from time slots
# step current appointment.
stripped_job_code = existing_job_code.strip()
try:
v = self.jobs_dict[stripped_job_code]
except KeyError:
print(
f"_get_appointment_form_dict: at least one visit is not found job_code={stripped_job_code.strip()}, ",
new_job.flex_form_data["all_verified"],
)
# new_job.flex_form_data["all_verified"] = False
return False
if stripped_job_code not in new_job.included_job_codes:
new_job.included_job_codes.append(stripped_job_code)
self.jobs_dict[stripped_job_code].is_active = False
if self.jobs_dict[stripped_job_code].planning_status == JobPlanningStatus.UNPLANNED:
log.warn(
f"APPT:{new_job.job_code}:JOB:{stripped_job_code}: U status job () is included in appointment (). It is simply applied without modifying job."
) # log.warn
continue
# Then apply this appointment action
# if False:
# if not kandbox_config.DEBUG_ENABLE_APPOINTMENT_REPLAY:
# return
if auto_replay:
release_success, info = self.slot_server.release_job_time_slots(
self.jobs_dict[stripped_job_code]
)
if not release_success:
log.warn(
f"Error releasing job (code={stripped_job_code}) inside the appointment ({new_job.job_code}), but I will continue appointment replay ..."
)
# return False
self.mutate_replay_appointment(job=new_job)
self.redis_conn.hmset(
"{}{}".format(kandbox_config.APPOINTMENT_ON_REDIS_KEY_PREFIX, new_job.job_code),
{"rec_round": 0, "planning_status": "PENDING", "team_id": self.config["team_id"]},
)
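# Illustrative sketch (assumed field values): an appointment groups existing visits through
# flex_form_data["included_scheduled_visit_ids"], e.g.
#   new_appt.flex_form_data["included_scheduled_visit_ids"] = "JOB-001; JOB-002"   # made-up codes
#   env.mutate_create_appointment(new_appt, auto_replay=True)
# Each included job is deactivated, its slot is released, and the appointment itself is
# replayed as a single scheduling action.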
def mutate_replay_appointment(self, job: Appointment):
if (job.scheduled_start_minutes < self.get_env_planning_horizon_start_minutes()) or (
job.scheduled_start_minutes > self.get_env_planning_horizon_end_minutes()
):
log.warn(
f"Start time of appointment ({job.job_code}={job.scheduled_start_minutes}) is out of planning window, skippped replaying"
)
return False
action_dict = self.gen_action_dict_from_appointment(job)
self.mutate_update_job_by_action_dict(a_dict=action_dict, post_changes_flag=False)
return True
# TODO, Third, trigger the heuristic search for recommendations
# There is no update_appointment. This should be done by API->commit
# def mutate_update_appointment(self, new_job):
# Deprecated 2020-12-16 20:57:17
# Reason: I keep original appt code for multiple times re-scheduling
def mutate_update_job_code__TODEL(self, old_job_code: str, new_job_code: str):
# I assume it is Unplanned for now, 2020-04-24 07:22:53.
# No replay
if old_job_code not in self.jobs_dict.keys():
# raise ValueError("Job code '{0}' not found".format(old_job_code))
log.error("Job code '{0}' not found".format(old_job_code))
return
if self.jobs_dict[old_job_code].job_type != JobType.APPOINTMENT:
log.error(
"ERROR: ONLY appointment job code can be changed, but {} is {} ".format(
old_job_code, self.jobs_dict[old_job_code].job_type
)
)
old_job = self.jobs_dict[old_job_code]
old_job.job_code = new_job_code
del self.jobs_dict[old_job_code]
self.jobs_dict[new_job_code] = old_job
# location in self.jobs remains untouched.
# TODO, send to kafka.
log.warn(
f"mutate_update_job_code:APPT:{old_job_code}:NEW_APPT:{new_job_code}: appointment code to updated to new one by {self.env_inst_code}. No more recommendations to any of them."
)
def mutate_delete_appointment(self, job_code):
# I assume it is Unplanned for now, 2020-04-24 07:22:53.
# No replay
if job_code not in self.jobs_dict.keys():
log.error("mutate_delete_appointment: appt does not exist: ", job_code)
return
self.jobs_dict[job_code].is_active = False
job_index = self.jobs_dict[job_code].job_index
curr_appt = self.jobs_dict[job_code]
release_success_flag, info = self.slot_server.release_job_time_slots(job=curr_appt)
if not release_success_flag:
log.warn(
f"Error whiling tring to release existing appoitnment slot, {curr_appt.job_code}"
)
# Restore job status after removing covering appointment.
for included_job_code in curr_appt.included_job_codes:
self.jobs_dict[included_job_code].is_active = True
if self.jobs_dict[included_job_code].planning_status != JobPlanningStatus.UNPLANNED:
self.mutate_replay_job(job=self.jobs_dict[included_job_code])
self.jobs_dict[job_code] = None
del self.jobs_dict[job_code]
try:
if self.jobs[job_index].job_code != job_code:
log.error(f"mutate_delete_appointment: job code mismatch {job_code}")
else:
del self.jobs[job_index]
except:
log.error(
f"mutate_delete_appointment:exception: job code mismatch job_code={job_code}, job_index = {job_index}, "
)
# Remove appt from redis recommendation frontend
self.redis_conn.delete(
"{}{}".format(kandbox_config.APPOINTMENT_ON_REDIS_KEY_PREFIX, job_code)
)
# Remove recommendations from redis
for rec_code in self.redis_conn.scan_iter(
f"{self.get_recommendation_job_key(job_code=job_code, action_day=-1)}*"
):
self.redis_conn.delete(rec_code)
log.info(f"APPT:{job_code} is purged out of env {self.env_inst_code}")
def mutate_create_worker_absence(self, new_job: Absence, auto_replay=True):
if new_job.job_code in self.jobs_dict.keys():
print("add_job: error, job already existed: ", new_job.job_code)
return
# raise ValueError("duplicate job code '{0}' found".format(new_job.job_code))
# new_job.job_index = job_index
self.jobs.append(new_job)
job_index = len(self.jobs) - 1
new_job.job_index = job_index
self.jobs_dict[new_job.job_code] = new_job
if auto_replay:
self.mutate_replay_worker_absence(job=new_job)
log.debug(
f"ABSENCE:{new_job.job_code}: the absence is created/added into env.",
)
def mutate_replay_worker_absence(self, job: Absence):
if (job.scheduled_start_minutes < self.get_env_planning_horizon_start_minutes()) or (
job.scheduled_start_minutes > self.get_env_planning_horizon_end_minutes()
):
log.warn(
f"Start time of absence ({job.job_code}={job.scheduled_start_minutes}) is out of planning window, skippped replaying"
)
return False
action_dict = self.gen_action_dict_from_worker_absence(job)
self.mutate_update_job_by_action_dict(a_dict=action_dict, post_changes_flag=False)
return True
def mutate_create_job(self, new_job, auto_replay=True):
# I assume it is Unplanned for now, 2020-04-24 07:22:53.
# No replay
if new_job.job_code in self.jobs_dict.keys():
log.error("add_job: error, job already existed: ", new_job.job_code)
return
# raise ValueError("duplicate job code '{0}' found".format(new_job.job_code))
# new_job.job_index = job_index
else:
self.jobs.append(new_job)
new_job.job_index = len(self.jobs) - 1
self.jobs_dict[new_job.job_code] = new_job
# Then apply this appointment action
if auto_replay:
if new_job.planning_status != JobPlanningStatus.UNPLANNED:
self.mutate_replay_job(job=new_job)
log.info(f"mutate_create_job: successfully added job, code = {new_job.job_code}")
def mutate_update_job_metadata(self, new_job):
# I assume it is Unplanned for now, 2020-04-24 07:22:53.
# No replay
if new_job.job_code not in self.jobs_dict.keys():
log.error("add_job: error, job already existed: ", new_job.job_code)
return
curr_job = self.jobs_dict[new_job.job_code]
# Keep the scheduling information intact as before
new_job.scheduled_worker_codes = curr_job.scheduled_worker_codes
new_job.scheduled_start_minutes = curr_job.scheduled_start_minutes
new_job.scheduled_duration_minutes = curr_job.scheduled_duration_minutes
curr_job_index = curr_job.job_index
if self.jobs[curr_job_index].job_code != new_job.job_code:
log.warn(f"mismatched job_index while adding job {new_job.job_code}")
# return
self.jobs[curr_job_index] = new_job
self.jobs_dict[new_job.job_code] = new_job
log.info(
f"Successfully updated job metadata, no schedulling action done. code = {new_job.job_code}"
)
# Then I skip appling scheduling action
def mutate_replay_job(self, job: Appointment):
if (job.scheduled_start_minutes < self.get_env_planning_horizon_start_minutes()) or (
job.scheduled_start_minutes > self.get_env_planning_horizon_end_minutes()
):
log.info(
f"Start time of Job ({job.job_code}) is {job.scheduled_start_minutes}), out of planning window ({self.get_env_planning_horizon_start_minutes()}->{self.get_env_planning_horizon_end_minutes()}), skipped replaying"
)
return False
if job.planning_status == JobPlanningStatus.UNPLANNED:
log.info(f"U-Status Job ({job.job_code}) is skipped from replaying.")
return True
action_dict = self.gen_action_dict_from_job(job, is_forced_action=True)
self.mutate_update_job_by_action_dict(a_dict=action_dict, post_changes_flag=False)
return True
def mutate_complete_job(self, job_code):
# Mark a job as completed and optionally remove it.
if job_code not in self.jobs_dict.keys():
log.error("mutate_complete_job: error, job not exist: ", job_code)
return
curr_job = self.jobs_dict[job_code]
# Keep the scheduling information intact as before
release_success_flag, info = self.slot_server.release_job_time_slots(job=curr_job)
if (not release_success_flag):
log.warning(
f"Error in release_job_time_slots, {curr_job.job_code}, error = {str(info)}")
return False
curr_job.planning_status = JobPlanningStatus.COMPLETED
self.jobs_dict[job_code] = curr_job
log.debug(
f"Successfully mutate_complete_job. code = { job_code}"
)
def mutate_update_job_by_action_dict(
self, a_dict: ActionDict, post_changes_flag: bool = False
) -> SingleJobCommitInternalOutput:
if a_dict.job_code in kandbox_config.DEBUGGING_JOB_CODE_SET:
print(f"pause for {a_dict.job_code}")
if post_changes_flag:
if PLANNER_SERVER_ROLE == "recommendation":
log.warn(f"recommender should not post changes, setting to false")
# post_changes_flag = False
# TODO: if there is not enough capacity (even without considering travel and time slots), reject it
if a_dict.job_code not in self.jobs_dict.keys():
log.error("add_job: error, job already existed: ", a_dict.job_code)
return
# duplicated check with above.
# if len(self.jobs_dict) < 1:
# log.error("No jobs to mutate")
# return
if self.current_job_i >= len(self.jobs):
self.current_job_i = 0
# if self.jobs[self.current_job_i].job_code != a_dict.job_code:
# self.current_job_i = self.jobs_dict[a_dict.job_code].job_index
# log.warn(
# f"job_code = { a_dict.job_code} missmatching. self.jobs[self.current_job_i].job_code != a_dict.job_code"
# )
curr_job = self.jobs_dict[a_dict.job_code]
# TODO, verify that this is not duplicated action by itself. 2020-10-30 17:25:51
job_commit_output = SingleJobCommitInternalOutput(
status_code=ActionScoringResultType.ERROR,
messages=[],
nbr_changed_jobs=0,
new_job_code="",
)
if a_dict.scheduled_duration_minutes < 1:
log.warn(
f"action is rejected because scheduled_duration_minutes < 1, action = {a_dict}"
)
job_commit_output.messages.append(
f"action is rejected because scheduled_duration_minutes < 1"
)
return job_commit_output
if curr_job.planning_status in {JobPlanningStatus.PLANNED}:
if (curr_job.job_type == JobType.JOB) and (not a_dict.is_forced_action):
job_commit_output.messages.append(
f"Failed to change job planning because planning_status == PLANNED and not is_forced_action"
)
return job_commit_output
if (curr_job.planning_status == JobPlanningStatus.UNPLANNED) & (
a_dict.action_type == ActionType.UNPLAN
):
job_commit_output.messages.append(
f"Job is already unplanned, and can not be unplanned."
)
job_commit_output.status_code = ActionScoringResultType.OK
return job_commit_output
if curr_job.planning_status in {
JobPlanningStatus.IN_PLANNING,
JobPlanningStatus.PLANNED, # Here planned is only for replay
}:
if (
(a_dict.action_type != ActionType.UNPLAN)
& (a_dict.scheduled_start_minutes == curr_job.scheduled_start_minutes)
& (a_dict.scheduled_worker_codes == curr_job.scheduled_worker_codes)
& (not a_dict.is_forced_action)
):
job_commit_output.messages.append(
f"Job is in-planning, the new scheduled_start_minutes, scheduled_worker_codes are same."
)
job_commit_output.status_code = ActionScoringResultType.OK
return job_commit_output
if not curr_job.is_replayed:
log.warn(f"job ({curr_job.job_code}) is not replayed for releasing?")
# Now we first release previous slot
release_success_flag, info = self.slot_server.release_job_time_slots(job=curr_job)
if (not release_success_flag) & curr_job.is_replayed & (not a_dict.is_forced_action):
log.warn(f"Error in release_job_time_slots, {curr_job.job_code}")
job_commit_output.messages.append(
f"Failed to release job slot for {curr_job.job_code}, release_success_flag = false, error = {str(info)}"
)
return job_commit_output
if a_dict.action_type == ActionType.UNPLAN:
curr_job.planning_status = JobPlanningStatus.UNPLANNED
if self.run_mode != EnvRunModeType.REPLAY:
job_commit_output.nbr_changed_jobs += 1
curr_job.is_changed = True
self.changed_job_codes_set.add(curr_job.job_code)
if post_changes_flag:
self.commit_changed_jobs(changed_job_codes_set={curr_job.job_code})
job_commit_output.status_code = ActionScoringResultType.OK
log.info(f"JOB:{curr_job.job_code}: job is unplanned successfully. ")
return job_commit_output
else:
log.info(
f"JOB:{curr_job.job_code}: job is unplanned temporary by releasing slot for inplan again. release_success_flag = {release_success_flag}"
)
# A typical job duration is about 200 minutes. Jobs starting more than 200 minutes before the window start will not impact the current planning window.
# TODO
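# Worked example (illustrative numbers): with the horizon starting at minute 2880 (day 2),
# a job starting at minute 2750 is still accepted because 2750 >= 2880 - 200, while a job
# starting at minute 2600 is rejected as it can no longer overlap the planning window.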
if (
a_dict.scheduled_start_minutes < self.get_env_planning_horizon_start_minutes() - 200
) or (a_dict.scheduled_start_minutes > self.get_env_planning_horizon_end_minutes()):
log.warn(
f"action is rejected because scheduled_start_minutes is out of planning window, action = {a_dict}"
)
job_commit_output.messages.append(
f"action is rejected because scheduled_start_minutes is out of planning window"
)
return job_commit_output
if not a_dict.is_forced_action:
rule_check_info = self._check_action_on_rule_set(a_dict)
if rule_check_info.status_code != ActionScoringResultType.OK:
job_commit_output.messages.append(rule_check_info)
return job_commit_output # False, rule_check_info # {'message':'ok'}
success_flag, info = self.slot_server.cut_off_time_slots(action_dict=a_dict)
if not success_flag:
job_commit_output.messages.append(info)
if a_dict.is_forced_action:
job_commit_output.status_code = ActionScoringResultType.WARNING
log.error(f"failed to cut_off_time_slots for replaying job = {a_dict.job_code} ")
# TODO, send to kafka for inspection
else:
job_commit_output.status_code = ActionScoringResultType.ERROR
# self._move_to_next_unplanned_job()
return job_commit_output
else:
curr_job.is_replayed = True
if self.jobs_dict[a_dict.job_code].job_type == JobType.ABSENCE:
# Do not need to update current job
return job_commit_output
#
# Now update the job schedule status.
#
nbr_changed_jobs = 0
if curr_job.job_type == JobType.APPOINTMENT:
# Align up all jobs included inside the current appointment
curr_start_minutes = curr_job.scheduled_start_minutes
for _j_code in curr_job.included_job_codes:
# APPT_Required==1 will make IN_PLANNING same as PLANNED
self.jobs_dict[_j_code].planning_status = JobPlanningStatus.IN_PLANNING
self.jobs_dict[_j_code].scheduled_start_minutes = curr_start_minutes
self.jobs_dict[_j_code].scheduled_worker_codes = a_dict.scheduled_worker_codes
self.jobs_dict[
_j_code
].scheduled_duration_minutes = self.get_encode_shared_duration_by_planning_efficiency_factor(
requested_duration_minutes=self.jobs_dict[_j_code].requested_duration_minutes,
nbr_workers=len(a_dict.scheduled_worker_codes),
)
self.jobs_dict[_j_code].is_active = False
self.jobs_dict[_j_code].is_appointment_confirmed = True
self.jobs_dict[_j_code].is_changed = True
# curr_job.is_changed may also be set to True further below
curr_start_minutes += self.jobs_dict[_j_code].scheduled_duration_minutes
# Do not post individual jobs belonging to appt, as it will be posted by appt
#
# Now update the job with other 3 information: Tech, start, duration.
#
if curr_job.planning_status == JobPlanningStatus.UNPLANNED:
curr_job.planning_status = JobPlanningStatus.IN_PLANNING
curr_job.scheduled_worker_codes = a_dict.scheduled_worker_codes
curr_job.scheduled_start_minutes = a_dict.scheduled_start_minutes
# could be across several days.
curr_job.scheduled_duration_minutes = a_dict.scheduled_duration_minutes
# Here I decide to publish the change or not
if self.run_mode != EnvRunModeType.REPLAY: # and (not a_dict.is_forced_action)
if curr_job.planning_status == JobPlanningStatus.UNPLANNED:
# self.expected_travel_time_so_far += self.job_travel_time_sample_list_static[
# self.current_job_i
# ]
self.nbr_inplanning += 1
curr_job.is_changed = True
self.changed_job_codes_set.add(curr_job.job_code)
nbr_changed_jobs = len(self.changed_job_codes_set)
# It might change multiple jobs
if post_changes_flag:
self.commit_changed_jobs(changed_job_codes_set={curr_job.job_code})
else:
if post_changes_flag:
log.error(f"Not allowed to post_changes_flag for EnvRunModeType.REPLAY")
job_commit_output.status_code = ActionScoringResultType.OK
job_commit_output.nbr_changed_jobs = nbr_changed_jobs
log.debug(
f"JOB:{curr_job.job_code}:WORKER:{a_dict.scheduled_worker_codes}:START_MINUTES:{a_dict.scheduled_start_minutes}: job is commited successfully. "
)
return job_commit_output
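# A minimal sketch of driving this method directly (field names are taken from the checks above;
# the concrete values are made up, and constructing ActionDict by keyword is an assumption):
#   a_dict = ActionDict(
#       job_code="JOB-001",                  # made-up job code
#       action_type=ActionType.UNPLAN,       # release the job back to UNPLANNED
#       scheduled_worker_codes=["W001"],     # made-up worker code
#       scheduled_start_minutes=3000,
#       scheduled_duration_minutes=60,       # must be >= 1 or the action is rejected
#       is_forced_action=False,
#   )
#   result = env.mutate_update_job_by_action_dict(a_dict=a_dict, post_changes_flag=False)
#   # result.status_code == ActionScoringResultType.OK on success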
def commit_changed_jobs(self, changed_job_codes_set=set()):
# for job in jobs:
changed_jobs = []
changed_job_codes = []
self.changed_job_codes_set = changed_job_codes_set
for job_code in self.changed_job_codes_set:
job = self.jobs_dict[job_code]
if job.is_changed:
a_job = self.decode_single_job_to_solution(job)
changed_jobs.append(a_job)
# changed_job_codes.append(job.job_code)
else:
log.error("ERROR: tracked but job.is_changed == False")
self.kp_data_adapter.save_changed_jobs(changed_jobs=changed_jobs)
new_offset = self.post_changed_jobs(changed_job_codes_set=changed_job_codes_set)
self.kp_data_adapter.set_team_env_window_latest_offset(offset=new_offset)
# Reset all tracked changes.
for job_code in self.changed_job_codes_set:
self.jobs_dict[job_code].is_changed = False
self.changed_job_codes_set = set()
def post_changed_jobs(self, changed_job_codes_set):
"""This is used by ENV to post changed job to external"""
jobs = []
for j in changed_job_codes_set:
job = self.jobs_dict[j]
if job.scheduled_duration_minutes > job.requested_duration_minutes:
log.error(
f"job.scheduled_duration_minutes>job.requested_duration_minutes, Error detected, post_changed_jobs skipped job_code = {job.job_code}, {job.scheduled_duration_minutes} > {job.requested_duration_minutes}"
)
continue
new_job = copy.copy(job)
new_job.location = JobLocationBase(*new_job.location[0:4])
jobs.append(new_job)
appt_included_jobs = []
for job in jobs:
if job.job_type == JobType.APPOINTMENT:
# Loop through included jobs and re-arrange start time for them
curr_start_minutes = job.scheduled_start_minutes
for included_job_code in job.included_job_codes:
job_1 = copy.copy(self.jobs_dict[included_job_code])
job_1.location = JobLocationBase(*job_1.location[0:4])
job_1.scheduled_start_minutes = curr_start_minutes
curr_start_minutes += job_1.scheduled_duration_minutes
appt_included_jobs.append(job_1)
self.kafka_server.post_env_out_message(
message_type=KafkaMessageType.POST_CHANGED_JOBS, payload=jobs + appt_included_jobs
)
# return offset in internal kafka window
return self.kafka_server.post_env_message(
message_type=KafkaMessageType.REFRESH_JOB, payload=jobs + appt_included_jobs
)
# ***************************************************************
# # Gym functions
# ***************************************************************
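# A minimal rollout sketch (assumption, not original code) for the gym-style API below;
# rl_agent is a hypothetical policy object:
#   obs = env._get_observation()
#   done = False
#   while not done:
#       action = rl_agent.compute_action(obs)
#       obs, reward, done, info = env.step(action)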
def step_by_action_dict(self, action_dict): # -> (obs, ..., SingleJobCommitInternalOutput)
# action [1, 0, 0]. One-hot encoding: 1 means this worker takes the job.
if self.run_mode == EnvRunModeType.REPLAY:
action_dict.is_forced_action = True
result_info_obj = self.mutate_update_job_by_action_dict(a_dict=action_dict)
result_info = dataclasses.asdict(result_info_obj)
if result_info["status_code"] == ActionScoringResultType.OK:
reward = 0.11 # self._get_reward() # 1
self.inplanning_job_count += 1
else:
reward = -1
if self.run_mode == EnvRunModeType.REPLAY:
self.current_job_i += 1
has_next = self.current_job_i < len(self.jobs)
else:
has_next = self._move_to_next_unplanned_job()
if has_next:
done = False
else:
done = True
reward = 5
# print("Adding job {}--{} to Worker {}-{} , result: {}...".format(self.current_job_i - 1, self.jobs[self.current_job_i - 1]['job_code'],action['worker_code'],action['start_minutes'],add_result ))
if self.run_mode == EnvRunModeType.REPLAY:
return ([], 1, done, result_info)
# print(f"Env Error: trail={self.trial_count}, step={self.trial_step_count}")
# if self.trial_step_count > len(self.jobs) * 0.3:
# done = True
# reward = -5
# if self.trial_count % 1 == 0:
# # print("No more unplanned jobs, done. internal error? It should be handled by has_next!") )
# log.info(
# f"Env done with Failure: travel_time={self.total_travel_time:.2f}, trial={self.trial_count}, trial_step_count={self.trial_step_count}, nbr_inplanning={self.nbr_inplanning} ... "
# )
self.nbr_inplanning = sum(
[
1 if self.jobs_dict[j_i].planning_status != "U" else 0
for j_i in self.jobs_dict.keys()
]
)
if self.nbr_inplanning >= len(self.jobs):
overall_score = self.get_planner_score_stats()["overall_score"]
done = True
reward = 4 * overall_score
# if (done == True) & (self.trial_count % 1 == 0): # trial_count
# # print("No more unplanned jobs, done. internal error? It should be handled by has_next!") )
# # self.get_planner_score_stats()["overall_score"]
# log.info(
# f"Env done with Success: self.inplanning_job_count = {self.inplanning_job_count}, trial_step_count = {self.trial_step_count}, reward={reward:.2f}, travel_time={self.total_travel_time:.2f}, nbr_inplanning={self.nbr_inplanning}, travel_worker_job_count = {self.total_travel_worker_job_count}, trial={self.trial_count}"
# )
obs = self._get_observation()
if ((len(obs["jobs"]) < 1) or (len(obs["slots"]) < 1)) and (not done):
log.warn("No jobs or slots to observe, done?")
done = True
return (obs, reward, done, result_info)
def step_recommend_appt(self, action_dict):
curr_job = copy.deepcopy(self.jobs_dict[action_dict.job_code])
action_day = int(action_dict.scheduled_start_minutes / 1440)
curr_job.requested_start_min_minutes = action_day * 1440
curr_job.requested_start_max_minutes = (action_day + 1) * 1440
action_dict_list = self.recommendation_server.search_action_dict_on_worker_day(
a_worker_code_list=action_dict.scheduled_worker_codes,
curr_job=curr_job,
max_number_of_matching=3,
)
if len(action_dict_list) < 1:
reward = -0.1
else:
reward = action_dict_list[0].score + 0.5
self.appt_scores[action_dict.job_code] = reward
info = {"message": f"found {len(action_dict_list)} recommendations"}
done = True
for p_i in range(len(self.appts)):
self.current_appt_i += 1
if self.current_appt_i >= len(self.appts):
self.current_appt_i = 0
if self.appts[self.current_appt_i] not in self.appt_scores.keys():
done = False
n_job_code = self.appts[self.current_appt_i]
self.current_job_i = self.jobs_dict[n_job_code].job_index
if self.jobs[self.current_job_i].job_code != n_job_code:
log.error(
f"n_job_code = {n_job_code}, self.jobs[self.current_job_i].job_code = {self.jobs[self.current_job_i].job_code}"
)
break
if done:
self.current_job_i = 0
obs = self._get_observation()
if self.trial_step_count > self.config["max_trial_step_count"]:
done = True
reward = -5
if self.trial_count % 20 == 0:
print(
f"Env done as Failed: travel_time={self.total_travel_time:.2f}, trial={self.trial_count}, step={self.trial_step_count}, nbr_inplanning={self.nbr_inplanning} ... "
)
# if len(self.appt_scores) >= len(self.appts):
# done = True
# reward += 3
# if self.trial_count % 20 == 0:
# # print("No more unplanned jobs, done. internal error? It should be handled by has_next!") )
# log.info(
# f"Env done as Success: trial={self.trial_count}, step={self.trial_step_count}, reward={reward:.2f}, travel_time={self.total_travel_time:.2f}, nbr_inplanning={self.nbr_inplanning}, travel_worker_job_count = {self.total_travel_worker_job_count}"
# )
def _calc_travel_minutes_difference(
self, shared_time_slots_optimized, arranged_slots, current_job_code
):
new_travel_minutes_difference = 0
for a_slot, arranged_slot in zip(shared_time_slots_optimized, arranged_slots):
new_job_code_list = [s[1] for s in arranged_slot]
(
prev_travel,
next_travel,
inside_travel,
) = self.get_travel_time_jobs_in_slot(a_slot, new_job_code_list)
new_travel_minutes = prev_travel + next_travel + sum(inside_travel)
new_job_code_list.remove(current_job_code)
(
prev_travel,
next_travel,
inside_travel,
) = self.get_travel_time_jobs_in_slot(a_slot, new_job_code_list)
original_travel_minutes = prev_travel + next_travel + sum(inside_travel)
new_travel_minutes_difference += new_travel_minutes - original_travel_minutes
return (100 - new_travel_minutes_difference) / 100
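# Worked example (illustrative numbers): if inserting current_job_code increases the total travel
# of the affected slots by 30 minutes, the returned score is (100 - 30) / 100 = 0.7; an insertion
# that saves 10 minutes of travel scores (100 - (-10)) / 100 = 1.1.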
def step_recommend_and_plan_job(self, action_dict):
curr_job = copy.deepcopy(self.jobs_dict[action_dict.job_code])
action_day = int(action_dict.scheduled_start_minutes / 1440)
curr_job.requested_start_min_minutes = (
action_day * 1440 + self.get_env_planning_horizon_start_minutes()
)
curr_job.requested_start_max_minutes = (
action_day + 1
) * 1440 + self.get_env_planning_horizon_start_minutes()
action_dict_list = self.recommendation_server.search_action_dict_on_worker_day(
a_worker_code_list=action_dict.scheduled_worker_codes,
curr_job=curr_job,
max_number_of_matching=3,
)
if curr_job.job_code in kandbox_config.DEBUGGING_JOB_CODE_SET:
log.debug(f"step_recommend_and_plan_job curr_job.job_code = {curr_job.job_code}")
if len(action_dict_list) >= 1:
selected_action = action_dict_list[0].to_action_dict(self)
return self.step_by_action_dict(action_dict=selected_action)
# else:
reward = -1
result_info = {"message": f"found no action in the given worker+day"}
done = not self._move_to_next_unplanned_job()
obs = self._get_observation()
return (obs, reward, done, result_info)
def step(self, action):
self.trial_step_count += 1
if len(self.internal_obs_slot_list) < 1:
log.error("len(self.internal_obs_slot_list) < 1, not ready... Reset not called yet?")
return self._get_observation(), -1, False, {}
# action [1, 0, 0]. One-hot encoding: 1 means this worker takes the job.
new_act = action.copy()
max_i = np.argmax(new_act[0: len(self.internal_obs_slot_list)]) #
if max_i >= len(self.internal_obs_slot_list):
log.error("Wrong")
temp_slot = copy.deepcopy(self.internal_obs_slot_list[max_i])
temp_slot.assigned_job_codes.append(self.jobs[self.current_job_i].job_code)
shared_time_slots_optimized = [temp_slot]
obs, reward, done, info = self.step_naive_search_and_plan_job(shared_time_slots_optimized)
if (not done):
if (self.trial_step_count > self.config["max_trial_step_count"]):
done = True
reward = -5
msg = f"Env done as Failure: inplanning = {self.inplanning_job_count}, trial_step_count = {self.trial_step_count}, reward={reward:.2f}, travel_time={self.total_travel_time:.2f}, nbr_inplanning={self.nbr_inplanning}, travel_worker_job_count = {self.total_travel_worker_job_count}, trial={self.trial_count}, when steps reaches maximum"
info = {
"message": msg
}
log.info(msg)
else:
if (self.trial_count % 1 == 0):
log.info(
f"Env done as Success: inplanning = {self.inplanning_job_count}, trial_step_count = {self.trial_step_count}, reward={reward:.2f}, travel_time={self.total_travel_time:.2f}, nbr_inplanning={self.nbr_inplanning}, travel_worker_job_count = {self.total_travel_worker_job_count}, trial={self.trial_count}, good"
)
return obs, reward, done, info
# try:
# action_dict = self.decode_action_into_dict_native(action)
# if self.run_mode == EnvRunModeType.REPLAY:
# log.error(f"No more replays")
# action_dict.is_forced_action = True
# else:
# action_dict.is_forced_action = False
# except LookupError:
# print("LookupError: failed to parse action")
# # add_result = False
# info = {"message": f"LookupError: failed to parse action into acction_dict {action}"}
# obs = self._get_observation()
# if self.trial_step_count > len(self.appts) * 2:
# done = True
# else:
# done = False
# reward = -1
# return (obs, reward, done, info)
# obs, reward, done, info = self.step_recommend_and_plan_job(action_dict)
def pprint_all_slots(self):
for k in sorted(self.slot_server.time_slot_dict.keys()):
job_list = self.slot_server.time_slot_dict[k].assigned_job_codes
(
prev_travel,
next_travel,
inside_travel,
) = self.get_travel_time_jobs_in_slot(self.slot_server.time_slot_dict[k], job_list)
print(k, self.slot_server.time_slot_dict[k].start_location[0:2], (
prev_travel,
next_travel,
inside_travel,
))
print(self.slot_server.time_slot_dict[k].start_minutes, [
(self.jobs_dict[s].scheduled_start_minutes, s) for s in job_list])
def step_naive_search_and_plan_job(self, shared_time_slots_optimized):
job_code = self.jobs[self.current_job_i].job_code
res = self.naive_opti_slot.dispatch_jobs_in_slots(shared_time_slots_optimized)
if res["status"] == OptimizerSolutionStatus.SUCCESS:
selected_action = res["changed_action_dict_by_job_code"][job_code]
obs, reward, done, result_info = self.step_by_action_dict(action_dict=selected_action)
if reward == 0.11:
travel_difference_score = self._calc_travel_minutes_difference(
shared_time_slots_optimized, res["slots"], job_code
)
reward += travel_difference_score
reward = reward * (self.inplanning_job_count / len(self.jobs))
return obs, reward, done, result_info
# else:
reward = -1.5
# reward = -6
result_info = {"message": f"found no action in the specified slots"}
done = not self._move_to_next_unplanned_job()
# done = True
# if (self.trial_count % 1 == 0):
# log.info(
# f"Env done with Failure: self.inplanning_job_count = {self.inplanning_job_count}, trial_step_count = {self.trial_step_count}, reward={reward:.2f}, nbr_inplanning={self.nbr_inplanning}, trial={self.trial_count}"
# )
obs = self._get_observation()
return (obs, reward, done, result_info)
def step_future_for_job_actions_TODO(self, action):
# action [1, 0, 0]. One-hot encoding: 1 means this worker takes the job.
self.trial_step_count += 1
try:
a_dict = self.decode_action_into_dict(action)
if self.run_mode == EnvRunModeType.REPLAY:
a_dict.is_forced_action = True
else:
a_dict.is_forced_action = False
except LookupError:
log.debug("LookupError: failed to parse action")
# add_result = False
info = {"message": f"LookupError: failed to parse action into acction_dict {action}"}
obs = self._get_observation()
if self.trial_step_count > len(self.jobs) * 2:
done = True
else:
done = False
reward = -1
return (obs, reward, done, info)
obs, reward, done, info = self.step_by_action_dict(action_dict=a_dict)
return obs, reward, done, info
def render(self, mode="human"):
pass
def close(self):
pass
def get_travel_time_jobs_in_slot(self, slot, all_jobs):
prev_time = 0
next_time = 0
inside_time = []
if len(all_jobs) < 1:
return 0, 0, []
if slot.start_location[2] == LocationType.JOB:
prev_time = self.travel_router.get_travel_minutes_2locations(
[
self.jobs_dict[all_jobs[0]].location.geo_longitude,
self.jobs_dict[all_jobs[0]].location.geo_latitude,
],
[slot.start_location[0], slot.start_location[1]],
)
if slot.end_location[2] == LocationType.JOB:
next_time = self.travel_router.get_travel_minutes_2locations(
[
self.jobs_dict[all_jobs[-1]].location.geo_longitude,
self.jobs_dict[all_jobs[-1]].location.geo_latitude,
],
[slot.end_location[0], slot.end_location[1]],  # travel from the last job to the slot's end location
)
for job_i in range(len(all_jobs) - 1):
inside_time.append(
self.travel_router.get_travel_minutes_2locations(
[
self.jobs_dict[all_jobs[job_i]].location.geo_longitude,
self.jobs_dict[all_jobs[job_i]].location.geo_latitude,
],
[
self.jobs_dict[all_jobs[job_i + 1]].location.geo_longitude,
self.jobs_dict[all_jobs[job_i + 1]].location.geo_latitude,
],
)
)
return prev_time, next_time, inside_time
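# Illustrative sketch (made-up job codes): for a slot whose start and end locations are both of
# LocationType.JOB and all_jobs = ["J1", "J2", "J3"], the return value is conceptually
#   (travel(start_loc, J1), travel(J3, end_loc), [travel(J1, J2), travel(J2, J3)])
# so the slot's total travel is prev + next + sum(inside).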
def _get_travel_time_2_job_indices(self, job_index_1, job_index_2):
job_1 = self.jobs[job_index_1]
job_2 = self.jobs[job_index_2]
return self._get_travel_time_2jobs(job_1, job_2)
def _get_travel_time_2jobs(self, job_1, job_2):
return self.travel_router.get_travel_minutes_2locations(
[
job_1.location.geo_longitude,
job_1.location.geo_latitude,
],
[
job_2.location.geo_longitude,
job_2.location.geo_latitude,
],
)
def set_searching_worker_candidates(self, final_job: BaseJob):
if final_job.job_code in kandbox_config.DEBUGGING_JOB_CODE_SET:
log.debug("final_job.job_code in kandbox_config.DEBUGGING_JOB_CODE_SET?")
# if type(final_job.requested_primary_worker_code) == type(["a"]):
# log.debug("WHY?")
# because rule checkers are mutating job, I copy one here.
curr_job = copy.deepcopy(final_job)
# orig_scheduled_worker_codes = curr_job.scheduled_worker_codes
shared_worker_count = set()
if curr_job.planning_status != JobPlanningStatus.UNPLANNED:
shared_worker_count.add(len(curr_job.scheduled_worker_codes))
# scheduled_duration_minutes = job.requested_duration_minutes
min_number_of_workers = max_number_of_workers = 1
try:
min_number_of_workers = int(curr_job.flex_form_data["min_number_of_workers"])
max_number_of_workers = int(curr_job.flex_form_data["max_number_of_workers"])
except:
log.error(
f"job {final_job.job_code} has no min_number_of_workers or max_number_of_workers and we assumed as 1")
pass
for nbr_worker in range(
min_number_of_workers,
max_number_of_workers + 1,
):
shared_worker_count.add(nbr_worker)
all_qualified_worker_codes = set()
# evaluate_DateTimeTolerance = KandboxRuleToleranceRule()
for worker_id in self.workers_dict.keys():
is_valid = True
for a_rule in self.rule_set_worker_check:
curr_job.scheduled_worker_codes = [worker_id]
check_result = a_rule.evalute_normal_single_worker_n_job(self, job=curr_job)
if check_result.score == -1:
is_valid = False
break
if is_valid:
all_qualified_worker_codes.add(worker_id)
# KandboxRulePluginWithinWorkingHour()
# KandboxRulePluginSufficientTravelTime
# KandboxRulePluginLunchBreak
# TODO, permenant pair & secondary only techs. @xingtong
candidate_dict = {}
# different tiers of scoring
SCORE_Primary = 1000_000
SCORE_Permenant_Pair = 10_000 # It has priority if tech is permanently paired.
SCORE_History = 9_000
SCORE_Solo = 1_000
SCORE_Share = {2: 100, 3: 80, 4: 60, 5: 30}
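# Illustrative ranking (made-up counts): a historical combination ("W001",) that served this location
# 7 times and contains the requested primary worker scores 7 + 1000_000; any other historical
# combination scores 7 + 9_000; a fresh qualified solo worker scores 1_000; shared combinations of
# 2/3/4/5-or-more workers score 100/80/60/30.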
for k, v in curr_job.location.historical_serving_worker_distribution.items():
one_candidate_tuple = tuple(k.split(";"))
new_score = v
is_valid = True
for w_code in one_candidate_tuple:
if w_code not in all_qualified_worker_codes:
# Though this worker served in history, he is no longer qualified for this job
is_valid = False
break
if w_code == curr_job.requested_primary_worker_code:
new_score += SCORE_Primary
else:
new_score = v + SCORE_History
if not is_valid:
continue
if self.workers_dict[one_candidate_tuple[0]].flex_form_data["is_assistant"]:
continue
# Here I exclude those historical combinations no longer valid anymore
if len(one_candidate_tuple) < min_number_of_workers:
continue
if len(one_candidate_tuple) > max_number_of_workers:
continue
# If 4 technicians served in the history, I also search for 4 tech combination
shared_worker_count.add(len(one_candidate_tuple))
candidate_dict[one_candidate_tuple] = new_score
# In case in the future, multiple requested worker?
curr_job.requested_primary_worker_code = copy.deepcopy(
final_job.requested_primary_worker_code
)
if min_number_of_workers <= 1:
for w_code in all_qualified_worker_codes:
if (not self.workers_dict[w_code].flex_form_data["is_assistant"]) and (
(w_code,) not in candidate_dict.keys()
):
candidate_dict[(w_code,)] = SCORE_Solo
# It may overwrite the historical setting, but primary gets the highest priority
try:
candidate_dict[(curr_job.requested_primary_worker_code,)] = SCORE_Primary
except:
log.debug("WHY?")
primary_worker_codes = (curr_job.requested_primary_worker_code,)
if primary_worker_codes not in candidate_dict.keys():
if min_number_of_workers <= 1:
log.debug(f"requested worker not added! {primary_worker_codes}")
candidate_dict[primary_worker_codes] = SCORE_Primary
if self.workers_dict[curr_job.requested_primary_worker_code].belongs_to_pair is not None:
primary_worker_codes = self.workers_dict[
curr_job.requested_primary_worker_code
].belongs_to_pair
if primary_worker_codes not in candidate_dict.keys():
log.debug(f"requested worker pair not added yet! {primary_worker_codes}")
candidate_dict[primary_worker_codes] = SCORE_Primary
qualified_secondary_worker_codes = sorted(
list(all_qualified_worker_codes - set(primary_worker_codes))
)
shared_worker_count_to_check = shared_worker_count - {1}
for share_count in shared_worker_count_to_check:
# Primary must be default when shared.
secondary_share_count = share_count - len(primary_worker_codes)
if secondary_share_count < 1:
continue
for combined_worker in list(
combinations(qualified_secondary_worker_codes, secondary_share_count)
):
new_combined = primary_worker_codes + combined_worker
if new_combined in self.permanent_pairs:
candidate_dict[new_combined] = SCORE_Permenant_Pair
else:
share_count_score_index = share_count if share_count <= 5 else 5
candidate_dict[new_combined] = SCORE_Share[share_count_score_index]
# if share_count > 2:
# log.info(f"{final_job.job_code}--> {new_combined}, share_count {share_count} > 2, be careful ...")
# # continue
# Finally I sort all candidates by their scores
# I take top 80 only for now.
final_job_workers_ranked = [
k
for k, v in sorted(
candidate_dict.items(),
key=lambda item: item[1],
reverse=True,
)
][0:80]
if len(candidate_dict) > 20:
log.debug(
f"{final_job.job_code}, candidates_length = {len(candidate_dict)}, be careful ..."
) # --> {candidate_dict}
final_job.searching_worker_candidates = final_job_workers_ranked
# [(curr_job.requested_primary_worker_code,)]
# curr_job.scheduled_worker_codes = orig_scheduled_worker_codes
return final_job_workers_ranked
def _get_sorted_worker_code_list(self, job_index):
# TODO
w_set = set()
if len(self.jobs) >= 1:
for w_list in self.jobs[self.current_job_i].searching_worker_candidates:
for w_code in w_list:
w_set.add(w_code)
return w_set
# return list(self.workers_dict.keys())[0 : self.config["nbr_observed_slots"]]
def _check_action_on_rule_set(
self, a_dict: ActionDict, unplanned_job_codes: List = []
) -> SingleJobDropCheckOutput:
if (a_dict.scheduled_start_minutes < self.get_env_planning_horizon_start_minutes()) or (
a_dict.scheduled_start_minutes > self.get_env_planning_horizon_end_minutes()
):
return SingleJobDropCheckOutput(
status_code=ActionScoringResultType.ERROR,
score=0,
travel_time=15,
messages=[
ActionEvaluationScore(
score=-1,
score_type="Planning Window",
message=f"scheduled_start_minutes {a_dict.scheduled_start_minutes} out of horizon ({self.get_env_planning_horizon_start_minutes()} - {self.get_env_planning_horizon_end_minutes()})",
metrics_detail={},
)
],
)
result_info = SingleJobDropCheckOutput(
status_code=ActionScoringResultType.OK,
score=0,
travel_time=15,
messages=[],
)
if len(unplanned_job_codes) > 0:
self.unplanned_job_codes = unplanned_job_codes
for rule in self.rule_set:
rule_checked = rule.evalute_action_normal(env=self, action_dict=a_dict)
rule_checked.score_type = rule.title
result_info.messages.append(rule_checked) # rule_checked_dict
if (rule_checked.score < 1) & (result_info.status_code == ActionScoringResultType.OK):
# Reduce from OK to Warning
result_info.status_code = ActionScoringResultType.WARNING
# Else it is already -1. keep it -1
if rule_checked.score == -1:
# Reduce overall result to ERROR, and stays at ERROR
result_info.status_code = ActionScoringResultType.ERROR
self.unplanned_job_codes = []
return result_info
def _move_to_next_unplanned_job(self):
self.current_job_i += 1
# if self.current_job_i >= len(self.jobs):
# return True
for step_i in range(len(self.jobs)):
if self.current_job_i >= len(self.jobs):
self.current_job_i = 0
if self.jobs[self.current_job_i].planning_status == JobPlanningStatus.UNPLANNED:
self.current_job_code = self.jobs[self.current_job_i].job_code
return True
self.current_job_i += 1
return False
def _move_to_next_not_replayed_job(self) -> str:
"""Find next not replay, but inplanning/planned job"""
self.current_job_i += 1
# if self.current_job_i >= len(self.jobs):
# return True
for step_i in range(len(self.jobs)):
if self.current_job_i >= len(self.jobs):
self.current_job_i = 0
if (self.jobs[self.current_job_i].planning_status != JobPlanningStatus.UNPLANNED) and (
not self.jobs[self.current_job_i].is_replayed
):
return self.jobs[self.current_job_i].job_code
self.current_job_i += 1
return None
def _get_reward(self):
# This only is based on inplanning
if self.nbr_inplanning == 0:
return 0
job_count = 0
inplanning_job_count = 0
for j in self.jobs:
if not j.is_active:
continue
job_count += 1
if j.planning_status != JobPlanningStatus.UNPLANNED:
inplanning_job_count += 1
if job_count <= 0:
return 0
reward = inplanning_job_count / job_count
return reward + 0.1
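# Worked example (illustrative numbers): with 10 active jobs of which 8 are in planning,
# the reward is 8 / 10 + 0.1 = 0.9; if nothing is in planning yet the method returns 0.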
""" 2020-10-17 14:43:19 Not good.
travel_reward = (
(self.expected_travel_time_so_far / self.nbr_inplanning)
- (self.total_travel_time / self.total_travel_worker_job_count)
) / (self.expected_travel_time_so_far / self.nbr_inplanning)
"""
if travel_reward > 1: # Very unlikely
travel_reward = 1
if travel_reward < -0.5:
travel_reward = -0.5
travel_reward = 0
reward = (nbr_inplanning * 0.2 / len(self.jobs)) + travel_reward
if self.nbr_inplanning == len(self.jobs):
reward = reward + 2 # Additional points
def _get_observation(self):
# return self._get_observation_numerical()
return self._get_observation_slot_list_dict()
# return self._get_observation_slot_list_tuple()
# return self._get_observation_slot_list_tuple_toy()
try:
return self._get_observation_numerical()
except:
log.error("Error in _get_observation_numerical , returning empty observation {}")
return rl_env.action_space.sample()
# return {}
def _get_observation_numerical(self):
# agent_vector is current observation.
obs_dict = {}
if (len(self.jobs) < 1) | (len(self.workers) < 1):
log.error(
"Error, no workers or no jobs , returning empty observation (obs_dict = empty)"
)
# raise LookupError("Error, Env has no workers or no jobs , returning obs_dict = empty")
return obs_dict
self.config["NBR_FEATURE_PER_TECH"] = 19
# NBR_FEATURE_WORKER_ONLY = self.config['NBR_FEATURE_WORKER_ONLY']
# NBR_FEATURE_CUR_JOB_n_OVERALL = self.config['NBR_FEATURE_CUR_JOB_n_OVERALL']
# NBR_WORK_TIME_SLOT_PER_DAY = self.config['NBR_FEATURE_CUR_JOB_n_OVERALL'] # 1
self.current_observed_worker_list = self._get_sorted_worker_code_list(self.current_job_i)
curr_work_time_slots = []
agent_vector = np.zeros(
self.config["NBR_FEATURE_PER_TECH"]
* self.config["nbr_of_days_planning_window"]
* len(self.current_observed_worker_list)
)
o_start_longitude = np.zeros(
self.config["nbr_observed_slots"] * self.config["nbr_of_days_planning_window"]
)
o_start_latitude = np.zeros(
self.config["nbr_observed_slots"] * self.config["nbr_of_days_planning_window"]
)
# o_max_available_working_slot_duration = np.zeros(self.config['nbr_observed_slots']* self.config['nbr_of_days_planning_window'])
o_end_longitude = np.zeros(
self.config["nbr_observed_slots"] * self.config["nbr_of_days_planning_window"]
)
o_end_latitude = np.zeros(
self.config["nbr_observed_slots"] * self.config["nbr_of_days_planning_window"]
)
o_average_longitude = np.zeros(
self.config["nbr_observed_slots"] * self.config["nbr_of_days_planning_window"]
)
o_average_latitude = np.zeros(
self.config["nbr_observed_slots"] * self.config["nbr_of_days_planning_window"]
)
# =
o_nbr_of_jobs_per_worker_day = np.zeros(
self.config["nbr_observed_slots"] * self.config["nbr_of_days_planning_window"]
)
o_total_travel_minutes = np.zeros(
self.config["nbr_observed_slots"] * self.config["nbr_of_days_planning_window"]
)
o_first_job_start_minutes = np.zeros(
self.config["nbr_observed_slots"] * self.config["nbr_of_days_planning_window"]
)
o_total_occupied_duration = np.zeros(
self.config["nbr_observed_slots"] * self.config["nbr_of_days_planning_window"]
)
o_total_unoccupied_duration = np.zeros(
self.config["nbr_observed_slots"] * self.config["nbr_of_days_planning_window"]
)
# =
o_max_available_working_slot_duration = np.zeros(
self.config["nbr_observed_slots"] * self.config["nbr_of_days_planning_window"]
)
o_max_available_working_slot_start = np.zeros(
self.config["nbr_observed_slots"] * self.config["nbr_of_days_planning_window"]
)
o_max_available_working_slot_end = np.zeros(
self.config["nbr_observed_slots"] * self.config["nbr_of_days_planning_window"]
)
o_max_unoccupied_rest_slot_duration = np.zeros(
self.config["nbr_observed_slots"] * self.config["nbr_of_days_planning_window"]
)
o_total_available_working_slot_duration = np.zeros(
self.config["nbr_observed_slots"] * self.config["nbr_of_days_planning_window"]
)
# + NBR_FEATURE_WORKER_ONLY + NBR_FEATURE_CUR_JOB_n_OVERALL ) # .tolist()
# Firstly the assigned job stats
for current_job_worker_index in range(len(self.current_observed_worker_list)):
# setup home (start,end) GPS
# x,y = get_normalized_location(self.workers_dict[worker_index]['home_gps'])
worker = self.workers_dict[self.current_observed_worker_list[current_job_worker_index]]
worker_index = worker.worker_index
# assigned time slots for each worker in agent_vector
# curr_total_free_duration = self.workers_dict[worker_index]['total_free_duration']
for day_i in range(self.config["nbr_of_days_planning_window"]):
# Features for this worker_day_assignment
f_start_longlat = self.get_start_gps_for_worker_day(worker, day_i)
f_end_longlat = self.get_end_gps_for_worker_day(worker, day_i)
f_average_longlat = self.get_start_gps_for_worker_day(worker, day_i)
f_nbr_of_jobs = 0
f_total_travel_minutes = 0
# I use 0--24 hours for each day. Do not accumulate.
f_first_job_start_minutes = self.config["minutes_per_day"]
f_last_job_end_minutes = self.config["minutes_per_day"] # (day_i + 1) *
f_total_occupied_duration = 0 # Sum up from assigned_jobs
f_total_unoccupied_duration = self.config["minutes_per_day"] * 1
f_max_available_working_slot_duration = 0
f_max_available_working_slot_start = 0
f_max_available_working_slot_end = 0
f_min_available_working_slot_duration = 0 # kandbox_config.KANDBOX_MAXINT
f_min_available_working_slot_start = 0
f_min_available_working_slot_end = 0
f_max_unoccupied_rest_slot_duration = 0
f_total_available_working_slot_duration = 0 # Sum up from free_slots
last_job_end = day_i * self.config["minutes_per_day"]
# for assigned_job_index_info in worker[ "assigned_jobs" ]: # TODO, performance, already sorted, not necessary to loop through all.
# for day_i in range(self.config["nbr_of_days_planning_window"]):
job_code_list_today = []
travel_minutes_list_today = []
overlap_slots = self.slot_server.get_overlapped_slots(
worker_id=worker.worker_code,
start_minutes=day_i * 1440,
end_minutes=(day_i + 1) * 1440,
)
job_slot_prev_travel_minutes = 0
for a_slot in overlap_slots:
if a_slot.slot_type == TimeSlotType.JOB_FIXED:
assigned_job_code = a_slot.assigned_job_codes[0]
job_code_list_today.append(a_slot.assigned_job_codes[0])
travel_minutes_list_today.append(job_slot_prev_travel_minutes)
# TODO, performance, already sorted, not necessary to loop through all.
elif a_slot.slot_type == TimeSlotType.FLOATING:
(
prev_travel,
next_travel,
inside_travel,
) = self.get_travel_time_jobs_in_slot(a_slot, a_slot.assigned_job_codes)
                        free_minutes = (
                            a_slot.end_minutes
                            - a_slot.start_minutes
                            - (prev_travel + next_travel + sum(inside_travel))
                        )
f_total_available_working_slot_duration += free_minutes
if free_minutes > f_max_available_working_slot_duration:
f_max_available_working_slot_duration = free_minutes
f_max_available_working_slot_start = a_slot.start_minutes
f_max_available_working_slot_end = a_slot.end_minutes
if f_min_available_working_slot_duration == 0:
f_min_available_working_slot_duration = free_minutes
f_min_available_working_slot_start = a_slot.start_minutes
f_min_available_working_slot_end = a_slot.end_minutes
elif free_minutes < f_min_available_working_slot_duration:
f_min_available_working_slot_duration = free_minutes
f_min_available_working_slot_start = a_slot.start_minutes
f_min_available_working_slot_end = a_slot.end_minutes
if len(a_slot.assigned_job_codes) > 0:
job_code_list_today += list(a_slot.assigned_job_codes)
travel_minutes_list_today += [prev_travel] + inside_travel
                        job_slot_prev_travel_minutes = (
                            next_travel  # Assume this is the travel leading to the next job or back home
                        )
for job_seq, assigned_job_code in enumerate(job_code_list_today): #
try:
the_job = self.jobs_dict[assigned_job_code]
except KeyError:
log.error(f"JOB:{assigned_job_code} is not found in ENV->self.jobs_dict")
continue
# This job is assigned to this day.
if f_first_job_start_minutes < the_job.scheduled_start_minutes:
f_first_job_start_minutes = the_job.scheduled_start_minutes
f_start_longlat = [
the_job.location.geo_longitude,
the_job.location.geo_latitude,
] #
if (
the_job.scheduled_start_minutes + the_job.scheduled_duration_minutes
< f_last_job_end_minutes
):
f_last_job_end_minutes = (
the_job.scheduled_start_minutes + the_job.scheduled_duration_minutes
)
f_end_longlat = [
the_job.location.geo_longitude,
the_job.location.geo_latitude,
] #
f_average_longlat = [
((f_average_longlat[0] * f_nbr_of_jobs) + the_job.location.geo_longitude)
/ (f_nbr_of_jobs + 1),
((f_average_longlat[1] * f_nbr_of_jobs) + the_job.location.geo_latitude)
/ (f_nbr_of_jobs + 1),
]
f_nbr_of_jobs += 1
f_total_travel_minutes += travel_minutes_list_today[job_seq]
f_total_occupied_duration += the_job.scheduled_duration_minutes
f_total_unoccupied_duration -= (
the_job.scheduled_duration_minutes + travel_minutes_list_today[job_seq]
)
curr_rest_duration_before_job = (
the_job.scheduled_start_minutes
- travel_minutes_list_today[job_seq]
- last_job_end
)
if curr_rest_duration_before_job > f_max_unoccupied_rest_slot_duration:
f_max_unoccupied_rest_slot_duration = curr_rest_duration_before_job
if f_total_available_working_slot_duration < 0:
# log.debug("Error: f_total_available_working_slot_duration < 0")
f_total_available_working_slot_duration = 0
curr_worker_day_i = (
current_job_worker_index * self.config["nbr_of_days_planning_window"] + day_i
)
o_start_longitude[curr_worker_day_i] = f_start_longlat[
0
] # current_job_worker_index, day_i
o_start_latitude[curr_worker_day_i] = f_start_longlat[1]
o_end_longitude[curr_worker_day_i] = f_end_longlat[0]
o_end_latitude[curr_worker_day_i] = f_end_longlat[1]
o_average_longitude[curr_worker_day_i] = f_average_longlat[0]
o_average_latitude[curr_worker_day_i] = f_average_longlat[1]
#
o_nbr_of_jobs_per_worker_day[curr_worker_day_i] = f_nbr_of_jobs
o_total_travel_minutes[curr_worker_day_i] = f_total_travel_minutes
o_first_job_start_minutes[curr_worker_day_i] = f_first_job_start_minutes
o_total_occupied_duration[curr_worker_day_i] = f_total_occupied_duration
o_total_unoccupied_duration[curr_worker_day_i] = f_total_unoccupied_duration
#
o_max_available_working_slot_duration[
curr_worker_day_i
] = f_max_available_working_slot_duration
o_max_available_working_slot_start[
curr_worker_day_i
] = f_max_available_working_slot_start
o_max_available_working_slot_end[
curr_worker_day_i
] = f_max_available_working_slot_end
o_max_unoccupied_rest_slot_duration[
curr_worker_day_i
] = f_max_unoccupied_rest_slot_duration
o_total_available_working_slot_duration[
curr_worker_day_i
] = f_total_available_working_slot_duration
# Not useful, deprecated. Use obs_tuple instead 2020-09-18 08:29:37
agent_vector[
current_job_worker_index
* self.config["NBR_FEATURE_PER_TECH"]
* self.config["nbr_of_days_planning_window"]
+ day_i
* self.config["NBR_FEATURE_PER_TECH"]: current_job_worker_index
* self.config["NBR_FEATURE_PER_TECH"]
* self.config["nbr_of_days_planning_window"]
+ (day_i + 1) * self.config["NBR_FEATURE_PER_TECH"]
] = [
f_start_longlat[0],
f_start_longlat[1],
f_end_longlat[0],
f_end_longlat[1],
f_average_longlat[0],
f_average_longlat[1],
f_nbr_of_jobs,
f_total_travel_minutes,
f_first_job_start_minutes,
f_total_occupied_duration,
f_total_unoccupied_duration,
f_max_available_working_slot_duration,
f_max_available_working_slot_start,
f_max_available_working_slot_end,
f_min_available_working_slot_duration,
f_min_available_working_slot_start,
f_min_available_working_slot_end,
f_max_unoccupied_rest_slot_duration,
f_total_available_working_slot_duration,
]
# worker_feature_begin_offset = NBR_FEATURE_PER_TECH*worker_index + NBR_DAYS*MAX_ASSIGNED_TIME_SLOT_PER_DAY*NBR_WORK_TIME_SLOT_PER_DAY + NBR_DAYS
# agent_vector[ worker_feature_begin_offset + 2] = curr_total_free_duration
# agent_vector[worker_feature_begin_offset + 3] = curr_max_free_slot_duration
"""
# Secondary all worker statistics
for current_job_worker_index in self.current_observed_worker_list:
# setup home (start,end) GPS
# x,y = get_normalized_location(self.workers_dict[worker_index]['home_gps'])
worker = self.workers_dict[self.current_observed_worker_list[current_job_worker_index]]
worker_index = worker['worker_index']
agent_vector[NBR_FEATURE_PER_TECH*worker_index + 0: \
NBR_FEATURE_PER_TECH*worker_index + NBR_WORK_TIME_SLOT_PER_DAY] \
= [ 0, 0 , worker['geo_longitude'] , worker['geo_latitude'] ]
agent_vector[NBR_FEATURE_PER_TECH*(worker_index+1) - 4: \
NBR_FEATURE_PER_TECH*(worker_index+1) - 0 ] \
= [ 0, 0 , x,y]
for worker_index in range(len(self.workers_dict)):
# Historical customer visit GPS Gaussian , Sigma, Gamma, 2 DIMENSIONAL
# and others 4
agent_vector[len(self.workers_dict)*NBR_FEATURE_PER_TECH + worker_index] \
= self.workers_dict[worker_index]['level'] / 5
"""
# Thirdly the job statistics
# Now append the visit information AS THE 2nd half.
# job_feature_start_index = len(self.workers_dict)*NBR_FEATURE_PER_TECH + NBR_FEATURE_WORKER_ONLY
if self.current_job_i >= len(self.jobs):
new_job_i = len(self.jobs) - 1
else:
new_job_i = self.current_job_i
# obs_dict['worker_job_assignment_matrix'] = agent_vector
obs_dict["assignment.start_longitude"] = o_start_longitude
obs_dict["assignment.start_latitude"] = o_start_latitude
obs_dict[
"assignment.max_available_working_slot_duration"
] = o_max_available_working_slot_duration
# obs_dict['job.features'] = np.zeroes(3)
# obs_dict['job.mandatory_minutes_minmax_flag'] = np.zeros(1)
obs_dict[
"job.mandatory_minutes_minmax_flag"
] = 1 # self.jobs[new_job_i][ "mandatory_minutes_minmax_flag" ]
obs_dict["job.requested_start_minutes"] = np.zeros(1)
obs_dict["job.requested_start_minutes"][0] = self.jobs[
new_job_i
].requested_start_min_minutes
if obs_dict["job.requested_start_minutes"][0] < 0:
obs_dict["job.requested_start_minutes"][0] = 0
elif (
obs_dict["job.requested_start_minutes"][0]
>= self.config["minutes_per_day"] * self.config["nbr_of_days_planning_window"] * 2
):
obs_dict["job.requested_start_minutes"][0] = (
self.config["minutes_per_day"] * self.config["nbr_of_days_planning_window"] * 2 - 1
)
obs_dict["job.requested_duration_minutes"] = np.zeros(1)
obs_dict["job.requested_duration_minutes"][0] = self.jobs[
new_job_i
].requested_duration_minutes
obs_dict["job.geo_longitude"] = np.zeros(1)
obs_dict["job.geo_longitude"][0] = self.jobs[new_job_i].location.geo_longitude
obs_dict["job.geo_latitude"] = np.zeros(1)
obs_dict["job.geo_latitude"][0] = self.jobs[new_job_i].location.geo_latitude
obs_tuple = (
o_start_longitude,
o_start_latitude,
o_end_longitude,
o_end_latitude,
o_average_longitude,
o_average_latitude,
#
o_nbr_of_jobs_per_worker_day,
o_total_travel_minutes,
o_first_job_start_minutes,
o_total_occupied_duration,
o_total_unoccupied_duration,
#
o_max_available_working_slot_duration,
o_max_available_working_slot_start,
o_max_available_working_slot_end,
o_max_unoccupied_rest_slot_duration,
o_total_available_working_slot_duration,
#
1, # self.jobs[new_job_i]["mandatory_minutes_minmax_flag"],
obs_dict["job.requested_start_minutes"],
obs_dict["job.requested_duration_minutes"],
obs_dict["job.geo_longitude"],
obs_dict["job.geo_latitude"],
)
"""
f_start_longlat[0], f_start_longlat[1], f_end_longlat[0], f_end_longlat[1], f_average_longlat[0],
f_average_longlat[1], f_nbr_of_jobs, f_total_travel_minutes, f_first_job_start_minutes,
f_total_occupied_duration, f_total_unoccupied_duration, f_max_available_working_slot_duration, f_max_available_working_slot_start, f_max_available_working_slot_end,
f_min_available_working_slot_duration, f_min_available_working_slot_start, f_min_available_working_slot_end, f_max_unoccupied_rest_slot_duration, f_total_available_working_slot_duration
"""
"""
visit_vector = list(range(5))
# Location of the new job is added to the end of agent_vector.
visit_vector[0] = self.jobs[self.current_job_i]['requested_duration_minutes']
visit_vector[1] = self.jobs[self.current_job_i]['mandatory_minutes_minmax_flag']
# visit_vector[2] = self.jobs[self.current_job_i]['preferred_minutes_minmax_flag']
visit_vector[3] = self.jobs[self.current_job_i]['requested_start_minutes']
visit_vector[4] = self.jobs[self.current_job_i]['requested_start_minutes']
"""
# obs_dict['job.requested_start_max_minutes'] = self.jobs[self.current_job_i]['requested_start_minutes']
# agent_vector[job_feature_start_index +3] = self.jobs[self.current_job_i]['expected_job_day'] / 30
# agent_vector[job_feature_start_index +4] = self.jobs[self.current_job_i]['tolerated_day_min'] /10
# agent_vector[job_feature_start_index +5] = self.jobs[self.current_job_i]['tolerated_day_max'] /10
# visit_vector[ 5] = 0 # self.jobs[self.current_job_i]['customer_level']
# visit_vector[ 6] = 0 # self.jobs[self.current_job_i]['product_level']
# obs_dict['current_job_vector'] = visit_vector
# Finished looping through all workers
return obs_tuple # OrderedDict(obs_dict)
# ***************************************************************
# # Internal functions
# ***************************************************************
def _set_spaces(self):
obs_slot_low = (
[self.config["geo_longitude_min"]] * 4 + [self.config["geo_latitude_min"]] * 4 + [0] * 9
)
obs_slot_high = (
[self.config["geo_longitude_max"]] * 4
+ [self.config["geo_latitude_max"]] * 4
+ [self.config["max_nbr_of_jobs_per_day_worker"] + 1]
+ [self.config["minutes_per_day"]] * 3
+ [self.config["minutes_per_day"] * self.config["nbr_of_days_planning_window"]] * 3
+ [1]
+ [self.config["nbr_observed_slots"]]
)
self.obs_slot_space = spaces.Box(
low=np.array(obs_slot_low),
high=np.array(obs_slot_high),
# shape=(len(obs_slot_high),),
dtype=np.float32,
)
# self.obs_slot_space = spaces.Tuple(
# (
# # start_loc, end_loc, avg_loc
# spaces.Box(
# low=self.config["geo_longitude_min"],
# high=self.config["geo_longitude_max"],
# shape=(4,),
# dtype=np.float32,
# ),
# spaces.Box(
# low=self.config["geo_latitude_min"],
# high=self.config["geo_latitude_max"],
# shape=(4,),
# dtype=np.float32,
# ),
# # f_nbr_of_jobs,
# spaces.Box(
# low=0,
# high=self.config["max_nbr_of_jobs_per_day_worker"] + 1,
# shape=(1,),
# dtype=np.float32,
# ),
# # f_slot_duration, f_total_travel_minutes,f_total_occupied_duration, # f_total_unoccupied_duration, f_max_available_working_slot_duration
# spaces.Box(
# low=0,
# high=self.config["minutes_per_day"],
# shape=(3,),
# dtype=np.float32,
# ),
# # f_slot_start_minutes, f_first_job_start_minutes, # f_max_available_working_slot_start, f_max_available_working_slot_end,
# spaces.Box(
# low=0,
# high=self.config["minutes_per_day"]
# * self.config["nbr_of_days_planning_window"],
# shape=(3,),
# dtype=np.float32,
# ),
# # f_max_unoccupied_rest_slot_duration --- Looks like current RL can only do working hour dispatching.
# # f_min_available_working_slot_duration,
# # f_min_available_working_slot_start,
# # f_min_available_working_slot_end,
# # spaces.Discrete(2), # Mandatory start time or not
# spaces.Discrete(2), # valid slot indicator. ==1
# spaces.Discrete(self.config["nbr_observed_slots"]), # technician ID
# )
# )
obs_job_low = (
[self.config["geo_longitude_min"]]
+ [self.config["geo_latitude_min"]]
+ [0]
+ [0 - (self.PLANNING_WINDOW_LENGTH * 5)] * 3
+ [0] * 4
)
obs_job_high = (
[self.config["geo_longitude_max"]]
+ [self.config["geo_latitude_max"]]
+ [self.config["minutes_per_day"]]
+ [self.PLANNING_WINDOW_LENGTH * 5] * 3
+ [self.config["nbr_observed_slots"]] * 2
+ [1] * 2
)
self.obs_job_space = spaces.Box(
low=np.array(obs_job_low),
high=np.array(obs_job_high),
# shape=(len(obs_job_high),),
dtype=np.float32,
)
# self.obs_job_space = spaces.Tuple(
# (
# # start_loc, end_loc, avg_loc
# spaces.Box(
# low=self.config["geo_longitude_min"],
# high=self.config["geo_longitude_max"],
# shape=(1,),
# dtype=np.float32,
# ),
# spaces.Box(
# low=self.config["geo_latitude_min"],
# high=self.config["geo_latitude_max"],
# shape=(1,),
# dtype=np.float32,
# ),
# # f_job_duration,
# spaces.Box(
# low=0,
# high=self.config["minutes_per_day"],
# shape=(1,),
# dtype=np.float32,
# ),
# # min, max, f_job_start_minutes
# spaces.Box(
# low=0 - (self.PLANNING_WINDOW_LENGTH * 5),
# high=self.PLANNING_WINDOW_LENGTH * 5,
# shape=(3,),
# dtype=np.float32,
# ),
# # # min max nbr of share
# spaces.Box(
# low=0,
# high=self.config["nbr_observed_slots"],
# shape=(2,),
# dtype=np.float32,
# ),
# # f_max_unoccupied_rest_slot_duration --- Looks like current RL can only do working hour dispatching.
# # f_min_available_working_slot_duration,
# # f_min_available_working_slot_start,
# # f_min_available_working_slot_end,
# # spaces.Discrete(2), # Mandatory start time or not
# spaces.Discrete(2), # valid slot indicator. ==1
# spaces.Discrete(2), # fixed time requirements
# )
# )
self.observation_space = spaces.Dict(
{
"slots": Repeated(self.obs_slot_space, max_len=self.config["nbr_observed_slots"]),
"jobs": Repeated(self.obs_job_space, max_len=self.config["nbr_observed_slots"]),
}
)
# action_low = 0
# action_high = np.ones(MAX_OBSERVED_SLOTS)
# action_low[0:self.config['nbr_observed_slots']] = 0
# action_high[0:self.config['nbr_observed_slots']] = 1
self.action_space = spaces.Box(
low=0, high=1, shape=(self.config["nbr_observed_slots"],), dtype=np.float32
)
# self.action_space = spaces.Discrete(self.config["nbr_observed_slots"])
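        # Descriptive summary of the spaces built above (comment only, not executable):
        # - obs_slot_space is a 17-dim Box: 4 longitudes, 4 latitudes, a job count,
        #   three within-day durations, three planning-window minute offsets,
        #   a valid-slot flag and a technician index.
        # - obs_job_space is a 10-dim Box: longitude, latitude, duration,
        #   min/max/requested start offsets, min/max shared-worker counts and two flags.
        # - observation_space wraps both in RLlib `Repeated` containers with up to
        #   config["nbr_observed_slots"] entries; action_space is a Box with one
        #   score in [0, 1] per observed slot.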
def _get_observation_slot_list_tuple(self):
        if (len(self.jobs) < 1) or (len(self.workers) < 1):
log.error(
"Error, no workers or no jobs , returning empty observation (obs_dict = empty)"
)
# raise LookupError("Error, Env has no workers or no jobs , returning obs_dict = empty")
return (0, 0)
if self.current_job_i >= len(self.jobs):
self.current_job_i = 0
# new_job_i = self.current_job_i
current_capable_worker_set = self._get_sorted_worker_code_list(self.current_job_i)
sorted_slot_codes = sorted(list(self.slot_server.time_slot_dict.keys()))
slot_dict = self.slot_server.time_slot_dict
curr_work_time_slots = []
max_available_working_slot_duration = 0
viewed_slots = []
for slot_code in sorted_slot_codes:
# for work_time_i in range(len( self.workers_dict[worker_code]['assigned_jobs'] ) ): # nth assigned job time unit.
if slot_code in config.DEBUGGING_SLOT_CODE_SET:
log.debug(f"_get_observation_slot_list debug {slot_code}")
# if a_slot.slot_type in (TimeSlotType.JOB_FIXED, TimeSlotType.FLOATING):
try:
a_slot = self.slot_server.get_slot(self.redis_conn.pipeline(), slot_code)
if a_slot.worker_id not in current_capable_worker_set:
continue
the_assigned_codes = sorted(
a_slot.assigned_job_codes,
key=lambda jc: self.jobs_dict[jc].scheduled_start_minutes,
)
except KeyError as ke:
log.error(
f"unknown worker code, or unknown job codes to the env. slot_code = {slot_code}, error = {str(ke)}"
)
print(ke)
continue
except MissingSlotException as mse:
log.error(
f"MissingSlotException - unknow slot_code = {slot_code}, error = {str(mse)}"
)
print(mse)
continue
(
prev_travel,
next_travel,
inside_travel,
) = self.get_travel_time_jobs_in_slot(a_slot, a_slot.assigned_job_codes)
            # Note: if the slot has no assigned jobs, this returns (0, [], 0).
all_travels = [prev_travel] + inside_travel + [next_travel]
f_total_travel_minutes = sum(all_travels)
f_max_travel_minutes_index = sum(all_travels[:-1])
slot_vector = np.zeros(self.NBR_FEATURE_PER_SLOT)
slot_vector[0:2] = [
self.normalize(a_slot.start_location[0], "longitude"),
self.normalize(a_slot.start_location[1], "latitude"),
]
slot_vector[2] = 0 if a_slot.start_location[2] == "H" else 1
slot_vector[3:5] = [
self.normalize(a_slot.end_location[0], "longitude"),
self.normalize(a_slot.end_location[1], "latitude"),
]
slot_vector[5] = 0 if a_slot.end_location[2] == "H" else 1
slot_vector[6] = self.normalize(
len(a_slot.assigned_job_codes), "max_job_in_slot"
) # f_nbr_of_jobs = 0
slot_vector[7] = self.normalize(f_total_travel_minutes, "day_minutes_1440")
if len(a_slot.assigned_job_codes) < 1:
slot_vector[8:10] = slot_vector[0:2]
slot_vector[10:12] = slot_vector[3:5]
# all_location_0 = [a_slot.start_location[0], a_slot.end_location[0]]
# all_location_1 = [a_slot.start_location[1], a_slot.end_location[1]]
# [
# sum(all_location_0) / len(all_location_0),
# sum(all_location_1) / len(all_location_1),
# sum(all_location_0) / len(all_location_0),
# sum(all_location_1) / len(all_location_1),
# ]
else:
slot_vector[8:10] = [
self.normalize(
self.jobs_dict[a_slot.assigned_job_codes[0]].location[0], "longitude"
),
self.normalize(
self.jobs_dict[a_slot.assigned_job_codes[0]].location[1], "latitude"
),
]
slot_vector[10:12] = [
self.normalize(
self.jobs_dict[a_slot.assigned_job_codes[-1]].location[0], "longitude"
),
self.normalize(
self.jobs_dict[a_slot.assigned_job_codes[-1]].location[1], "latitude"
),
]
# all_location_0 = [
# self.jobs_dict[a_slot.assigned_job_codes[j]].location[0]
# for j in range(len(a_slot.assigned_job_codes))
# ]
# all_location_1 = [
# self.jobs_dict[a_slot.assigned_job_codes[j]].location[1]
# for j in range(len(a_slot.assigned_job_codes))
# ]
# slot_vector[10:12] = [
# sum(all_location_0) / len(all_location_0),
# sum(all_location_1) / len(all_location_1),
# ]
# f_total_occupied_duration # Sum up from assigned_jobs
f_total_occupied_duration = sum(
self.jobs_dict[a_slot.assigned_job_codes[j]].scheduled_duration_minutes
for j in range(len(a_slot.assigned_job_codes))
)
slot_vector[12] = self.normalize(f_total_occupied_duration, "day_minutes_1440")
if (
a_slot.end_minutes - a_slot.start_minutes - slot_vector[12]
> max_available_working_slot_duration
):
max_available_working_slot_duration = (
a_slot.end_minutes - a_slot.start_minutes - slot_vector[12]
)
slot_vector[13] = self.normalize(a_slot.start_minutes, "start_minutes")
slot_vector[14] = self.normalize(a_slot.end_minutes, "start_minutes")
# Start time within a day
slot_vector[15] = self.normalize(a_slot.start_minutes % 1440, "day_minutes_1440")
slot_vector[16] = self.normalize(a_slot.end_minutes % 1440, "day_minutes_1440")
available_free_minutes = (
a_slot.end_minutes - a_slot.start_minutes - slot_vector[12] - f_total_travel_minutes
)
slot_vector[17] = self.normalize(available_free_minutes, "day_minutes_1440")
slot_vector[18] = self.normalize(a_slot.start_overtime_minutes, "duration")
slot_vector[19] = self.normalize(a_slot.end_overtime_minutes, "duration")
# Secondary tech
slot_vector[20] = 0
# ( 0 if self.workers_dict[a_slot.worker_id].flex_form_data["is_assistant"] else 1 )
slot_vector[21] = 0 # self.workers_dict[a_slot.worker_id].worker_index
slot_vector[22] = (
1 if self.workers_dict[a_slot.worker_id].belongs_to_pair is not None else 0
)
slot_vector[23] = 0 # Future
curr_work_time_slots.append(slot_vector)
viewed_slots.append(a_slot)
        # Secondly, overall statistics about workers and in-planning jobs, including unplanned visits
unplanned_job_list = []
current_job_list = []
target_unplanned_job_i = None
for job_code, new_job in self.jobs_dict.items():
            if not new_job.is_active:  # events and appointments cannot be in UNPLANNED status anyway
continue
if new_job.planning_status != JobPlanningStatus.UNPLANNED:
continue
unplanned_job_vector = np.zeros(self.NBR_FEATURE_PER_UNPLANNED_JOB)
unplanned_job_vector[0] = self.normalize(new_job.requested_duration_minutes, "duration")
unplanned_job_vector[1] = self.normalize(
new_job.requested_start_min_minutes, "start_minutes"
)
unplanned_job_vector[2] = self.normalize(
new_job.requested_start_max_minutes, "start_minutes"
)
            # For FS/FT jobs, requested_start_min_minutes == requested_start_max_minutes, but they may refer to a different day (FT).
unplanned_job_vector[3] = self.normalize(
new_job.requested_start_minutes, "start_minutes"
)
unplanned_job_vector[4] = self.normalize(
new_job.requested_start_min_minutes % 1440, "day_minutes_1440"
)
unplanned_job_vector[5] = 0 # 1 if new_job.flex_form_data["ServiceType"] == "FS" else 0
unplanned_job_vector[6] = 0 # 1 if new_job.flex_form_data["ServiceType"] == "FT" else 0
unplanned_job_vector[7:9] = [
self.normalize(new_job.location[0], "longitude"),
self.normalize(new_job.location[1], "latitude"),
]
# Min shared tech
unplanned_job_vector[9] = self.normalize(
new_job.flex_form_data["min_number_of_workers"], "max_nbr_shared_workers"
)
unplanned_job_vector[10] = self.normalize(
new_job.flex_form_data["max_number_of_workers"], "max_nbr_shared_workers"
)
unplanned_job_vector[
11
] = 0 # self.workers_dict[ new_job.requested_primary_worker_code ].worker_index
if new_job.job_code == self.jobs[self.current_job_i].job_code:
target_unplanned_job_i = len(unplanned_job_list) - 1
current_job_list.append(unplanned_job_vector)
else:
unplanned_job_list.append(unplanned_job_vector)
if len(unplanned_job_list) + len(current_job_list) < 1:
log.error("No jobs to observe, done?")
return None # (0, 0, 0)
if len(curr_work_time_slots) < 1:
log.error("No slots to observe, done?")
return None # (0, 0, 0)
# Thirdly the current job being dispatched
overview_vector = np.zeros(self.NBR_FEATURE_OVERVIEW)
overview_vector[0] = 0 # target_unplanned_job_i
overview_vector[1] = max_available_working_slot_duration
overview_vector[2] = len(self.workers_dict.keys())
overview_vector[3] = len(curr_work_time_slots)
overview_vector[4] = len(self.jobs_dict.keys())
overview_vector[5] = len(unplanned_job_list)
overview_vector[6] = self.get_env_planning_horizon_start_minutes()
overview_vector[7] = int(self.get_env_planning_horizon_start_minutes() / 1440) * 1440
# Work, Rest days as one-hot
# Overtime usage?
#
obs_tuple = [
np.stack(curr_work_time_slots, axis=0),
np.stack(current_job_list + current_job_list, axis=0), # TODO + unplanned_job_list
# np.stack(, axis=0),
overview_vector,
viewed_slots,
]
return obs_tuple # OrderedDict(obs_dict)
def _get_observation_slot_list_tuple_toy(self):
curr_work_time_slots = self.curr_work_time_slots
current_job_list = [self.unplanned_job_list[self.trial_step_count]]
temp_merged = np.concatenate((
self.curr_work_time_slots,
self.unplanned_job_list[self.trial_step_count:] +
self.unplanned_job_list[:self.trial_step_count]
), axis=1)
obs_tuple = [
np.stack(curr_work_time_slots, axis=0),
np.stack(current_job_list + current_job_list, axis=0),
np.stack(temp_merged, axis=0),
]
return obs_tuple # OrderedDict(obs_dict)
def _get_observation_slot_list_dict(self):
        if (len(self.jobs) < 1) or (len(self.workers) < 1):
log.error(
"Error, no workers or no jobs , returning empty observation (obs_dict = empty)"
)
# raise LookupError("Error, Env has no workers or no jobs , returning obs_dict = empty")
return (0, 0)
if self.current_job_i >= len(self.jobs):
self.current_job_i = 0
# new_job_i = self.current_job_i
current_capable_worker_set = self._get_sorted_worker_code_list(self.current_job_i)
sorted_slot_codes = sorted(list(self.slot_server.time_slot_dict.keys()))
slot_dict = self.slot_server.time_slot_dict
curr_work_time_slots = []
max_available_working_slot_duration = 0
viewed_slots = []
obs_dict = {
"slots": [],
"jobs": [],
}
for slot_code in sorted_slot_codes:
# for work_time_i in range(len( self.workers_dict[worker_code]['assigned_jobs'] ) ): # nth assigned job time unit.
if slot_code in kandbox_config.DEBUGGING_SLOT_CODE_SET:
log.debug(f"_get_observation_slot_list debug {slot_code}")
# if a_slot.slot_type in (TimeSlotType.JOB_FIXED, TimeSlotType.FLOATING):
try:
a_slot = self.slot_server.get_slot(self.redis_conn.pipeline(), slot_code)
if a_slot.worker_id not in current_capable_worker_set:
continue
the_assigned_codes = sorted(
a_slot.assigned_job_codes,
key=lambda jc: self.jobs_dict[jc].scheduled_start_minutes,
)
except KeyError as ke:
log.error(
f"unknown worker code, or unknown job codes to the env. slot_code = {slot_code}, error = {str(ke)}"
)
print(ke)
continue
except MissingSlotException as mse:
log.error(
f"MissingSlotException - unknow slot_code = {slot_code}, error = {str(mse)}"
)
print(mse)
continue
(
prev_travel,
next_travel,
inside_travel,
) = self.get_travel_time_jobs_in_slot(a_slot, a_slot.assigned_job_codes)
            # Note: if the slot has no assigned jobs, this returns (0, [], 0).
all_travels = [prev_travel] + inside_travel + [next_travel]
f_total_travel_minutes = sum(all_travels)
f_max_travel_minutes_index = sum(all_travels[:-1])
longitude_vector = np.zeros(4)
longitude_vector[0] = a_slot.start_location[0]
longitude_vector[1] = a_slot.end_location[0]
latitude_vector = np.zeros(4)
latitude_vector[0] = a_slot.start_location[1]
latitude_vector[1] = a_slot.end_location[1]
if len(a_slot.assigned_job_codes) < 1:
longitude_vector[2] = (longitude_vector[0] + longitude_vector[1]) / 2
latitude_vector[2] = (latitude_vector[0] + latitude_vector[1]) / 2
longitude_vector[3] = (longitude_vector[0] + longitude_vector[1]) / 2
latitude_vector[3] = (latitude_vector[0] + latitude_vector[1]) / 2
else:
longitude_vector[2] = self.jobs_dict[a_slot.assigned_job_codes[0]].location[0]
latitude_vector[2] = self.jobs_dict[a_slot.assigned_job_codes[0]].location[1]
longitude_vector[3] = self.jobs_dict[a_slot.assigned_job_codes[-1]].location[0]
latitude_vector[3] = self.jobs_dict[a_slot.assigned_job_codes[-1]].location[1]
f_nbr_of_jobs = np.zeros(1)
f_nbr_of_jobs[0] = len(a_slot.assigned_job_codes)
slot_duration_vector = np.zeros(3)
# f_slot_duration, f_total_travel_minutes,f_total_occupied_duration, # f_total_unoccupied_duration ### , f_max_available_working_slot_duration
slot_duration_vector[0] = a_slot.end_minutes - a_slot.start_minutes
slot_duration_vector[1] = f_total_travel_minutes
slot_duration_vector[2] = f_total_occupied_duration = sum(
self.jobs_dict[a_slot.assigned_job_codes[j]].scheduled_duration_minutes
for j in range(len(a_slot.assigned_job_codes))
)
start_minutes_vector = np.zeros(3)
# f_slot_start_minutes, f_first_job_start_minutes, f_last_job_end_minutes # f_max_available_working_slot_start, f_max_available_working_slot_end,
start_minutes_vector[0] = (
a_slot.start_minutes - self.get_env_planning_horizon_start_minutes()
)
start_minutes_vector[1] = start_minutes_vector[0]
start_minutes_vector[2] = start_minutes_vector[0]
if len(a_slot.assigned_job_codes) >= 1:
start_minutes_vector[1] = (
self.jobs_dict[a_slot.assigned_job_codes[0]].scheduled_start_minutes
- a_slot.start_minutes
)
start_minutes_vector[2] = (
self.jobs_dict[a_slot.assigned_job_codes[-1]].scheduled_start_minutes
+ self.jobs_dict[a_slot.assigned_job_codes[-1]].scheduled_duration_minutes
- a_slot.start_minutes
)
if start_minutes_vector[1] < 0:
start_minutes_vector[1] = 0
if start_minutes_vector[2] < 0:
start_minutes_vector[2] = 0
if start_minutes_vector[1] >= self.PLANNING_WINDOW_LENGTH:
start_minutes_vector[1] = self.PLANNING_WINDOW_LENGTH - 1
if start_minutes_vector[2] >= self.PLANNING_WINDOW_LENGTH:
start_minutes_vector[2] = self.PLANNING_WINDOW_LENGTH - 1
# ( 0 if self.workers_dict[a_slot.worker_id].flex_form_data["is_assistant"] else 1 )
# 1 if self.workers_dict[a_slot.worker_id].belongs_to_pair is not None else 0
slot_obs = np.concatenate(
(
np.array(longitude_vector),
np.array(latitude_vector),
np.array(f_nbr_of_jobs),
np.array(slot_duration_vector),
np.array(start_minutes_vector),
# # valid slot, worker id
np.array([1, self.workers_dict[a_slot.worker_id].worker_index]),
),
axis=0,
)
# (
# np.array(longitude_vector),
# np.array(latitude_vector),
# np.array(f_nbr_of_jobs),
# np.array(slot_duration_vector),
# np.array(start_minutes_vector),
# 1,
# self.workers_dict[a_slot.worker_id].worker_index,
# )
obs_dict["slots"].append(slot_obs)
viewed_slots.append(a_slot)
if len(obs_dict["slots"]) >= self.config["nbr_observed_slots"]:
break
        # Secondly, overall statistics about workers and in-planning jobs, including unplanned visits
unplanned_job_list = []
current_job_list = []
target_unplanned_job_i = None
for job_code, new_job in self.jobs_dict.items():
            if not new_job.is_active:  # events and appointments cannot be in UNPLANNED status anyway
continue
if new_job.planning_status != JobPlanningStatus.UNPLANNED:
continue
longitude_vector = np.zeros(1)
longitude_vector[0] = new_job.location[0]
latitude_vector = np.zeros(1)
latitude_vector[0] = new_job.location[1]
            duration_vector = np.zeros(1)
import numpy as np
import cv2
import math
from skimage.morphology import skeletonize
def rotate(origin, xy, radians):
"""Rotate a point around a given point.
I call this the "high performance" version since we're caching some
values that are needed >1 time. It's less readable than the previous
function but it's faster.
"""
y, x = xy[:2]
offset_y, offset_x = origin[:2]
adjusted_x = (x - offset_x)
adjusted_y = (y - offset_y)
cos_rad = math.cos(radians)
sin_rad = math.sin(radians)
qx = offset_x + cos_rad * adjusted_x + sin_rad * adjusted_y
qy = offset_y + -sin_rad * adjusted_x + cos_rad * adjusted_y
return qx, qy
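# Minimal usage sketch (illustrative values): rotate the point (y=0, x=1) by 90
# degrees about the origin. Note the asymmetry in conventions: the input point
# is passed as (y, x) while the result comes back as (qx, qy).
def _demo_rotate():
    qx, qy = rotate(origin=(0, 0), xy=(0, 1), radians=math.pi / 2)
    print(round(qx, 6), round(qy, 6))  # expected: 0.0 -1.0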
def neighbour(x,y,image):
"""Return 8-neighbours of image point P1(x,y), in a clockwise order"""
img = image.copy()
    x_1, y_1, x1, y1 = x - 1, y - 1, x + 1, y + 1
    return [img[x_1][y], img[x_1][y1], img[x][y1], img[x1][y1], img[x1][y], img[x1][y_1], img[x][y_1], img[x_1][y_1]]
def getSkeletonIntersection(skeleton):
""" Given a skeletonised image, it will give the coordinates of the intersections of the skeleton.
Keyword arguments:
skeleton -- the skeletonised image to detect the intersections of
Returns:
List of 2-tuples (x,y) containing the intersection coordinates
"""
# A big list of valid intersections 2 3 4
# These are in the format shown to the right 1 C 5
# 8 7 6
validIntersection = [[0,1,0,1,0,0,1,0],
[0,0,1,0,1,0,0,1],
[1,0,0,1,0,1,0,0],
[0,1,0,0,1,0,1,0],
[0,0,1,0,0,1,0,1],
[1,0,0,1,0,0,1,0],
[0,1,0,0,1,0,0,1],
[1,0,1,0,0,1,0,0],
[0,1,0,0,0,1,0,1],
[0,1,0,1,0,0,0,1],
[0,1,0,1,0,1,0,0],
[0,0,0,1,0,1,0,1],
[1,0,1,0,0,0,1,0],[1,0,1,0,1,0,0,0],[0,0,1,0,1,0,1,0],
[1,0,0,0,1,0,1,0],[1,0,0,1,1,1,0,0],[0,0,1,0,0,1,1,1],
[1,1,0,0,1,0,0,1],[0,1,1,1,0,0,1,0],[1,0,1,1,0,0,1,0],
[1,0,1,0,0,1,1,0],[1,0,1,1,0,1,1,0],[0,1,1,0,1,0,1,1],
[1,1,0,1,1,0,1,0],[1,1,0,0,1,0,1,0],[0,1,1,0,1,0,1,0],
[0,0,1,0,1,0,1,1],[1,0,0,1,1,0,1,0],[1,0,1,0,1,1,0,1],
[1,0,1,0,1,1,0,0],[1,0,1,0,1,0,0,1],[0,1,0,0,1,0,1,1],
[0,1,1,0,1,0,0,1],[1,1,0,1,0,0,1,0],[0,1,0,1,1,0,1,0],
[0,0,1,0,1,1,0,1],[1,0,1,0,0,1,0,1],[1,0,0,1,0,1,1,0],
[1,0,1,1,0,1,0,0]];
    image = skeleton.copy()
    image = image / 255
    row, col = image.shape[:2]
    intersections = []
    neighbours = []
    for x in range(1, row - 1):
        for y in range(1, col - 1):
            # If we have a white pixel
            if image[x, y] == 1:
                neighbours = neighbour(x, y, image)
                if neighbours in validIntersection:
                    intersections.append((y, x))
    # Filter intersections so we do not count a junction twice or report points that are very close together.
    # Note: this removes items from the list while iterating over it, so very dense clusters may keep more than one point.
    for point1 in intersections:
        for point2 in intersections:
            if (((point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2) < 10 ** 2) and (point1 != point2):
                intersections.remove(point2)
    # Remove duplicates
    intersections = list(set(intersections))
    return intersections
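# Usage sketch (toy input, illustrative only): a one-pixel-wide "T" has a single
# 3-way junction, which should be reported near (10, 10) after skeletonization.
def _demo_skeleton_intersections():
    img = np.zeros((21, 21), dtype=np.uint8)
    img[10, :] = 1       # horizontal bar
    img[10:, 10] = 1     # vertical bar going down from the junction
    skel = skeletonize(img > 0).astype(np.uint8) * 255
    print(getSkeletonIntersection(skel))  # expected: roughly [(10, 10)]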
def skeleton_endpoints(skel):
    # make our input nice, possibly necessary
skel = skel.copy()
skel[skel!=0] = 1
skel = np.uint8(skel)
# apply the convolution
kernel = np.uint8([[1, 1, 1],
[1, 10, 1],
[1, 1, 1]])
src_depth = -1
filtered = cv2.filter2D(skel,src_depth,kernel)
# now look through to find the value of 11
# this returns a mask of the endpoints, but if you just want the coordinates, you could simply return np.where(filtered==11)
out = np.zeros_like(skel)
    out = np.where(filtered == 11)
    return out
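# Usage sketch (toy input, illustrative only): a straight one-pixel line has two
# endpoints; with the kernel above they are exactly the pixels where the
# filtered response equals 11 (10 for the centre plus one neighbour).
def _demo_skeleton_endpoints():
    line = np.zeros((5, 15), dtype=np.uint8)
    line[2, 2:13] = 1
    ys, xs = skeleton_endpoints(line)
    print(list(zip(ys, xs)))  # expected: [(2, 2), (2, 12)]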
"""
Define related utility functions for Fourier–Bessel (2D), Spherical Fourier–Bessel (3D) and
prolate spheroidal wave function (PSWF) objects.
"""
import logging
import numpy as np
from numpy import diff, exp, log, pi
from numpy.polynomial.legendre import leggauss
from scipy.special import jn, jv, sph_harm
from aspire.utils import grid_2d, grid_3d
logger = logging.getLogger(__name__)
def check_besselj_zeros(nu, z):
"""
Sanity-check a sequence of estimated zeros of the Bessel function with order `nu`.
:param nu: The real number order of the Bessel function.
:param z: (Array-like) A sequence of postulated zeros.
:return result: True or False.
"""
# Compute first and second order differences of the sequence of zeros
dz = np.diff(z)
ddz = np.diff(dz)
# Check criteria for acceptable zeros
result = True
# Real roots
result = result and all(np.isreal(z))
# All roots should be > 0, check first of increasing sequence
result = result and z[0] > 0
# Spacing between zeros is greater than 3
result = result and all(dz > 3)
# Second order differences should be zero or just barely increasing to
# within 16x machine precision.
if nu >= 0.5:
result = result and all(ddz < 16 * np.spacing(z[1:-1]))
# For nu < 0.5 the spacing will be slightly decreasing, so flip the sign
else:
result = result and all(ddz > -16 * np.spacing(z[1:-1]))
return result
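# Usage sketch: the first zeros of J_0 as tabulated by SciPy should pass the
# checks above (scipy.special.jn_zeros is assumed to be available).
def _demo_check_besselj_zeros():
    from scipy.special import jn_zeros
    print(check_besselj_zeros(0, jn_zeros(0, 10)))  # expected: True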
def besselj_newton(nu, z0, max_iter=10):
"""
Uses the Newton-Raphson method to compute the zero(s) of the
Bessel function with order `nu` with initial guess(es) `z0`.
:param nu: The real number order of the Bessel function.
:param z0: (Array-like) The initial guess(es) for the root-finding algorithm.
:param max_iter: Maximum number of iterations for Newton-Raphson
(default: 10).
:return z: (Array-like) The estimated root(s).
"""
z = z0
# Factor worse than machine precision
c = 8
for i in range(max_iter):
# Calculate values and derivatives at z
f = jv(nu, z)
fp = jv(nu - 1, z) - nu * f / z
# Update zeros
dz = -f / fp
z = z + dz
# Check for convergence
if all(np.abs(dz) < c * np.spacing(z)):
break
# If we're not converging yet, start relaxing convergence criterion
if i >= 6:
c *= 2
return z
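# Usage sketch: refine rough guesses for the first three zeros of J_0; the
# refined values should be close to 2.4048, 5.5201 and 8.6537.
def _demo_besselj_newton():
    z = besselj_newton(0, np.array([2.4, 5.5, 8.6]))
    print(np.round(z, 4))  # expected: [2.4048 5.5201 8.6537]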
def sph_bessel(ell, r):
"""
Compute spherical Bessel function values.
:param ell: The order of the spherical Bessel function.
:param r: The coordinates where the function is to be evaluated.
:return: The value of j_ell at r.
"""
scalar = np.isscalar(r)
len_r = 1 if scalar else len(r)
j = np.zeros(len_r)
j[r == 0] = 1 if ell == 0 else 0
r_mask = r != 0
j[r_mask] = np.sqrt(pi / (2 * r[r_mask])) * jv(ell + 0.5, r[r_mask])
if scalar:
j = j.item()
return j
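# Usage sketch: cross-check against SciPy's spherical Bessel function of the
# first kind (scipy.special.spherical_jn is assumed to be available).
def _demo_sph_bessel():
    from scipy.special import spherical_jn
    r = np.linspace(0.0, 10.0, 11)
    print(np.allclose(sph_bessel(2, r), spherical_jn(2, r)))  # expected: True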
def norm_assoc_legendre(j, m, x):
"""
Evaluate the normalized associated Legendre polynomial
    :param j: The order of the associated Legendre polynomial, must satisfy |m| <= j.
    :param m: The degree of the associated Legendre polynomial, must satisfy |m| <= j.
:param x: An array of values between -1 and +1 on which to evaluate.
:return: The normalized associated Legendre polynomial evaluated at corresponding x.
"""
# For negative m, flip sign and use the symmetry identity.
# In the rest, we assume that m is non-negative.
if m < 0:
m = -m
px = (-1) ** m * norm_assoc_legendre(j, m, x)
px *= (-1) ** m
return px
# Initialize the recurrence at (m, m) and (m, m+1).
p0 = (
(-1) ** m
* np.sqrt(
(2 * m + 1)
/ 2
* np.prod(np.arange(2 * m - 1, 0, -2) / np.arange(2 * m, 0, -2))
)
* (1 - x * x) ** (m / 2)
)
p1 = x * np.sqrt(2 * m + 3) * p0
# If these are the desired indices, return these initial values.
if j == m:
px = p0
elif j == m + 1:
px = p1
else:
# Fixing m, work our way up from (m, m+1) to (m, j).
for n in range(m + 1, j):
px = np.sqrt((2 * n + 3) / ((n + 1 + m) * (n + 1 - m))) * (
np.sqrt(2 * n + 1) * x * p1
- np.sqrt((n + m) * (n - m) / (2 * n - 1)) * p0
)
p0 = p1
p1 = px
return px
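# Usage sketch: "normalized" here means unit L2 norm on [-1, 1]; verify with a
# Gauss-Legendre quadrature rule (leggauss is already imported above).
def _demo_norm_assoc_legendre():
    x, w = leggauss(60)
    p = norm_assoc_legendre(5, 3, x)
    print(np.sum(w * p * p))  # expected: approximately 1.0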
def real_sph_harmonic(j, m, theta, phi):
"""
Evaluate a real spherical harmonic
    :param j: The order of the spherical harmonic. These must satisfy |m| <= j.
    :param m: The degree of the spherical harmonic. These must satisfy |m| <= j.
:param theta: The spherical coordinates of the points at which we want to evaluate the real spherical harmonic.
`theta` is the latitude between 0 and pi
:param phi: The spherical coordinates of the points at which we want to evaluate the real spherical harmonic.
`phi` is the longitude, between 0 and 2*pi
:return: The real spherical harmonics evaluated at the points (theta, phi).
"""
abs_m = abs(m)
y = sph_harm(abs_m, j, phi, theta)
if m < 0:
y = np.sqrt(2) * np.imag(y)
elif m > 0:
y = np.sqrt(2) * np.real(y)
else:
y = np.real(y)
return y
def besselj_zeros(nu, k):
"""
Finds the first `k` zeros of the Bessel function of order `nu`, i.e. J_nu.
Adapted from "zerobess.m" by <NAME> <<EMAIL>>
:param nu: The real number order of the Bessel function (must be positive and <1e7).
:param k: The number of zeros to return (must be >= 3).
:return z: A 1D NumPy array of the first `k` zeros.
"""
assert k >= 3, "k must be >= 3"
assert 0 <= nu <= 1e7, "nu must be between 0 and 1e7"
z = np.zeros(k)
# Guess first zeros using powers of nu
c0 = np.array(
[
[0.1701, -0.6563, 1.0355, 1.8558],
[0.1608, -1.0189, 3.1348, 3.2447],
[-0.2005, -1.2542, 5.7249, 4.3817],
]
)
z0 = nu + c0 @ ((nu + 1) ** np.array([[-1, -2 / 3, -1 / 3, 1 / 3]]).T)
# refine guesses
z[:3] = besselj_newton(nu, z0).squeeze()
n = 3
j = 2
err_tol = 5e-3
# Estimate further zeros iteratively using spacing of last three zeros so far
while n < k:
j = min(j, k - n)
# Use last 3 zeros to predict spacing for next j zeros
r = diff(z[n - 3 : n]) - pi
if (r[0] * r[1]) > 0 and (r[0] / r[1]) > 1:
p = log(r[0] / r[1]) / log(1 - 1 / (n - 1))
t = np.array(np.arange(1, j + 1), ndmin=2).T / (n - 1)
dz = pi + r[1] * exp(p * log(1 + t))
else:
dz = pi * np.ones((j, 1))
# Guess and refine
z0 = z[n - 1] + np.cumsum(dz)
z[n : n + j] = besselj_newton(nu, z0)
# Check to see that the sequence of zeros makes sense
assert check_besselj_zeros(
nu, z[n - 2 : n + j]
), "Unable to properly estimate Bessel function zeros."
# Check how far off we are
err = (z[n : n + j] - z0) / np.diff(z[n - 1 : n + j])
n = n + j
if max(abs(err)) < err_tol:
# Predictions were close enough, double number of zeros
j *= 2
else:
# Some predictions were off, set to double the number of good predictions
j = 2 * (np.where(abs(err) >= err_tol)[0][0] + 1)
return z
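# Usage sketch: for an integer order the iteratively estimated zeros should
# match SciPy's tabulated ones (scipy.special.jn_zeros assumed available).
def _demo_besselj_zeros():
    from scipy.special import jn_zeros
    print(np.allclose(besselj_zeros(1, 20), jn_zeros(1, 20)))  # expected: True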
def all_besselj_zeros(ell, r):
"""
Compute the zeros of the order `ell` Bessel function which are less than `r`.
:param ell: The real number order of the Bessel function.
:param r: The upper bound for zeros returned.
:return n, r0: The number of zeros and the zeros themselves
as a NumPy array.
"""
k = 4
# get the first 4 zeros
r0 = besselj_zeros(ell, k)
while all(r0 < r):
# increase the number of zeros sought
# until one of the zeros is greater than `r`
k *= 2
r0 = besselj_zeros(ell, k)
r0 = r0[r0 < r]
# return the number of zeros and the zeros themselves
return len(r0), r0
def unique_coords_nd(N, ndim, shifted=False, normalized=True, dtype=np.float32):
"""
Generate unique polar coordinates from 2D or 3D rectangular coordinates.
:param N: length size of a square or cube.
:param ndim: number of dimension, 2 or 3.
:param shifted: shifted half pixel or not for odd N.
:param normalized: normalize the grid or not.
:return: The unique polar coordinates in 2D or 3D
"""
assert ndim in (
2,
3,
), "Only two- or three-dimensional basis functions are supported."
assert N > 0, "Number of grid points should be greater than 0."
if ndim == 2:
grid = grid_2d(
N, shifted=shifted, normalized=normalized, indexing="yx", dtype=dtype
)
mask = grid["r"] <= 1
# Minor differences in r/theta/phi values are unimportant for the purpose
# of this function, so round off before proceeding
r = grid["r"][mask].round(5)
phi = grid["phi"][mask].round(5)
r_unique, r_idx = np.unique(r, return_inverse=True)
ang_unique, ang_idx = np.unique(phi, return_inverse=True)
else:
grid = grid_3d(
N, shifted=shifted, normalized=normalized, indexing="zyx", dtype=dtype
)
mask = grid["r"] <= 1
# Minor differences in r/theta/phi values are unimportant for the purpose of this function,
# so we round off before proceeding.
r = grid["r"][mask].round(5)
theta = grid["theta"][mask].round(5)
phi = grid["phi"][mask].round(5)
r_unique, r_idx = np.unique(r, return_inverse=True)
ang_unique, ang_idx = np.unique(
np.vstack([theta, phi]), axis=1, return_inverse=True
)
return {
"r_unique": r_unique,
"ang_unique": ang_unique,
"r_idx": r_idx,
"ang_idx": ang_idx,
"mask": mask,
}
def lgwt(ndeg, a, b, dtype=np.float32):
"""
Compute Legendre-Gauss quadrature
Generates the Legendre-Gauss nodes and weights on an interval
[a, b] with truncation order of ndeg for computing definite integrals
using Legendre-Gauss quadrature.
Suppose you have a continuous function f(x) which is defined on [a, b]
which you can evaluate at any x in [a, b]. Simply evaluate it at all of
the values contained in the x vector to obtain a vector f, then compute
    the definite integral using sum(f * w).
    This is a wrapper around numpy.polynomial.legendre.leggauss, which produces
    nodes and weights only on the interval (-1, 1).
:param ndeg: truncation order, that is, the number of nodes.
:param a, b: The endpoints of the interval over which the quadrature is defined.
:return x, w: The quadrature nodes and weights.
"""
x, w = leggauss(ndeg)
scale_factor = (b - a) / 2
shift = (a + b) / 2
x = scale_factor * x + shift
w = scale_factor * w
return x.astype(dtype), w.astype(dtype)
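# Usage sketch: integrate f(x) = x**2 over [0, 2]; the exact value is 8/3.
def _demo_lgwt():
    x, w = lgwt(5, 0, 2, dtype=np.float64)
    print(np.sum(w * x ** 2))  # expected: ~2.6667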
def d_decay_approx_fun(a, b, c, d):
return np.square(c) / (16 * (np.square(d) + d * (2 * b + a + 1)) - np.square(c))
def p_n(n, alpha, beta, x):
"""
    The first n + 1 Jacobi polynomials of x as defined in Yoel's PSWF paper, eq. (2), page 6
:param n: int, > 0
Number of polynomials to compute
:param alpha: float, > -1
:param beta: float, > -1
:param x: (m,) ndarray
:return: v: (m, n + 1) ndarray
v[:, i] = P^{(alpha, beta)}_n(x) as defined in the paper
"""
m = len(x)
if n < 0:
        return np.array([])
import numpy as np
from scipy import optimize
import z_l_v
import logging
from numerik import nr_ls
from numerik import rref, ref
import itertools
from setup_results_log import notify_status_func, setup_log_file
import timeit
eps = np.finfo(float).eps
np.set_printoptions(linewidth=200)
setup_log_file('log_bsp_pat_ue_03_2.log', with_console=False)
# Select the equation-of-state model
alpha_tr, epsilon, sigma, psi, omega = z_l_v.use_pr_eos()
p = 35. # bar
temp = 273.15 + 220. # K
t_flash = 273.15 + 60 # K
t0_ref = 298.15 # K
r = 8.314 # J/(mol K)
namen = ['CO', 'H2', 'CO2', 'H2O', 'CH4', 'NH3', 'AR', 'O2', 'N2']
elemente = ['C', 'O', 'N', 'H', 'AR']
atom_m = np.array([
[1, 0, 1, 0, 1, 0, 0, 0, 0],
[1, 0, 2, 1, 0, 0, 0, 2, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 2],
[0, 2, 0, 2, 4, 3, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0]
], dtype=float)
mm_el = np.array([
12.011,
15.999,
14.007,
1.008,
39.948,
]) / 1000. # kg/gmol
mm_k = atom_m.T.dot(mm_el)
red_atom_m = rref(ref(atom_m)[0])
rho = int(np.linalg.matrix_rank(red_atom_m))
n_c = len(namen)
n_e = len(elemente)
n_r = n_c - rho
ne_dampf = np.array([
0, 0, 0, 60000, 0, 0, 0, 0, 0
], dtype=float) # kmol/h
ne_rohgas = np.array([
0, 0, 0, 0, 20000, 0, 0, 0, 0
], dtype=float) # kmol/h
ne_luft = np.array([
0, 0, 0, 0, 0, 0,
0.01 * 15000,
0.21 * 15000,
0.78 * 15000
], dtype=float) # kmol/h
te_dampf = 500 + 273.15 # K
te_rohgas = 20 + 273.15 # K
te_luft = 20 + 273.15 # K
# Thermochemical data
# <NAME>: Thermochemical Data of Pure Substances.
# Weinheim, New York: VCH, 1993.
h_298 = np.array(
[-110.541, 0., -393.505,
-241.826, -74.873, -45.940,
0., 0., 0.]) * 1000 # J/mol
g_298 = np.array(
[-169.474, -38.962, -457.240,
-298.164, -130.393, -103.417,
-46.167, -61.165, -57.128
]) * 1000 # J/mol
# Critical parameters Tc, Pc, omega (acentric factor)
# e.V., VDI: VDI-Wärmeatlas. Wiesbaden: Springer Berlin Heidelberg, 2013.
tc = np.array([
132.86, 33.19, 304.13,
647.10, 190.56, 405.50,
150.69, 154.60, 126.19
]) # K
pc = np.array([
34.98, 13.15, 73.77,
220.64, 45.99, 113.59,
48.63, 50.46, 33.96
]) # bar
omega_af = np.array([
0.050, -0.219, 0.224,
0.344, 0.011, 0.256,
-0.002, 0.022, 0.037
])
# reshape to allow direct element-wise division
mm = np.array([
28.01, 2.02, 44.01,
18.02, 16.04, 17.03,
39.95, 32.00, 28.01
]).reshape([len(namen), 1])
# Coefficients for Cp(T)/R = B+(C-B)(T/(A+T))^2*(
# 1-A/(A+T)*(D+E*T/(A+T)+F*(T/(A+T))^2+G*(T/(A+T))^3))
# Columns, left to right: A, B, C, D, E, F, G
# e.V., VDI: VDI-Wärmeatlas. Wiesbaden: Springer Berlin Heidelberg, 2013.
cp_coefs = np.array([z for z in [
[
y.replace(',', '.').replace('–', '-') for y in x.split(' ')
] for x in """
407,9796 3,5028 2,8524 –2,3018 32,9055 –100,1815 106,1141
392,8422 2,4906 –3,6262 –1,9624 35,6197 –81,3691 62,6668
514,5073 3,4923 –0,9306 –6,0861 54,1586 –97,5157 70,9687
706,3032 5,1703 –6,0865 –6,6011 36,2723 –63,0965 46,2085
1530,8043 4,2038 –16,6150 –3,5668 43,0563 –86,5507 65,5986
931,6298 4,8468 –7,1757 –7,6727 51,3877 –93,4217 67,9515
0,0000 2,5000 2,5000 0,0000 0,0000 0,0000 0,0000
2122,2098 3,5302 –7,1076 –1,4542 30,6057 –83,6696 79,4375
432,2027 3,5160 2,8021 –4,1924 42,0153 –114,2500 111,1019
""".split('\n') if len(x) > 0] if len(z) > 1], dtype=float)
def cp_durch_r(t, component):
a, b, c, d, e, f, g = cp_coefs[component, :]
gamma_var = t / (a + t)
return b + (c - b) * gamma_var**2 * (
1 + (gamma_var - 1) * (
d + e * gamma_var + f * gamma_var**2 + g * gamma_var**3
        )) # dimensionless
def int_cp_durch_r_dt_minus_const(t):
a, b, c, d, e, f, g = [
item.reshape(n_c) for item in np.split(
cp_coefs, cp_coefs.shape[1], axis=1
)
]
return b * t + (c - b) * (
t - (d + e + f + g + 2) * a * np.log(a + t) +
-(2 * d + 3 * e + 4 * f + 5 * g + 1) * a**2 / (a + t) +
+(1 * d + 3 * e + 6 * f + 10 * g) * a**3 / 2 / (a + t)**2 +
-(1 * e + 4 * f + 10 * g) * a**4 / 3 / (a + t)**3 +
+(1 * f + 5 * g) * a**5 / 4 / (a + t)**4 +
- g * a**6 / 5 / (a + t)**5
)
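# Quick consistency sketch (illustrative, not part of the original script): the
# antiderivative above should reproduce Cp/R when differentiated numerically.
def _check_cp_antiderivative(t=500.0, dt=1e-3):
    from_integral = (int_cp_durch_r_dt_minus_const(t + dt)
                     - int_cp_durch_r_dt_minus_const(t - dt)) / (2 * dt)
    direct = np.array([cp_durch_r(t, i) for i in range(n_c)])
    return from_integral, direct  # the two arrays should agree closely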
def int_cp_durch_rt_dt_minus_const(t):
a, b, c, d, e, f, g = [
item.reshape(n_c) for item in np.split(
cp_coefs, cp_coefs.shape[1], axis=1
)
]
return b * np.log(t) + (c - b) * (
np.log(a + t) + (1 + d + e + f + g) * a / (a + t) +
-(d / 2 + e + 3 * f / 2 + 2 * g) * a**2 / (a + t)**2 +
+(e / 3 + f + 2 * g) * a**3 / (a + t)**3 +
-(f / 4 + g) * a**4 / (a + t)**4 +
+(g / 5) * a**5 / (a + t)**5
)
def mcph(x, t0, t):
return sum(x * (
int_cp_durch_r_dt_minus_const(t) -
int_cp_durch_r_dt_minus_const(t0))
) / sum(x) / (t - t0)
def mcps(x, t0, t):
return sum(x * (
int_cp_durch_rt_dt_minus_const(t) -
int_cp_durch_rt_dt_minus_const(t0))
) / sum(x) / np.log(t / t0)
# Compute H(T), G(T) and K(T) from Cp(T)
def h(t):
enthalpien = h_298 + r * (
int_cp_durch_r_dt_minus_const(t) -
int_cp_durch_r_dt_minus_const(298.15)
)
return enthalpien # J/mol
def g(t, h_t):
freie_energien = h_t - t / t0_ref * (h_298 - g_298) - \
r * t * (int_cp_durch_rt_dt_minus_const(t) -
int_cp_durch_rt_dt_minus_const(t0_ref))
return freie_energien # J/mol
def k(t, g_t, nuij):
delta_g_t = nuij.T.dot(g_t)
return np.exp(-delta_g_t / (r * t))
n_0 = (ne_dampf + ne_rohgas + ne_luft) * 1000 # mol/h
h_dampf_ein = h(te_dampf)
h_rohgas_ein = h(te_rohgas)
h_luft_ein = h(te_luft)
# Adiabatic mixing: sum(n_i h_i(T) - n_i h_i(T0)) = 0
t_ein = optimize.root(lambda temp:
sum(
ne_dampf * 1000 * (h(temp) - h_dampf_ein) +
ne_rohgas * 1000 * (h(temp) - h_rohgas_ein) +
ne_luft * 1000 * (h(temp) - h_luft_ein)
),
(te_luft + te_dampf + te_rohgas) / 3
).x
h_t_ein = h(t_ein)
g_t_ein = g(t_ein, h_t_ein) # J/mol
t_aus_rdampfr = 995 + 273.15 # K
h_0 = h_t_ein
g_0 = g_t_ein
h_1 = h(t_aus_rdampfr)
g_1 = g(t_aus_rdampfr, h_1)
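# Illustrative sketch (reaction chosen here for demonstration, not taken from the
# original script): equilibrium constant of the water-gas shift reaction
# CO + H2O <-> CO2 + H2 at the reformer outlet temperature, using k() above.
# Species order follows `namen`: CO, H2, CO2, H2O, CH4, NH3, AR, O2, N2.
nu_wgs = np.array([[-1., 1., 1., -1., 0., 0., 0., 0., 0.]]).T
k_wgs = k(t_aus_rdampfr, g_1, nu_wgs)
print('K (water-gas shift) at ' + str(t_aus_rdampfr) + ' K: ' + str(k_wgs))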
def stoech_matrix(atom_m, g_t, p, temp, namen, festgelegte_komponente=None):
"""
    Main reactions following Myers 1986
REF:
MYERS, <NAME>.; MYERS, <NAME>.
Numerical solution of chemical equilibria with simultaneous reactions.
The Journal of chemical physics, 1986, 84. Jg., Nr. 10, S. 5787-5795.
:param atom_m: atomic matrix. Unsorted. E by C.
:param g_t: Gibbs free energy of formation of species in atom matrix at T.
:param p: Operating pressure
:param namen: names of compounds in atomic matrix.
:return: stoech_m, indexes, nach_g_sortieren, k_t, nuij
"""
nach_g_sortieren = np.argsort(g_t)
    # Fix the main component (for steam reforming: CH4)
    # festgelegte_komponente = [4]
    # in coordinates sorted by g
if festgelegte_komponente is None:
festgelegte_komponente_sortiert = None
else:
festgelegte_komponente_sortiert = sorted(
nach_g_sortieren.argsort()[festgelegte_komponente])
pot_gruppen = itertools.combinations(range(n_c), rho)
i = 0
for komb in pot_gruppen:
i += 1
        indexes = np.concatenate([
            np.array(komb),
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 5 12:13:33 2018
@author: <NAME> (<EMAIL> / <EMAIL>)
"""
# Python dependencies
from __future__ import division
import pandas as pd
import numpy as np
import mpmath as mp
import matplotlib as mpl
import seaborn as sns
from scipy.constants import codata
from pylab import *
from lmfit import minimize, report_fit
# from scipy.optimize import leastsq
pd.options.mode.chained_assignment = None
# Plotting
mpl.rc("mathtext", fontset="stixsans", default="regular")
mpl.rcParams.update({"axes.labelsize": 22})
mpl.rc("xtick", labelsize=16)
mpl.rc("ytick", labelsize=16)
mpl.rc("legend", fontsize=14)
F = codata.physical_constants["Faraday constant"][0]
Rg = codata.physical_constants["molar gas constant"][0]
### Importing PyEIS add-ons
from .PyEIS_Data_extraction import *
from .PyEIS_Lin_KK import *
from .PyEIS_Advanced_tools import *
### Frequency generator
##
#
def freq_gen(f_start, f_stop, pts_decade=7):
"""
    Frequency generator with log-spaced frequencies
Inputs
----------
f_start = frequency start [Hz]
f_stop = frequency stop [Hz]
pts_decade = Points/decade, default 7 [-]
Output
----------
[0] = frequency range [Hz]
[1] = Angular frequency range [1/s]
"""
f_decades = np.log10(f_start) - np.log10(f_stop)
f_range = np.logspace(
np.log10(f_start),
np.log10(f_stop),
num=np.around(pts_decade * f_decades).astype(int),
endpoint=True,
)
w_range = 2 * np.pi * f_range
return f_range, w_range
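# Usage sketch: 7 points/decade from 100 kHz down to 10 mHz gives 49 frequencies.
def _demo_freq_gen():
    f_range, w_range = freq_gen(10**5, 10**-2)
    print(len(f_range), f_range[0], f_range[-1])  # expected: 49 100000.0 0.01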
### Simulation Element Functions
##
#
def elem_L(w, L):
"""
Simulation Function: -L-
Returns the impedance of an inductor
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
L = Inductance [ohm * s]
"""
return 1j * w * L
def elem_C(w, C):
"""
Simulation Function: -C-
Inputs
----------
w = Angular frequency [1/s]
C = Capacitance [F]
"""
return 1 / (C * (w * 1j))
def elem_Q(w, Q, n):
"""
Simulation Function: -Q-
Inputs
----------
w = Angular frequency [1/s]
Q = Constant phase element [s^n/ohm]
    n = Constant phase element exponent [-]
"""
return 1 / (Q * (w * 1j) ** n)
### Simulation Circuit Functions
##
#
def cir_RsC(w, Rs, C):
"""
Simulation Function: -Rs-C-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series resistance [Ohm]
C = Capacitance [F]
"""
return Rs + 1 / (C * (w * 1j))
def cir_RsQ(w, Rs, Q, n):
"""
Simulation Function: -Rs-Q-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series resistance [Ohm]
Q = Constant phase element [s^n/ohm]
    n = Constant phase element exponent [-]
"""
return Rs + 1 / (Q * (w * 1j) ** n)
def cir_RQ(w, R="none", Q="none", n="none", fs="none"):
"""
Simulation Function: -RQ-
Return the impedance of an Rs-RQ circuit. See details for RQ under cir_RQ_fit()
<NAME> (<EMAIL> / <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
R = Resistance [Ohm]
Q = Constant phase element [s^n/ohm]
    n = Constant phase element exponent [-]
fs = Summit frequency of RQ circuit [Hz]
"""
if R == "none":
R = 1 / (Q * (2 * np.pi * fs) ** n)
elif Q == "none":
Q = 1 / (R * (2 * np.pi * fs) ** n)
elif n == "none":
n = np.log(Q * R) / np.log(1 / (2 * np.pi * fs))
return R / (1 + R * Q * (w * 1j) ** n)
def cir_RsRQ(w, Rs="none", R="none", Q="none", n="none", fs="none"):
"""
Simulation Function: -Rs-RQ-
Return the impedance of an Rs-RQ circuit. See details for RQ under cir_RQ_fit()
<NAME> (<EMAIL> / <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
Rs = Series resistance [Ohm]
R = Resistance [Ohm]
Q = Constant phase element [s^n/ohm]
    n = Constant phase element exponent [-]
fs = Summit frequency of RQ circuit [Hz]
"""
if R == "none":
R = 1 / (Q * (2 * np.pi * fs) ** n)
elif Q == "none":
Q = 1 / (R * (2 * np.pi * fs) ** n)
elif n == "none":
n = np.log(Q * R) / np.log(1 / (2 * np.pi * fs))
return Rs + (R / (1 + R * Q * (w * 1j) ** n))
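# Usage sketch (illustrative parameter values): simulate an -Rs-RQ- spectrum over
# a generated frequency range; at high frequency the real part approaches Rs.
def _demo_cir_RsRQ():
    f_range, w_range = freq_gen(10**5, 10**-2)
    Z = cir_RsRQ(w_range, Rs=20.0, R=100.0, Q=1e-5, n=0.8)
    print(Z.real[0], Z.real[-1])  # expected: close to 20 at 100 kHz, close to 120 at 10 mHz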
def cir_RC(w, C="none", R="none", fs="none"):
"""
Simulation Function: -RC-
    Returns the impedance of an RC circuit, using the RQ definition with n=1. See cir_RQ() for details
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
R = Resistance [Ohm]
C = Capacitance [F]
fs = Summit frequency of RC circuit [Hz]
"""
return cir_RQ(w, R=R, Q=C, n=1, fs=fs)
def cir_RsRQRQ(
w,
Rs,
R="none",
Q="none",
n="none",
fs="none",
R2="none",
Q2="none",
n2="none",
fs2="none",
):
"""
Simulation Function: -Rs-RQ-RQ-
Return the impedance of an Rs-RQ circuit. See details for RQ under cir_RQ_fit()
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
Rs = Series Resistance [Ohm]
R = Resistance [Ohm]
Q = Constant phase element [s^n/ohm]
n = Constant phase element exponent [-]
fs = Summit frequency of RQ circuit [Hz]
R2 = Resistance [Ohm]
Q2 = Constant phase element [s^n/ohm]
n2 = Constant phase element exponent [-]
fs2 = Summit frequency of RQ circuit [Hz]
"""
if R == "none":
R = 1 / (Q * (2 * np.pi * fs) ** n)
elif Q == "none":
Q = 1 / (R * (2 * np.pi * fs) ** n)
elif n == "none":
n = np.log(Q * R) / np.log(1 / (2 * np.pi * fs))
if R2 == "none":
R2 = 1 / (Q2 * (2 * np.pi * fs2) ** n2)
elif Q2 == "none":
Q2 = 1 / (R2 * (2 * np.pi * fs2) ** n2)
elif n2 == "none":
n2 = np.log(Q2 * R2) / np.log(1 / (2 * np.pi * fs2))
return (
Rs + (R / (1 + R * Q * (w * 1j) ** n)) + (R2 / (1 + R2 * Q2 * (w * 1j) ** n2))
)
def cir_RsRQQ(w, Rs, Q, n, R1="none", Q1="none", n1="none", fs1="none"):
"""
Simulation Function: -Rs-RQ-Q-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series Resistance [ohm]
R1 = Resistance in (RQ) circuit [ohm]
Q1 = Constant phase element in (RQ) circuit [s^n/ohm]
    n1 = Constant phase element exponent in (RQ) circuit [-]
    fs1 = Summit frequency of RQ circuit [Hz]
    Q = Constant phase element of series Q [s^n/ohm]
    n = Constant phase element exponent of series Q [-]
"""
return Rs + cir_RQ(w, R=R1, Q=Q1, n=n1, fs=fs1) + elem_Q(w, Q, n)
def cir_RsRQC(w, Rs, C, R1="none", Q1="none", n1="none", fs1="none"):
"""
Simulation Function: -Rs-RQ-C-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series Resistance [ohm]
R1 = Resistance in (RQ) circuit [ohm]
Q1 = Constant phase element in (RQ) circuit [s^n/ohm]
    n1 = Constant phase element exponent in (RQ) circuit [-]
fs1 = summit frequency of RQ circuit [Hz]
C = Constant phase element of series Q [s^n/ohm]
"""
return Rs + cir_RQ(w, R=R1, Q=Q1, n=n1, fs=fs1) + elem_C(w, C=C)
def cir_RsRCC(w, Rs, R1, C1, C):
"""
Simulation Function: -Rs-RC-C-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series Resistance [ohm]
R1 = Resistance in (RQ) circuit [ohm]
C1 = Constant phase element in (RQ) circuit [s^n/ohm]
C = Capacitance of series C [s^n/ohm]
"""
return Rs + cir_RC(w, C=C1, R=R1, fs="none") + elem_C(w, C=C)
def cir_RsRCQ(w, Rs, R1, C1, Q, n):
"""
Simulation Function: -Rs-RC-Q-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series Resistance [ohm]
R1 = Resistance in (RQ) circuit [ohm]
C1 = Constant phase element in (RQ) circuit [s^n/ohm]
Q = Constant phase element of series Q [s^n/ohm]
    n = Constant phase element exponent of series Q [-]
"""
return Rs + cir_RC(w, C=C1, R=R1, fs="none") + elem_Q(w, Q, n)
def Randles_coeff(
w,
n_electron,
A,
E="none",
E0="none",
D_red="none",
D_ox="none",
C_red="none",
C_ox="none",
Rg=Rg,
F=F,
T=298.15,
):
"""
Returns the Randles coefficient sigma [ohm/s^1/2].
    Two cases: a) Ox and Red are both present in solution, in which case both C_red and D_red are defined; b) only Ox species are
    initially present in the solution with bulk concentration C*_ox, in which case the surface concentrations may be calculated as a
    function of the electrode potential following the Nernst equation. Here C_red and D_red == 'none'.
Ref.:
- Lasia, A.L., ISBN: 978-1-4614-8932-0, "Electrochemical Impedance Spectroscopy and its Applications"
- <NAME>., ISBN: 0-471-04372-9, <NAME>. (2001) "Electrochemical methods: Fundamentals and applications". New York: Wiley.
<NAME> (<EMAIL> // <EMAIL>)
Inputs
----------
n_electron = number of e- [-]
A = geometrical surface area [cm2]
    D_ox = Diffusion coefficient of the oxidized species [cm2/s]
    D_red = Diffusion coefficient of the reduced species [cm2/s]
    C_ox = Bulk concentration of the oxidized species [mol/cm3]
    C_red = Bulk concentration of the reduced species [mol/cm3]
    T = Temperature [K]
    Rg = Gas constant [J/molK]
    F = Faraday's constant [C/mol]
    E = Potential [V]
        (used when the reduced species is absent; otherwise 'none')
    E0 = formal potential [V]
        (used when the reduced species is absent; otherwise 'none')
Returns
----------
    The semi-infinite Warburg impedance Z_Aw = sigma*(w**(-0.5)) - 1j*sigma*(w**(-0.5)) [ohm], built from the Randles coefficient sigma [ohm/s^1/2]
"""
if C_red != "none" and D_red != "none":
sigma = ((Rg * T) / ((n_electron ** 2) * A * (F ** 2) * (2 ** (1 / 2)))) * (
(1 / (D_ox ** (1 / 2) * C_ox)) + (1 / (D_red ** (1 / 2) * C_red))
)
elif C_red == "none" and D_red == "none" and E != "none" and E0 != "none":
f = F / (Rg * T)
x = (n_electron * f * (E - E0)) / 2
func_cosh2 = (np.cosh(2 * x) + 1) / 2
sigma = (
(4 * Rg * T)
/ ((n_electron ** 2) * A * (F ** 2) * C_ox * ((2 * D_ox) ** (1 / 2)))
) * func_cosh2
    else:
        raise ValueError(
            "Define either C_red and D_red, or E and E0, to calculate the Randles coefficient"
        )
Z_Aw = sigma * (w ** (-0.5)) - 1j * sigma * (w ** (-0.5))
return Z_Aw
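# Worked example (illustrative sketch; numbers are arbitrary). For case (a) above, with both
# species present, the coefficient reduces to
#   sigma = (Rg*T) / (n_electron**2 * A * F**2 * sqrt(2)) * (1/(sqrt(D_ox)*C_ox) + 1/(sqrt(D_red)*C_red))
# and the returned semi-infinite Warburg impedance has a constant -45 degree phase
# (Z_Aw.real == -Z_Aw.imag at every frequency). Rg and F default to the module-level constants.
#
#   w = 2 * np.pi * np.logspace(4, -1, 51)
#   Z_Aw = Randles_coeff(w, n_electron=1, A=1.0,
#                        D_red=1e-6, D_ox=1e-6, C_red=1e-5, C_ox=1e-5)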
def cir_Randles(
w,
n_electron,
D_red,
D_ox,
C_red,
C_ox,
Rs,
Rct,
n,
E,
A,
Q="none",
fs="none",
E0=0,
F=F,
Rg=Rg,
T=298.15,
):
"""
Simulation Function: Randles -Rs-(Q-(RW)-)-
    Return the impedance of a Randles circuit with the full complexity of the Warburg coefficient
    NOTE: This Randles circuit is only meant for semi-infinite linear diffusion
<NAME> (<EMAIL> / <EMAIL>)
Inputs
----------
n_electron = number of e- [-]
A = geometrical surface area [cm2]
    D_ox = Diffusion coefficient of the oxidized species [cm2/s]
    D_red = Diffusion coefficient of the reduced species [cm2/s]
    C_ox = Concentration of the oxidized species [mol/cm3]
    C_red = Concentration of the reduced species [mol/cm3]
    T = Temperature [K]
    Rg = Gas constant [J/molK]
    F = Faraday's constant [C/mol]
    E = Potential [V]
        (used when the reduced species is absent; otherwise 'none')
    E0 = Formal potential [V]
        (used when the reduced species is absent; otherwise 'none')
Rs = Series resistance [ohm]
Rct = charge-transfer resistance [ohm]
Q = Constant phase element used to model the double-layer capacitance [F]
    n = exponent of the CPE [-]
Returns
----------
The real and imaginary impedance of a Randles circuit [ohm]
"""
Z_Rct = Rct
Z_Q = elem_Q(w, Q, n)
Z_w = Randles_coeff(
w,
n_electron=n_electron,
E=E,
E0=E0,
D_red=D_red,
D_ox=D_ox,
C_red=C_red,
C_ox=C_ox,
A=A,
T=T,
Rg=Rg,
F=F,
)
return Rs + 1 / (1 / Z_Q + 1 / (Z_Rct + Z_w))
def cir_Randles_simplified(w, Rs, R, n, sigma, Q="none", fs="none"):
"""
Simulation Function: Randles -Rs-(Q-(RW)-)-
    Return the impedance of a simplified Randles circuit, where the Warburg element is described by a single coefficient sigma
    NOTE: This Randles circuit is only meant for semi-infinite linear diffusion
<NAME> (<EMAIL> / <EMAIL>)
"""
if R == "none":
R = 1 / (Q * (2 * np.pi * fs) ** n)
elif Q == "none":
Q = 1 / (R * (2 * np.pi * fs) ** n)
elif n == "none":
n = np.log(Q * R) / np.log(1 / (2 * np.pi * fs))
Z_Q = 1 / (Q * (w * 1j) ** n)
Z_R = R
Z_w = sigma * (w ** (-0.5)) - 1j * sigma * (w ** (-0.5))
return Rs + 1 / (1 / Z_Q + 1 / (Z_R + Z_w))
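# Illustrative usage sketch (not part of the original API; parameter values are arbitrary).
# sigma can be supplied directly or taken from Randles_coeff() above; np is assumed to be
# imported at module level.
#
#   w = 2 * np.pi * np.logspace(5, -2, 71)
#   Z = cir_Randles_simplified(w, Rs=15, R=100, n=0.9, sigma=30, Q=1e-5)
#   # high frequencies show the charge-transfer arc, low frequencies the 45-degree Warburg line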
# Polymer electrolytes
def cir_C_RC_C(w, Ce, Cb="none", Rb="none", fsb="none"):
"""
Simulation Function: -C-(RC)-C-
    This circuit is often used for modeling blocking electrodes with a polymeric electrolyte, which exhibits an immobile ionic species in the bulk that gives a capacitance contribution
to the otherwise resistive electrolyte
Ref:
- <NAME>., and <NAME>. "Polymer Electrolyte Reviews - 1" Elsevier Applied Science Publishers LTD, London, Bruce, P. "Electrical Measurements on Polymer Electrolytes"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
Ce = Interfacial capacitance [F]
Rb = Bulk/series resistance [Ohm]
Cb = Bulk capacitance [F]
fsb = summit frequency of bulk (RC) circuit [Hz]
"""
Z_C = elem_C(w, C=Ce)
Z_RC = cir_RC(w, C=Cb, R=Rb, fs=fsb)
return Z_C + Z_RC
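# Illustrative usage sketch (not part of the original API; parameter values are arbitrary).
# Rb and Cb are passed straight to cir_RC(); np is assumed to be imported at module level.
#
#   w = 2 * np.pi * np.logspace(6, -1, 71)
#   Z = cir_C_RC_C(w, Ce=1e-6, Cb=1e-9, Rb=5e4)
#   # at low frequency the interfacial capacitance Ce dominates (near-vertical line in a Nyquist plot)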
def cir_Q_RQ_Q(w, Qe, ne, Qb="none", Rb="none", fsb="none", nb="none"):
"""
Simulation Function: -Q-(RQ)-Q-
    Modified cir_C_RC_C() circuit that can be used if the electrodes and bulk do not behave like ideal capacitors
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
    Qe = Interfacial capacitance modeled with a CPE [s^n/ohm]
ne = Interfacial constant phase element exponent [-]
Rb = Bulk/series resistance [Ohm]
Qb = Bulk capacitance modeled with a CPE [s^n/ohm]
nb = Bulk constant phase element exponent [-]
fsb = summit frequency of bulk (RQ) circuit [Hz]
"""
Z_Q = elem_Q(w, Q=Qe, n=ne)
Z_RQ = cir_RQ(w, Q=Qb, R=Rb, fs=fsb, n=nb)
return Z_Q + Z_RQ
def tanh(x):
"""
    As numpy gives errors when tanh becomes very large, above 10^250, this function is used instead of np.tanh()
"""
return (1 - np.exp(-2 * x)) / (1 + np.exp(-2 * x))
def cir_RCRCZD(
w,
L,
D_s,
u1,
u2,
Cb="none",
Rb="none",
fsb="none",
Ce="none",
Re="none",
fse="none",
):
"""
Simulation Function: -RC_b-RC_e-Z_D
    This circuit has been used to study non-blocking electrodes with an ionically conducting electrolyte that contains a mobile and an immobile ionic species in the bulk, mixed with an
    ionically conducting salt. This behavior yields an impedance response that consists of the interfacial impedance -(RC_e)-, the ionically conducting polymer -(RC_b)-,
    and the diffusional impedance Z_D of the dissolved salt.
Refs.:
- <NAME>. and <NAME>., Electrochimica Acta, 27, 1671-1675, 1982, "Conductivity, Charge Transfer and Transport number - An AC-Investigation
of the Polymer Electrolyte LiSCN-Poly(ethyleneoxide)"
- <NAME>., and <NAME>. "Polymer Electrolyte Reviews - 1" Elsevier Applied Science Publishers LTD, London
Bruce, P. "Electrical Measurements on Polymer Electrolytes"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
L = Thickness of electrode [cm]
D_s = Diffusion coefficient of dissolved salt [cm2/s]
u1 = Mobility of the ion reacting at the electrode interface
u2 = Mobility of other ion
Re = Interfacial resistance [Ohm]
Ce = Interfacial capacitance [F]
fse = Summit frequency of the interfacial (RC) circuit [Hz]
Rb = Bulk/series resistance [Ohm]
Cb = Bulk capacitance [F]
fsb = Summit frequency of the bulk (RC) circuit [Hz]
"""
Z_RCb = cir_RC(w, C=Cb, R=Rb, fs=fsb)
Z_RCe = cir_RC(w, C=Ce, R=Re, fs=fse)
alpha = ((w * 1j * L ** 2) / D_s) ** (1 / 2)
Z_D = Rb * (u2 / u1) * (tanh(x=alpha) / alpha)
return Z_RCb + Z_RCe + Z_D
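# Illustrative usage sketch (not part of the original API; parameter values are arbitrary).
# The diffusional term Z_D scales with Rb*(u2/u1) and rolls off above the characteristic
# frequency D_s/L**2, where alpha = sqrt(j*w*L**2/D_s) becomes large.
#
#   w = 2 * np.pi * np.logspace(5, -3, 81)
#   Z = cir_RCRCZD(w, L=0.05, D_s=1e-7, u1=1.0, u2=0.5,
#                  Cb=1e-9, Rb=2e4, Ce=1e-6, Re=500)
#   # for w << D_s/L**2, tanh(alpha)/alpha -> 1 and Z_D approaches Rb*u2/u1 (a resistive plateau)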
# Transmission lines
def cir_RsTLsQ(w, Rs, L, Ri, Q="none", n="none"):
"""
Simulation Function: -Rs-TLsQ-
TLs = Simplified Transmission Line, with a non-faradaic interfacial impedance (Q)
The simplified transmission line assumes that Ri is much greater than Rel (electrode resistance).
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
    - Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anomalous transport effects in the impedance of porous film electrodes"
- <NAME>. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> / <EMAIL>)
Inputs
-----------
Rs = Series resistance [ohm]
L = Length/Thickness of porous electrode [cm]
    Ri = Ionic resistance inside of flooded pores [ohm/cm]
Q = Interfacial capacitance of non-faradaic interface [F/cm]
n = exponent for the interfacial capacitance [-]
"""
Phi = 1 / (Q * (w * 1j) ** n)
X1 = Ri # ohm/cm
Lam = (Phi / X1) ** (1 / 2) # np.sqrt(Phi/X1)
x = L / Lam
x_mp = mp.matrix(x) # x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(
float(mp.coth(x_mp[i]).real) + float(mp.coth(x_mp[i]).imag) * 1j
) # Handles coth with x having very large or very small numbers
Z_TLsQ = Lam * X1 * coth_mp
return Rs + Z_TLsQ
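# Illustrative usage sketch (not part of the original API; parameter values are arbitrary).
# Because coth is evaluated element by element through mpmath (mp.coth), w should be a 1D
# numpy array; np and mp are assumed to be imported at module level.
#
#   w = 2 * np.pi * np.logspace(5, -2, 61)
#   Z = cir_RsTLsQ(w, Rs=10, L=0.01, Ri=200, Q=1e-3, n=0.95)
#   # at low frequency the real part approaches Rs + Ri*L/3, followed by the interfacial CPE tail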
def cir_RsRQTLsQ(w, Rs, R1, fs1, n1, L, Ri, Q, n, Q1="none"):
"""
Simulation Function: -Rs-RQ-TLsQ-
    TLs = Simplified Transmission Line, with a non-faradaic interfacial impedance (Q)
The simplified transmission line assumes that Ri is much greater than Rel (electrode resistance).
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
    - Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anomalous transport effects in the impedance of porous film electrodes"
- <NAME>. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> / <EMAIL>)
Inputs
-----------
Rs = Series resistance [ohm]
R1 = Charge transfer resistance of RQ circuit [ohm]
fs1 = Summit frequency for RQ circuit [Hz]
n1 = Exponent for RQ circuit [-]
Q1 = Constant phase element of RQ circuit [s^n/ohm]
L = Length/Thickness of porous electrode [cm]
    Ri = Ionic resistance inside of flooded pores [ohm/cm]
Q = Interfacial capacitance of non-faradaic interface [F/cm]
n = Exponent for the interfacial capacitance [-]
Output
-----------
    Impedance of Rs-(RQ)1-TLsQ
"""
Z_RQ = cir_RQ(w=w, R=R1, Q=Q1, n=n1, fs=fs1)
Phi = 1 / (Q * (w * 1j) ** n)
X1 = Ri
Lam = (Phi / X1) ** (1 / 2)
x = L / Lam
x_mp = mp.matrix(x) # x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real) + float(mp.coth(x_mp[i]).imag) * 1j)
Z_TLsQ = Lam * X1 * coth_mp
return Rs + Z_RQ + Z_TLsQ
def cir_RsTLs(w, Rs, L, Ri, R="none", Q="none", n="none", fs="none"):
"""
Simulation Function: -Rs-TLs-
TLs = Simplified Transmission Line, with a faradaic interfacial impedance (RQ)
The simplified transmission line assumes that Ri is much greater than Rel (electrode resistance).
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
    - Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anomalous transport effects in the impedance of porous film electrodes"
- <NAME>. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> / <EMAIL>)
Inputs
-----------
Rs = Series resistance [ohm]
L = Length/Thickness of porous electrode [cm]
    Ri = Ionic resistance inside of flooded pores [ohm/cm]
R = Interfacial Charge transfer resistance [ohm*cm]
fs = Summit frequency of interfacial RQ circuit [Hz]
n = Exponent for interfacial RQ circuit [-]
Q = Constant phase element of interfacial capacitance [s^n/Ohm]
Output
-----------
Impedance of Rs-TLs(RQ)
"""
Phi = cir_RQ(w, R, Q, n, fs)
X1 = Ri
Lam = (Phi / X1) ** (1 / 2)
x = L / Lam
x_mp = mp.matrix(x) # x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(
float(mp.coth(x_mp[i]).real) + float(mp.coth(x_mp[i]).imag) * 1j
) # Handles coth with x having very large or very small numbers
Z_TLs = Lam * X1 * coth_mp
return Rs + Z_TLs
def cir_RsRQTLs(w, Rs, L, Ri, R1, n1, fs1, R2, n2, fs2, Q1="none", Q2="none"):
"""
Simulation Function: -Rs-RQ-TLs-
TLs = Simplified Transmission Line, with a faradaic interfacial impedance (RQ)
The simplified transmission line assumes that Ri is much greater than Rel (electrode resistance).
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
    - Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anomalous transport effects in the impedance of porous film electrodes"
- B<NAME>. J. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> / <EMAIL>)
Inputs
-----------
Rs = Series resistance [ohm]
R1 = Charge transfer resistance of RQ circuit [ohm]
fs1 = Summit frequency for RQ circuit [Hz]
n1 = Exponent for RQ circuit [-]
Q1 = Constant phase element of RQ circuit [s^n/(ohm * cm)]
L = Length/Thickness of porous electrode [cm]
    Ri = Ionic resistance inside of flooded pores [ohm/cm]
R2 = Interfacial Charge transfer resistance [ohm*cm]
fs2 = Summit frequency of interfacial RQ circuit [Hz]
n2 = Exponent for interfacial RQ circuit [-]
Q2 = Constant phase element of interfacial capacitance [s^n/Ohm]
Output
-----------
Impedance of Rs-(RQ)1-TLs(RQ)2
"""
Z_RQ = cir_RQ(w=w, R=R1, Q=Q1, n=n1, fs=fs1)
Phi = cir_RQ(w=w, R=R2, Q=Q2, n=n2, fs=fs2)
X1 = Ri
Lam = (Phi / X1) ** (1 / 2)
x = L / Lam
x_mp = mp.matrix(x) # x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(
float(mp.coth(x_mp[i]).real) + float(mp.coth(x_mp[i]).imag) * 1j
) # Handles coth with x having very large or very small numbers
Z_TLs = Lam * X1 * coth_mp
return Rs + Z_RQ + Z_TLs
### Support function
def sinh(x):
"""
    As numpy gives errors when sinh becomes very large, above 10^250, this function is used instead of np/mp.sinh()
"""
return (1 - np.exp(-2 * x)) / (2 * np.exp(-x))
def coth(x):
"""
    As numpy gives errors when coth becomes very large, above 10^250, this function is used instead of np/mp.coth()
"""
return (1 + np.exp(-2 * x)) / (1 - np.exp(-2 * x))
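# Note (illustrative): tanh() and coth() above combine only negative exponentials, so they stay
# finite for arguments with a large positive real part, where ratios of np.cosh/np.sinh
# overflow to inf and yield nan. For example:
#
#   x = np.array([800 + 1j])
#   coth(x)                     # ~ (1 + 0j), well behaved
#   np.cosh(x) / np.sinh(x)     # overflow: inf/inf -> nan (with a RuntimeWarning)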
###
def cir_RsTLQ(w, L, Rs, Q, n, Rel, Ri):
"""
Simulation Function: -R-TLQ- (interfacial non-reacting, i.e. blocking electrode)
Transmission line w/ full complexity, which both includes Ri and Rel
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
    - Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anomalous transport effects in the impedance of porous film electrodes"
- Bisquert J. J. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
Q = Constant phase element for the interfacial capacitance [s^n/ohm]
    n = exponent for the interfacial RQ element [-]
    Rel = electronic resistance of electrode [ohm/cm]
    Ri = Ionic resistance inside of flooded pores [ohm/cm]
L = thickness of porous electrode [cm]
Output
--------------
Impedance of Rs-TL
"""
# The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = elem_Q(w, Q=Q, n=n)
X1 = Ri
X2 = Rel
Lam = (Phi / (X1 + X2)) ** (1 / 2)
x = L / Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel * Ri) / (Rel + Ri)) * (L + ((2 * Lam) / sinh(x))) + Lam * (
(Rel ** 2 + Ri ** 2) / (Rel + Ri)
) * coth(x)
return Z_Rs + Z_TL
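# Illustrative usage sketch (not part of the original API; parameter values are arbitrary).
# With Rel << Ri the full transmission line reduces to the simplified line cir_RsTLsQ() above:
#
#   w = 2 * np.pi * np.logspace(5, -2, 61)
#   Z_full = cir_RsTLQ(w, L=0.01, Rs=10, Q=1e-3, n=0.95, Rel=0.1, Ri=200)
#   Z_simple = cir_RsTLsQ(w, Rs=10, L=0.01, Ri=200, Q=1e-3, n=0.95)
#   # Z_full and Z_simple should nearly coincide for Rel << Ri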
def cir_RsRQTLQ(w, L, Rs, Q, n, Rel, Ri, R1, n1, fs1, Q1="none"):
"""
Simulation Function: -R-RQ-TLQ- (interfacial non-reacting, i.e. blocking electrode)
Transmission line w/ full complexity, which both includes Ri and Rel
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
    - Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anomalous transport effects in the impedance of porous film electrodes"
- Bisquert J. J. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
R1 = Charge transfer resistance of RQ circuit [ohm]
fs1 = Summit frequency for RQ circuit [Hz]
n1 = exponent for RQ circuit [-]
Q1 = constant phase element of RQ circuit [s^n/(ohm * cm)]
Q = Constant phase element for the interfacial capacitance [s^n/ohm]
    n = exponent for the interfacial RQ element [-]
    Rel = electronic resistance of electrode [ohm/cm]
    Ri = Ionic resistance inside of flooded pores [ohm/cm]
L = thickness of porous electrode [cm]
Output
--------------
Impedance of Rs-TL
"""
# The impedance of the series resistance
Z_Rs = Rs
# The (RQ) circuit in series with the transmission line
Z_RQ1 = cir_RQ(w=w, R=R1, Q=Q1, n=n1, fs=fs1)
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = elem_Q(w, Q=Q, n=n)
X1 = Ri
X2 = Rel
Lam = (Phi / (X1 + X2)) ** (1 / 2)
x = L / Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel * Ri) / (Rel + Ri)) * (L + ((2 * Lam) / sinh(x))) + Lam * (
(Rel ** 2 + Ri ** 2) / (Rel + Ri)
) * coth(x)
return Z_Rs + Z_RQ1 + Z_TL
def cir_RsTL(w, L, Rs, R, fs, n, Rel, Ri, Q="none"):
"""
Simulation Function: -R-TL- (interfacial reacting, i.e. non-blocking)
Transmission line w/ full complexity, which both includes Ri and Rel
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
    - Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anomalous transport effects in the impedance of porous film electrodes"
- <NAME>. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
R = Interfacial charge transfer resistance [ohm * cm]
fs = Summit frequency for the interfacial RQ element [Hz]
    n = Exponent for interfacial RQ element [-]
    Q = Constant phase element for the interfacial capacitance [s^n/ohm]
    Rel = Electronic resistance of electrode [ohm/cm]
    Ri = Ionic resistance inside of flooded pores [ohm/cm]
L = Thickness of porous electrode [cm]
Output
--------------
Impedance of Rs-TL
"""
# The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = cir_RQ(w, R=R, Q=Q, n=n, fs=fs)
X1 = Ri
X2 = Rel
Lam = (Phi / (X1 + X2)) ** (1 / 2)
x = L / Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel * Ri) / (Rel + Ri)) * (L + ((2 * Lam) / sinh(x))) + Lam * (
(Rel ** 2 + Ri ** 2) / (Rel + Ri)
) * coth(x)
return Z_Rs + Z_TL
def cir_RsRQTL(w, L, Rs, R1, fs1, n1, R2, fs2, n2, Rel, Ri, Q1="none", Q2="none"):
"""
Simulation Function: -R-RQ-TL- (interfacial reacting, i.e. non-blocking)
Transmission line w/ full complexity, which both includes Ri and Rel
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
    - Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anomalous transport effects in the impedance of porous film electrodes"
- <NAME>. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
R1 = Charge transfer resistance of RQ circuit [ohm]
fs1 = Summit frequency for RQ circuit [Hz]
n1 = exponent for RQ circuit [-]
Q1 = constant phase element of RQ circuit [s^n/(ohm * cm)]
R2 = interfacial charge transfer resistance [ohm * cm]
fs2 = Summit frequency for the interfacial RQ element [Hz]
    n2 = exponent for the interfacial RQ element [-]
    Q2 = Constant phase element for the interfacial capacitance [s^n/ohm]
    Rel = electronic resistance of electrode [ohm/cm]
    Ri = Ionic resistance inside of flooded pores [ohm/cm]
L = thickness of porous electrode [cm]
Output
--------------
Impedance of Rs-TL
"""
# The impedance of the series resistance
Z_Rs = Rs
# The (RQ) circuit in series with the transmission line
Z_RQ1 = cir_RQ(w=w, R=R1, Q=Q1, n=n1, fs=fs1)
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = cir_RQ(w, R=R2, Q=Q2, n=n2, fs=fs2)
X1 = Ri
X2 = Rel
Lam = (Phi / (X1 + X2)) ** (1 / 2)
x = L / Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel * Ri) / (Rel + Ri)) * (L + ((2 * Lam) / sinh(x))) + Lam * (
(Rel ** 2 + Ri ** 2) / (Rel + Ri)
) * coth(x)
return Z_Rs + Z_RQ1 + Z_TL
# Transmission lines with solid-state transport
def cir_RsTL_1Dsolid(w, L, D, radius, Rs, R, Q, n, R_w, n_w, Rel, Ri):
"""
Simulation Function: -R-TL(Q(RW))-
Transmission line w/ full complexity, which both includes Ri and Rel
Warburg element is specific for 1D solid-state diffusion
Refs:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
    - <NAME>. Electrochemistry Communications 1, 1999, 429-435, "Anomalous transport effects in the impedance of porous film electrodes"
- <NAME>. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
- Illig, J., Physically based Impedance Modelling of Lithium-ion Cells, KIT Scientific Publishing (2014)
- Scipioni, et al., ECS Transactions, 69 (18) 71-80 (2015)
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
R = particle charge transfer resistance [ohm*cm^2]
    Q = Constant phase element in the modified Randles element of a particle [s^n/ohm]
    n = exponent for the internal RQ element in the modified Randles element of a particle [-]
Rel = electronic resistance of electrode [ohm/cm]
Ri = ionic resistance of solution in flooded pores of electrode [ohm/cm]
R_w = polarization resistance of finite diffusion Warburg element [ohm]
n_w = exponent for Warburg element [-]
L = thickness of porous electrode [cm]
D = solid-state diffusion coefficient [cm^2/s]
radius = average particle radius [cm]
Output
--------------
Impedance of Rs-TL(Q(RW))
"""
# The impedance of the series resistance
Z_Rs = Rs
# The impedance of a 1D Warburg Element
time_const = (radius ** 2) / D
x = (time_const * w * 1j) ** n_w
x_mp = mp.matrix(x)
warburg_coth_mp = []
for i in range(len(w)):
warburg_coth_mp.append(
float(mp.coth(x_mp[i]).real) + float(mp.coth(x_mp[i]).imag) * 1j
)
Z_w = R_w * np.array(warburg_coth_mp) / x
# The Interfacial impedance is given by a Randles Equivalent circuit with the finite space warburg element in series with R2
Z_Rct = R
Z_Q = elem_Q(w, Q=Q, n=n)
Z_Randles = 1 / (1 / Z_Q + 1 / (Z_Rct + Z_w)) # Ohm
# The Impedance of the Transmission Line
lamb = (Z_Randles / (Rel + Ri)) ** (1 / 2)
x = L / lamb
# lamb_mp = mp.matrix(x)
# sinh_mp = []
# coth_mp = []
# for j in range(len(lamb_mp)):
# sinh_mp.append(float(mp.sinh(lamb_mp[j]).real)+float((mp.sinh(lamb_mp[j]).imag))*1j)
# coth_mp.append(float(mp.coth(lamb_mp[j]).real)+float(mp.coth(lamb_mp[j]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/np.array(sinh_mp))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel * Ri) / (Rel + Ri)) * (L + ((2 * lamb) / sinh(x))) + lamb * (
(Rel ** 2 + Ri ** 2) / (Rel + Ri)
) * coth(x)
return Z_Rs + Z_TL
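# Illustrative usage sketch (not part of the original API; parameter values are arbitrary).
# The particle diffusion time constant is radius**2/D, so the solid-state Warburg feature
# appears around f ~ D/radius**2. Since coth is evaluated through mpmath, w must be a 1D array.
#
#   w = 2 * np.pi * np.logspace(5, -3, 81)
#   Z = cir_RsTL_1Dsolid(w, L=0.01, D=1e-12, radius=1e-5, Rs=10,
#                        R=50, Q=1e-4, n=0.9, R_w=200, n_w=0.5, Rel=1, Ri=100)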
def cir_RsRQTL_1Dsolid(
w, L, D, radius, Rs, R1, fs1, n1, R2, Q2, n2, R_w, n_w, Rel, Ri, Q1="none"
):
"""
Simulation Function: -R-RQ-TL(Q(RW))-
Transmission line w/ full complexity, which both includes Ri and Rel
Warburg element is specific for 1D solid-state diffusion
Refs:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
    - Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anomalous transport effects in the impedance of porous film electrodes"
- <NAME>. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
- <NAME>., Physically based Impedance Modelling of Lithium-ion Cells, KIT Scientific Publishing (2014)
- Scipioni, et al., ECS Transactions, 69 (18) 71-80 (2015)
<NAME> (<EMAIL>)
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
R1 = charge transfer resistance of the interfacial RQ element [ohm*cm^2]
    fs1 = summit frequency of the interfacial RQ element [Hz]
    n1 = exponent for the interfacial RQ element [-]
    R2 = particle charge transfer resistance [ohm*cm^2]
    Q2 = Constant phase element in the modified Randles element of a particle [s^n/ohm]
    n2 = exponent for the internal RQ element in the modified Randles element of a particle [-]
Rel = electronic resistance of electrode [ohm/cm]
Ri = ionic resistance of solution in flooded pores of electrode [ohm/cm]
R_w = polarization resistance of finite diffusion Warburg element [ohm]
n_w = exponent for Warburg element [-]
L = thickness of porous electrode [cm]
D = solid-state diffusion coefficient [cm^2/s]
radius = average particle radius [cm]
Output
------------------
Impedance of R-RQ-TL(Q(RW))
"""
# The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
Z_RQ = cir_RQ(w=w, R=R1, Q=Q1, n=n1, fs=fs1)
# The impedance of a 1D Warburg Element
time_const = (radius ** 2) / D
x = (time_const * w * 1j) ** n_w
x_mp = mp.matrix(x)
warburg_coth_mp = []
for i in range(len(w)):
warburg_coth_mp.append(
float(mp.coth(x_mp[i]).real) + float(mp.coth(x_mp[i]).imag) * 1j
)
Z_w = R_w * np.array(warburg_coth_mp) / x
# The Interfacial impedance is given by a Randles Equivalent circuit with the finite space warburg element in series with R2
Z_Rct = R2
Z_Q = elem_Q(w, Q=Q2, n=n2)
Z_Randles = 1 / (1 / Z_Q + 1 / (Z_Rct + Z_w)) # Ohm
# The Impedance of the Transmission Line
lamb = (Z_Randles / (Rel + Ri)) ** (1 / 2)
x = L / lamb
# lamb_mp = mp.matrix(x)
# sinh_mp = []
# coth_mp = []
# for j in range(len(lamb_mp)):
# sinh_mp.append(float(mp.sinh(lamb_mp[j]).real)+float((mp.sinh(lamb_mp[j]).imag))*1j)
# coth_mp.append(float(mp.coth(lamb_mp[j]).real)+float(mp.coth(lamb_mp[j]).imag)*1j)
#
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/np.array(sinh_mp))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel * Ri) / (Rel + Ri)) * (L + ((2 * lamb) / sinh(x))) + lamb * (
(Rel ** 2 + Ri ** 2) / (Rel + Ri)
) * coth(x)
return Z_Rs + Z_RQ + Z_TL
### Fitting Circuit Functions
##
#
def elem_C_fit(params, w):
"""
Fit Function: -C-
"""
C = params["C"]
return 1 / (C * (w * 1j))
def elem_Q_fit(params, w):
"""
Fit Function: -Q-
Constant Phase Element for Fitting
"""
Q = params["Q"]
n = params["n"]
return 1 / (Q * (w * 1j) ** n)
def cir_RsC_fit(params, w):
"""
Fit Function: -Rs-C-
"""
Rs = params["Rs"]
C = params["C"]
return Rs + 1 / (C * (w * 1j))
def cir_RsQ_fit(params, w):
"""
Fit Function: -Rs-Q-
"""
Rs = params["Rs"]
Q = params["Q"]
n = params["n"]
return Rs + 1 / (Q * (w * 1j) ** n)
def cir_RC_fit(params, w):
    """
    Fit Function: -RC-
    Returns the impedance of an RC circuit, using the RQ definitions with n = 1
    """
    # n is fixed to 1 for an ideal capacitor, so only R, C and fs need to be resolved
    if str(params.keys())[10:].find("R") == -1:  # if R == 'none':
        C = params["C"]
        fs = params["fs"]
        R = 1 / (C * (2 * np.pi * fs))
    if str(params.keys())[10:].find("C") == -1:  # elif C == 'none':
        R = params["R"]
        fs = params["fs"]
        C = 1 / (R * (2 * np.pi * fs))
    if str(params.keys())[10:].find("fs") == -1:  # elif fs == 'none':
        R = params["R"]
        C = params["C"]
    return cir_RQ(w, R=R, Q=C, n=1, fs="none")
def cir_RQ_fit(params, w):
"""
Fit Function: -RQ-
Return the impedance of an RQ circuit:
    Z(w) = R / (1 + R*Q*(j*w)^n)
See Explanation of equations under cir_RQ()
    str(params.keys())[10:] extracts the names of the user-defined parameters; if .find() returns -1 for a parameter it was not given and is treated as 'none'
<NAME> (<EMAIL> / <EMAIL>)
"""
if str(params.keys())[10:].find("R") == -1: # if R == 'none':
Q = params["Q"]
n = params["n"]
fs = params["fs"]
R = 1 / (Q * (2 * np.pi * fs) ** n)
if str(params.keys())[10:].find("Q") == -1: # elif Q == 'none':
R = params["R"]
n = params["n"]
fs = params["fs"]
Q = 1 / (R * (2 * np.pi * fs) ** n)
if str(params.keys())[10:].find("n") == -1: # elif n == 'none':
R = params["R"]
Q = params["Q"]
fs = params["fs"]
n = np.log(Q * R) / np.log(1 / (2 * np.pi * fs))
if str(params.keys())[10:].find("fs") == -1: # elif fs == 'none':
R = params["R"]
n = params["n"]
Q = params["Q"]
return R / (1 + R * Q * (w * 1j) ** n)
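# Illustrative usage sketch (assumption about typical lmfit usage, not a documented API).
# The *_fit functions expect an lmfit Parameters object (the str(params.keys())[10:] test
# relies on its key representation); omitted parameters are derived from the ones given,
# mirroring the 'none' logic of the simulation functions.
#
#   from lmfit import Parameters
#   params = Parameters()
#   params.add("R", value=100, min=0)
#   params.add("n", value=0.9, min=0.5, max=1)
#   params.add("fs", value=1e3, min=0)   # Q is omitted, so it is derived from R, n and fs
#   # Z_guess = cir_RQ_fit(params, w)    # evaluate the initial guess at angular frequencies w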
def cir_RsRQ_fit(params, w):
"""
Fit Function: -Rs-RQ-
    Return the impedance of an Rs-RQ circuit. See details for the RQ element under cir_RQ_fit()
<NAME> (<EMAIL> / <EMAIL>)
"""
if str(params.keys())[10:].find("R") == -1: # if R == 'none':
Q = params["Q"]
n = params["n"]
fs = params["fs"]
R = 1 / (Q * (2 * np.pi * fs) ** n)
if str(params.keys())[10:].find("Q") == -1: # elif Q == 'none':
R = params["R"]
n = params["n"]
fs = params["fs"]
Q = 1 / (R * (2 * np.pi * fs) ** n)
if str(params.keys())[10:].find("n") == -1: # elif n == 'none':
R = params["R"]
Q = params["Q"]
fs = params["fs"]
n = np.log(Q * R) / np.log(1 / (2 * np.pi * fs))
if str(params.keys())[10:].find("fs") == -1: # elif fs == 'none':
R = params["R"]
Q = params["Q"]
n = params["n"]
Rs = params["Rs"]
return Rs + (R / (1 + R * Q * (w * 1j) ** n))
def cir_RsRQRQ_fit(params, w):
"""
Fit Function: -Rs-RQ-RQ-
    Return the impedance of an Rs-RQ-RQ circuit. See details under cir_RsRQRQ()
<NAME> (<EMAIL> / <EMAIL>)
"""
if str(params.keys())[10:].find("'R'") == -1: # if R == 'none':
Q = params["Q"]
n = params["n"]
fs = params["fs"]
R = 1 / (Q * (2 * np.pi * fs) ** n)
if str(params.keys())[10:].find("'Q'") == -1: # elif Q == 'none':
R = params["R"]
n = params["n"]
fs = params["fs"]
Q = 1 / (R * (2 * np.pi * fs) ** n)
if str(params.keys())[10:].find("'n'") == -1: # elif n == 'none':
R = params["R"]
Q = params["Q"]
fs = params["fs"]
n = np.log(Q * R) / np.log(1 / (2 * np.pi * fs))
if str(params.keys())[10:].find("'fs'") == -1: # elif fs == 'none':
R = params["R"]
Q = params["Q"]
n = params["n"]
if str(params.keys())[10:].find("'R2'") == -1: # if R == 'none':
Q2 = params["Q2"]
n2 = params["n2"]
fs2 = params["fs2"]
R2 = 1 / (Q2 * (2 * np.pi * fs2) ** n2)
if str(params.keys())[10:].find("'Q2'") == -1: # elif Q == 'none':
R2 = params["R2"]
n2 = params["n2"]
fs2 = params["fs2"]
Q2 = 1 / (R2 * (2 * np.pi * fs2) ** n2)
if str(params.keys())[10:].find("'n2'") == -1: # elif n == 'none':
R2 = params["R2"]
Q2 = params["Q2"]
fs2 = params["fs2"]
n2 = np.log(Q2 * R2) / np.log(1 / (2 * np.pi * fs2))
if str(params.keys())[10:].find("'fs2'") == -1: # elif fs == 'none':
R2 = params["R2"]
Q2 = params["Q2"]
n2 = params["n2"]
Rs = params["Rs"]
return (
Rs + (R / (1 + R * Q * (w * 1j) ** n)) + (R2 / (1 + R2 * Q2 * (w * 1j) ** n2))
)
def cir_Randles_simplified_Fit(params, w):
"""
Fit Function: Randles simplified -Rs-(Q-(RW)-)-
Return the impedance of a Randles circuit. See more under cir_Randles_simplified()
    NOTE: This Randles circuit is only meant for semi-infinite linear diffusion
<NAME> (<EMAIL> || <EMAIL>)
"""
if str(params.keys())[10:].find("'R'") == -1: # if R == 'none':
Q = params["Q"]
n = params["n"]
fs = params["fs"]
R = 1 / (Q * (2 * np.pi * fs) ** n)
if str(params.keys())[10:].find("'Q'") == -1: # elif Q == 'none':
R = params["R"]
n = params["n"]
fs = params["fs"]
Q = 1 / (R * (2 * np.pi * fs) ** n)
if str(params.keys())[10:].find("'n'") == -1: # elif n == 'none':
R = params["R"]
Q = params["Q"]
fs = params["fs"]
n = np.log(Q * R) / np.log(1 / (2 * np.pi * fs))
if str(params.keys())[10:].find("'fs'") == -1: # elif fs == 'none':
R = params["R"]
Q = params["Q"]
n = params["n"]
Rs = params["Rs"]
sigma = params["sigma"]
Z_Q = 1 / (Q * (w * 1j) ** n)
Z_R = R
Z_w = sigma * (w ** (-0.5)) - 1j * sigma * (w ** (-0.5))
return Rs + 1 / (1 / Z_Q + 1 / (Z_R + Z_w))
def cir_RsRQQ_fit(params, w):
"""
Fit Function: -Rs-RQ-Q-
See cir_RsRQQ() for details
"""
Rs = params["Rs"]
Q = params["Q"]
n = params["n"]
Z_Q = 1 / (Q * (w * 1j) ** n)
if str(params.keys())[10:].find("R1") == -1: # if R == 'none':
Q1 = params["Q1"]
n1 = params["n1"]
fs1 = params["fs1"]
R1 = 1 / (Q1 * (2 * np.pi * fs1) ** n1)
if str(params.keys())[10:].find("Q1") == -1: # elif Q == 'none':
R1 = params["R1"]
n1 = params["n1"]
fs1 = params["fs1"]
Q1 = 1 / (R1 * (2 * np.pi * fs1) ** n1)
if str(params.keys())[10:].find("n1") == -1: # elif n == 'none':
R1 = params["R1"]
Q1 = params["Q1"]
fs1 = params["fs1"]
n1 = np.log(Q1 * R1) / np.log(1 / (2 * np.pi * fs1))
if str(params.keys())[10:].find("fs1") == -1: # elif fs == 'none':
R1 = params["R1"]
n1 = params["n1"]
Q1 = params["Q1"]
Z_RQ = R1 / (1 + R1 * Q1 * (w * 1j) ** n1)
return Rs + Z_RQ + Z_Q
def cir_RsRQC_fit(params, w):
"""
Fit Function: -Rs-RQ-C-
See cir_RsRQC() for details
"""
Rs = params["Rs"]
C = params["C"]
Z_C = 1 / (C * (w * 1j))
if str(params.keys())[10:].find("R1") == -1: # if R == 'none':
Q1 = params["Q1"]
n1 = params["n1"]
fs1 = params["fs1"]
R1 = 1 / (Q1 * (2 * np.pi * fs1) ** n1)
if str(params.keys())[10:].find("Q1") == -1: # elif Q == 'none':
R1 = params["R1"]
n1 = params["n1"]
fs1 = params["fs1"]
Q1 = 1 / (R1 * (2 * np.pi * fs1) ** n1)
if str(params.keys())[10:].find("n1") == -1: # elif n == 'none':
R1 = params["R1"]
Q1 = params["Q1"]
fs1 = params["fs1"]
n1 = np.log(Q1 * R1) / np.log(1 / (2 * np.pi * fs1))
if str(params.keys())[10:].find("fs1") == -1: # elif fs == 'none':
R1 = params["R1"]
n1 = params["n1"]
Q1 = params["Q1"]
Z_RQ = R1 / (1 + R1 * Q1 * (w * 1j) ** n1)
return Rs + Z_RQ + Z_C
def cir_RsRCC_fit(params, w):
"""
Fit Function: -Rs-RC-C-
See cir_RsRCC() for details
"""
Rs = params["Rs"]
R1 = params["R1"]
C1 = params["C1"]
C = params["C"]
return Rs + cir_RC(w, C=C1, R=R1, fs="none") + elem_C(w, C=C)
def cir_RsRCQ_fit(params, w):
"""
Fit Function: -Rs-RC-Q-
See cir_RsRCQ() for details
"""
Rs = params["Rs"]
R1 = params["R1"]
C1 = params["C1"]
Q = params["Q"]
n = params["n"]
return Rs + cir_RC(w, C=C1, R=R1, fs="none") + elem_Q(w, Q, n)
# Polymer electrolytes
def cir_C_RC_C_fit(params, w):
"""
Fit Function: -C-(RC)-C-
See cir_C_RC_C() for details
<NAME> (<EMAIL> || <EMAIL>)
"""
# Interfacial impedance
Ce = params["Ce"]
Z_C = 1 / (Ce * (w * 1j))
    # Bulk impedance
if str(params.keys())[10:].find("Rb") == -1: # if R == 'none':
Cb = params["Cb"]
fsb = params["fsb"]
Rb = 1 / (Cb * (2 * np.pi * fsb))
if str(params.keys())[10:].find("Cb") == -1: # elif Q == 'none':
Rb = params["Rb"]
fsb = params["fsb"]
Cb = 1 / (Rb * (2 * np.pi * fsb))
if str(params.keys())[10:].find("fsb") == -1: # elif fs == 'none':
Rb = params["Rb"]
Cb = params["Cb"]
Z_RC = Rb / (1 + Rb * Cb * (w * 1j))
return Z_C + Z_RC
def cir_Q_RQ_Q_Fit(params, w):
"""
Fit Function: -Q-(RQ)-Q-
See cir_Q_RQ_Q() for details
<NAME> (<EMAIL> || <EMAIL>)
"""
# Interfacial impedance
Qe = params["Qe"]
ne = params["ne"]
Z_Q = 1 / (Qe * (w * 1j) ** ne)
# Bulk impedance
if str(params.keys())[10:].find("Rb") == -1: # if R == 'none':
Qb = params["Qb"]
nb = params["nb"]
fsb = params["fsb"]
Rb = 1 / (Qb * (2 * np.pi * fsb) ** nb)
if str(params.keys())[10:].find("Qb") == -1: # elif Q == 'none':
Rb = params["Rb"]
nb = params["nb"]
fsb = params["fsb"]
Qb = 1 / (Rb * (2 * np.pi * fsb) ** nb)
if str(params.keys())[10:].find("nb") == -1: # elif n == 'none':
Rb = params["Rb"]
Qb = params["Qb"]
fsb = params["fsb"]
nb = np.log(Qb * Rb) / np.log(1 / (2 * np.pi * fsb))
if str(params.keys())[10:].find("fsb") == -1: # elif fs == 'none':
Rb = params["Rb"]
nb = params["nb"]
Qb = params["Qb"]
Z_RQ = Rb / (1 + Rb * Qb * (w * 1j) ** nb)
return Z_Q + Z_RQ
def cir_RCRCZD_fit(params, w):
"""
Fit Function: -RC_b-RC_e-Z_D
See cir_RCRCZD() for details
<NAME> (<EMAIL> || <EMAIL>)
"""
    # Interfacial impedance
if str(params.keys())[10:].find("Re") == -1: # if R == 'none':
Ce = params["Ce"]
fse = params["fse"]
Re = 1 / (Ce * (2 * np.pi * fse))
if str(params.keys())[10:].find("Ce") == -1: # elif Q == 'none':
        Re = params["Re"]
        fse = params["fse"]
Ce = 1 / (Re * (2 * np.pi * fse))
if str(params.keys())[10:].find("fse") == -1: # elif fs == 'none':
Re = params["Re"]
Ce = params["Ce"]
Z_RCe = Re / (1 + Re * Ce * (w * 1j))
    # Bulk impedance
if str(params.keys())[10:].find("Rb") == -1: # if R == 'none':
Cb = params["Cb"]
fsb = params["fsb"]
Rb = 1 / (Cb * (2 * np.pi * fsb))
if str(params.keys())[10:].find("Cb") == -1: # elif Q == 'none':
Rb = params["Rb"]
fsb = params["fsb"]
Cb = 1 / (Rb * (2 * np.pi * fsb))
if str(params.keys())[10:].find("fsb") == -1: # elif fs == 'none':
Rb = params["Rb"]
Cb = params["Cb"]
Z_RCb = Rb / (1 + Rb * Cb * (w * 1j))
    # Mass transport impedance
L = params["L"]
D_s = params["D_s"]
u1 = params["u1"]
u2 = params["u2"]
alpha = ((w * 1j * L ** 2) / D_s) ** (1 / 2)
Z_D = Rb * (u2 / u1) * (tanh(alpha) / alpha)
return Z_RCb + Z_RCe + Z_D
# Transmission lines
def cir_RsTLsQ_fit(params, w):
"""
Fit Function: -Rs-TLsQ-
TLs = Simplified Transmission Line, with a non-faradaic interfacial impedance (Q)
See more under cir_RsTLsQ()
<NAME> (<EMAIL> / <EMAIL>)
"""
Rs = params["Rs"]
L = params["L"]
Ri = params["Ri"]
Q = params["Q"]
n = params["n"]
Phi = 1 / (Q * (w * 1j) ** n)
X1 = Ri # ohm/cm
Lam = (Phi / X1) ** (1 / 2) # np.sqrt(Phi/X1)
x = L / Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
#
# Z_TLsQ = Lam * X1 * coth_mp
Z_TLsQ = Lam * X1 * coth(x)
return Rs + Z_TLsQ
def cir_RsRQTLsQ_Fit(params, w):
"""
Fit Function: -Rs-RQ-TLsQ-
TLs = Simplified Transmission Line, with a non-faradaic interfacial impedance (Q)
See more under cir_RsRQTLsQ
<NAME> (<EMAIL> / <EMAIL>)
"""
Rs = params["Rs"]
L = params["L"]
Ri = params["Ri"]
Q = params["Q"]
n = params["n"]
if str(params.keys())[10:].find("R1") == -1: # if R == 'none':
Q1 = params["Q1"]
n1 = params["n1"]
fs1 = params["fs1"]
R1 = 1 / (Q1 * (2 * np.pi * fs1) ** n1)
if str(params.keys())[10:].find("Q1") == -1: # elif Q == 'none':
R1 = params["R1"]
n1 = params["n1"]
fs1 = params["fs1"]
Q1 = 1 / (R1 * (2 * np.pi * fs1) ** n1)
if str(params.keys())[10:].find("n1") == -1: # elif n == 'none':
R1 = params["R1"]
Q1 = params["Q1"]
fs1 = params["fs1"]
n1 = np.log(Q1 * R1) / np.log(1 / (2 * np.pi * fs1))
if str(params.keys())[10:].find("fs1") == -1: # elif fs == 'none':
R1 = params["R1"]
n1 = params["n1"]
Q1 = params["Q1"]
Z_RQ = R1 / (1 + R1 * Q1 * (w * 1j) ** n1)
Phi = 1 / (Q * (w * 1j) ** n)
X1 = Ri
Lam = (Phi / X1) ** (1 / 2)
x = L / Lam
x_mp = mp.matrix(x) # x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(
float(mp.coth(x_mp[i]).real) + float(mp.coth(x_mp[i]).imag) * 1j
) # Handles coth with x having very large or very small numbers
Z_TLsQ = Lam * X1 * coth_mp
return Rs + Z_RQ + Z_TLsQ
def cir_RsTLs_Fit(params, w):
"""
    Fit Function: -Rs-TLs-
    TLs = Simplified Transmission Line, with a faradaic interfacial impedance (RQ)
    See more under cir_RsTLs()
<NAME> (<EMAIL> / <EMAIL>)
"""
Rs = params["Rs"]
L = params["L"]
Ri = params["Ri"]
if str(params.keys())[10:].find("R") == -1: # if R == 'none':
Q = params["Q"]
n = params["n"]
fs = params["fs"]
R = 1 / (Q * (2 * np.pi * fs) ** n)
if str(params.keys())[10:].find("Q") == -1: # elif Q == 'none':
R = params["R"]
n = params["n"]
fs = params["fs"]
Q = 1 / (R * (2 * np.pi * fs) ** n)
if str(params.keys())[10:].find("n") == -1: # elif n == 'none':
R = params["R"]
Q = params["Q"]
fs = params["fs"]
n = np.log(Q * R) / np.log(1 / (2 * np.pi * fs))
if str(params.keys())[10:].find("fs") == -1: # elif fs == 'none':
R = params["R"]
n = params["n"]
Q = params["Q"]
Phi = R / (1 + R * Q * (w * 1j) ** n)
X1 = Ri
Lam = (Phi / X1) ** (1 / 2)
x = L / Lam
x_mp = mp.matrix(x) # x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(
float(mp.coth(x_mp[i]).real) + float(mp.coth(x_mp[i]).imag) * 1j
) # Handles coth with x having very large or very small numbers
Z_TLs = Lam * X1 * coth_mp
return Rs + Z_TLs
def cir_RsRQTLs_Fit(params, w):
"""
Fit Function: -Rs-RQ-TLs-
TLs = Simplified Transmission Line with a faradaic interfacial impedance (RQ)
See more under cir_RsRQTLs()
<NAME> (<EMAIL> || <EMAIL>)
"""
Rs = params["Rs"]
L = params["L"]
Ri = params["Ri"]
if str(params.keys())[10:].find("R1") == -1: # if R == 'none':
Q1 = params["Q1"]
n1 = params["n1"]
fs1 = params["fs1"]
R1 = 1 / (Q1 * (2 * np.pi * fs1) ** n1)
if str(params.keys())[10:].find("Q1") == -1: # elif Q == 'none':
R1 = params["R1"]
n1 = params["n1"]
fs1 = params["fs1"]
Q1 = 1 / (R1 * (2 * np.pi * fs1) ** n1)
if str(params.keys())[10:].find("n1") == -1: # elif n == 'none':
R1 = params["R1"]
Q1 = params["Q1"]
fs1 = params["fs1"]
n1 = np.log(Q1 * R1) / np.log(1 / (2 * np.pi * fs1))
if str(params.keys())[10:].find("fs1") == -1: # elif fs == 'none':
R1 = params["R1"]
n1 = params["n1"]
Q1 = params["Q1"]
Z_RQ = R1 / (1 + R1 * Q1 * (w * 1j) ** n1)
if str(params.keys())[10:].find("R2") == -1: # if R == 'none':
Q2 = params["Q2"]
n2 = params["n2"]
fs2 = params["fs2"]
R2 = 1 / (Q2 * (2 * np.pi * fs2) ** n2)
if str(params.keys())[10:].find("Q2") == -1: # elif Q == 'none':
R2 = params["R2"]
n2 = params["n2"]
fs2 = params["fs2"]
        Q2 = 1 / (R2 * (2 * np.pi * fs2) ** n2)
if str(params.keys())[10:].find("n2") == -1: # elif n == 'none':
R2 = params["R2"]
Q2 = params["Q2"]
fs2 = params["fs2"]
n2 = np.log(Q2 * R2) / np.log(1 / (2 * np.pi * fs2))
if str(params.keys())[10:].find("fs2") == -1: # elif fs == 'none':
R2 = params["R2"]
n2 = params["n2"]
Q2 = params["Q2"]
Phi = R2 / (1 + R2 * Q2 * (w * 1j) ** n2)
X1 = Ri
Lam = (Phi / X1) ** (1 / 2)
x = L / Lam
x_mp = mp.matrix(x) # x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(
float(mp.coth(x_mp[i]).real) + float(mp.coth(x_mp[i]).imag) * 1j
) # Handles coth with x having very large or very small numbers
Z_TLs = Lam * X1 * coth_mp
return Rs + Z_RQ + Z_TLs
def cir_RsTLQ_fit(params, w):
"""
Fit Function: -R-TLQ- (interface non-reacting, i.e. blocking electrode)
Transmission line w/ full complexity, which both includes Ri and Rel
<NAME> (<EMAIL> || <EMAIL>)
"""
Rs = params["Rs"]
L = params["L"]
Ri = params["Ri"]
Rel = params["Rel"]
Q = params["Q"]
n = params["n"]
# The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = elem_Q(w, Q=Q, n=n)
X1 = Ri
X2 = Rel
Lam = (Phi / (X1 + X2)) ** (1 / 2)
x = L / Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel * Ri) / (Rel + Ri)) * (L + ((2 * Lam) / sinh(x))) + Lam * (
(Rel ** 2 + Ri ** 2) / (Rel + Ri)
) * coth(x)
return Z_Rs + Z_TL
def cir_RsRQTLQ_fit(params, w):
"""
Fit Function: -R-RQ-TLQ- (interface non-reacting, i.e. blocking electrode)
Transmission line w/ full complexity, which both includes Ri and Rel
<NAME> (<EMAIL> || <EMAIL>)
"""
Rs = params["Rs"]
L = params["L"]
Ri = params["Ri"]
Rel = params["Rel"]
Q = params["Q"]
n = params["n"]
# The impedance of the series resistance
Z_Rs = Rs
# The (RQ) circuit in series with the transmission line
if str(params.keys())[10:].find("R1") == -1: # if R == 'none':
Q1 = params["Q1"]
n1 = params["n1"]
fs1 = params["fs1"]
R1 = 1 / (Q1 * (2 * np.pi * fs1) ** n1)
if str(params.keys())[10:].find("Q1") == -1: # elif Q == 'none':
R1 = params["R1"]
n1 = params["n1"]
fs1 = params["fs1"]
Q1 = 1 / (R1 * (2 * np.pi * fs1) ** n1)
if str(params.keys())[10:].find("n1") == -1: # elif n == 'none':
R1 = params["R1"]
Q1 = params["Q1"]
fs1 = params["fs1"]
n1 = np.log(Q1 * R1) / np.log(1 / (2 * np.pi * fs1))
if str(params.keys())[10:].find("fs1") == -1: # elif fs == 'none':
R1 = params["R1"]
n1 = params["n1"]
Q1 = params["Q1"]
Z_RQ1 = R1 / (1 + R1 * Q1 * (w * 1j) ** n1)
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = elem_Q(w, Q=Q, n=n)
X1 = Ri
X2 = Rel
Lam = (Phi / (X1 + X2)) ** (1 / 2)
x = L / Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel * Ri) / (Rel + Ri)) * (L + ((2 * Lam) / sinh(x))) + Lam * (
(Rel ** 2 + Ri ** 2) / (Rel + Ri)
) * coth(x)
return Z_Rs + Z_RQ1 + Z_TL
def cir_RsTL_Fit(params, w):
"""
    Fit Function: -R-TL- (interface reacting, i.e. non-blocking)
Transmission line w/ full complexity, which both includes Ri and Rel
See cir_RsTL() for details
<NAME> (<EMAIL> || <EMAIL>)
"""
Rs = params["Rs"]
L = params["L"]
Ri = params["Ri"]
Rel = params["Rel"]
# The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
if str(params.keys())[10:].find("R") == -1: # if R == 'none':
Q = params["Q"]
n = params["n"]
fs = params["fs"]
R = 1 / (Q * (2 * np.pi * fs) ** n)
if str(params.keys())[10:].find("Q") == -1: # elif Q == 'none':
R = params["R"]
n = params["n"]
fs = params["fs"]
Q = 1 / (R * (2 * np.pi * fs) ** n)
if str(params.keys())[10:].find("n") == -1: # elif n == 'none':
R = params["R"]
Q = params["Q"]
fs = params["fs"]
n = np.log(Q * R) / np.log(1 / (2 * np.pi * fs))
if str(params.keys())[10:].find("fs") == -1: # elif fs == 'none':
R = params["R"]
n = params["n"]
Q = params["Q"]
Phi = R / (1 + R * Q * (w * 1j) ** n)
X1 = Ri
X2 = Rel
Lam = (Phi / (X1 + X2)) ** (1 / 2)
x = L / Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel * Ri) / (Rel + Ri)) * (L + ((2 * Lam) / sinh(x))) + Lam * (
(Rel ** 2 + Ri ** 2) / (Rel + Ri)
) * coth(x)
return Z_Rs + Z_TL
def cir_RsRQTL_fit(params, w):
"""
Fit Function: -R-RQ-TL- (interface reacting, i.e. non-blocking)
    Transmission line w/ full complexity, which includes both Ri and Rel
<NAME> (<EMAIL> || <EMAIL>)
"""
Rs = params["Rs"]
L = params["L"]
Ri = params["Ri"]
Rel = params["Rel"]
# The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
if str(params.keys())[10:].find("R1") == -1: # if R == 'none':
Q1 = params["Q1"]
n1 = params["n1"]
fs1 = params["fs1"]
R1 = 1 / (Q1 * (2 * np.pi * fs1) ** n1)
elif str(params.keys())[10:].find("Q1") == -1: # elif Q == 'none':
R1 = params["R1"]
n1 = params["n1"]
fs1 = params["fs1"]
Q1 = 1 / (R1 * (2 * np.pi * fs1) ** n1)
elif str(params.keys())[10:].find("n1") == -1: # elif n == 'none':
R1 = params["R1"]
Q1 = params["Q1"]
fs1 = params["fs1"]
n1 = np.log(Q1 * R1) / np.log(1 / (2 * np.pi * fs1))
elif str(params.keys())[10:].find("fs1") == -1: # elif fs == 'none':
R1 = params["R1"]
n1 = params["n1"]
Q1 = params["Q1"]
Z_RQ1 = R1 / (1 + R1 * Q1 * (w * 1j) ** n1)
#
# # The Interfacial impedance is given by an -(RQ)- circuit
if str(params.keys())[10:].find("R2") == -1: # if R == 'none':
Q2 = params["Q2"]
n2 = params["n2"]
fs2 = params["fs2"]
R2 = 1 / (Q2 * (2 * np.pi * fs2) ** n2)
elif str(params.keys())[10:].find("Q2") == -1: # elif Q == 'none':
R2 = params["R2"]
n2 = params["n2"]
fs2 = params["fs2"]
        Q2 = 1 / (R2 * (2 * np.pi * fs2) ** n2)
elif str(params.keys())[10:].find("n2") == -1: # elif n == 'none':
R2 = params["R2"]
Q2 = params["Q2"]
fs2 = params["fs2"]
n2 = np.log(Q2 * R2) / np.log(1 / (2 * np.pi * fs2))
elif str(params.keys())[10:].find("fs2") == -1: # elif fs == 'none':
R2 = params["R2"]
n2 = params["n2"]
Q2 = params["Q2"]
Phi = R2 / (1 + R2 * Q2 * (w * 1j) ** n2)
X1 = Ri
X2 = Rel
Lam = (Phi / (X1 + X2)) ** (1 / 2)
x = L / Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float((mp.coth(x_mp[i]).imag))*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(((1-mp.exp(-2*x_mp[i]))/(2*mp.exp(-x_mp[i]))).real) + float(((1-mp.exp(-2*x_mp[i]))/(2*mp.exp(-x_mp[i]))).real)*1j)
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float((mp.sinh(x_mp[i]).imag))*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel * Ri) / (Rel + Ri)) * (L + ((2 * Lam) / sinh(x))) + Lam * (
(Rel ** 2 + Ri ** 2) / (Rel + Ri)
) * coth(x)
return Z_Rs + Z_RQ1 + Z_TL
def cir_RsTL_1Dsolid_fit(params, w):
"""
Fit Function: -R-TL(Q(RW))-
Transmission line w/ full complexity
See cir_RsTL_1Dsolid() for details
<NAME> (<EMAIL>)
<NAME> (<EMAIL> || <EMAIL>)
"""
Rs = params["Rs"]
L = params["L"]
Ri = params["Ri"]
radius = params["radius"]
D = params["D"]
R = params["R"]
Q = params["Q"]
n = params["n"]
R_w = params["R_w"]
n_w = params["n_w"]
Rel = params["Rel"]
Ri = params["Ri"]
# The impedance of the series resistance
Z_Rs = Rs
# The impedance of a 1D Warburg Element
time_const = (radius ** 2) / D
x = (time_const * w * 1j) ** n_w
x_mp = mp.matrix(x)
warburg_coth_mp = []
for i in range(len(w)):
warburg_coth_mp.append(
float(mp.coth(x_mp[i]).real) + float(mp.coth(x_mp[i]).imag) * 1j
)
Z_w = R_w * np.array(warburg_coth_mp) / x
# The Interfacial impedance is given by a Randles Equivalent circuit with the finite space warburg element in series with R2
Z_Rct = R
Z_Q = elem_Q(w=w, Q=Q, n=n)
Z_Randles = 1 / (1 / Z_Q + 1 / (Z_Rct + Z_w)) # Ohm
# The Impedance of the Transmission Line
lamb = (Z_Randles / (Rel + Ri)) ** (1 / 2)
x = L / lamb
# lamb_mp = mp.matrix(x)
# sinh_mp = []
# coth_mp = []
# for j in range(len(lamb_mp)):
# sinh_mp.append(float(mp.sinh(lamb_mp[j]).real)+float((mp.sinh(lamb_mp[j]).imag))*1j)
# coth_mp.append(float(mp.coth(lamb_mp[j]).real)+float(mp.coth(lamb_mp[j]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/np.array(sinh_mp))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel * Ri) / (Rel + Ri)) * (L + ((2 * lamb) / sinh(x))) + lamb * (
(Rel ** 2 + Ri ** 2) / (Rel + Ri)
) * coth(x)
return Z_Rs + Z_TL
def cir_RsRQTL_1Dsolid_fit(params, w):
"""
Fit Function: -R-RQ-TL(Q(RW))-
Transmission line w/ full complexity, which both includes Ri and Rel. The Warburg element is specific for 1D solid-state diffusion
See cir_RsRQTL_1Dsolid() for details
<NAME> (<EMAIL>)
<NAME> (<EMAIL> || <EMAIL>)
"""
Rs = params["Rs"]
L = params["L"]
Ri = params["Ri"]
radius = params["radius"]
D = params["D"]
R2 = params["R2"]
Q2 = params["Q2"]
n2 = params["n2"]
R_w = params["R_w"]
n_w = params["n_w"]
Rel = params["Rel"]
Ri = params["Ri"]
# The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
if str(params.keys())[10:].find("R1") == -1: # if R == 'none':
Q1 = params["Q1"]
n1 = params["n1"]
fs1 = params["fs1"]
R1 = 1 / (Q1 * (2 * np.pi * fs1) ** n1)
elif str(params.keys())[10:].find("Q1") == -1: # elif Q == 'none':
R1 = params["R1"]
n1 = params["n1"]
fs1 = params["fs1"]
Q1 = 1 / (R1 * (2 * np.pi * fs1) ** n1)
elif str(params.keys())[10:].find("n1") == -1: # elif n == 'none':
R1 = params["R1"]
Q1 = params["Q1"]
fs1 = params["fs1"]
n1 = np.log(Q1 * R1) / np.log(1 / (2 * np.pi * fs1))
elif str(params.keys())[10:].find("fs1") == -1: # elif fs == 'none':
R1 = params["R1"]
n1 = params["n1"]
Q1 = params["Q1"]
Z_RQ1 = R1 / (1 + R1 * Q1 * (w * 1j) ** n1)
# The impedance of a 1D Warburg Element
time_const = (radius ** 2) / D
x = (time_const * w * 1j) ** n_w
x_mp = mp.matrix(x)
warburg_coth_mp = []
for i in range(len(w)):
warburg_coth_mp.append(
float(mp.coth(x_mp[i]).real) + float(mp.coth(x_mp[i]).imag) * 1j
)
Z_w = R_w * np.array(warburg_coth_mp) / x
# The Interfacial impedance is given by a Randles Equivalent circuit with the finite space warburg element in series with R2
Z_Rct = R2
Z_Q = elem_Q(w, Q=Q2, n=n2)
Z_Randles = 1 / (1 / Z_Q + 1 / (Z_Rct + Z_w)) # Ohm
# The Impedance of the Transmission Line
lamb = (Z_Randles / (Rel + Ri)) ** (1 / 2)
x = L / lamb
# lamb_mp = mp.matrix(x)
# sinh_mp = []
# coth_mp = []
# for j in range(len(lamb_mp)):
# sinh_mp.append(float(mp.sinh(lamb_mp[j]).real)+float((mp.sinh(lamb_mp[j]).imag))*1j)
# coth_mp.append(float(mp.coth(lamb_mp[j]).real)+float(mp.coth(lamb_mp[j]).imag)*1j)
#
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/np.array(sinh_mp))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel * Ri) / (Rel + Ri)) * (L + ((2 * lamb) / sinh(x))) + lamb * (
(Rel ** 2 + Ri ** 2) / (Rel + Ri)
) * coth(x)
return Z_Rs + Z_RQ1 + Z_TL
### Least-Squares error function
def leastsq_errorfunc(params, w, re, im, circuit, weight_func):
"""
    Sum of squares error function for the complex non-linear least-squares fitting procedure (CNLS). The fitting routine (lmfit) iterates over this function
    until the total sum of errors is minimized.
    During the minimization the fit is weighted, and currently three different weighting options are available:
- modulus
- unity
- proportional
    Modulus is generally recommended, as random errors and a bias can exist in the experimental data.
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------
- params: parameters needed for CNLS
- re: real impedance
- im: Imaginary impedance
- circuit:
        The available circuits are shown below; this parameter must be given as a string.
- C
- Q
- R-C
- R-Q
- RC
- RQ
- R-RQ
- R-RQ-RQ
- R-RQ-Q
- R-(Q(RW))
- R-(Q(RM))
- R-RC-C
- R-RC-Q
- R-RQ-Q
- R-RQ-C
- RC-RC-ZD
- R-TLsQ
- R-RQ-TLsQ
- R-TLs
- R-RQ-TLs
- R-TLQ
- R-RQ-TLQ
- R-TL
- R-RQ-TL
- R-TL1Dsolid (reactive interface with 1D solid-state diffusion)
- R-RQ-TL1Dsolid
- weight_func
Weight function
- modulus
- unity
- proportional
"""
if circuit == "C":
re_fit = elem_C_fit(params, w).real
im_fit = -elem_C_fit(params, w).imag
elif circuit == "Q":
re_fit = elem_Q_fit(params, w).real
im_fit = -elem_Q_fit(params, w).imag
elif circuit == "R-C":
re_fit = cir_RsC_fit(params, w).real
im_fit = -cir_RsC_fit(params, w).imag
elif circuit == "R-Q":
re_fit = cir_RsQ_fit(params, w).real
im_fit = -cir_RsQ_fit(params, w).imag
elif circuit == "RC":
re_fit = cir_RC_fit(params, w).real
im_fit = -cir_RC_fit(params, w).imag
elif circuit == "RQ":
re_fit = cir_RQ_fit(params, w).real
im_fit = -cir_RQ_fit(params, w).imag
elif circuit == "R-RQ":
re_fit = cir_RsRQ_fit(params, w).real
im_fit = -cir_RsRQ_fit(params, w).imag
elif circuit == "R-RQ-RQ":
re_fit = cir_RsRQRQ_fit(params, w).real
im_fit = -cir_RsRQRQ_fit(params, w).imag
elif circuit == "R-RC-C":
re_fit = cir_RsRCC_fit(params, w).real
im_fit = -cir_RsRCC_fit(params, w).imag
elif circuit == "R-RC-Q":
re_fit = cir_RsRCQ_fit(params, w).real
im_fit = -cir_RsRCQ_fit(params, w).imag
elif circuit == "R-RQ-Q":
re_fit = cir_RsRQQ_fit(params, w).real
im_fit = -cir_RsRQQ_fit(params, w).imag
elif circuit == "R-RQ-C":
re_fit = cir_RsRQC_fit(params, w).real
im_fit = -cir_RsRQC_fit(params, w).imag
elif circuit == "R-(Q(RW))":
re_fit = cir_Randles_simplified_Fit(params, w).real
im_fit = -cir_Randles_simplified_Fit(params, w).imag
elif circuit == "R-(Q(RM))":
re_fit = cir_Randles_uelectrode_fit(params, w).real
im_fit = -cir_Randles_uelectrode_fit(params, w).imag
elif circuit == "C-RC-C":
re_fit = cir_C_RC_C_fit(params, w).real
im_fit = -cir_C_RC_C_fit(params, w).imag
elif circuit == "Q-RQ-Q":
re_fit = cir_Q_RQ_Q_Fit(params, w).real
im_fit = -cir_Q_RQ_Q_Fit(params, w).imag
elif circuit == "RC-RC-ZD":
re_fit = cir_RCRCZD_fit(params, w).real
im_fit = -cir_RCRCZD_fit(params, w).imag
elif circuit == "R-TLsQ":
re_fit = cir_RsTLsQ_fit(params, w).real
im_fit = -cir_RsTLsQ_fit(params, w).imag
elif circuit == "R-RQ-TLsQ":
re_fit = cir_RsRQTLsQ_Fit(params, w).real
im_fit = -cir_RsRQTLsQ_Fit(params, w).imag
elif circuit == "R-TLs":
re_fit = cir_RsTLs_Fit(params, w).real
im_fit = -cir_RsTLs_Fit(params, w).imag
elif circuit == "R-RQ-TLs":
re_fit = cir_RsRQTLs_Fit(params, w).real
im_fit = -cir_RsRQTLs_Fit(params, w).imag
elif circuit == "R-TLQ":
re_fit = cir_RsTLQ_fit(params, w).real
im_fit = -cir_RsTLQ_fit(params, w).imag
elif circuit == "R-RQ-TLQ":
re_fit = cir_RsRQTLQ_fit(params, w).real
im_fit = -cir_RsRQTLQ_fit(params, w).imag
elif circuit == "R-TL":
re_fit = cir_RsTL_Fit(params, w).real
im_fit = -cir_RsTL_Fit(params, w).imag
elif circuit == "R-RQ-TL":
re_fit = cir_RsRQTL_fit(params, w).real
im_fit = -cir_RsRQTL_fit(params, w).imag
elif circuit == "R-TL1Dsolid":
re_fit = cir_RsTL_1Dsolid_fit(params, w).real
im_fit = -cir_RsTL_1Dsolid_fit(params, w).imag
elif circuit == "R-RQ-TL1Dsolid":
re_fit = cir_RsRQTL_1Dsolid_fit(params, w).real
im_fit = -cir_RsRQTL_1Dsolid_fit(params, w).imag
else:
print("Circuit is not defined in leastsq_errorfunc()")
    error = [(re - re_fit) ** 2, (im - im_fit) ** 2]  # squared residuals for Z' and -Z''
    # Different weighting options, see Lasia
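    # "modulus" scales both residuals by 1/|Z_fit|, "proportional" by 1/Z'_fit^2 and 1/Z''_fit^2,
    # and "unity" applies no scaling, as implemented in the branches below.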
if weight_func == "modulus":
weight = [
1 / ((re_fit ** 2 + im_fit ** 2) ** (1 / 2)),
1 / ((re_fit ** 2 + im_fit ** 2) ** (1 / 2)),
]
elif weight_func == "proportional":
weight = [1 / (re_fit ** 2), 1 / (im_fit ** 2)]
elif weight_func == "unity":
unity_1s = []
for k in range(len(re)):
unity_1s.append(
1
            )  # makes an array of 1's, so that the weighting is == 1 * sum of squares
weight = [unity_1s, unity_1s]
else:
print("weight not defined in leastsq_errorfunc()")
    S = np.array(weight) * error  # weighted squared residuals (lmfit minimizes their sum)
return S
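# Illustrative sketch (kept as a comment so it is not executed on import): leastsq_errorfunc()
# is meant to be handed to lmfit's minimize() together with the measured spectrum, e.g.
#
#   from lmfit import Parameters, minimize
#   params = Parameters()            # parameter names must match those expected by the chosen cir_*_fit()
#   params.add("Rs", value=10, min=0)
#   fit = minimize(leastsq_errorfunc, params, method="leastsq",
#                  args=(w, re, im, "R-RQ", "modulus"))
#
# where w, re and im are numpy arrays holding the angular frequency, Z' and -Z''.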
### Fitting Class
class EIS_exp:
"""
This class is used to plot and/or analyze experimental impedance data. The class has three major functions:
- EIS_plot()
- Lin_KK()
- EIS_fit()
- EIS_plot() is used to plot experimental data with or without fit
- Lin_KK() performs a linear Kramers-Kronig analysis of the experimental data set.
- EIS_fit() performs complex non-linear least-squares fitting of the experimental data to an equivalent circuit
<NAME> (<EMAIL> || <EMAIL>)
Inputs
-----------
- path: path of datafile(s) as a string
    - data: datafile(s) including the extension, e.g. ['EIS_data1.mpt', 'EIS_data2.mpt']
    - cycle: Specific cycle numbers can be extracted with this parameter. Default is 'off', which includes all cycle numbers.
    Insert the wanted cycle numbers in brackets, e.g. cycle=[1,4,6] extracts cycle numbers 1, 4, and 6.
    - mask: ['high frequency', 'low frequency']; if only a high- or low-frequency cut-off is desired, use 'none' for the other, e.g. mask=[10**4,'none']
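    Illustrative example (the path and file names below are placeholders):
        ex = EIS_exp(path='/path/to/data/', data=['EIS_data1.mpt', 'EIS_data2.mpt'], cycle='off')
        ex.EIS_plot()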
"""
def __init__(self, path, data, cycle="off", mask=["none", "none"]):
self.df_raw0 = []
self.cycleno = []
for j, f in enumerate(data, start=0):
if f.endswith("mpt"): # file is a .mpt file
self.df_raw0.append(
extract_mpt(path=path, EIS_name=f)
) # reads all datafiles
elif f.endswith("DTA"): # file is a .dta file
self.df_raw0.append(
extract_dta(path=path, EIS_name=f)
) # reads all datafiles
elif f.endswith("z"): # file is a .z file
self.df_raw0.append(
extract_solar(path=path, EIS_name=f)
) # reads all datafiles
elif f.endswith("txt"):
self.df_raw0.append(extract_csv(path=path, EIS_name=f))
else:
print("Data file(s) could not be identified")
self.cycleno.append(self.df_raw0[j].cycle_number)
            if j > 0 and np.min(self.cycleno[j]) <= np.max(self.cycleno[j - 1]):
                # make cycle numbers continue across data files (the first file is left unchanged)
                self.df_raw0[j].update(
                    {"cycle_number": self.cycleno[j] + np.max(self.cycleno[j - 1])}
                )  # corrects the cycle number
        # currently need to append a cycle_number column to gamry files
        # adds the individual dataframes into one (up to 15 data files are supported)
        if 1 <= len(self.df_raw0) <= 15:
            self.df_raw = pd.concat(self.df_raw0, axis=0)
        else:
            print("Too many data files || 15 allowed")
self.df_raw = self.df_raw.assign(
w=2 * np.pi * self.df_raw.f
        )  # creates a new column with the angular frequency
# Masking data to each cycle
self.df_pre = []
self.df_limited = []
self.df_limited2 = []
self.df = []
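        # Split the raw data into one dataframe per cycle, optionally restricted to the requested
        # cycle numbers and/or to the frequency window given by mask = [high cut-off, low cut-off]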
if mask == ["none", "none"] and cycle == "off":
for i in range(len(self.df_raw.cycle_number.unique())): # includes all data
self.df.append(
self.df_raw[
self.df_raw.cycle_number == self.df_raw.cycle_number.unique()[i]
]
)
elif mask == ["none", "none"] and cycle != "off":
for i in range(len(cycle)):
self.df.append(
self.df_raw[self.df_raw.cycle_number == cycle[i]]
) # extracting dataframe for each cycle
elif mask[0] != "none" and mask[1] == "none" and cycle == "off":
self.df_pre = self.df_raw.mask(self.df_raw.f > mask[0])
self.df_pre.dropna(how="all", inplace=True)
for i in range(
len(self.df_pre.cycle_number.unique())
): # Appending data based on cycle number
self.df.append(
self.df_pre[
self.df_pre.cycle_number == self.df_pre.cycle_number.unique()[i]
]
)
elif (
mask[0] != "none" and mask[1] == "none" and cycle != "off"
): # or [i for i, e in enumerate(mask) if e == 'none'] == [0]
self.df_limited = self.df_raw.mask(self.df_raw.f > mask[0])
for i in range(len(cycle)):
self.df.append(
self.df_limited[self.df_limited.cycle_number == cycle[i]]
)
elif mask[0] == "none" and mask[1] != "none" and cycle == "off":
self.df_pre = self.df_raw.mask(self.df_raw.f < mask[1])
self.df_pre.dropna(how="all", inplace=True)
for i in range(len(self.df_raw.cycle_number.unique())): # includes all data
self.df.append(
self.df_pre[
self.df_pre.cycle_number == self.df_pre.cycle_number.unique()[i]
]
)
elif mask[0] == "none" and mask[1] != "none" and cycle != "off":
self.df_limited = self.df_raw.mask(self.df_raw.f < mask[1])
for i in range(len(cycle)):
self.df.append(
self.df_limited[self.df_limited.cycle_number == cycle[i]]
)
elif mask[0] != "none" and mask[1] != "none" and cycle != "off":
self.df_limited = self.df_raw.mask(self.df_raw.f < mask[1])
self.df_limited2 = self.df_limited.mask(self.df_raw.f > mask[0])
for i in range(len(cycle)):
self.df.append(
self.df_limited[self.df_limited2.cycle_number == cycle[i]]
)
elif mask[0] != "none" and mask[1] != "none" and cycle == "off":
self.df_limited = self.df_raw.mask(self.df_raw.f < mask[1])
self.df_limited2 = self.df_limited.mask(self.df_raw.f > mask[0])
for i in range(len(self.df_raw.cycle_number.unique())):
self.df.append(
self.df_limited[
self.df_limited2.cycle_number
== self.df_raw.cycle_number.unique()[i]
]
)
else:
print("__init__ error (#2)")
def Lin_KK(
self,
num_RC="auto",
legend="on",
plot="residuals",
bode="off",
nyq_xlim="none",
nyq_ylim="none",
weight_func="Boukamp",
savefig="none",
):
"""
Plots the Linear Kramers-Kronig (KK) Validity Test
    The script is based on Boukamp's and Schönleber et al.'s papers for fitting the resistances of multiple -(RC)- circuits
    to the data. A data quality analysis can hereby be made on the basis of the relative residuals
    Ref.:
    - Schönleber, M. et al. Electrochimica Acta 131 (2014) 20-27
    - Boukamp, B.A. J. Electrochem. Soc., 142, 6, 1885-1894
    The function performs the KK analysis and, by default, plots the relative residuals in individual subplots.
    Note that weight_func should be equal to 'Boukamp'.
<NAME> (<EMAIL> || <EMAIL>)
Optional Inputs
-----------------
- num_RC:
        - 'auto' applies an automatic algorithm developed by Schönleber, M. et al. Electrochimica Acta 131 (2014) 20-27
        that ensures no under- or over-fitting occurs
        - can be hardwired by inserting any number (RC-elements/decade)
    - plot:
        - 'residuals' = plots the relative residuals in subplots corresponding to the cycle numbers picked
        - 'w_data' = plots the relative residuals with the experimental data, in Nyquist and bode plot if desired, see 'bode =' in description
    - nyq_xlim/nyq_ylim: Change the x/y-axis limits of the Nyquist plot; if not equal to 'none', state the [min,max] values
    - legend:
        - 'on' = displays cycle number
        - 'potential' = displays the average potential at which the spectrum was measured
- 'off' = off
bode = Plots Bode Plot - options:
'on' = re, im vs. log(freq)
'log' = log(re, im) vs. log(freq)
're' = re vs. log(freq)
'log_re' = log(re) vs. log(freq)
'im' = im vs. log(freq)
'log_im' = log(im) vs. log(freq)
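    Illustrative call (assumes an EIS_exp instance named 'ex'; argument values are placeholders):
        ex.Lin_KK(num_RC='auto', legend='on', plot='residuals', bode='off')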
"""
if num_RC == "auto":
print("cycle || No. RC-elements || u")
self.decade = []
self.Rparam = []
self.t_const = []
self.Lin_KK_Fit = []
self.R_names = []
self.KK_R0 = []
self.KK_R = []
self.number_RC = []
self.number_RC_sort = []
self.KK_u = []
self.KK_Rgreater = []
self.KK_Rminor = []
M = 2
for i in range(len(self.df)):
self.decade.append(
np.log10(np.max(self.df[i].f)) - np.log10(np.min(self.df[i].f))
) # determine the number of RC circuits based on the number of decades measured and num_RC
self.number_RC.append(M)
self.number_RC_sort.append(M) # needed for self.KK_R
self.Rparam.append(
KK_Rnam_val(
re=self.df[i].re,
re_start=self.df[i].re.idxmin(),
num_RC=int(self.number_RC[i]),
)[0]
                )  # Creates initial guesses for R's
self.t_const.append(
KK_timeconst(w=self.df[i].w, num_RC=int(self.number_RC[i]))
                )  # Creates time constant values for self.number_RC -(RC)- circuits
self.Lin_KK_Fit.append(
minimize(
KK_errorfunc,
self.Rparam[i],
method="leastsq",
args=(
self.df[i].w.values,
self.df[i].re.values,
self.df[i].im.values,
self.number_RC[i],
weight_func,
self.t_const[i],
),
)
) # maxfev=99
self.R_names.append(
KK_Rnam_val(
re=self.df[i].re,
re_start=self.df[i].re.idxmin(),
num_RC=int(self.number_RC[i]),
)[1]
) # creates R names
for j in range(len(self.R_names[i])):
self.KK_R0.append(
self.Lin_KK_Fit[i].params.get(self.R_names[i][j]).value
)
self.number_RC_sort.insert(0, 0) # needed for self.KK_R
for i in range(len(self.df)):
self.KK_R.append(
self.KK_R0[
int(np.cumsum(self.number_RC_sort)[i]) : int(
np.cumsum(self.number_RC_sort)[i + 1]
)
]
) # assigns resistances from each spectra to their respective df
self.KK_Rgreater.append(
np.where(np.array(self.KK_R)[i] >= 0, np.array(self.KK_R)[i], 0)
)
self.KK_Rminor.append(
np.where(np.array(self.KK_R)[i] < 0, np.array(self.KK_R)[i], 0)
)
self.KK_u.append(
1
- (
np.abs(np.sum(self.KK_Rminor[i]))
/ np.abs(np.sum(self.KK_Rgreater[i]))
)
)
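            # Per the Schönleber et al. (2014) criterion, u = 1 - |sum(R < 0)| / |sum(R >= 0)|
            # measures under-/over-fitting; -(RC)- elements are added one at a time below until
            # u falls inside the 0.75-0.88 window used in the while condition.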
for i in range(len(self.df)):
while self.KK_u[i] <= 0.75 or self.KK_u[i] >= 0.88:
self.number_RC_sort0 = []
self.KK_R_lim = []
self.number_RC[i] = self.number_RC[i] + 1
self.number_RC_sort0.append(self.number_RC)
self.number_RC_sort = np.insert(self.number_RC_sort0, 0, 0)
self.Rparam[i] = KK_Rnam_val(
re=self.df[i].re,
re_start=self.df[i].re.idxmin(),
num_RC=int(self.number_RC[i]),
)[
0
                    ]  # Creates initial guesses for R's
self.t_const[i] = KK_timeconst(
w=self.df[i].w, num_RC=int(self.number_RC[i])
                    )  # Creates time constant values for self.number_RC -(RC)- circuits
self.Lin_KK_Fit[i] = minimize(
KK_errorfunc,
self.Rparam[i],
method="leastsq",
args=(
self.df[i].w.values,
self.df[i].re.values,
self.df[i].im.values,
self.number_RC[i],
weight_func,
self.t_const[i],
),
) # maxfev=99
self.R_names[i] = KK_Rnam_val(
re=self.df[i].re,
re_start=self.df[i].re.idxmin(),
num_RC=int(self.number_RC[i]),
)[
1
] # creates R names
self.KK_R0 = np.delete(
np.array(self.KK_R0), np.s_[0 : len(self.KK_R0)]
)
self.KK_R0 = []
for q in range(len(self.df)):
for j in range(len(self.R_names[q])):
self.KK_R0.append(
self.Lin_KK_Fit[q].params.get(self.R_names[q][j]).value
)
self.KK_R_lim = np.cumsum(self.number_RC_sort) # used for KK_R[i]
self.KK_R[i] = self.KK_R0[
self.KK_R_lim[i] : self.KK_R_lim[i + 1]
] # assigns resistances from each spectra to their respective df
self.KK_Rgreater[i] = np.where(
np.array(self.KK_R[i]) >= 0, np.array(self.KK_R[i]), 0
)
self.KK_Rminor[i] = np.where(
np.array(self.KK_R[i]) < 0, np.array(self.KK_R[i]), 0
)
self.KK_u[i] = 1 - (
np.abs(np.sum(self.KK_Rminor[i]))
/ np.abs(np.sum(self.KK_Rgreater[i]))
)
else:
print(
"["
+ str(i + 1)
+ "]"
+ " "
+ str(self.number_RC[i]),
" " + str(np.round(self.KK_u[i], 2)),
)
elif num_RC != "auto": # hardwired number of RC-elements/decade
print("cycle || u")
self.decade = []
self.number_RC0 = []
self.number_RC = []
self.Rparam = []
self.t_const = []
self.Lin_KK_Fit = []
self.R_names = []
self.KK_R0 = []
self.KK_R = []
for i in range(len(self.df)):
self.decade.append(
np.log10(np.max(self.df[i].f)) - np.log10(np.min(self.df[i].f))
) # determine the number of RC circuits based on the number of decades measured and num_RC
self.number_RC0.append(np.round(num_RC * self.decade[i]))
self.number_RC.append(
np.round(num_RC * self.decade[i])
                )  # Creates the number of -(RC)- circuits
self.Rparam.append(
KK_Rnam_val(
re=self.df[i].re,
re_start=self.df[i].re.idxmin(),
num_RC=int(self.number_RC0[i]),
)[0]
                )  # Creates initial guesses for R's
self.t_const.append(
KK_timeconst(w=self.df[i].w, num_RC=int(self.number_RC0[i]))
                )  # Creates time constant values for self.number_RC -(RC)- circuits
self.Lin_KK_Fit.append(
minimize(
KK_errorfunc,
self.Rparam[i],
method="leastsq",
args=(
self.df[i].w.values,
self.df[i].re.values,
self.df[i].im.values,
self.number_RC0[i],
weight_func,
self.t_const[i],
),
)
) # maxfev=99
self.R_names.append(
KK_Rnam_val(
re=self.df[i].re,
re_start=self.df[i].re.idxmin(),
num_RC=int(self.number_RC0[i]),
)[1]
) # creates R names
for j in range(len(self.R_names[i])):
self.KK_R0.append(
self.Lin_KK_Fit[i].params.get(self.R_names[i][j]).value
)
self.number_RC0.insert(0, 0)
# print(report_fit(self.Lin_KK_Fit[i])) # prints fitting report
self.KK_circuit_fit = []
self.KK_rr_re = []
self.KK_rr_im = []
self.KK_Rgreater = []
self.KK_Rminor = []
self.KK_u = []
for i in range(len(self.df)):
self.KK_R.append(
self.KK_R0[
int(np.cumsum(self.number_RC0)[i]) : int(
np.cumsum(self.number_RC0)[i + 1]
)
]
) # assigns resistances from each spectra to their respective df
self.KK_Rx = np.array(self.KK_R)
self.KK_Rgreater.append(np.where(self.KK_Rx[i] >= 0, self.KK_Rx[i], 0))
self.KK_Rminor.append(np.where(self.KK_Rx[i] < 0, self.KK_Rx[i], 0))
self.KK_u.append(
1
- (
np.abs(np.sum(self.KK_Rminor[i]))
/ np.abs(np.sum(self.KK_Rgreater[i]))
)
) # currently gives incorrect values
print(
"[" + str(i + 1) + "]" + " " + str(np.round(self.KK_u[i], 2))
)
else:
print("num_RC incorrectly defined")
self.KK_circuit_fit = []
self.KK_rr_re = []
self.KK_rr_im = []
for i in range(len(self.df)):
            # The library defines one simulation helper per circuit size (KK_RC2() ... KK_RC80());
            # look the required helper up by name instead of enumerating all 79 cases explicitly.
            n_RC = int(self.number_RC[i])
            if 2 <= n_RC <= 80:
                KK_RC_fn = globals()["KK_RC" + str(n_RC)]
                self.KK_circuit_fit.append(
                    KK_RC_fn(
                        w=self.df[i].w,
                        Rs=self.Lin_KK_Fit[i].params.get("Rs").value,
                        R_values=self.KK_R[i],
                        t_values=self.t_const[i],
                    )
                )
            else:
                print("RC simulation circuit not defined")
                print("  Number of RC = ", self.number_RC)
self.KK_rr_re.append(
residual_real(
re=self.df[i].re,
fit_re=self.KK_circuit_fit[i].real,
fit_im=-self.KK_circuit_fit[i].imag,
)
) # relative residuals for the real part
self.KK_rr_im.append(
residual_imag(
im=self.df[i].im,
fit_re=self.KK_circuit_fit[i].real,
fit_im=-self.KK_circuit_fit[i].imag,
)
) # relative residuals for the imag part
### Plotting Linear_kk results
##
#
### Label functions
self.label_re_1 = []
self.label_im_1 = []
self.label_cycleno = []
if legend == "on":
for i in range(len(self.df)):
self.label_re_1.append("Z' (#" + str(i + 1) + ")")
self.label_im_1.append("Z'' (#" + str(i + 1) + ")")
self.label_cycleno.append("#" + str(i + 1))
elif legend == "potential":
for i in range(len(self.df)):
self.label_re_1.append(
"Z' (" + str(np.round(np.average(self.df[i].E_avg), 2)) + " V)"
)
self.label_im_1.append(
"Z'' (" + str(np.round(np.average(self.df[i].E_avg), 2)) + " V)"
)
self.label_cycleno.append(
str(np.round(np.average(self.df[i].E_avg), 2)) + " V"
)
if plot == "w_data":
fig = figure(figsize=(6, 8), dpi=120, facecolor="w", edgecolor="k")
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.5, bottom=0.1, top=0.95)
ax = fig.add_subplot(311, aspect="equal")
ax1 = fig.add_subplot(312)
ax2 = fig.add_subplot(313)
colors = sns.color_palette("colorblind", n_colors=len(self.df))
colors_real = sns.color_palette("Blues", n_colors=len(self.df) + 2)
colors_imag = sns.color_palette("Oranges", n_colors=len(self.df) + 2)
### Nyquist Plot
for i in range(len(self.df)):
ax.plot(
self.df[i].re,
self.df[i].im,
marker="o",
ms=4,
lw=2,
color=colors[i],
ls="-",
alpha=0.7,
label=self.label_cycleno[i],
)
### Bode Plot
if bode == "on":
for i in range(len(self.df)):
ax1.plot(
np.log10(self.df[i].f),
self.df[i].re,
color=colors_real[i + 1],
marker="D",
ms=3,
lw=2.25,
ls="-",
alpha=0.7,
label=self.label_re_1[i],
)
ax1.plot(
np.log10(self.df[i].f),
self.df[i].im,
color=colors_imag[i + 1],
marker="s",
ms=3,
lw=2.25,
ls="-",
alpha=0.7,
label=self.label_im_1[i],
)
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("Z', -Z'' [$\Omega$]")
if legend == "on" or legend == "potential":
ax1.legend(loc="best", fontsize=10, frameon=False)
elif bode == "re":
for i in range(len(self.df)):
ax1.plot(
np.log10(self.df[i].f),
self.df[i].re,
color=colors_real[i + 1],
marker="D",
ms=3,
lw=2.25,
ls="-",
alpha=0.7,
label=self.label_cycleno[i],
)
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("Z' [$\Omega$]")
if legend == "on" or legend == "potential":
ax1.legend(loc="best", fontsize=10, frameon=False)
elif bode == "log_re":
for i in range(len(self.df)):
ax1.plot(
np.log10(self.df[i].f),
np.log10(self.df[i].re),
color=colors_real[i + 1],
marker="D",
ms=3,
lw=2.25,
ls="-",
alpha=0.7,
label=self.label_cycleno[i],
)
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("log(Z') [$\Omega$]")
if legend == "on" or legend == "potential":
ax1.legend(loc="best", fontsize=10, frameon=False)
elif bode == "im":
for i in range(len(self.df)):
ax1.plot(
np.log10(self.df[i].f),
self.df[i].im,
color=colors_imag[i + 1],
marker="s",
ms=3,
lw=2.25,
ls="-",
alpha=0.7,
label=self.label_cycleno[i],
)
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("-Z'' [$\Omega$]")
if legend == "on" or legend == "potential":
ax1.legend(loc="best", fontsize=10, frameon=False)
elif bode == "log_im":
for i in range(len(self.df)):
ax1.plot(
np.log10(self.df[i].f),
np.log10(self.df[i].im),
color=colors_imag[i + 1],
marker="s",
ms=3,
lw=2.25,
ls="-",
alpha=0.7,
label=self.label_cycleno[i],
)
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("log(-Z'') [$\Omega$]")
if legend == "on" or legend == "potential":
ax1.legend(loc="best", fontsize=10, frameon=False)
elif bode == "log":
for i in range(len(self.df)):
ax1.plot(
np.log10(self.df[i].f),
np.log10(self.df[i].re),
color=colors_real[i + 1],
marker="D",
ms=3,
lw=2.25,
ls="-",
alpha=0.7,
label=self.label_re_1[i],
)
ax1.plot(
np.log10(self.df[i].f),
np.log10(self.df[i].im),
color=colors_imag[i + 1],
marker="s",
ms=3,
lw=2.25,
ls="-",
alpha=0.7,
label=self.label_im_1[i],
)
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("log(Z', -Z'') [$\Omega$]")
if legend == "on" or legend == "potential":
ax1.legend(loc="best", fontsize=10, frameon=False)
### Kramers-Kronig Relative Residuals
for i in range(len(self.df)):
ax2.plot(
np.log10(self.df[i].f),
self.KK_rr_re[i] * 100,
color=colors_real[i + 1],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label=self.label_re_1[i],
)
ax2.plot(
np.log10(self.df[i].f),
self.KK_rr_im[i] * 100,
color=colors_imag[i + 1],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label=self.label_im_1[i],
)
ax2.set_xlabel("log(f) [Hz]")
ax2.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]")
if legend == "on" or legend == "potential":
ax2.legend(loc="best", fontsize=10, frameon=False)
ax2.axhline(0, ls="--", c="k", alpha=0.5)
### Setting ylims and write 'KK-Test' on RR subplot
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if np.min(self.KK_rr_im_min) > np.min(self.KK_rr_re_min):
ax2.set_ylim(
np.min(self.KK_rr_re_min) * 100 * 1.5,
np.max(np.abs(self.KK_rr_re_min)) * 100 * 1.5,
)
ax2.annotate(
"Lin-KK",
xy=[
np.min(np.log10(self.df[0].f)),
np.max(self.KK_rr_re_max) * 100 * 0.9,
],
color="k",
fontweight="bold",
)
elif np.min(self.KK_rr_im_min) < np.min(self.KK_rr_re_min):
ax2.set_ylim(
np.min(self.KK_rr_im_min) * 100 * 1.5,
np.max(self.KK_rr_im_max) * 100 * 1.5,
)
ax2.annotate(
"Lin-KK",
xy=[
np.min(np.log10(self.df[0].f)),
np.max(self.KK_rr_im_max) * 100 * 0.9,
],
color="k",
fontweight="bold",
)
### Figure specifics
if legend == "on" or legend == "potential":
ax.legend(loc="best", fontsize=10, frameon=False)
ax.set_xlabel("Z' [$\Omega$]")
ax.set_ylabel("-Z'' [$\Omega$]")
if nyq_xlim != "none":
ax.set_xlim(nyq_xlim[0], nyq_xlim[1])
if nyq_ylim != "none":
ax.set_ylim(nyq_ylim[0], nyq_ylim[1])
# Save Figure
if savefig != "none":
fig.savefig(savefig)
### Illustrating residuals only
elif plot == "residuals":
colors = sns.color_palette("colorblind", n_colors=9)
colors_real = sns.color_palette("Blues", n_colors=9)
colors_imag = sns.color_palette("Oranges", n_colors=9)
### 1 Cycle
if len(self.df) == 1:
fig = figure(figsize=(12, 3.8), dpi=120, facecolor="w", edgecolor="k")
fig.subplots_adjust(
left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95
)
ax = fig.add_subplot(231)
ax.plot(
np.log10(self.df[0].f),
self.KK_rr_re[0] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax.plot(
np.log10(self.df[0].f),
self.KK_rr_im[0] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
ax.set_xlabel("log(f) [Hz]")
ax.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]")
if legend == "on" or legend == "potential":
ax.legend(loc="best", fontsize=10, frameon=False)
ax.axhline(0, ls="--", c="k", alpha=0.5)
### Setting ylims and write 'KK-Test' on RR subplot
self.KK_rr_im_min = np.min(self.KK_rr_im)
self.KK_rr_im_max = np.max(self.KK_rr_im)
self.KK_rr_re_min = np.min(self.KK_rr_re)
self.KK_rr_re_max = np.max(self.KK_rr_re)
if self.KK_rr_re_max > self.KK_rr_im_max:
self.KK_ymax = self.KK_rr_re_max
else:
self.KK_ymax = self.KK_rr_im_max
if self.KK_rr_re_min < self.KK_rr_im_min:
self.KK_ymin = self.KK_rr_re_min
else:
self.KK_ymin = self.KK_rr_im_min
if np.abs(self.KK_ymin) > self.KK_ymax:
ax.set_ylim(
self.KK_ymin * 100 * 1.5, np.abs(self.KK_ymin) * 100 * 1.5
)
if legend == "on":
ax.annotate(
"Lin-KK, #1",
xy=[
np.min(np.log10(self.df[0].f)),
np.abs(self.KK_ymin) * 100 * 1.3,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax.annotate(
"Lin-KK, ("
+ str(np.round(np.average(self.df[0].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[0].f)),
np.abs(self.KK_ymin) * 100 * 1.3,
],
color="k",
fontweight="bold",
)
elif np.abs(self.KK_ymin) < self.KK_ymax:
ax.set_ylim(
np.negative(self.KK_ymax) * 100 * 1.5,
np.abs(self.KK_ymax) * 100 * 1.5,
)
if legend == "on":
ax.annotate(
"Lin-KK, #1",
xy=[
np.min(np.log10(self.df[0].f)),
self.KK_ymax * 100 * 1.3,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax.annotate(
"Lin-KK, ("
+ str(np.round(np.average(self.df[0].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[0].f)),
self.KK_ymax * 100 * 1.3,
],
color="k",
fontweight="bold",
)
# Save Figure
if savefig != "none":
fig.savefig(savefig)
### 2 Cycles
elif len(self.df) == 2:
fig = figure(figsize=(12, 5), dpi=120, facecolor="w", edgecolor="k")
fig.subplots_adjust(
left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95
)
ax1 = fig.add_subplot(231)
ax2 = fig.add_subplot(232)
# cycle 1
ax1.plot(
np.log10(self.df[0].f),
self.KK_rr_re[0] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax1.plot(
np.log10(self.df[0].f),
self.KK_rr_im[0] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=18)
if legend == "on" or legend == "potential":
ax1.legend(loc="best", fontsize=10, frameon=False)
ax1.axhline(0, ls="--", c="k", alpha=0.5)
# cycle 2
ax2.plot(
np.log10(self.df[1].f),
self.KK_rr_re[1] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax2.plot(
np.log10(self.df[1].f),
self.KK_rr_im[1] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
ax2.set_xlabel("log(f) [Hz]")
if legend == "on" or legend == "potential":
ax2.legend(loc="best", fontsize=10, frameon=False)
ax2.axhline(0, ls="--", c="k", alpha=0.5)
### Setting ylims and labeling plot with 'KK-Test' in RR subplot
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
self.KK_ymin = []
self.KK_ymax = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:
self.KK_ymax.append(self.KK_rr_re_max[i])
else:
self.KK_ymax.append(self.KK_rr_im_max[i])
if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:
self.KK_ymin.append(self.KK_rr_re_min[i])
else:
self.KK_ymin.append(self.KK_rr_im_min[i])
if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:
ax1.set_ylim(
self.KK_ymin[0] * 100 * 1.5, np.abs(self.KK_ymin[0]) * 100 * 1.5
)
if legend == "on":
ax1.annotate(
"Lin-KK, #1",
xy=[
np.min(np.log10(self.df[0].f)),
np.abs(self.KK_ymin[0]) * 100 * 1.3,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax1.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[0].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[0].f)),
np.abs(self.KK_ymin[0]) * 100 * 1.3,
],
color="k",
fontweight="bold",
)
elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:
ax1.set_ylim(
np.negative(self.KK_ymax[0]) * 100 * 1.5,
np.abs(self.KK_ymax[0]) * 100 * 1.5,
)
if legend == "on":
ax1.annotate(
"Lin-KK, #1",
xy=[
np.min(np.log10(self.df[0].f)),
np.abs(self.KK_ymax[0]) * 100 * 1.3,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax1.annotate(
"Lin-KK, ("
+ str(np.round(np.average(self.df[0].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[0].f)),
self.KK_ymax[0] * 100 * 1.3,
],
color="k",
fontweight="bold",
)
if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:
ax2.set_ylim(
self.KK_ymin[1] * 100 * 1.5, np.abs(self.KK_ymin[1]) * 100 * 1.5
)
if legend == "on":
ax2.annotate(
"Lin-KK, #2",
xy=[
np.min(np.log10(self.df[1].f)),
np.max(np.abs(self.KK_ymin[1])) * 100 * 1.3,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax2.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[1].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[1].f)),
np.max(np.abs(self.KK_ymin[1])) * 100 * 1.3,
],
color="k",
fontweight="bold",
)
                elif np.abs(self.KK_ymin[1]) < self.KK_ymax[1]:
ax2.set_ylim(
np.negative(self.KK_ymax[1]) * 100 * 1.5,
np.abs(self.KK_ymax[1]) * 100 * 1.5,
)
if legend == "on":
ax2.annotate(
"Lin-KK, #2",
xy=[
np.min(np.log10(self.df[1].f)),
self.KK_ymax[1] * 100 * 1.3,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax2.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[1].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[1].f)),
self.KK_ymax[1] * 100 * 1.3,
],
color="k",
fontweight="bold",
)
# Save Figure
if savefig != "none":
fig.savefig(savefig)
### 3 Cycles
elif len(self.df) == 3:
fig = figure(figsize=(12, 5), dpi=120, facecolor="w", edgecolor="k")
fig.subplots_adjust(
left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95
)
ax1 = fig.add_subplot(231)
ax2 = fig.add_subplot(232)
ax3 = fig.add_subplot(233)
# cycle 1
ax1.plot(
np.log10(self.df[0].f),
self.KK_rr_re[0] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax1.plot(
np.log10(self.df[0].f),
self.KK_rr_im[0] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=18)
if legend == "on" or legend == "potential":
ax1.legend(loc="best", fontsize=10, frameon=False)
ax1.axhline(0, ls="--", c="k", alpha=0.5)
# Cycle 2
ax2.plot(
np.log10(self.df[1].f),
self.KK_rr_re[1] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax2.plot(
np.log10(self.df[1].f),
self.KK_rr_im[1] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
ax2.set_xlabel("log(f) [Hz]")
if legend == "on" or legend == "potential":
ax2.legend(loc="best", fontsize=10, frameon=False)
ax2.axhline(0, ls="--", c="k", alpha=0.5)
# Cycle 3
ax3.plot(
np.log10(self.df[2].f),
self.KK_rr_re[2] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax3.plot(
np.log10(self.df[2].f),
self.KK_rr_im[2] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
ax3.set_xlabel("log(f) [Hz]")
if legend == "on" or legend == "potential":
ax3.legend(loc="best", fontsize=10, frameon=False)
ax3.axhline(0, ls="--", c="k", alpha=0.5)
### Setting ylims and labeling plot with 'KK-Test' in RR subplot
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
self.KK_ymin = []
self.KK_ymax = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:
self.KK_ymax.append(self.KK_rr_re_max[i])
else:
self.KK_ymax.append(self.KK_rr_im_max[i])
if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:
self.KK_ymin.append(self.KK_rr_re_min[i])
else:
self.KK_ymin.append(self.KK_rr_im_min[i])
if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:
ax1.set_ylim(
self.KK_ymin[0] * 100 * 1.5, np.abs(self.KK_ymin[0]) * 100 * 1.5
)
if legend == "on":
ax1.annotate(
"Lin-KK, #1",
xy=[
np.min(np.log10(self.df[0].f)),
np.abs(self.KK_ymin[0]) * 100 * 1.3,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax1.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[0].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[0].f)),
np.abs(self.KK_ymin[0]) * 100 * 1.3,
],
color="k",
fontweight="bold",
)
elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:
ax1.set_ylim(
np.negative(self.KK_ymax[0]) * 100 * 1.5,
np.abs(self.KK_ymax[0]) * 100 * 1.5,
)
if legend == "on":
ax1.annotate(
"Lin-KK, #1",
xy=[
np.min(np.log10(self.df[0].f)),
np.abs(self.KK_ymax[0]) * 100 * 1.3,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax1.annotate(
"Lin-KK, ("
+ str(np.round(np.average(self.df[0].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[0].f)),
self.KK_ymax[0] * 100 * 1.3,
],
color="k",
fontweight="bold",
)
if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:
ax2.set_ylim(
self.KK_ymin[1] * 100 * 1.5, np.abs(self.KK_ymin[1]) * 100 * 1.5
)
if legend == "on":
ax2.annotate(
"Lin-KK, #2",
xy=[
np.min(np.log10(self.df[1].f)),
np.max(np.abs(self.KK_ymin[1])) * 100 * 1.3,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax2.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[1].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[1].f)),
np.max(np.abs(self.KK_ymin[1])) * 100 * 1.3,
],
color="k",
fontweight="bold",
)
                elif np.abs(self.KK_ymin[1]) < self.KK_ymax[1]:
ax2.set_ylim(
np.negative(self.KK_ymax[1]) * 100 * 1.5,
np.abs(self.KK_ymax[1]) * 100 * 1.5,
)
if legend == "on":
ax2.annotate(
"Lin-KK, #2",
xy=[
np.min(np.log10(self.df[1].f)),
self.KK_ymax[1] * 100 * 1.3,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax2.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[1].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[1].f)),
self.KK_ymax[1] * 100 * 1.3,
],
color="k",
fontweight="bold",
)
if np.abs(self.KK_ymin[2]) > self.KK_ymax[2]:
ax3.set_ylim(
self.KK_ymin[2] * 100 * 1.5, np.abs(self.KK_ymin[2]) * 100 * 1.5
)
if legend == "on":
ax3.annotate(
"Lin-KK, #3",
xy=[
np.min(np.log10(self.df[2].f)),
np.abs(self.KK_ymin[2]) * 100 * 1.3,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax3.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[2].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[2].f)),
np.abs(self.KK_ymin[2]) * 100 * 1.3,
],
color="k",
fontweight="bold",
)
elif np.abs(self.KK_ymin[2]) < self.KK_ymax[2]:
ax3.set_ylim(
                        np.negative(self.KK_ymax[2]) * 100 * 1.5,
np.abs(self.KK_ymax[2]) * 100 * 1.5,
)
if legend == "on":
ax3.annotate(
"Lin-KK, #3",
xy=[
np.min(np.log10(self.df[2].f)),
np.abs(self.KK_ymax[2]) * 100 * 1.3,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax3.annotate(
"Lin-KK, ("
+ str(np.round(np.average(self.df[2].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[2].f)),
self.KK_ymax[2] * 100 * 1.3,
],
color="k",
fontweight="bold",
)
# Save Figure
if savefig != "none":
fig.savefig(savefig)
### 4 Cycles
elif len(self.df) == 4:
fig = figure(figsize=(12, 3.8), dpi=120, facecolor="w", edgecolor="k")
fig.subplots_adjust(
left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95
)
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222)
ax3 = fig.add_subplot(223)
ax4 = fig.add_subplot(224)
# cycle 1
ax1.plot(
np.log10(self.df[0].f),
self.KK_rr_re[0] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax1.plot(
np.log10(self.df[0].f),
self.KK_rr_im[0] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
ax1.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=18)
if legend == "on" or legend == "potential":
ax1.legend(loc="best", fontsize=10, frameon=False)
ax1.axhline(0, ls="--", c="k", alpha=0.5)
# Cycle 2
ax2.plot(
np.log10(self.df[1].f),
self.KK_rr_re[1] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax2.plot(
np.log10(self.df[1].f),
self.KK_rr_im[1] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
ax2.set_xlabel("log(f) [Hz]")
if legend == "on" or legend == "potential":
ax2.legend(loc="best", fontsize=10, frameon=False)
ax2.axhline(0, ls="--", c="k", alpha=0.5)
# Cycle 3
ax3.plot(
np.log10(self.df[2].f),
self.KK_rr_re[2] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax3.plot(
np.log10(self.df[2].f),
self.KK_rr_im[2] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
ax3.set_xlabel("log(f) [Hz]")
ax3.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=18)
if legend == "on" or legend == "potential":
ax3.legend(loc="best", fontsize=10, frameon=False)
ax3.axhline(0, ls="--", c="k", alpha=0.5)
# Cycle 4
ax4.plot(
np.log10(self.df[3].f),
self.KK_rr_re[3] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax4.plot(
np.log10(self.df[3].f),
self.KK_rr_im[3] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
ax4.set_xlabel("log(f) [Hz]")
if legend == "on" or legend == "potential":
ax4.legend(loc="best", fontsize=10, frameon=False)
ax4.axhline(0, ls="--", c="k", alpha=0.5)
### Setting ylims and labeling plot with 'KK-Test' in RR subplot
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
self.KK_ymin = []
self.KK_ymax = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:
self.KK_ymax.append(self.KK_rr_re_max[i])
else:
self.KK_ymax.append(self.KK_rr_im_max[i])
if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:
self.KK_ymin.append(self.KK_rr_re_min[i])
else:
self.KK_ymin.append(self.KK_rr_im_min[i])
if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:
ax1.set_ylim(
self.KK_ymin[0] * 100 * 1.5, np.abs(self.KK_ymin[0]) * 100 * 1.5
)
if legend == "on":
ax1.annotate(
"Lin-KK, #1",
xy=[
np.min(np.log10(self.df[0].f)),
np.abs(self.KK_ymin[0]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax1.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[0].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[0].f)),
np.abs(self.KK_ymin[0]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:
ax1.set_ylim(
np.negative(self.KK_ymax[0]) * 100 * 1.5,
np.abs(self.KK_ymax[0]) * 100 * 1.5,
)
if legend == "on":
ax1.annotate(
"Lin-KK, #1",
xy=[
np.min(np.log10(self.df[0].f)),
np.abs(self.KK_ymax[0]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax1.annotate(
"Lin-KK, ("
+ str(np.round(np.average(self.df[0].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[0].f)),
self.KK_ymax[0] * 100 * 1.2,
],
color="k",
fontweight="bold",
)
if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:
ax2.set_ylim(
self.KK_ymin[1] * 100 * 1.5, np.abs(self.KK_ymin[1]) * 100 * 1.5
)
if legend == "on":
ax2.annotate(
"Lin-KK, #2",
xy=[
np.min(np.log10(self.df[1].f)),
np.max(np.abs(self.KK_ymin[1])) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax2.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[1].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[1].f)),
np.max(np.abs(self.KK_ymin[1])) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
                elif np.abs(self.KK_ymin[1]) < self.KK_ymax[1]:
ax2.set_ylim(
np.negative(self.KK_ymax[1]) * 100 * 1.5,
np.abs(self.KK_ymax[1]) * 100 * 1.5,
)
if legend == "on":
ax2.annotate(
"Lin-KK, #2",
xy=[
np.min(np.log10(self.df[1].f)),
self.KK_ymax[1] * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax2.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[1].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[1].f)),
self.KK_ymax[1] * 100 * 1.2,
],
color="k",
fontweight="bold",
)
if np.abs(self.KK_ymin[2]) > self.KK_ymax[2]:
ax3.set_ylim(
self.KK_ymin[2] * 100 * 1.5, np.abs(self.KK_ymin[2]) * 100 * 1.5
)
if legend == "on":
ax3.annotate(
"Lin-KK, #3",
xy=[
np.min(np.log10(self.df[2].f)),
np.abs(self.KK_ymin[2]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax3.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[2].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[2].f)),
np.abs(self.KK_ymin[2]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif np.abs(self.KK_ymin[2]) < self.KK_ymax[2]:
ax3.set_ylim(
                        np.negative(self.KK_ymax[2]) * 100 * 1.5,
np.abs(self.KK_ymax[2]) * 100 * 1.5,
)
if legend == "on":
ax3.annotate(
"Lin-KK, #3",
xy=[
np.min(np.log10(self.df[2].f)),
np.abs(self.KK_ymax[2]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax3.annotate(
"Lin-KK, ("
+ str(np.round(np.average(self.df[2].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[2].f)),
self.KK_ymax[2] * 100 * 1.2,
],
color="k",
fontweight="bold",
)
if np.abs(self.KK_ymin[3]) > self.KK_ymax[3]:
ax4.set_ylim(
self.KK_ymin[3] * 100 * 1.5, np.abs(self.KK_ymin[3]) * 100 * 1.5
)
if legend == "on":
ax4.annotate(
"Lin-KK, #4",
xy=[
np.min(np.log10(self.df[3].f)),
np.abs(self.KK_ymin[3]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax4.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[3].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[3].f)),
np.abs(self.KK_ymin[3]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif np.abs(self.KK_ymin[3]) < self.KK_ymax[3]:
ax4.set_ylim(
np.negative(self.KK_ymax[3]) * 100 * 1.5,
np.abs(self.KK_ymax[3]) * 100 * 1.5,
)
if legend == "on":
ax4.annotate(
"Lin-KK, #4",
xy=[
np.min(np.log10(self.df[3].f)),
np.abs(self.KK_ymax[3]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax4.annotate(
"Lin-KK, ("
+ str(np.round(np.average(self.df[3].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[3].f)),
self.KK_ymax[3] * 100 * 1.2,
],
color="k",
fontweight="bold",
)
# Save Figure
if savefig != "none":
fig.savefig(savefig)
### 5 Cycles
elif len(self.df) == 5:
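            # Five cycles: one residual subplot per cycle on a 2x3 grid (positions 231-235); the last grid cell stays empty.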
fig = figure(figsize=(12, 3.8), dpi=120, facecolor="w", edgecolor="k")
fig.subplots_adjust(
left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95
)
ax1 = fig.add_subplot(231)
ax2 = fig.add_subplot(232)
ax3 = fig.add_subplot(233)
ax4 = fig.add_subplot(234)
ax5 = fig.add_subplot(235)
# cycle 1
ax1.plot(
np.log10(self.df[0].f),
self.KK_rr_re[0] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax1.plot(
np.log10(self.df[0].f),
self.KK_rr_im[0] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
ax1.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=18)
if legend == "on" or legend == "potential":
ax1.legend(loc="best", fontsize=10, frameon=False)
ax1.axhline(0, ls="--", c="k", alpha=0.5)
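            # Each subplot shows the Lin-KK relative residuals of the real (KK_rr_re) and imaginary
            # (KK_rr_im) parts in percent against log10(f), with a dashed zero line as reference.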
# Cycle 2
ax2.plot(
np.log10(self.df[1].f),
self.KK_rr_re[1] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax2.plot(
np.log10(self.df[1].f),
self.KK_rr_im[1] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
if legend == "on" or legend == "potential":
ax2.legend(loc="best", fontsize=10, frameon=False)
ax2.axhline(0, ls="--", c="k", alpha=0.5)
# Cycle 3
ax3.plot(
np.log10(self.df[2].f),
self.KK_rr_re[2] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax3.plot(
np.log10(self.df[2].f),
self.KK_rr_im[2] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
ax3.set_xlabel("log(f) [Hz]")
if legend == "on" or legend == "potential":
ax3.legend(loc="best", fontsize=10, frameon=False)
ax3.axhline(0, ls="--", c="k", alpha=0.5)
# Cycle 4
ax4.plot(
np.log10(self.df[3].f),
self.KK_rr_re[3] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax4.plot(
np.log10(self.df[3].f),
self.KK_rr_im[3] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
ax4.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=18)
ax4.set_xlabel("log(f) [Hz]")
if legend == "on" or legend == "potential":
ax4.legend(loc="best", fontsize=10, frameon=False)
ax4.axhline(0, ls="--", c="k", alpha=0.5)
# Cycle 5
ax5.plot(
np.log10(self.df[4].f),
self.KK_rr_re[4] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax5.plot(
np.log10(self.df[4].f),
self.KK_rr_im[4] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
ax5.set_xlabel("log(f) [Hz]")
if legend == "on" or legend == "potential":
ax5.legend(loc="best", fontsize=10, frameon=False)
ax5.axhline(0, ls="--", c="k", alpha=0.5)
### Setting ylims and labeling plot with 'KK-Test' in RR subplot
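            # Collect the per-cycle extremes of both residual traces so every axis can be scaled
            # symmetrically around zero and the Lin-KK label placed just below the upper limit.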
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
self.KK_ymin = []
self.KK_ymax = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:
self.KK_ymax.append(self.KK_rr_re_max[i])
else:
self.KK_ymax.append(self.KK_rr_im_max[i])
if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:
self.KK_ymin.append(self.KK_rr_re_min[i])
else:
self.KK_ymin.append(self.KK_rr_im_min[i])
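            # Per axis: the y-limits span 1.5x the dominant extreme (in %) on both sides of zero,
            # and the annotation sits at the lowest decade of f at 1.2x that extreme.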
if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:
ax1.set_ylim(
self.KK_ymin[0] * 100 * 1.5, np.abs(self.KK_ymin[0]) * 100 * 1.5
)
if legend == "on":
ax1.annotate(
"Lin-KK, #1",
xy=[
np.min(np.log10(self.df[0].f)),
np.abs(self.KK_ymin[0]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax1.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[0].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[0].f)),
np.abs(self.KK_ymin[0]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:
ax1.set_ylim(
np.negative(self.KK_ymax[0]) * 100 * 1.5,
np.abs(self.KK_ymax[0]) * 100 * 1.5,
)
if legend == "on":
ax1.annotate(
"Lin-KK, #1",
xy=[
np.min(np.log10(self.df[0].f)),
np.abs(self.KK_ymax[0]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax1.annotate(
"Lin-KK, ("
+ str(np.round(np.average(self.df[0].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[0].f)),
self.KK_ymax[0] * 100 * 1.2,
],
color="k",
fontweight="bold",
)
if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:
ax2.set_ylim(
self.KK_ymin[1] * 100 * 1.5, np.abs(self.KK_ymin[1]) * 100 * 1.5
)
if legend == "on":
ax2.annotate(
"Lin-KK, #2",
xy=[
np.min(np.log10(self.df[1].f)),
                            np.abs(self.KK_ymin[1]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax2.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[1].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[1].f)),
                            np.abs(self.KK_ymin[1]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
            elif np.abs(self.KK_ymin[1]) < self.KK_ymax[1]:
ax2.set_ylim(
np.negative(self.KK_ymax[1]) * 100 * 1.5,
np.abs(self.KK_ymax[1]) * 100 * 1.5,
)
if legend == "on":
ax2.annotate(
"Lin-KK, #2",
xy=[
np.min(np.log10(self.df[1].f)),
self.KK_ymax[1] * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax2.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[1].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[1].f)),
self.KK_ymax[1] * 100 * 1.2,
],
color="k",
fontweight="bold",
)
if np.abs(self.KK_ymin[2]) > self.KK_ymax[2]:
ax3.set_ylim(
self.KK_ymin[2] * 100 * 1.5, np.abs(self.KK_ymin[2]) * 100 * 1.5
)
if legend == "on":
ax3.annotate(
"Lin-KK, #3",
xy=[
np.min(np.log10(self.df[2].f)),
np.abs(self.KK_ymin[2]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax3.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[2].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[2].f)),
np.abs(self.KK_ymin[2]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif np.abs(self.KK_ymin[2]) < self.KK_ymax[2]:
ax3.set_ylim(
                    np.negative(self.KK_ymax[2]) * 100 * 1.5,
np.abs(self.KK_ymax[2]) * 100 * 1.5,
)
if legend == "on":
ax3.annotate(
"Lin-KK, #3",
xy=[
np.min(np.log10(self.df[2].f)),
np.abs(self.KK_ymax[2]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax3.annotate(
"Lin-KK, ("
+ str(np.round(np.average(self.df[2].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[2].f)),
self.KK_ymax[2] * 100 * 1.2,
],
color="k",
fontweight="bold",
)
if np.abs(self.KK_ymin[3]) > self.KK_ymax[3]:
ax4.set_ylim(
self.KK_ymin[3] * 100 * 1.5, np.abs(self.KK_ymin[3]) * 100 * 1.5
)
if legend == "on":
ax4.annotate(
"Lin-KK, #4",
xy=[
np.min(np.log10(self.df[3].f)),
np.abs(self.KK_ymin[3]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax4.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[3].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[3].f)),
np.abs(self.KK_ymin[3]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif np.abs(self.KK_ymin[3]) < self.KK_ymax[3]:
ax4.set_ylim(
np.negative(self.KK_ymax[3]) * 100 * 1.5,
np.abs(self.KK_ymax[3]) * 100 * 1.5,
)
if legend == "on":
ax4.annotate(
"Lin-KK, #4",
xy=[
np.min(np.log10(self.df[3].f)),
np.abs(self.KK_ymax[3]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax4.annotate(
"Lin-KK, ("
+ str(np.round(np.average(self.df[3].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[3].f)),
self.KK_ymax[3] * 100 * 1.2,
],
color="k",
fontweight="bold",
)
if np.abs(self.KK_ymin[4]) > self.KK_ymax[4]:
ax5.set_ylim(
self.KK_ymin[4] * 100 * 1.5, np.abs(self.KK_ymin[4]) * 100 * 1.5
)
if legend == "on":
ax5.annotate(
"Lin-KK, #5",
xy=[
np.min(np.log10(self.df[4].f)),
np.abs(self.KK_ymin[4]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax5.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[4].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[4].f)),
np.abs(self.KK_ymin[4]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif np.abs(self.KK_ymin[4]) < self.KK_ymax[4]:
ax5.set_ylim(
np.negative(self.KK_ymax[4]) * 100 * 1.5,
np.abs(self.KK_ymax[4]) * 100 * 1.5,
)
if legend == "on":
ax5.annotate(
"Lin-KK, #5",
xy=[
np.min(np.log10(self.df[4].f)),
np.abs(self.KK_ymax[4]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax5.annotate(
"Lin-KK, ("
+ str(np.round(np.average(self.df[4].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[4].f)),
self.KK_ymax[4] * 100 * 1.2,
],
color="k",
fontweight="bold",
)
# Save Figure
if savefig != "none":
fig.savefig(savefig)
### 6 Cycles
elif len(self.df) == 6:
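            # Six cycles: one residual subplot per cycle, filling the full 2x3 grid (positions 231-236).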
fig = figure(figsize=(12, 3.8), dpi=120, facecolor="w", edgecolor="k")
fig.subplots_adjust(
left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95
)
ax1 = fig.add_subplot(231)
ax2 = fig.add_subplot(232)
ax3 = fig.add_subplot(233)
ax4 = fig.add_subplot(234)
ax5 = fig.add_subplot(235)
ax6 = fig.add_subplot(236)
# cycle 1
ax1.plot(
np.log10(self.df[0].f),
self.KK_rr_re[0] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax1.plot(
np.log10(self.df[0].f),
self.KK_rr_im[0] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
ax1.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=15)
if legend == "on" or legend == "potential":
ax1.legend(loc="best", fontsize=10, frameon=False)
ax1.axhline(0, ls="--", c="k", alpha=0.5)
# Cycle 2
ax2.plot(
np.log10(self.df[1].f),
self.KK_rr_re[1] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax2.plot(
np.log10(self.df[1].f),
self.KK_rr_im[1] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
if legend == "on" or legend == "potential":
ax2.legend(loc="best", fontsize=10, frameon=False)
ax2.axhline(0, ls="--", c="k", alpha=0.5)
# Cycle 3
ax3.plot(
np.log10(self.df[2].f),
self.KK_rr_re[2] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax3.plot(
np.log10(self.df[2].f),
self.KK_rr_im[2] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
if legend == "on" or legend == "potential":
ax3.legend(loc="best", fontsize=10, frameon=False)
ax3.axhline(0, ls="--", c="k", alpha=0.5)
# Cycle 4
ax4.plot(
np.log10(self.df[3].f),
self.KK_rr_re[3] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax4.plot(
np.log10(self.df[3].f),
self.KK_rr_im[3] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
ax4.set_xlabel("log(f) [Hz]")
ax4.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=15)
if legend == "on" or legend == "potential":
ax4.legend(loc="best", fontsize=10, frameon=False)
ax4.axhline(0, ls="--", c="k", alpha=0.5)
# Cycle 5
ax5.plot(
np.log10(self.df[4].f),
self.KK_rr_re[4] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax5.plot(
np.log10(self.df[4].f),
self.KK_rr_im[4] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
ax5.set_xlabel("log(f) [Hz]")
if legend == "on" or legend == "potential":
ax5.legend(loc="best", fontsize=10, frameon=False)
ax5.axhline(0, ls="--", c="k", alpha=0.5)
# Cycle 6
ax6.plot(
np.log10(self.df[5].f),
self.KK_rr_re[5] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax6.plot(
np.log10(self.df[5].f),
self.KK_rr_im[5] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
ax6.set_xlabel("log(f) [Hz]")
if legend == "on" or legend == "potential":
ax6.legend(loc="best", fontsize=10, frameon=False)
ax6.axhline(0, ls="--", c="k", alpha=0.5)
### Setting ylims and labeling plot with 'KK-Test' in RR subplot
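            # Same bookkeeping as for five cycles: per-cycle residual extremes drive the symmetric
            # y-limits and the placement of the Lin-KK annotation in each subplot.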
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
self.KK_ymin = []
self.KK_ymax = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:
self.KK_ymax.append(self.KK_rr_re_max[i])
else:
self.KK_ymax.append(self.KK_rr_im_max[i])
if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:
self.KK_ymin.append(self.KK_rr_re_min[i])
else:
self.KK_ymin.append(self.KK_rr_im_min[i])
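            # Apply the same per-axis rule as above: symmetric y-limits at 1.5x the dominant residual
            # extreme, with the cycle number or average potential annotated at the lowest frequency.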
if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:
ax1.set_ylim(
self.KK_ymin[0] * 100 * 1.5, np.abs(self.KK_ymin[0]) * 100 * 1.5
)
if legend == "on":
ax1.annotate(
"Lin-KK, #1",
xy=[
np.min(np.log10(self.df[0].f)),
np.abs(self.KK_ymin[0]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax1.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[0].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[0].f)),
np.abs(self.KK_ymin[0]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:
ax1.set_ylim(
np.negative(self.KK_ymax[0]) * 100 * 1.5,
np.abs(self.KK_ymax[0]) * 100 * 1.5,
)
if legend == "on":
ax1.annotate(
"Lin-KK, #1",
xy=[
np.min(np.log10(self.df[0].f)),
np.abs(self.KK_ymax[0]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax1.annotate(
"Lin-KK, ("
+ str(np.round(np.average(self.df[0].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[0].f)),
self.KK_ymax[0] * 100 * 1.2,
],
color="k",
fontweight="bold",
)
if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:
ax2.set_ylim(
self.KK_ymin[1] * 100 * 1.5, np.abs(self.KK_ymin[1]) * 100 * 1.5
)
if legend == "on":
ax2.annotate(
"Lin-KK, #2",
xy=[
np.min(np.log10(self.df[1].f)),
                            np.abs(self.KK_ymin[1]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax2.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[1].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[1].f)),
                            np.abs(self.KK_ymin[1]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
            elif np.abs(self.KK_ymin[1]) < self.KK_ymax[1]:
ax2.set_ylim(
np.negative(self.KK_ymax[1]) * 100 * 1.5,
np.abs(self.KK_ymax[1]) * 100 * 1.5,
)
if legend == "on":
ax2.annotate(
"Lin-KK, #2",
xy=[
np.min(np.log10(self.df[1].f)),
self.KK_ymax[1] * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax2.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[1].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[1].f)),
self.KK_ymax[1] * 100 * 1.2,
],
color="k",
fontweight="bold",
)
if np.abs(self.KK_ymin[2]) > self.KK_ymax[2]:
ax3.set_ylim(
self.KK_ymin[2] * 100 * 1.5, np.abs(self.KK_ymin[2]) * 100 * 1.5
)
if legend == "on":
ax3.annotate(
"Lin-KK, #3",
xy=[
np.min(np.log10(self.df[2].f)),
np.abs(self.KK_ymin[2]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax3.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[2].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[2].f)),
np.abs(self.KK_ymin[2]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif np.abs(self.KK_ymin[2]) < self.KK_ymax[2]:
ax3.set_ylim(
                    np.negative(self.KK_ymax[2]) * 100 * 1.5,
np.abs(self.KK_ymax[2]) * 100 * 1.5,
)
if legend == "on":
ax3.annotate(
"Lin-KK, #3",
xy=[
np.min(np.log10(self.df[2].f)),
np.abs(self.KK_ymax[2]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax3.annotate(
"Lin-KK, ("
+ str(np.round(np.average(self.df[2].E_avg), 2))
+ " V)",
xy=[
                            np.min(np.log10(self.df[2].f)),
                            self.KK_ymax[2] * 100 * 1.2,
                        ],
                        color="k",
                        fontweight="bold",
                    )